Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 23:52:03 +00:00)

Commit 8f882b270e: Merge branch 'master' into ADQM-956

.github/workflows/master.yml (vendored, 43 changed lines)
@@ -850,6 +850,48 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
+  BuilderBinRISCV64:
+    needs: [DockerHubPush]
+    runs-on: [self-hosted, builder]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/build_check
+          IMAGES_PATH=${{runner.temp}}/images_path
+          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
+          CACHES_PATH=${{runner.temp}}/../ccaches
+          BUILD_NAME=binary_riscv64
+          EOF
+      - name: Download changed images
+        uses: actions/download-artifact@v3
+        with:
+          name: changed_images
+          path: ${{ env.IMAGES_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+          submodules: true
+          fetch-depth: 0 # otherwise we will have no info about contributors
+      - name: Build
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
+      - name: Upload build URLs to artifacts
+        if: ${{ success() || failure() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 ############################################################################################
 ##################################### Docker images #######################################
 ############################################################################################
@@ -932,6 +974,7 @@ jobs:
       - BuilderBinDarwinAarch64
       - BuilderBinFreeBSD
       - BuilderBinPPC64
+      - BuilderBinRISCV64
       - BuilderBinAmd64Compat
       - BuilderBinAarch64V80Compat
       - BuilderBinClangTidy
.github/workflows/nightly.yml (vendored, 45 changed lines)
@@ -75,51 +75,6 @@ jobs:
   Codebrowser:
     needs: [DockerHubPush]
     uses: ./.github/workflows/woboq.yml
-  BuilderCoverity:
-    needs: DockerHubPush
-    runs-on: [self-hosted, builder]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          BUILD_NAME=coverity
-          CACHES_PATH=${{runner.temp}}/../ccaches
-          IMAGES_PATH=${{runner.temp}}/images_path
-          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
-          TEMP_PATH=${{runner.temp}}/build_check
-          EOF
-          echo "COVERITY_TOKEN=${{ secrets.COVERITY_TOKEN }}" >> "$GITHUB_ENV"
-      - name: Download changed images
-        uses: actions/download-artifact@v3
-        with:
-          name: changed_images
-          path: ${{ env.IMAGES_PATH }}
-      - name: Check out repository code
-        uses: ClickHouse/checkout@v1
-        with:
-          clear-repository: true
-          submodules: true
-      - name: Build
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
-      - name: Upload Coverity Analysis
-        if: ${{ success() || failure() }}
-        run: |
-          curl --form token="${COVERITY_TOKEN}" \
-            --form email='security+coverity@clickhouse.com' \
-            --form file="@$TEMP_PATH/$BUILD_NAME/coverity-scan.tar.gz" \
-            --form version="${GITHUB_REF#refs/heads/}-${GITHUB_SHA::6}" \
-            --form description="Nighly Scan: $(date +'%Y-%m-%dT%H:%M:%S')" \
-            https://scan.coverity.com/builds?project=ClickHouse%2FClickHouse
-      - name: Cleanup
-        if: always()
-        run: |
-          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
-          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
-          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
   SonarCloud:
     runs-on: [self-hosted, builder]
     env:
.github/workflows/pull_request.yml (vendored, 42 changed lines)
@@ -911,6 +911,47 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
+  BuilderBinRISCV64:
+    needs: [DockerHubPush, FastTest, StyleCheck]
+    runs-on: [self-hosted, builder]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/build_check
+          IMAGES_PATH=${{runner.temp}}/images_path
+          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
+          CACHES_PATH=${{runner.temp}}/../ccaches
+          BUILD_NAME=binary_riscv64
+          EOF
+      - name: Download changed images
+        uses: actions/download-artifact@v3
+        with:
+          name: changed_images
+          path: ${{ env.IMAGES_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+          submodules: true
+      - name: Build
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
+      - name: Upload build URLs to artifacts
+        if: ${{ success() || failure() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 ############################################################################################
 ##################################### Docker images #######################################
 ############################################################################################
@@ -992,6 +1033,7 @@ jobs:
       - BuilderBinDarwinAarch64
       - BuilderBinFreeBSD
       - BuilderBinPPC64
+      - BuilderBinRISCV64
       - BuilderBinAmd64Compat
       - BuilderBinAarch64V80Compat
       - BuilderBinClangTidy
@@ -87,7 +87,6 @@ if (ENABLE_FUZZING)
     set (ENABLE_CLICKHOUSE_ODBC_BRIDGE OFF)
     set (ENABLE_LIBRARIES 0)
     set (ENABLE_SSL 1)
-    set (USE_UNWIND ON)
     set (ENABLE_EMBEDDED_COMPILER 0)
     set (ENABLE_EXAMPLES 0)
     set (ENABLE_UTILS 0)

@@ -344,9 +343,9 @@ if (COMPILER_CLANG)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths")
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths")
 
-    if (NOT ENABLE_TESTS AND NOT SANITIZE)
+    if (NOT ENABLE_TESTS AND NOT SANITIZE AND OS_LINUX)
         # https://clang.llvm.org/docs/ThinLTO.html
-        # Applies to clang only.
+        # Applies to clang and linux only.
         # Disabled when building with tests or sanitizers.
         option(ENABLE_THINLTO "Clang-specific link time optimization" ON)
     endif()
@@ -15,9 +15,8 @@
 
 static thread_local uint64_t current_tid = 0;
-uint64_t getThreadId()
-{
-    if (!current_tid)
+
+static void setCurrentThreadId()
+{
     {
 #if defined(OS_ANDROID)
         current_tid = gettid();

@@ -35,5 +34,15 @@ uint64_t getThreadId()
 #endif
     }
 
+uint64_t getThreadId()
+{
+    if (!current_tid)
+        setCurrentThreadId();
+
     return current_tid;
 }
+
+void updateCurrentThreadIdAfterFork()
+{
+    setCurrentThreadId();
+}
@@ -3,3 +3,5 @@
 
 /// Obtain thread id from OS. The value is cached in thread local variable.
 uint64_t getThreadId();
+
+void updateCurrentThreadIdAfterFork();
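The new `updateCurrentThreadIdAfterFork()` hook exists because `current_tid` is cached in a thread-local variable: after `fork()` the child process inherits the parent's cached value, so `getThreadId()` would keep reporting the parent's thread id. The diff does not show where the hook is registered; the sketch below is a hypothetical call site using `pthread_atfork`, not code from this commit:

```cpp
#include <pthread.h>

#include "getThreadId.h"

// Hypothetical registration: run updateCurrentThreadIdAfterFork() in the
// child right after fork(), so the cached thread id is re-read from the OS
// instead of keeping the parent's stale value.
static void registerThreadIdForkHandler()
{
    pthread_atfork(/* prepare = */ nullptr,
                   /* parent  = */ nullptr,
                   /* child   = */ updateCurrentThreadIdAfterFork);
}
```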
@@ -15,6 +15,7 @@ set(CMAKE_OSX_DEPLOYMENT_TARGET 10.15)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
 find_package(Threads REQUIRED)
 
+include (cmake/unwind.cmake)
 include (cmake/cxx.cmake)
 link_libraries(global-group)
 
@@ -18,6 +18,9 @@ if (NOT PARALLEL_COMPILE_JOBS AND TOTAL_PHYSICAL_MEMORY AND MAX_COMPILER_MEMORY)
     if (NOT PARALLEL_COMPILE_JOBS)
         set (PARALLEL_COMPILE_JOBS 1)
     endif ()
+    if (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+        set (PARALLEL_COMPILE_JOBS_LESS TRUE)
+    endif()
 endif ()
 
 if (PARALLEL_COMPILE_JOBS AND (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES))

@@ -33,6 +36,9 @@ if (NOT PARALLEL_LINK_JOBS AND TOTAL_PHYSICAL_MEMORY AND MAX_LINKER_MEMORY)
     if (NOT PARALLEL_LINK_JOBS)
         set (PARALLEL_LINK_JOBS 1)
     endif ()
+    if (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+        set (PARALLEL_LINK_JOBS_LESS TRUE)
+    endif()
 endif ()
 
 # ThinLTO provides its own parallel linking

@@ -56,4 +62,10 @@ if (PARALLEL_COMPILE_JOBS OR PARALLEL_LINK_JOBS)
     message(STATUS
         "${CMAKE_CURRENT_SOURCE_DIR}: Have ${TOTAL_PHYSICAL_MEMORY} megabytes of memory.
         Limiting concurrent linkers jobs to ${PARALLEL_LINK_JOBS} and compiler jobs to ${PARALLEL_COMPILE_JOBS} (system has ${NUMBER_OF_LOGICAL_CORES} logical cores)")
+    if (PARALLEL_COMPILE_JOBS_LESS)
+        message(WARNING "The autocalculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.")
+    endif()
+    if (PARALLEL_LINK_JOBS_LESS)
+        message(WARNING "The autocalculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.")
+    endif()
 endif ()
@@ -33,6 +33,18 @@ if (CMAKE_CROSSCOMPILING)
     elseif (ARCH_PPC64LE)
         set (ENABLE_GRPC OFF CACHE INTERNAL "")
         set (ENABLE_SENTRY OFF CACHE INTERNAL "")
+    elseif (ARCH_RISCV64)
+        # RISC-V support is preliminary
+        set (GLIBC_COMPATIBILITY OFF CACHE INTERNAL "")
+        set (ENABLE_LDAP OFF CACHE INTERNAL "")
+        set (OPENSSL_NO_ASM ON CACHE INTERNAL "")
+        set (ENABLE_JEMALLOC ON CACHE INTERNAL "")
+        set (ENABLE_PARQUET OFF CACHE INTERNAL "")
+        set (ENABLE_GRPC OFF CACHE INTERNAL "")
+        set (ENABLE_HDFS OFF CACHE INTERNAL "")
+        set (ENABLE_MYSQL OFF CACHE INTERNAL "")
+        # It might be ok, but we need to update 'sysroot'
+        set (ENABLE_RUST OFF CACHE INTERNAL "")
     elseif (ARCH_S390X)
         set (ENABLE_GRPC OFF CACHE INTERNAL "")
         set (ENABLE_SENTRY OFF CACHE INTERNAL "")
@@ -1,13 +1 @@
-option (USE_UNWIND "Enable libunwind (better stacktraces)" ${ENABLE_LIBRARIES})
-
-if (USE_UNWIND)
     add_subdirectory(contrib/libunwind-cmake)
-    set (UNWIND_LIBRARIES unwind)
-    set (EXCEPTION_HANDLING_LIBRARY ${UNWIND_LIBRARIES})
-
-    message (STATUS "Using libunwind: ${UNWIND_LIBRARIES}")
-else ()
-    set (EXCEPTION_HANDLING_LIBRARY gcc_eh)
-endif ()
-
-message (STATUS "Using exception handler: ${EXCEPTION_HANDLING_LIBRARY}")
@@ -170,16 +170,13 @@ endif ()
 
 target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)
 
-if (USE_UNWIND)
 # jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
 # The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
 # At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracing.
-
+#
 # ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
 
 target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)
 target_link_libraries (_jemalloc PRIVATE unwind)
-endif ()
 
 # for RTLD_NEXT
 target_compile_options(_jemalloc PRIVATE -D_GNU_SOURCE)
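The comment block retained above describes the libgcc-flavoured backtrace ABI that `JEMALLOC_PROF_LIBGCC=1` selects; LLVM libunwind implements the same `_Unwind_*` entry points, which is why linking `_jemalloc` directly against `unwind` now works unconditionally. A minimal illustrative sketch of that ABI (not part of the commit):

```cpp
#include <unwind.h>

#include <cstdio>

// Walk the current stack through the libgcc-style _Unwind_* interface,
// the same entry points jemalloc's profiler uses under JEMALLOC_PROF_LIBGCC.
static _Unwind_Reason_Code printFrame(struct _Unwind_Context * context, void * /*arg*/)
{
    std::printf("frame ip: %p\n", reinterpret_cast<void *>(_Unwind_GetIP(context)));
    return _URC_NO_REASON;  // keep unwinding to the next frame
}

int main()
{
    _Unwind_Backtrace(printFrame, nullptr);
    return 0;
}
```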
@@ -61,9 +61,7 @@ target_include_directories(cxx SYSTEM BEFORE PUBLIC $<$<COMPILE_LANGUAGE:CXX>:$
 target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)
 
 # Enable capturing stack traces for all exceptions.
-if (USE_UNWIND)
     target_compile_definitions(cxx PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
-endif ()
 
 if (USE_MUSL)
     target_compile_definitions(cxx PUBLIC -D_LIBCPP_HAS_MUSL_LIBC=1)
@@ -35,12 +35,10 @@ target_include_directories(cxxabi SYSTEM BEFORE
 )
 target_compile_definitions(cxxabi PRIVATE -D_LIBCPP_BUILDING_LIBRARY)
 target_compile_options(cxxabi PRIVATE -nostdinc++ -fno-sanitize=undefined -Wno-macro-redefined) # If we don't disable UBSan, infinite recursion happens in dynamic_cast.
-target_link_libraries(cxxabi PUBLIC ${EXCEPTION_HANDLING_LIBRARY})
+target_link_libraries(cxxabi PUBLIC unwind)
 
 # Enable capturing stack traces for all exceptions.
-if (USE_UNWIND)
     target_compile_definitions(cxxabi PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
-endif ()
 
 install(
     TARGETS cxxabi
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
     esac
 
 ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
-ARG VERSION="23.6.1.1524"
+ARG VERSION="23.6.2.18"
 ARG PACKAGES="clickhouse-keeper"
 
 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -49,8 +49,8 @@ ENV CARGO_HOME=/rust/cargo
 ENV PATH="/rust/cargo/bin:${PATH}"
 RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
     chmod 777 -R /rust && \
-    rustup toolchain install nightly && \
-    rustup default nightly && \
+    rustup toolchain install nightly-2023-07-04 && \
+    rustup default nightly-2023-07-04 && \
     rustup component add rust-src && \
     rustup target add aarch64-unknown-linux-gnu && \
     rustup target add x86_64-apple-darwin && \
@@ -138,6 +138,7 @@ def parse_env_variables(
     ARM_V80COMPAT_SUFFIX = "-aarch64-v80compat"
     FREEBSD_SUFFIX = "-freebsd"
     PPC_SUFFIX = "-ppc64le"
+    RISCV_SUFFIX = "-riscv64"
     AMD64_COMPAT_SUFFIX = "-amd64-compat"
 
     result = []

@@ -150,6 +151,7 @@ def parse_env_variables(
     is_cross_arm = compiler.endswith(ARM_SUFFIX)
     is_cross_arm_v80compat = compiler.endswith(ARM_V80COMPAT_SUFFIX)
     is_cross_ppc = compiler.endswith(PPC_SUFFIX)
+    is_cross_riscv = compiler.endswith(RISCV_SUFFIX)
     is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
     is_amd64_compat = compiler.endswith(AMD64_COMPAT_SUFFIX)
 

@@ -206,6 +208,11 @@ def parse_env_variables(
         cmake_flags.append(
             "-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake"
         )
+    elif is_cross_riscv:
+        cc = compiler[: -len(RISCV_SUFFIX)]
+        cmake_flags.append(
+            "-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-riscv64.cmake"
+        )
     elif is_amd64_compat:
         cc = compiler[: -len(AMD64_COMPAT_SUFFIX)]
         result.append("DEB_ARCH=amd64")

@@ -370,6 +377,7 @@ def parse_args() -> argparse.Namespace:
             "clang-16-aarch64",
             "clang-16-aarch64-v80compat",
             "clang-16-ppc64le",
+            "clang-16-riscv64",
             "clang-16-amd64-compat",
             "clang-16-freebsd",
         ),
@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.6.1.1524"
+ARG VERSION="23.6.2.18"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -23,7 +23,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.6.1.1524"
+ARG VERSION="23.6.2.18"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 # set non-empty deb_location_url url to create a docker image
@@ -166,7 +166,6 @@ function run_cmake
         "-DENABLE_UTILS=0"
         "-DENABLE_EMBEDDED_COMPILER=0"
         "-DENABLE_THINLTO=0"
-        "-DUSE_UNWIND=1"
         "-DENABLE_NURAFT=1"
         "-DENABLE_SIMDJSON=1"
         "-DENABLE_JEMALLOC=1"
@@ -291,7 +291,7 @@ quit
     if [ "$server_died" == 1 ]
     then
         # The server has died.
-        if ! rg --text -o 'Received signal.*|Logical error.*|Assertion.*failed|Failed assertion.*|.*runtime error: .*|.*is located.*|(SUMMARY|ERROR): [a-zA-Z]+Sanitizer:.*|.*_LIBCPP_ASSERT.*' server.log > description.txt
+        if ! rg --text -o 'Received signal.*|Logical error.*|Assertion.*failed|Failed assertion.*|.*runtime error: .*|.*is located.*|(SUMMARY|ERROR): [a-zA-Z]+Sanitizer:.*|.*_LIBCPP_ASSERT.*|.*Child process was terminated by signal 9.*' server.log > description.txt
         then
             echo "Lost connection to server. See the logs." > description.txt
         fi
@@ -47,11 +47,13 @@ ENV TZ=Etc/UTC
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
 
 ENV DOCKER_CHANNEL stable
+# Unpin the docker version after the release 24.0.3 is released
+# https://github.com/moby/moby/issues/45770#issuecomment-1618255130
 RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
     && add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -c -s) ${DOCKER_CHANNEL}" \
     && apt-get update \
     && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
-        docker-ce \
+        docker-ce='5:23.*' \
     && rm -rf \
         /var/lib/apt/lists/* \
         /var/cache/debconf \
@@ -92,8 +92,8 @@ sudo clickhouse stop ||:
 
 for _ in $(seq 1 60); do if [[ $(wget --timeout=1 -q 'localhost:8123' -O-) == 'Ok.' ]]; then sleep 1 ; else break; fi ; done
 
-grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
-pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
+rg -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
+zstd < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &
 
 # Compressed (FIXME: remove once only github actions will be left)
 rm /var/log/clickhouse-server/clickhouse-server.log
@@ -33,7 +33,6 @@ RUN apt-get update -y \
             qemu-user-static \
             sqlite3 \
             sudo \
-            telnet \
             tree \
             unixodbc \
             wget \
@@ -8,8 +8,6 @@ RUN apt-get update -y \
     apt-get install --yes --no-install-recommends \
             bash \
             tzdata \
-            fakeroot \
-            debhelper \
             parallel \
             expect \
             python3 \

@@ -20,7 +18,6 @@ RUN apt-get update -y \
             sudo \
             openssl \
             netcat-openbsd \
-            telnet \
             brotli \
     && apt-get clean
 
@@ -8,8 +8,6 @@ RUN apt-get update -y \
     apt-get install --yes --no-install-recommends \
             bash \
             tzdata \
-            fakeroot \
-            debhelper \
             parallel \
             expect \
             python3 \

@@ -20,7 +18,6 @@ RUN apt-get update -y \
             sudo \
             openssl \
             netcat-openbsd \
-            telnet \
             brotli \
     && apt-get clean
 
@@ -67,6 +67,13 @@ start
 stop
 mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log
 
+# Start server from previous release
+# Let's enable S3 storage by default
+export USE_S3_STORAGE_FOR_MERGE_TREE=1
+# Previous version may not be ready for fault injections
+export ZOOKEEPER_FAULT_INJECTION=0
+configure
+
 # force_sync=false doesn't work correctly on some older versions
 sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
     | sed "s|<force_sync>false</force_sync>|<force_sync>true</force_sync>|" \

@@ -76,17 +83,11 @@ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-serv
 # But we still need default disk because some tables loaded only into it
 sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml \
     | sed "s|<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" \
-    > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
+    > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp
+mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
 sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
 sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
 
-# Start server from previous release
-# Let's enable S3 storage by default
-export USE_S3_STORAGE_FOR_MERGE_TREE=1
-# Previous version may not be ready for fault injections
-export ZOOKEEPER_FAULT_INJECTION=0
-configure
-
 # it contains some new settings, but we can safely remove it
 rm /etc/clickhouse-server/config.d/merge_tree.xml
 rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
|
|||||||
clang-${LLVM_VERSION} \
|
clang-${LLVM_VERSION} \
|
||||||
clang-tidy-${LLVM_VERSION} \
|
clang-tidy-${LLVM_VERSION} \
|
||||||
cmake \
|
cmake \
|
||||||
fakeroot \
|
|
||||||
gdb \
|
gdb \
|
||||||
git \
|
git \
|
||||||
gperf \
|
gperf \
|
||||||
@ -94,7 +93,10 @@ RUN mkdir /tmp/ccache \
|
|||||||
&& rm -rf /tmp/ccache
|
&& rm -rf /tmp/ccache
|
||||||
|
|
||||||
ARG TARGETARCH
|
ARG TARGETARCH
|
||||||
ARG SCCACHE_VERSION=v0.4.1
|
ARG SCCACHE_VERSION=v0.5.4
|
||||||
|
ENV SCCACHE_IGNORE_SERVER_IO_ERROR=1
|
||||||
|
# sccache requires a value for the region. So by default we use The Default Region
|
||||||
|
ENV SCCACHE_REGION=us-east-1
|
||||||
RUN arch=${TARGETARCH:-amd64} \
|
RUN arch=${TARGETARCH:-amd64} \
|
||||||
&& case $arch in \
|
&& case $arch in \
|
||||||
amd64) rarch=x86_64 ;; \
|
amd64) rarch=x86_64 ;; \
|
||||||
|
@ -33,6 +33,9 @@ then
|
|||||||
elif [ "${ARCH}" = "powerpc64le" -o "${ARCH}" = "ppc64le" ]
|
elif [ "${ARCH}" = "powerpc64le" -o "${ARCH}" = "ppc64le" ]
|
||||||
then
|
then
|
||||||
DIR="powerpc64le"
|
DIR="powerpc64le"
|
||||||
|
elif [ "${ARCH}" = "riscv64" ]
|
||||||
|
then
|
||||||
|
DIR="riscv64"
|
||||||
fi
|
fi
|
||||||
elif [ "${OS}" = "FreeBSD" ]
|
elif [ "${OS}" = "FreeBSD" ]
|
||||||
then
|
then
|
||||||
|
docs/changelogs/v22.8.20.11-lts.md (new file, 20 lines)
@@ -0,0 +1,20 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v22.8.20.11-lts (c9ca79e24e8) FIXME as compared to v22.8.19.10-lts (989bc2fe8b0)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix broken index analysis when binary operator contains a null constant argument [#50177](https://github.com/ClickHouse/ClickHouse/pull/50177) ([Amos Bird](https://github.com/amosbird)).
+* Fix incorrect constant folding [#50536](https://github.com/ClickHouse/ClickHouse/pull/50536) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix fuzzer failure in ActionsDAG [#51301](https://github.com/ClickHouse/ClickHouse/pull/51301) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
docs/changelogs/v23.6.2.18-stable.md (new file, 25 lines)
@@ -0,0 +1,25 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.6.2.18-stable (89f39a7ccfe) FIXME as compared to v23.6.1.1524-stable (d1c7e13d088)
+
+#### Build/Testing/Packaging Improvement
+* Backported in [#51888](https://github.com/ClickHouse/ClickHouse/issues/51888): Update cargo dependencies. [#51721](https://github.com/ClickHouse/ClickHouse/pull/51721) ([Raúl Marín](https://github.com/Algunenano)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
+* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Remove the usage of Analyzer setting in the client [#51578](https://github.com/ClickHouse/ClickHouse/pull/51578) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix 02116_tuple_element with Analyzer [#51669](https://github.com/ClickHouse/ClickHouse/pull/51669) ([Robert Schulze](https://github.com/rschu1ze)).
+* Fix SQLLogic docker images [#51719](https://github.com/ClickHouse/ClickHouse/pull/51719) ([Antonio Andelic](https://github.com/antonio2368)).
+* Fix source image for sqllogic [#51728](https://github.com/ClickHouse/ClickHouse/pull/51728) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Pin for docker-ce [#51743](https://github.com/ClickHouse/ClickHouse/pull/51743) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
@@ -23,7 +23,7 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
 ``` bash
 cd ClickHouse
 mkdir build-riscv64
-CC=clang-16 CXX=clang++-16 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DUSE_UNWIND=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF
+CC=clang-16 CXX=clang++-16 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF
 ninja -C build-riscv64
 ```
 
@@ -33,6 +33,15 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name
 
 - `options` — MongoDB connection string options (optional parameter).
 
+:::tip
+If you are using the MongoDB Atlas cloud offering please add these options:
+
+```
+'connectTimeoutMS=10000&ssl=true&authSource=admin'
+```
+
+:::
+
 ## Usage Example {#usage-example}
 
 Create a table in ClickHouse which allows to read data from MongoDB collection:
@@ -54,7 +54,7 @@ $ sudo mysql
 
 ``` sql
 mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse';
-mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION;
+mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'localhost' WITH GRANT OPTION;
 ```
 
 Then configure the connection in `/etc/odbc.ini`.

@@ -66,7 +66,7 @@ DRIVER = /usr/local/lib/libmyodbc5w.so
 SERVER = 127.0.0.1
 PORT = 3306
 DATABASE = test
-USERNAME = clickhouse
+USER = clickhouse
 PASSWORD = clickhouse
 ```
 
|
|||||||
Table in MySQL:
|
Table in MySQL:
|
||||||
|
|
||||||
``` text
|
``` text
|
||||||
|
mysql> CREATE DATABASE test;
|
||||||
|
Query OK, 1 row affected (0,01 sec)
|
||||||
|
|
||||||
mysql> CREATE TABLE `test`.`test` (
|
mysql> CREATE TABLE `test`.`test` (
|
||||||
-> `int_id` INT NOT NULL AUTO_INCREMENT,
|
-> `int_id` INT NOT NULL AUTO_INCREMENT,
|
||||||
-> `int_nullable` INT NULL DEFAULT NULL,
|
-> `int_nullable` INT NULL DEFAULT NULL,
|
||||||
@ -91,10 +94,10 @@ mysql> CREATE TABLE `test`.`test` (
|
|||||||
-> PRIMARY KEY (`int_id`));
|
-> PRIMARY KEY (`int_id`));
|
||||||
Query OK, 0 rows affected (0,09 sec)
|
Query OK, 0 rows affected (0,09 sec)
|
||||||
|
|
||||||
mysql> insert into test (`int_id`, `float`) VALUES (1,2);
|
mysql> insert into test.test (`int_id`, `float`) VALUES (1,2);
|
||||||
Query OK, 1 row affected (0,00 sec)
|
Query OK, 1 row affected (0,00 sec)
|
||||||
|
|
||||||
mysql> select * from test;
|
mysql> select * from test.test;
|
||||||
+------+----------+-----+----------+
|
+------+----------+-----+----------+
|
||||||
| int_id | int_nullable | float | float_nullable |
|
| int_id | int_nullable | float | float_nullable |
|
||||||
+------+----------+-----+----------+
|
+------+----------+-----+----------+
|
||||||
|
@@ -37,8 +37,8 @@ The [Merge](/docs/en/engines/table-engines/special/merge.md/#merge) engine does
 ``` sql
 CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 (
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr1] [TTL expr1] [CODEC(codec1)] [[NOT] NULL|PRIMARY KEY],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr2] [TTL expr2] [CODEC(codec2)] [[NOT] NULL|PRIMARY KEY],
     ...
     INDEX index_name1 expr1 TYPE type1(...) [GRANULARITY value1],
     INDEX index_name2 expr2 TYPE type2(...) [GRANULARITY value2],
@@ -17,7 +17,8 @@ Default value: 0.
 **Example**
 
 ``` sql
-insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
+INSERT INTO table_1 VALUES (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
+SELECT * FROM table_1;
 ```
 ```response
 ┌─x─┬─y────┐

@@ -30,7 +31,7 @@ insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
 ```sql
 SELECT *
 FROM table_1
-SETTINGS additional_table_filters = (('table_1', 'x != 2'))
+SETTINGS additional_table_filters = {'table_1': 'x != 2'}
 ```
 ```response
 ┌─x─┬─y────┐
@@ -50,7 +51,8 @@ Default value: `''`.
 **Example**
 
 ``` sql
-insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
+INSERT INTO table_1 VALUES (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
+SELECT * FROM table_1;
 ```
 ```response
 ┌─x─┬─y────┐
@@ -3201,6 +3203,40 @@ ENGINE = Log
 └──────────────────────────────────────────────────────────────────────────┘
 ```
 
+## default_temporary_table_engine {#default_temporary_table_engine}
+
+Same as [default_table_engine](#default_table_engine) but for temporary tables.
+
+Default value: `Memory`.
+
+In this example, any new temporary table that does not specify an `Engine` will use the `Log` table engine:
+
+Query:
+
+```sql
+SET default_temporary_table_engine = 'Log';
+
+CREATE TEMPORARY TABLE my_table (
+    x UInt32,
+    y UInt32
+);
+
+SHOW CREATE TEMPORARY TABLE my_table;
+```
+
+Result:
+
+```response
+┌─statement────────────────────────────────────────────────────────────────┐
+│ CREATE TEMPORARY TABLE default.my_table
+(
+    `x` UInt32,
+    `y` UInt32
+)
+ENGINE = Log
+└──────────────────────────────────────────────────────────────────────────┘
+```
+
 ## data_type_default_nullable {#data_type_default_nullable}
 
 Allows data types without explicit modifiers [NULL or NOT NULL](../../sql-reference/statements/create/table.md/#null-modifiers) in column definition will be [Nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable).

@@ -3501,7 +3537,7 @@ Possible values:
 - Any positive integer.
 - 0 - Disabled (infinite timeout).
 
-Default value: 180.
+Default value: 30.
 
 ## http_receive_timeout {#http_receive_timeout}
 

@@ -3512,7 +3548,7 @@ Possible values:
 - Any positive integer.
 - 0 - Disabled (infinite timeout).
 
-Default value: 180.
+Default value: 30.
 
 ## check_query_single_value_result {#check_query_single_value_result}
 
@@ -9,7 +9,6 @@ Columns:
 
 - `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
 - `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
-- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Event time with microseconds resolution.
 - `name` ([String](../../sql-reference/data-types/string.md)) — Metric name.
 - `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.
 

@@ -20,18 +19,18 @@ SELECT * FROM system.asynchronous_metric_log LIMIT 10
 ```
 
 ``` text
-┌─event_date─┬──────────event_time─┬────event_time_microseconds─┬─name─────────────────────────────────────┬─────value─┐
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ CPUFrequencyMHz_0 │ 2120.9 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pmuzzy │ 743 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pdirty │ 26288 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.run_intervals │ 0 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.num_runs │ 0 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.retained │ 60694528 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.mapped │ 303161344 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.resident │ 260931584 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.metadata │ 12079488 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.allocated │ 133756128 │
-└────────────┴─────────────────────┴────────────────────────────┴──────────────────────────────────────────┴───────────┘
+┌─event_date─┬──────────event_time─┬─name─────────────────────────────────────┬─────value─┐
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ CPUFrequencyMHz_0 │ 2120.9 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pmuzzy │ 743 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pdirty │ 26288 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.run_intervals │ 0 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.num_runs │ 0 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.retained │ 60694528 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.mapped │ 303161344 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.resident │ 260931584 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.metadata │ 12079488 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.allocated │ 133756128 │
+└────────────┴─────────────────────┴──────────────────────────────────────────┴───────────┘
 ```
 
 **See Also**
@@ -13,6 +13,7 @@ System tables provide information about:
 
 - Server states, processes, and environment.
 - Server’s internal processes.
+- Options used when the ClickHouse binary was built.
 
 System tables:
 
docs/en/operations/system-tables/jemalloc_bins.md (new file, 45 lines)
@@ -0,0 +1,45 @@
+---
+slug: /en/operations/system-tables/jemalloc_bins
+---
+# jemalloc_bins
+
+Contains information about memory allocations done via jemalloc allocator in different size classes (bins) aggregated from all arenas.
+These statistics might not be absolutely accurate because of thread local caching in jemalloc.
+
+Columns:
+
+- `index` (UInt64) — Index of the bin ordered by size
+- `large` (Bool) — True for large allocations and False for small
+- `size` (UInt64) — Size of allocations in this bin
+- `allocations` (UInt64) — Number of allocations
+- `deallocations` (UInt64) — Number of deallocations
+
+**Example**
+
+Find the sizes of allocations that contributed the most to the current overall memory usage.
+
+``` sql
+SELECT
+    *,
+    allocations - deallocations AS active_allocations,
+    size * active_allocations AS allocated_bytes
+FROM system.jemalloc_bins
+WHERE allocated_bytes > 0
+ORDER BY allocated_bytes DESC
+LIMIT 10
+```
+
+``` text
+┌─index─┬─large─┬─────size─┬─allocations─┬─deallocations─┬─active_allocations─┬─allocated_bytes─┐
+│ 82 │ 1 │ 50331648 │ 1 │ 0 │ 1 │ 50331648 │
+│ 10 │ 0 │ 192 │ 512336 │ 370710 │ 141626 │ 27192192 │
+│ 69 │ 1 │ 5242880 │ 6 │ 2 │ 4 │ 20971520 │
+│ 3 │ 0 │ 48 │ 16938224 │ 16559484 │ 378740 │ 18179520 │
+│ 28 │ 0 │ 4096 │ 122924 │ 119142 │ 3782 │ 15491072 │
+│ 61 │ 1 │ 1310720 │ 44569 │ 44558 │ 11 │ 14417920 │
+│ 39 │ 1 │ 28672 │ 1285 │ 913 │ 372 │ 10665984 │
+│ 4 │ 0 │ 64 │ 2837225 │ 2680568 │ 156657 │ 10026048 │
+│ 6 │ 0 │ 96 │ 2617803 │ 2531435 │ 86368 │ 8291328 │
+│ 36 │ 1 │ 16384 │ 22431 │ 21970 │ 461 │ 7553024 │
+└───────┴───────┴──────────┴──────────────┴───────────────┴────────────────────┴─────────────────┘
+```
@@ -171,12 +171,13 @@ Result:
 └──────────────────────────────┘
 ```
 
-Executable user defined functions can take constant parameters configured in `command` setting (works only for user defined functions with `executable` type).
+Executable user defined functions can take constant parameters configured in `command` setting (works only for user defined functions with `executable` type). It also requires the `execute_direct` option (to ensure no shell argument expansion vulnerability).
 File `test_function_parameter_python.xml` (`/etc/clickhouse-server/test_function_parameter_python.xml` with default path settings).
 ```xml
 <functions>
     <function>
         <type>executable</type>
+        <execute_direct>true</execute_direct>
         <name>test_function_parameter_python</name>
         <return_type>String</return_type>
         <argument>
@@ -5,15 +5,28 @@ sidebar_label: SAMPLE BY
 title: "Manipulating Sampling-Key Expressions"
 ---
 
-Syntax:
+# Manipulating SAMPLE BY expression
+
+The following operations are available:
+
+## MODIFY
 
 ``` sql
 ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY SAMPLE BY new_expression
 ```
 
-The command changes the [sampling key](../../../engines/table-engines/mergetree-family/mergetree.md) of the table to `new_expression` (an expression or a tuple of expressions).
+The command changes the [sampling key](../../../engines/table-engines/mergetree-family/mergetree.md) of the table to `new_expression` (an expression or a tuple of expressions). The primary key must contain the new sample key.
 
-The command is lightweight in the sense that it only changes metadata. The primary key must contain the new sample key.
+## REMOVE
+
+``` sql
+ALTER TABLE [db].name [ON CLUSTER cluster] REMOVE SAMPLE BY
+```
+
+The command removes the [sampling key](../../../engines/table-engines/mergetree-family/mergetree.md) of the table.
+
+
+The commands `MODIFY` and `REMOVE` are lightweight in the sense that they only change metadata or remove files.
+
 :::note
 It only works for tables in the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables).
@@ -30,6 +30,14 @@ mongodb(host:port, database, collection, user, password, structure [, options])
 
 - `options` - MongoDB connection string options (optional parameter).
 
+:::tip
+If you are using the MongoDB Atlas cloud offering please add these options:
+
+```
+'connectTimeoutMS=10000&ssl=true&authSource=admin'
+```
+
+:::
 
 **Returned Value**
 
|
@ -8,7 +8,6 @@ slug: /ru/operations/system-tables/asynchronous_metric_log
|
|||||||
Столбцы:
|
Столбцы:
|
||||||
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — дата события.
|
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — дата события.
|
||||||
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время события.
|
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время события.
|
||||||
- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — время события в микросекундах.
|
|
||||||
- `name` ([String](../../sql-reference/data-types/string.md)) — название метрики.
|
- `name` ([String](../../sql-reference/data-types/string.md)) — название метрики.
|
||||||
- `value` ([Float64](../../sql-reference/data-types/float.md)) — значение метрики.
|
- `value` ([Float64](../../sql-reference/data-types/float.md)) — значение метрики.
|
||||||
|
|
||||||
|
@@ -3,13 +3,6 @@ slug: /zh/development/build
 ---
 # 如何构建 ClickHouse 发布包 {#ru-he-gou-jian-clickhouse-fa-bu-bao}
 
-## 安装 Git 和 Pbuilder {#an-zhuang-git-he-pbuilder}
-
-``` bash
-sudo apt-get update
-sudo apt-get install git pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
-```
-
 ## 拉取 ClickHouse 源码 {#la-qu-clickhouse-yuan-ma}
 
 ``` bash
@@ -8,7 +8,6 @@ slug: /zh/operations/system-tables/asynchronous_metric_log
 列:
 - `event_date` ([Date](../../sql-reference/data-types/date.md)) — 事件日期。
 - `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — 事件时间。
-- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — 事件时间(微秒)。
 - `name` ([String](../../sql-reference/data-types/string.md)) — 指标名。
 - `value` ([Float64](../../sql-reference/data-types/float.md)) — 指标值。
 
@@ -17,18 +16,18 @@ slug: /zh/operations/system-tables/asynchronous_metric_log
 SELECT * FROM system.asynchronous_metric_log LIMIT 10
 ```
 ``` text
-┌─event_date─┬──────────event_time─┬────event_time_microseconds─┬─name─────────────────────────────────────┬─────value─┐
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ CPUFrequencyMHz_0                         │    2120.9 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pmuzzy                │       743 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pdirty                │     26288 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.run_intervals │         0 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.num_runs       │         0 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.retained                         │  60694528 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.mapped                           │ 303161344 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.resident                         │ 260931584 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.metadata                         │  12079488 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.allocated                        │ 133756128 │
-└────────────┴─────────────────────┴────────────────────────────┴──────────────────────────────────────────┴───────────┘
+┌─event_date─┬──────────event_time─┬─name─────────────────────────────────────┬─────value─┐
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ CPUFrequencyMHz_0                         │    2120.9 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pmuzzy                │       743 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pdirty                │     26288 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.run_intervals │         0 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.num_runs       │         0 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.retained                         │  60694528 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.mapped                           │ 303161344 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.resident                         │ 260931584 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.metadata                         │  12079488 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.allocated                        │ 133756128 │
+└────────────┴─────────────────────┴──────────────────────────────────────────┴───────────┘
 ```
 
 **另请参阅**
@@ -192,7 +192,7 @@ SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook
 **返回值**
 
 - 如果`x`不为`NULL`,返回非`Nullable`类型的原始值。
-- 如果`x`为`NULL`,返回对应非`Nullable`类型的默认值。
+- 如果`x`为`NULL`,则返回任意值。
 
 **示例**
 
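The doc fix above weakens the documented guarantee: a NULL argument now yields an arbitrary (unspecified) value rather than the type's default. Assuming this page describes `assumeNotNull`-style behavior (the function name is inferred from context, not shown in the hunk), the difference is observable as:

``` sql
SELECT assumeNotNull(CAST(NULL, 'Nullable(Int64)'));
-- Previously documented to return 0 (the Int64 default value);
-- now documented as returning an arbitrary value.
```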
@@ -135,7 +135,7 @@ func TestConfigFileFrameCopy(t *testing.T) {
			sizes := map[string]int64{
				"users.xml":            int64(2017),
				"default-password.xml": int64(188),
-				"config.xml":           int64(61662),
+				"config.xml":           int64(59506),
				"server-include.xml":   int64(168),
				"user-include.xml":     int64(559),
			}
@@ -189,7 +189,7 @@ func TestConfigFileFrameCopy(t *testing.T) {
			sizes := map[string]int64{
				"users.yaml":            int64(1023),
				"default-password.yaml": int64(132),
-				"config.yaml":           int64(42512),
+				"config.yaml":           int64(41633),
				"server-include.yaml":   int64(21),
				"user-include.yaml":     int64(120),
			}
@@ -649,73 +649,6 @@
             </replica>
         </shard>
     </test_shard_localhost>
-    <test_cluster_two_shards_localhost>
-        <shard>
-            <replica>
-                <host>localhost</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-        <shard>
-            <replica>
-                <host>localhost</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-    </test_cluster_two_shards_localhost>
-    <test_cluster_two_shards>
-        <shard>
-            <replica>
-                <host>127.0.0.1</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-        <shard>
-            <replica>
-                <host>127.0.0.2</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-    </test_cluster_two_shards>
-    <test_cluster_two_shards_internal_replication>
-        <shard>
-            <internal_replication>true</internal_replication>
-            <replica>
-                <host>127.0.0.1</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-        <shard>
-            <internal_replication>true</internal_replication>
-            <replica>
-                <host>127.0.0.2</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-    </test_cluster_two_shards_internal_replication>
-    <test_shard_localhost_secure>
-        <shard>
-            <replica>
-                <host>localhost</host>
-                <port>9440</port>
-                <secure>1</secure>
-            </replica>
-        </shard>
-    </test_shard_localhost_secure>
-    <test_unavailable_shard>
-        <shard>
-            <replica>
-                <host>localhost</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-        <shard>
-            <replica>
-                <host>localhost</host>
-                <port>1</port>
-            </replica>
-        </shard>
-    </test_unavailable_shard>
 </remote_servers>
 
 <!-- The list of hosts allowed to use in URL-related storage engines and table functions.
@@ -547,46 +547,6 @@ remote_servers:
                 port: 9000
                 # Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority).
                 # priority: 1
-    test_cluster_two_shards_localhost:
-        shard:
-            - replica:
-                host: localhost
-                port: 9000
-            - replica:
-                host: localhost
-                port: 9000
-    test_cluster_two_shards:
-        shard:
-            - replica:
-                host: 127.0.0.1
-                port: 9000
-            - replica:
-                host: 127.0.0.2
-                port: 9000
-    test_cluster_two_shards_internal_replication:
-        shard:
-            - internal_replication: true
-              replica:
-                host: 127.0.0.1
-                port: 9000
-            - internal_replication: true
-              replica:
-                host: 127.0.0.2
-                port: 9000
-    test_shard_localhost_secure:
-        shard:
-            replica:
-                host: localhost
-                port: 9440
-                secure: 1
-    test_unavailable_shard:
-        shard:
-            - replica:
-                host: localhost
-                port: 9000
-            - replica:
-                host: localhost
-                port: 1
 
 # The list of hosts allowed to use in URL-related storage engines and table functions.
 # If this section is not present in configuration, all hosts are allowed.
@ -649,73 +649,6 @@
|
|||||||
</replica>
|
</replica>
|
||||||
</shard>
|
</shard>
|
||||||
</test_shard_localhost>
|
</test_shard_localhost>
|
||||||
<test_cluster_two_shards_localhost>
|
|
||||||
<shard>
|
|
||||||
<replica>
|
|
||||||
<host>localhost</host>
|
|
||||||
<port>9000</port>
|
|
||||||
</replica>
|
|
||||||
</shard>
|
|
||||||
<shard>
|
|
||||||
<replica>
|
|
||||||
<host>localhost</host>
|
|
||||||
<port>9000</port>
|
|
||||||
</replica>
|
|
||||||
</shard>
|
|
||||||
</test_cluster_two_shards_localhost>
|
|
||||||
<test_cluster_two_shards>
|
|
||||||
<shard>
|
|
||||||
<replica>
|
|
||||||
<host>127.0.0.1</host>
|
|
||||||
<port>9000</port>
|
|
||||||
</replica>
|
|
||||||
</shard>
|
|
||||||
<shard>
|
|
||||||
<replica>
|
|
||||||
<host>127.0.0.2</host>
|
|
||||||
<port>9000</port>
|
|
||||||
</replica>
|
|
||||||
</shard>
|
|
||||||
</test_cluster_two_shards>
|
|
||||||
<test_cluster_two_shards_internal_replication>
|
|
||||||
<shard>
|
|
||||||
<internal_replication>true</internal_replication>
|
|
||||||
<replica>
|
|
||||||
<host>127.0.0.1</host>
|
|
||||||
<port>9000</port>
|
|
||||||
</replica>
|
|
||||||
</shard>
|
|
||||||
<shard>
|
|
||||||
<internal_replication>true</internal_replication>
|
|
||||||
<replica>
|
|
||||||
<host>127.0.0.2</host>
|
|
||||||
<port>9000</port>
|
|
||||||
</replica>
|
|
||||||
</shard>
|
|
||||||
</test_cluster_two_shards_internal_replication>
|
|
||||||
<test_shard_localhost_secure>
|
|
||||||
<shard>
|
|
||||||
<replica>
|
|
||||||
<host>localhost</host>
|
|
||||||
<port>9440</port>
|
|
||||||
<secure>1</secure>
|
|
||||||
</replica>
|
|
||||||
</shard>
|
|
||||||
</test_shard_localhost_secure>
|
|
||||||
<test_unavailable_shard>
|
|
||||||
<shard>
|
|
||||||
<replica>
|
|
||||||
<host>localhost</host>
|
|
||||||
<port>9000</port>
|
|
||||||
</replica>
|
|
||||||
</shard>
|
|
||||||
<shard>
|
|
||||||
<replica>
|
|
||||||
<host>localhost</host>
|
|
||||||
<port>1</port>
|
|
||||||
</replica>
|
|
||||||
</shard>
|
|
||||||
</test_unavailable_shard>
|
|
||||||
</remote_servers>
|
</remote_servers>
|
||||||
|
|
||||||
<!-- The list of hosts allowed to use in URL-related storage engines and table functions.
|
<!-- The list of hosts allowed to use in URL-related storage engines and table functions.
|
||||||
|
@@ -59,7 +59,7 @@ public:
         String relative_path_from = validatePathAndGetAsRelative(path_from);
         String relative_path_to = validatePathAndGetAsRelative(path_to);
 
-        disk_from->copy(relative_path_from, disk_to, relative_path_to);
+        disk_from->copyDirectoryContent(relative_path_from, disk_to, relative_path_to);
     }
 };
 }
@@ -42,7 +42,7 @@ int mainEntryClickHouseKeeperConverter(int argc, char ** argv)
     {
         auto keeper_context = std::make_shared<KeeperContext>(true);
         keeper_context->setDigestEnabled(true);
-        keeper_context->setSnapshotDisk(std::make_shared<DiskLocal>("Keeper-snapshots", options["output-dir"].as<std::string>(), 0));
+        keeper_context->setSnapshotDisk(std::make_shared<DiskLocal>("Keeper-snapshots", options["output-dir"].as<std::string>()));
 
         DB::KeeperStorage storage(/* tick_time_ms */ 500, /* superdigest */ "", keeper_context, /* initialize_system_nodes */ false);
 
@@ -485,7 +485,7 @@ try
     LOG_INFO(log, "Closed all listening sockets.");
 
     if (current_connections > 0)
-        current_connections = waitServersToFinish(*servers, config().getInt("shutdown_wait_unfinished", 5));
+        current_connections = waitServersToFinish(*servers, servers_lock, config().getInt("shutdown_wait_unfinished", 5));
 
     if (current_connections)
         LOG_INFO(log, "Closed connections to Keeper. But {} remain. Probably some users cannot finish their connections after context shutdown.", current_connections);
@@ -8,7 +8,9 @@
 #include <Poco/Logger.h>
 #include <Poco/NullChannel.h>
 #include <Poco/SimpleFileChannel.h>
+#include <Databases/DatabaseFilesystem.h>
 #include <Databases/DatabaseMemory.h>
+#include <Databases/DatabasesOverlay.h>
 #include <Storages/System/attachSystemTables.h>
 #include <Storages/System/attachInformationSchemaTables.h>
 #include <Interpreters/DatabaseCatalog.h>
@@ -50,6 +52,8 @@
 #include <base/argsToConfig.h>
 #include <filesystem>
 
+#include "config.h"
+
 #if defined(FUZZING_MODE)
 #include <Functions/getFuzzerData.h>
 #endif
@@ -71,6 +75,15 @@ namespace ErrorCodes
     extern const int FILE_ALREADY_EXISTS;
 }
 
+void applySettingsOverridesForLocal(ContextMutablePtr context)
+{
+    Settings settings = context->getSettings();
+
+    settings.allow_introspection_functions = true;
+    settings.storage_file_read_method = LocalFSReadMethod::mmap;
+
+    context->setSettings(settings);
+}
 
 void LocalServer::processError(const String &) const
 {
@@ -170,6 +183,13 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str
     return system_database;
 }
 
+static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_)
+{
+    auto databaseCombiner = std::make_shared<DatabasesOverlay>(name_, context_);
+    databaseCombiner->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context_));
+    databaseCombiner->registerNextDatabase(std::make_shared<DatabaseMemory>(name_, context_));
+    return databaseCombiner;
+}
 
 /// If path is specified and not empty, will try to setup server environment and load existing metadata
 void LocalServer::tryInitPath()
@@ -657,6 +677,12 @@ void LocalServer::processConfig()
     CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_size, compiled_expression_cache_elements_size);
 #endif
 
+    /// NOTE: it is important to apply any overrides before
+    /// setDefaultProfiles() calls since it will copy current context (i.e.
+    /// there is separate context for Buffer tables).
+    applySettingsOverridesForLocal(global_context);
+    applyCmdOptions(global_context);
+
     /// Load global settings from default_profile and system_profile.
     global_context->setDefaultProfiles(config());
 
@@ -669,9 +695,8 @@ void LocalServer::processConfig()
      * if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons.
      */
     std::string default_database = config().getString("default_database", "_local");
-    DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared<DatabaseMemory>(default_database, global_context));
+    DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context));
     global_context->setCurrentDatabase(default_database);
-    applyCmdOptions(global_context);
 
     if (config().has("path"))
     {
@@ -1146,7 +1146,16 @@ try
     size_t merges_mutations_memory_usage_soft_limit = server_settings_.merges_mutations_memory_usage_soft_limit;
 
     size_t default_merges_mutations_server_memory_usage = static_cast<size_t>(memory_amount * server_settings_.merges_mutations_memory_usage_to_ram_ratio);
-    if (merges_mutations_memory_usage_soft_limit == 0 || merges_mutations_memory_usage_soft_limit > default_merges_mutations_server_memory_usage)
+    if (merges_mutations_memory_usage_soft_limit == 0)
+    {
+        merges_mutations_memory_usage_soft_limit = default_merges_mutations_server_memory_usage;
+        LOG_INFO(log, "Setting merges_mutations_memory_usage_soft_limit was set to {}"
+            " ({} available * {:.2f} merges_mutations_memory_usage_to_ram_ratio)",
+            formatReadableSizeWithBinarySuffix(merges_mutations_memory_usage_soft_limit),
+            formatReadableSizeWithBinarySuffix(memory_amount),
+            server_settings_.merges_mutations_memory_usage_to_ram_ratio);
+    }
+    else if (merges_mutations_memory_usage_soft_limit > default_merges_mutations_server_memory_usage)
     {
         merges_mutations_memory_usage_soft_limit = default_merges_mutations_server_memory_usage;
         LOG_WARNING(log, "Setting merges_mutations_memory_usage_soft_limit was set to {}"
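For intuition (numbers are illustrative, not from the patch): assuming `merges_mutations_memory_usage_to_ram_ratio` keeps its usual default of 0.5, on a 64 GiB host an unset soft limit now resolves to 32 GiB and is reported at INFO level, while only an explicitly configured value above 32 GiB still takes the WARNING branch below.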
@@ -1523,7 +1532,7 @@ try
         LOG_INFO(log, "Closed all listening sockets.");
 
         if (current_connections > 0)
-            current_connections = waitServersToFinish(servers_to_start_before_tables, config().getInt("shutdown_wait_unfinished", 5));
+            current_connections = waitServersToFinish(servers_to_start_before_tables, servers_lock, config().getInt("shutdown_wait_unfinished", 5));
 
         if (current_connections)
             LOG_INFO(log, "Closed connections to servers for tables. But {} remain. Probably some tables of other users cannot finish their connections after context shutdown.", current_connections);
@@ -1581,6 +1590,15 @@ try
         /// After attaching system databases we can initialize system log.
         global_context->initializeSystemLogs();
         global_context->setSystemZooKeeperLogAfterInitializationIfNeeded();
+        /// Build loggers before tables startup to make log messages from tables
+        /// attach available in system.text_log
+        {
+            String level_str = config().getString("text_log.level", "");
+            int level = level_str.empty() ? INT_MAX : Poco::Logger::parseLevel(level_str);
+            setTextLog(global_context->getTextLog(), level);
+
+            buildLoggers(config(), logger());
+        }
         /// After the system database is created, attach virtual system tables (in addition to query_log and part_log)
         attachSystemTablesServer(global_context, *database_catalog.getSystemDatabase(), has_zookeeper);
         attachInformationSchema(global_context, *database_catalog.getDatabase(DatabaseCatalog::INFORMATION_SCHEMA));
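Since loggers are now built before tables start up, early attach-time messages should be queryable afterwards. A quick check, assuming `text_log` is enabled in the server config (the exact filter is illustrative):

``` sql
SELECT event_time, level, logger_name, message
FROM system.text_log
ORDER BY event_time ASC
LIMIT 10;
```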
@@ -1609,7 +1627,7 @@ try
 
     /// Init trace collector only after trace_log system table was created
     /// Disable it if we collect test coverage information, because it will work extremely slow.
-#if USE_UNWIND && !WITH_COVERAGE
+#if !WITH_COVERAGE
     /// Profilers cannot work reliably with any other libunwind or without PHDR cache.
     if (hasPHDRCache())
     {
@@ -1632,10 +1650,6 @@ try
 
     /// Describe multiple reasons when query profiler cannot work.
 
-#if !USE_UNWIND
-    LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they cannot work without bundled unwind (stack unwinding) library.");
-#endif
-
 #if WITH_COVERAGE
     LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they work extremely slow with test coverage.");
 #endif
@@ -1707,14 +1721,6 @@ try
         /// Must be done after initialization of `servers`, because async_metrics will access `servers` variable from its thread.
         async_metrics.start();
 
-        {
-            String level_str = config().getString("text_log.level", "");
-            int level = level_str.empty() ? INT_MAX : Poco::Logger::parseLevel(level_str);
-            setTextLog(global_context->getTextLog(), level);
-        }
-
-        buildLoggers(config(), logger());
-
         main_config_reloader->start();
         access_control.startPeriodicReloading();
 
@@ -1827,7 +1833,7 @@ try
         global_context->getProcessList().killAllQueries();
 
         if (current_connections)
-            current_connections = waitServersToFinish(servers, config().getInt("shutdown_wait_unfinished", 5));
+            current_connections = waitServersToFinish(servers, servers_lock, config().getInt("shutdown_wait_unfinished", 5));
 
         if (current_connections)
             LOG_WARNING(log, "Closed connections. But {} remain."
@@ -1,49 +0,0 @@
-<clickhouse>
-    <remote_servers>
-
-        <![CDATA[
-        You can run additional servers simply as
-        ./clickhouse-server -- --path=9001 --tcp_port=9001
-        ]]>
-
-        <single_remote_shard_at_port_9001>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>9001</port>
-                </replica>
-            </shard>
-        </single_remote_shard_at_port_9001>
-
-        <two_remote_shards_at_port_9001_9002>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>9001</port>
-                </replica>
-            </shard>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>9002</port>
-                </replica>
-            </shard>
-        </two_remote_shards_at_port_9001_9002>
-
-        <two_shards_one_local_one_remote_at_port_9001>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>9001</port>
-                </replica>
-            </shard>
-        </two_shards_one_local_one_remote_at_port_9001>
-
-    </remote_servers>
-</clickhouse>
@@ -805,7 +805,7 @@
     -->
     <remote_servers>
         <!-- Test only shard config for testing distributed storage -->
-        <test_shard_localhost>
+        <default>
            <!-- Inter-server per-cluster secret for Distributed queries
                 default: no secret (no authentication will be performed)
 
@@ -838,158 +838,11 @@
                 <port>9000</port>
                 <!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
                 <!-- <priority>1</priority> -->
+                <!-- Use SSL? Default: no -->
+                <!-- <secure>0</secure> -->
             </replica>
         </shard>
-    </test_shard_localhost>
-    <test_cluster_one_shard_three_replicas_localhost>
-        <shard>
-            <internal_replication>false</internal_replication>
-            <replica>
-                <host>127.0.0.1</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>127.0.0.2</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>127.0.0.3</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-        <!--shard>
-            <internal_replication>false</internal_replication>
-            <replica>
-                <host>127.0.0.1</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>127.0.0.2</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>127.0.0.3</host>
-                <port>9000</port>
-            </replica>
-        </shard-->
-    </test_cluster_one_shard_three_replicas_localhost>
-    <parallel_replicas>
-        <shard>
-            <internal_replication>false</internal_replication>
-            <replica>
-                <host>127.0.0.1</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>127.0.0.2</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>127.0.0.3</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>127.0.0.4</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>127.0.0.5</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>127.0.0.6</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>127.0.0.7</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>127.0.0.8</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>127.0.0.9</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>127.0.0.10</host>
-                <port>9000</port>
-            </replica>
-            <!-- Unavailable replica -->
-            <replica>
-                <host>127.0.0.11</host>
-                <port>1234</port>
-            </replica>
-        </shard>
-    </parallel_replicas>
-    <test_cluster_two_shards_localhost>
-        <shard>
-            <replica>
-                <host>localhost</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-        <shard>
-            <replica>
-                <host>localhost</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-    </test_cluster_two_shards_localhost>
-    <test_cluster_two_shards>
-        <shard>
-            <replica>
-                <host>127.0.0.1</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-        <shard>
-            <replica>
-                <host>127.0.0.2</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-    </test_cluster_two_shards>
-    <test_cluster_two_shards_internal_replication>
-        <shard>
-            <internal_replication>true</internal_replication>
-            <replica>
-                <host>127.0.0.1</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-        <shard>
-            <internal_replication>true</internal_replication>
-            <replica>
-                <host>127.0.0.2</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-    </test_cluster_two_shards_internal_replication>
-    <test_shard_localhost_secure>
-        <shard>
-            <replica>
-                <host>localhost</host>
-                <port>9440</port>
-                <secure>1</secure>
-            </replica>
-        </shard>
-    </test_shard_localhost_secure>
-    <test_unavailable_shard>
-        <shard>
-            <replica>
-                <host>localhost</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-        <shard>
-            <replica>
-                <host>localhost</host>
-                <port>1</port>
-            </replica>
-        </shard>
-    </test_unavailable_shard>
+    </default>
 </remote_servers>
 
 <!-- The list of hosts allowed to use in URL-related storage engines and table functions.
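After this change the only cluster shipped by the default server config (both `config.xml` above and `config.yaml` below) is `default`. A sketch of how one could confirm that on a running server (output shape abbreviated):

``` sql
SELECT cluster, shard_num, replica_num, host_name
FROM system.clusters
WHERE cluster = 'default';
```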
@@ -515,7 +515,7 @@ remap_executable: false
 # https://clickhouse.com/docs/en/operations/table_engines/distributed/
 remote_servers:
     # Test only shard config for testing distributed storage
-    test_shard_localhost:
+    default:
         # Inter-server per-cluster secret for Distributed queries
         # default: no secret (no authentication will be performed)
 
@@ -546,46 +546,8 @@ remote_servers:
                 port: 9000
                 # Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority).
                 # priority: 1
-    test_cluster_two_shards_localhost:
-        shard:
-            - replica:
-                host: localhost
-                port: 9000
-            - replica:
-                host: localhost
-                port: 9000
-    test_cluster_two_shards:
-        shard:
-            - replica:
-                host: 127.0.0.1
-                port: 9000
-            - replica:
-                host: 127.0.0.2
-                port: 9000
-    test_cluster_two_shards_internal_replication:
-        shard:
-            - internal_replication: true
-              replica:
-                host: 127.0.0.1
-                port: 9000
-            - internal_replication: true
-              replica:
-                host: 127.0.0.2
-                port: 9000
-    test_shard_localhost_secure:
-        shard:
-            replica:
-                host: localhost
-                port: 9440
-                secure: 1
-    test_unavailable_shard:
-        shard:
-            - replica:
-                host: localhost
-                port: 9000
-            - replica:
-                host: localhost
-                port: 1
+                # Use SSL? Default: no
+                # secure: 0
 
 # The list of hosts allowed to use in URL-related storage engines and table functions.
 # If this section is not present in configuration, all hosts are allowed.
@@ -12,7 +12,8 @@
     --chart-background: white;
     --shadow-color: rgba(0, 0, 0, 0.25);
     --input-shadow-color: rgba(0, 255, 0, 1);
-    --error-color: white;
+    --error-color: red;
+    --auth-error-color: white;
     --legend-background: rgba(255, 255, 255, 0.75);
     --title-color: #666;
     --text-color: black;
@@ -258,7 +259,7 @@
         width: 60%;
         padding: .5rem;
 
-        color: var(--error-color);
+        color: var(--auth-error-color);
 
         display: flex;
         flex-flow: row nowrap;
@@ -906,9 +907,9 @@ async function draw(idx, chart, url_params, query) {
 
     if (error) {
         const errorMatch = errorMessages.find(({ regex }) => error.match(regex))
-        if (errorMatch) {
         const match = error.match(errorMatch.regex)
         const message = errorMatch.messageFunc(match)
+        if (message) {
             const authError = new Error(message)
             throw authError
         }
@@ -1019,13 +1020,15 @@ async function drawAll() {
         firstLoad = false;
     } else {
         enableReloadButton();
+        enableRunButton();
     }
-    if (!results.includes(false)) {
+    if (results.includes(true)) {
         const element = document.querySelector('.inputs');
         element.classList.remove('unconnected');
         const add = document.querySelector('#add');
         add.style.display = 'block';
-    } else {
+    }
+    else {
         const charts = document.querySelector('#charts')
         charts.style.height = '0px';
     }
@@ -1050,6 +1053,13 @@ function disableReloadButton() {
     reloadButton.classList.add('disabled')
 }
 
+function disableRunButton() {
+    const runButton = document.getElementById('run')
+    runButton.value = 'Reloading...'
+    runButton.disabled = true
+    runButton.classList.add('disabled')
+}
+
 function enableReloadButton() {
     const reloadButton = document.getElementById('reload')
     reloadButton.value = 'Reload'
@@ -1057,11 +1067,19 @@ function enableReloadButton() {
     reloadButton.classList.remove('disabled')
 }
 
+function enableRunButton() {
+    const runButton = document.getElementById('run')
+    runButton.value = 'Ok'
+    runButton.disabled = false
+    runButton.classList.remove('disabled')
+}
+
 function reloadAll() {
     updateParams();
     drawAll();
     saveState();
-    disableReloadButton()
+    disableReloadButton();
+    disableRunButton();
 }
 
 document.getElementById('params').onsubmit = function(event) {
@ -155,7 +155,7 @@ namespace
|
|||||||
|
|
||||||
|
|
||||||
AccessRightsElement::AccessRightsElement(AccessFlags access_flags_, std::string_view database_)
|
AccessRightsElement::AccessRightsElement(AccessFlags access_flags_, std::string_view database_)
|
||||||
: access_flags(access_flags_), database(database_), any_database(false)
|
: access_flags(access_flags_), database(database_), parameter(database_), any_database(false), any_parameter(false)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -70,7 +70,7 @@ enum class AccessType
     M(ALTER_FREEZE_PARTITION, "FREEZE PARTITION, UNFREEZE", TABLE, ALTER_TABLE) \
     \
     M(ALTER_DATABASE_SETTINGS, "ALTER DATABASE SETTING, ALTER MODIFY DATABASE SETTING, MODIFY DATABASE SETTING", DATABASE, ALTER_DATABASE) /* allows to execute ALTER MODIFY SETTING */\
-    M(ALTER_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) /* allows to execute ALTER NAMED COLLECTION */\
+    M(ALTER_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) /* allows to execute ALTER NAMED COLLECTION */\
     \
     M(ALTER_TABLE, "", GROUP, ALTER) \
     M(ALTER_DATABASE, "", GROUP, ALTER) \
@@ -92,7 +92,7 @@ enum class AccessType
     M(CREATE_ARBITRARY_TEMPORARY_TABLE, "", GLOBAL, CREATE) /* allows to create and manipulate temporary tables
                                                                with arbitrary table engine */\
     M(CREATE_FUNCTION, "", GLOBAL, CREATE) /* allows to execute CREATE FUNCTION */ \
-    M(CREATE_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) /* allows to execute CREATE NAMED COLLECTION */ \
+    M(CREATE_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) /* allows to execute CREATE NAMED COLLECTION */ \
     M(CREATE, "", GROUP, ALL) /* allows to execute {CREATE|ATTACH} */ \
     \
     M(DROP_DATABASE, "", DATABASE, DROP) /* allows to execute {DROP|DETACH} DATABASE */\
@@ -101,7 +101,7 @@ enum class AccessType
                                                          implicitly enabled by the grant DROP_TABLE */\
     M(DROP_DICTIONARY, "", DICTIONARY, DROP) /* allows to execute {DROP|DETACH} DICTIONARY */\
     M(DROP_FUNCTION, "", GLOBAL, DROP) /* allows to execute DROP FUNCTION */\
-    M(DROP_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) /* allows to execute DROP NAMED COLLECTION */\
+    M(DROP_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) /* allows to execute DROP NAMED COLLECTION */\
     M(DROP, "", GROUP, ALL) /* allows to execute {DROP|DETACH} */\
     \
     M(UNDROP_TABLE, "", TABLE, ALL) /* allows to execute {UNDROP} TABLE */\
@@ -140,9 +140,10 @@ enum class AccessType
     M(SHOW_SETTINGS_PROFILES, "SHOW PROFILES, SHOW CREATE SETTINGS PROFILE, SHOW CREATE PROFILE", GLOBAL, SHOW_ACCESS) \
     M(SHOW_ACCESS, "", GROUP, ACCESS_MANAGEMENT) \
     M(ACCESS_MANAGEMENT, "", GROUP, ALL) \
-    M(SHOW_NAMED_COLLECTIONS, "SHOW NAMED COLLECTIONS", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) \
-    M(SHOW_NAMED_COLLECTIONS_SECRETS, "SHOW NAMED COLLECTIONS SECRETS", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) \
-    M(NAMED_COLLECTION_CONTROL, "", NAMED_COLLECTION, ALL) \
+    M(SHOW_NAMED_COLLECTIONS, "SHOW NAMED COLLECTIONS", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) \
+    M(SHOW_NAMED_COLLECTIONS_SECRETS, "SHOW NAMED COLLECTIONS SECRETS", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) \
+    M(NAMED_COLLECTION, "NAMED COLLECTION USAGE, USE NAMED COLLECTION", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) \
+    M(NAMED_COLLECTION_ADMIN, "NAMED COLLECTION CONTROL", NAMED_COLLECTION, ALL) \
     \
     M(SYSTEM_SHUTDOWN, "SYSTEM KILL, SHUTDOWN", GLOBAL, SYSTEM) \
     M(SYSTEM_DROP_DNS_CACHE, "SYSTEM DROP DNS, DROP DNS CACHE, DROP DNS", GLOBAL, SYSTEM_DROP_CACHE) \
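The hunk above splits named-collection rights into a usage-level `NAMED COLLECTION` grant and an umbrella `NAMED COLLECTION ADMIN` (with `NAMED COLLECTION CONTROL` kept as an alias). A hedged sketch of the resulting GRANT surface; the user and collection names are hypothetical, and the `ON <collection>` syntax is assumed from the NAMED_COLLECTION access-type level:

``` sql
-- Full control over one named collection.
GRANT NAMED COLLECTION ADMIN ON mycollection TO admin_user;

-- Use-only access to the same collection.
GRANT NAMED COLLECTION ON mycollection TO app_user;
```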
@@ -157,7 +158,6 @@ enum class AccessType
     M(SYSTEM_DROP_CACHE, "DROP CACHE", GROUP, SYSTEM) \
     M(SYSTEM_RELOAD_CONFIG, "RELOAD CONFIG", GLOBAL, SYSTEM_RELOAD) \
     M(SYSTEM_RELOAD_USERS, "RELOAD USERS", GLOBAL, SYSTEM_RELOAD) \
-    M(SYSTEM_RELOAD_SYMBOLS, "RELOAD SYMBOLS", GLOBAL, SYSTEM_RELOAD) \
     M(SYSTEM_RELOAD_DICTIONARY, "SYSTEM RELOAD DICTIONARIES, RELOAD DICTIONARY, RELOAD DICTIONARIES", GLOBAL, SYSTEM_RELOAD) \
     M(SYSTEM_RELOAD_MODEL, "SYSTEM RELOAD MODELS, RELOAD MODEL, RELOAD MODELS", GLOBAL, SYSTEM_RELOAD) \
     M(SYSTEM_RELOAD_FUNCTION, "SYSTEM RELOAD FUNCTIONS, RELOAD FUNCTION, RELOAD FUNCTIONS", GLOBAL, SYSTEM_RELOAD) \
@@ -328,7 +328,7 @@ namespace
 
         if (!named_collection_control)
         {
-            user->access.revoke(AccessType::NAMED_COLLECTION_CONTROL);
+            user->access.revoke(AccessType::NAMED_COLLECTION_ADMIN);
         }
 
         if (!show_named_collections_secrets)
@@ -53,7 +53,7 @@ TEST(AccessRights, Union)
         "SHOW ROW POLICIES, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, "
         "SYSTEM MOVES, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, "
         "SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, "
-        "SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM SYNC DATABASE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*, GRANT NAMED COLLECTION CONTROL ON db1");
+        "SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM SYNC DATABASE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*, GRANT NAMED COLLECTION ADMIN ON db1");
 }
 
 
@@ -51,7 +51,8 @@ private:
     T value = T{};
 
 public:
-    static constexpr bool is_nullable = false;
+    static constexpr bool result_is_nullable = false;
+    static constexpr bool should_skip_null_arguments = true;
     static constexpr bool is_any = false;
 
     bool has() const
@@ -501,7 +502,8 @@ private:
     char small_data[MAX_SMALL_STRING_SIZE]; /// Including the terminating zero.
 
 public:
-    static constexpr bool is_nullable = false;
+    static constexpr bool result_is_nullable = false;
+    static constexpr bool should_skip_null_arguments = true;
    static constexpr bool is_any = false;
 
     bool has() const
@@ -769,7 +771,7 @@ static_assert(
 
 
 /// For any other value types.
-template <bool IS_NULLABLE = false>
+template <bool RESULT_IS_NULLABLE = false>
 struct SingleValueDataGeneric
 {
 private:
@@ -779,12 +781,13 @@ private:
     bool has_value = false;
 
 public:
-    static constexpr bool is_nullable = IS_NULLABLE;
+    static constexpr bool result_is_nullable = RESULT_IS_NULLABLE;
+    static constexpr bool should_skip_null_arguments = !RESULT_IS_NULLABLE;
     static constexpr bool is_any = false;
 
     bool has() const
     {
-        if constexpr (is_nullable)
+        if constexpr (result_is_nullable)
             return has_value;
         return !value.isNull();
     }
@@ -820,14 +823,14 @@ public:
     void change(const IColumn & column, size_t row_num, Arena *)
     {
         column.get(row_num, value);
-        if constexpr (is_nullable)
+        if constexpr (result_is_nullable)
             has_value = true;
     }
 
     void change(const Self & to, Arena *)
     {
         value = to.value;
-        if constexpr (is_nullable)
+        if constexpr (result_is_nullable)
             has_value = true;
     }
 
@@ -844,7 +847,7 @@ public:
 
     bool changeFirstTime(const Self & to, Arena * arena)
     {
-        if (!has() && (is_nullable || to.has()))
+        if (!has() && (result_is_nullable || to.has()))
         {
             change(to, arena);
             return true;
@@ -879,7 +882,7 @@ public:
         }
         else
         {
-            if constexpr (is_nullable)
+            if constexpr (result_is_nullable)
             {
                 Field new_value;
                 column.get(row_num, new_value);
@@ -910,7 +913,7 @@ public:
     {
         if (!to.has())
             return false;
-        if constexpr (is_nullable)
+        if constexpr (result_is_nullable)
        {
             if (!has())
             {
@@ -945,7 +948,7 @@ public:
         }
         else
         {
-            if constexpr (is_nullable)
+            if constexpr (result_is_nullable)
             {
                 Field new_value;
                 column.get(row_num, new_value);
@@ -975,7 +978,7 @@ public:
     {
         if (!to.has())
             return false;
-        if constexpr (is_nullable)
+        if constexpr (result_is_nullable)
         {
             if (!value.isNull() && (to.value.isNull() || value < to.value))
             {
@@ -1138,13 +1141,20 @@ struct AggregateFunctionAnyLastData : Data
 #endif
 };
 
+
+/** The aggregate function 'singleValueOrNull' is used to implement subquery operators,
+  * such as x = ALL (SELECT ...)
+  * It checks if there is only one unique non-NULL value in the data.
+  * If there is only one unique value - returns it.
+  * If there are zero or at least two distinct values - returns NULL.
+  */
 template <typename Data>
 struct AggregateFunctionSingleValueOrNullData : Data
 {
-    static constexpr bool is_nullable = true;
-
     using Self = AggregateFunctionSingleValueOrNullData;
 
+    static constexpr bool result_is_nullable = true;
+
     bool first_value = true;
     bool is_null = false;
 
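The new comment block is easy to check from SQL, since `singleValueOrNull` is an existing ClickHouse aggregate function:

``` sql
SELECT singleValueOrNull(x) FROM (SELECT 1 AS x);                  -- 1 (exactly one distinct value)
SELECT singleValueOrNull(x) FROM (SELECT arrayJoin([1, 2]) AS x);  -- NULL (two distinct values)
```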
@@ -1166,7 +1176,7 @@ struct AggregateFunctionSingleValueOrNullData : Data
         if (!to.has())
             return;
 
-        if (first_value)
+        if (first_value && !to.first_value)
         {
             first_value = false;
             this->change(to, arena);
@@ -1311,7 +1321,7 @@ public:
 
     static DataTypePtr createResultType(const DataTypePtr & type_)
     {
-        if constexpr (Data::is_nullable)
+        if constexpr (Data::result_is_nullable)
             return makeNullable(type_);
         return type_;
     }
@@ -1431,13 +1441,13 @@ public:
     }
 
     AggregateFunctionPtr getOwnNullAdapter(
-        const AggregateFunctionPtr & nested_function,
+        const AggregateFunctionPtr & original_function,
         const DataTypes & /*arguments*/,
         const Array & /*params*/,
         const AggregateFunctionProperties & /*properties*/) const override
     {
-        if (Data::is_nullable)
-            return nested_function;
+        if (Data::result_is_nullable && !Data::should_skip_null_arguments)
+            return original_function;
         return nullptr;
     }
 
@@ -116,7 +116,6 @@ namespace ErrorCodes
     extern const int UNKNOWN_TABLE;
     extern const int ILLEGAL_COLUMN;
     extern const int NUMBER_OF_COLUMNS_DOESNT_MATCH;
-    extern const int FUNCTION_CANNOT_HAVE_PARAMETERS;
 }
 
 /** Query analyzer implementation overview. Please check documentation in QueryAnalysisPass.h first.
@@ -4897,11 +4896,6 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
                 lambda_expression_untyped->formatASTForErrorMessage(),
                 scope.scope_node->formatASTForErrorMessage());
 
-        if (!parameters.empty())
-        {
-            throw Exception(ErrorCodes::FUNCTION_CANNOT_HAVE_PARAMETERS, "Function {} is not parametric", function_node.formatASTForErrorMessage());
-        }
-
         auto lambda_expression_clone = lambda_expression_untyped->clone();
 
         IdentifierResolveScope lambda_scope(lambda_expression_clone, &scope /*parent_scope*/);
@@ -5018,12 +5012,9 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
     }
 
     FunctionOverloadResolverPtr function = UserDefinedExecutableFunctionFactory::instance().tryGet(function_name, scope.context, parameters);
-    bool is_executable_udf = false;
 
     if (!function)
         function = FunctionFactory::instance().tryGet(function_name, scope.context);
-    else
-        is_executable_udf = true;
 
     if (!function)
     {
@@ -5074,12 +5065,6 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
         return result_projection_names;
     }
 
-    /// Executable UDFs may have parameters. They are checked in UserDefinedExecutableFunctionFactory.
-    if (!parameters.empty() && !is_executable_udf)
-    {
-        throw Exception(ErrorCodes::FUNCTION_CANNOT_HAVE_PARAMETERS, "Function {} is not parametric", function_name);
-    }
-
     /** For lambda arguments we need to initialize lambda argument types DataTypeFunction using `getLambdaArgumentTypes` function.
      * Then each lambda arguments are initialized with columns, where column source is lambda.
      * This information is important for later steps of query processing.
@@ -253,6 +253,7 @@ std::unique_ptr<WriteBuffer> BackupWriterS3::writeFile(const String & file_name)
 {
     return std::make_unique<WriteBufferFromS3>(
         client,
+        client, // already has long timeout
         s3_uri.bucket,
         fs::path(s3_uri.key) / file_name,
         DBMS_DEFAULT_BUFFER_SIZE,
@@ -24,7 +24,7 @@ protected:
         /// Make local disk.
         temp_dir = std::make_unique<Poco::TemporaryFile>();
         temp_dir->createDirectories();
-        local_disk = std::make_shared<DiskLocal>("local_disk", temp_dir->path() + "/", 0);
+        local_disk = std::make_shared<DiskLocal>("local_disk", temp_dir->path() + "/");

         /// Make encrypted disk.
         auto settings = std::make_unique<DiskEncryptedSettings>();
@@ -38,7 +38,7 @@ protected:
         settings->current_key = key;
         settings->current_key_fingerprint = fingerprint;

-        encrypted_disk = std::make_shared<DiskEncrypted>("encrypted_disk", std::move(settings), true);
+        encrypted_disk = std::make_shared<DiskEncrypted>("encrypted_disk", std::move(settings));
     }

     void TearDown() override
@@ -206,11 +206,10 @@ add_library (clickhouse_new_delete STATIC Common/new_delete.cpp)
 target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io)
 if (TARGET ch_contrib::jemalloc)
     target_link_libraries (clickhouse_new_delete PRIVATE ch_contrib::jemalloc)
+    target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::jemalloc)
+    target_link_libraries (clickhouse_storages_system PRIVATE ch_contrib::jemalloc)
 endif()

-if (TARGET ch_contrib::jemalloc)
-    target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::jemalloc)
-endif()
 target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::sparsehash)

 add_subdirectory(Access/Common)
@@ -575,9 +575,11 @@ try
         }

         auto flags = O_WRONLY | O_EXCL;
-        if (query_with_output->is_outfile_append)
+
+        auto file_exists = fs::exists(out_file);
+        if (file_exists && query_with_output->is_outfile_append)
             flags |= O_APPEND;
-        else if (query_with_output->is_outfile_truncate)
+        else if (file_exists && query_with_output->is_outfile_truncate)
             flags |= O_TRUNC;
         else
             flags |= O_CREAT;
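The flag logic above only honours APPEND and TRUNCATE when the target file already exists; otherwise the file is created, and O_EXCL turns accidental reuse of an existing file into an error. A minimal standalone sketch of the same decision (the boolean query flags stand in for the real AST members):

#include <fcntl.h>
#include <filesystem>

/// Sketch only: mirrors the INTO OUTFILE flag selection for a hypothetical path.
int chooseOutFileFlags(const std::filesystem::path & out_file, bool is_append, bool is_truncate)
{
    int flags = O_WRONLY | O_EXCL;
    bool file_exists = std::filesystem::exists(out_file);

    if (file_exists && is_append)
        flags |= O_APPEND;   /// keep existing contents, write at the end
    else if (file_exists && is_truncate)
        flags |= O_TRUNC;    /// discard existing contents
    else
        flags |= O_CREAT;    /// create the file; combined with O_EXCL, racing creation fails loudly
    return flags;
}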
@@ -2297,7 +2299,9 @@ void ClientBase::runInteractive()
         catch (const ErrnoException & e)
         {
             if (e.getErrno() != EEXIST)
-                throw;
+            {
+                std::cerr << getCurrentExceptionMessage(false) << '\n';
+            }
         }
     }

@@ -59,7 +59,15 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
     quota_key = config.getString("quota_key", "");

     /// By default compression is disabled if address looks like localhost.
-    compression = config.getBool("compression", !isLocalAddress(DNSResolver::instance().resolveHost(host)))
+
+    /// Avoid DNS request if the host is "localhost".
+    /// If ClickHouse is run under QEMU-user with a binary for a different architecture,
+    /// and there are all listed startup dependency shared libraries available, but not the runtime dependencies of glibc,
+    /// the glibc cannot open "plugins" for DNS resolving, and the DNS resolution does not work.
+    /// At the same time, I want clickhouse-local to always work, regardless.
+    /// TODO: get rid of glibc, or replace getaddrinfo to c-ares.
+
+    compression = config.getBool("compression", host != "localhost" && !isLocalAddress(DNSResolver::instance().resolveHost(host)))
         ? Protocol::Compression::Enable : Protocol::Compression::Disable;

     timeouts = ConnectionTimeouts(
@@ -101,9 +101,8 @@ static String getLoadSuggestionQuery(Int32 suggestion_limit, bool basic_suggesti
         add_column("name", "columns", true, suggestion_limit);
     }

-    /// FIXME: Forbid this query using new analyzer because of bug https://github.com/ClickHouse/ClickHouse/issues/50669
-    /// We should remove this restriction after resolving this bug.
-    query = "SELECT DISTINCT arrayJoin(extractAll(name, '[\\\\w_]{2,}')) AS res FROM (" + query + ") WHERE notEmpty(res) SETTINGS allow_experimental_analyzer=0";
+    /// FIXME: This query does not work with the new analyzer because of bug https://github.com/ClickHouse/ClickHouse/issues/50669
+    query = "SELECT DISTINCT arrayJoin(extractAll(name, '[\\\\w_]{2,}')) AS res FROM (" + query + ") WHERE notEmpty(res)";
     return query;
 }

@@ -107,8 +107,8 @@ struct FloatCompareHelper
     }
 };

-template <class U> struct CompareHelper<Float32, U> : public FloatCompareHelper<Float32> {};
-template <class U> struct CompareHelper<Float64, U> : public FloatCompareHelper<Float64> {};
+template <typename U> struct CompareHelper<Float32, U> : public FloatCompareHelper<Float32> {};
+template <typename U> struct CompareHelper<Float64, U> : public FloatCompareHelper<Float64> {};


 /** A template for columns that use a simple array to store.
@@ -8,7 +8,7 @@
  * See also: https://gcc.gnu.org/legacy-ml/gcc-help/2017-12/msg00021.html
  */
 #ifdef NDEBUG
-    __attribute__((__weak__)) extern const size_t MMAP_THRESHOLD = 64 * (1ULL << 20);
+    __attribute__((__weak__)) extern const size_t MMAP_THRESHOLD = 128 * (1ULL << 20);
 #else
 /**
  * In debug build, use small mmap threshold to reproduce more memory
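MMAP_THRESHOLD is declared weak so that another translation unit can override the constant at link time; raising it from 64 MiB to 128 MiB sends fewer allocations down the mmap path. A hypothetical sketch of how an allocator consults such a threshold (error handling omitted; note that mmap actually reports failure as MAP_FAILED):

#include <cstddef>
#include <cstdlib>
#include <sys/mman.h>

extern const size_t MMAP_THRESHOLD;

/// Large blocks go through mmap so munmap returns them to the OS immediately;
/// small ones stay on the cheaper malloc path.
void * allocateSketch(size_t size)
{
    if (size >= MMAP_THRESHOLD)
        return mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return malloc(size);
}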
@@ -2,7 +2,7 @@


 /// Available metrics. Add something here as you wish.
-#define APPLY_FOR_METRICS(M) \
+#define APPLY_FOR_BUILTIN_METRICS(M) \
     M(Query, "Number of executing queries") \
     M(Merge, "Number of executing background merges") \
     M(Move, "Number of currently executing moves") \
|
|||||||
M(ThreadPoolFSReaderThreadsActive, "Number of threads in the thread pool for local_filesystem_read_method=threadpool running a task.") \
|
M(ThreadPoolFSReaderThreadsActive, "Number of threads in the thread pool for local_filesystem_read_method=threadpool running a task.") \
|
||||||
M(BackupsIOThreads, "Number of threads in the BackupsIO thread pool.") \
|
M(BackupsIOThreads, "Number of threads in the BackupsIO thread pool.") \
|
||||||
M(BackupsIOThreadsActive, "Number of threads in the BackupsIO thread pool running a task.") \
|
M(BackupsIOThreadsActive, "Number of threads in the BackupsIO thread pool running a task.") \
|
||||||
M(DiskObjectStorageAsyncThreads, "Number of threads in the async thread pool for DiskObjectStorage.") \
|
M(DiskObjectStorageAsyncThreads, "Obsolete metric, shows nothing.") \
|
||||||
M(DiskObjectStorageAsyncThreadsActive, "Number of threads in the async thread pool for DiskObjectStorage running a task.") \
|
M(DiskObjectStorageAsyncThreadsActive, "Obsolete metric, shows nothing.") \
|
||||||
M(StorageHiveThreads, "Number of threads in the StorageHive thread pool.") \
|
M(StorageHiveThreads, "Number of threads in the StorageHive thread pool.") \
|
||||||
M(StorageHiveThreadsActive, "Number of threads in the StorageHive thread pool running a task.") \
|
M(StorageHiveThreadsActive, "Number of threads in the StorageHive thread pool running a task.") \
|
||||||
M(TablesLoaderThreads, "Number of threads in the tables loader thread pool.") \
|
M(TablesLoaderThreads, "Number of threads in the tables loader thread pool.") \
|
||||||
@ -141,6 +141,8 @@
|
|||||||
M(MergeTreeOutdatedPartsLoaderThreadsActive, "Number of active threads in the threadpool for loading Outdated data parts.") \
|
M(MergeTreeOutdatedPartsLoaderThreadsActive, "Number of active threads in the threadpool for loading Outdated data parts.") \
|
||||||
M(MergeTreePartsCleanerThreads, "Number of threads in the MergeTree parts cleaner thread pool.") \
|
M(MergeTreePartsCleanerThreads, "Number of threads in the MergeTree parts cleaner thread pool.") \
|
||||||
M(MergeTreePartsCleanerThreadsActive, "Number of threads in the MergeTree parts cleaner thread pool running a task.") \
|
M(MergeTreePartsCleanerThreadsActive, "Number of threads in the MergeTree parts cleaner thread pool running a task.") \
|
||||||
|
M(IDiskCopierThreads, "Number of threads for copying data between disks of different types.") \
|
||||||
|
M(IDiskCopierThreadsActive, "Number of threads for copying data between disks of different types running a task.") \
|
||||||
M(SystemReplicasThreads, "Number of threads in the system.replicas thread pool.") \
|
M(SystemReplicasThreads, "Number of threads in the system.replicas thread pool.") \
|
||||||
M(SystemReplicasThreadsActive, "Number of threads in the system.replicas thread pool running a task.") \
|
M(SystemReplicasThreadsActive, "Number of threads in the system.replicas thread pool running a task.") \
|
||||||
M(RestartReplicaThreads, "Number of threads in the RESTART REPLICA thread pool.") \
|
M(RestartReplicaThreads, "Number of threads in the RESTART REPLICA thread pool.") \
|
||||||
@ -200,7 +202,13 @@
|
|||||||
M(MergeTreeReadTaskRequestsSent, "The current number of callback requests in flight from the remote server back to the initiator server to choose the read task (for MergeTree tables). Measured on the remote server side.") \
|
M(MergeTreeReadTaskRequestsSent, "The current number of callback requests in flight from the remote server back to the initiator server to choose the read task (for MergeTree tables). Measured on the remote server side.") \
|
||||||
M(MergeTreeAllRangesAnnouncementsSent, "The current number of announcement being sent in flight from the remote server to the initiator server about the set of data parts (for MergeTree tables). Measured on the remote server side.") \
|
M(MergeTreeAllRangesAnnouncementsSent, "The current number of announcement being sent in flight from the remote server to the initiator server about the set of data parts (for MergeTree tables). Measured on the remote server side.") \
|
||||||
M(CreatedTimersInQueryProfiler, "Number of Created thread local timers in QueryProfiler") \
|
M(CreatedTimersInQueryProfiler, "Number of Created thread local timers in QueryProfiler") \
|
||||||
M(ActiveTimersInQueryProfiler, "Number of Active thread local timers in QueryProfiler")
|
M(ActiveTimersInQueryProfiler, "Number of Active thread local timers in QueryProfiler") \
|
||||||
|
|
||||||
|
#ifdef APPLY_FOR_EXTERNAL_METRICS
|
||||||
|
#define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) APPLY_FOR_EXTERNAL_METRICS(M)
|
||||||
|
#else
|
||||||
|
#define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M)
|
||||||
|
#endif
|
||||||
|
|
||||||
namespace CurrentMetrics
|
namespace CurrentMetrics
|
||||||
{
|
{
|
||||||
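Splitting APPLY_FOR_METRICS into APPLY_FOR_BUILTIN_METRICS plus an optional APPLY_FOR_EXTERNAL_METRICS is a standard X-macro extension point: code that embeds ClickHouse can inject extra entries, and they are then expanded everywhere APPLY_FOR_METRICS(M) is instantiated (enum declarations, name tables, documentation strings). The same pattern is applied to error codes and profile events in hunks below. A hypothetical external definition (names invented for illustration):

/// Supplied by the embedder before this translation unit is compiled.
#define APPLY_FOR_EXTERNAL_METRICS(M) \
    M(MyPluginQueueSize, "Number of items queued by my plugin") \
    M(MyPluginActiveWorkers, "Number of plugin workers running a task")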
@@ -13,7 +13,7 @@
  * - system.errors table
  */

-#define APPLY_FOR_ERROR_CODES(M) \
+#define APPLY_FOR_BUILTIN_ERROR_CODES(M) \
     M(0, OK) \
     M(1, UNSUPPORTED_METHOD) \
     M(2, UNSUPPORTED_PARAMETER) \
@@ -589,6 +589,12 @@
     M(1002, UNKNOWN_EXCEPTION) \
     /* See END */

+#ifdef APPLY_FOR_EXTERNAL_ERROR_CODES
+    #define APPLY_FOR_ERROR_CODES(M) APPLY_FOR_BUILTIN_ERROR_CODES(M) APPLY_FOR_EXTERNAL_ERROR_CODES(M)
+#else
+    #define APPLY_FOR_ERROR_CODES(M) APPLY_FOR_BUILTIN_ERROR_CODES(M)
+#endif
+
 namespace DB
 {
 namespace ErrorCodes
@@ -418,6 +418,18 @@ PreformattedMessage getCurrentExceptionMessageAndPattern(bool with_stacktrace, b
             << " (version " << VERSION_STRING << VERSION_OFFICIAL << ")";
     }
     catch (...) {}

+// #ifdef ABORT_ON_LOGICAL_ERROR
+//     try
+//     {
+//         throw;
+//     }
+//     catch (const std::logic_error &)
+//     {
+//         abortOnFailedAssertion(stream.str());
+//     }
+//     catch (...) {}
+// #endif
 }
 catch (...)
 {
@@ -1,3 +1,4 @@
+#include <Common/DateLUT.h>
 #include <Common/LoggingFormatStringHelpers.h>
 #include <Common/SipHash.h>
 #include <Common/thread_local_rng.h>
|
|||||||
std::erase_if(logged_messages, [old](const auto & elem) { return elem.second.first < old; });
|
std::erase_if(logged_messages, [old](const auto & elem) { return elem.second.first < old; });
|
||||||
last_cleanup = now;
|
last_cleanup = now;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
std::mutex LogSeriesLimiter::mutex;
|
||||||
|
time_t LogSeriesLimiter::last_cleanup = 0;
|
||||||
|
|
||||||
|
LogSeriesLimiter::LogSeriesLimiter(Poco::Logger * logger_, size_t allowed_count_, time_t interval_s_)
|
||||||
|
: logger(logger_)
|
||||||
|
{
|
||||||
|
if (allowed_count_ == 0)
|
||||||
|
{
|
||||||
|
accepted = false;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (interval_s_ == 0)
|
||||||
|
{
|
||||||
|
accepted = true;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
time_t now = time(nullptr);
|
||||||
|
UInt128 name_hash = sipHash128(logger->name().c_str(), logger->name().size());
|
||||||
|
|
||||||
|
std::lock_guard lock(mutex);
|
||||||
|
|
||||||
|
if (last_cleanup == 0)
|
||||||
|
last_cleanup = now;
|
||||||
|
|
||||||
|
auto & series_records = getSeriesRecords();
|
||||||
|
|
||||||
|
static const time_t cleanup_delay_s = 600;
|
||||||
|
if (last_cleanup + cleanup_delay_s >= now)
|
||||||
|
{
|
||||||
|
time_t old = now - cleanup_delay_s;
|
||||||
|
std::erase_if(series_records, [old](const auto & elem) { return get<0>(elem.second) < old; });
|
||||||
|
last_cleanup = now;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto register_as_first = [&] () TSA_REQUIRES(mutex)
|
||||||
|
{
|
||||||
|
assert(allowed_count_ > 0);
|
||||||
|
accepted = true;
|
||||||
|
series_records[name_hash] = std::make_tuple(now, 1, 1);
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!series_records.contains(name_hash))
|
||||||
|
{
|
||||||
|
register_as_first();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto & [last_time, accepted_count, total_count] = series_records[name_hash];
|
||||||
|
if (last_time + interval_s_ <= now)
|
||||||
|
{
|
||||||
|
debug_message = fmt::format(
|
||||||
|
" (LogSeriesLimiter: on interval from {} to {} accepted series {} / {} for the logger {} : {})",
|
||||||
|
DateLUT::instance().timeToString(last_time),
|
||||||
|
DateLUT::instance().timeToString(now),
|
||||||
|
accepted_count,
|
||||||
|
total_count,
|
||||||
|
logger->name(),
|
||||||
|
double(name_hash));
|
||||||
|
|
||||||
|
register_as_first();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (accepted_count < allowed_count_)
|
||||||
|
{
|
||||||
|
accepted = true;
|
||||||
|
++accepted_count;
|
||||||
|
}
|
||||||
|
++total_count;
|
||||||
|
}
|
||||||
|
|
||||||
|
void LogSeriesLimiter::log(Poco::Message & message)
|
||||||
|
{
|
||||||
|
std::string_view pattern = message.getFormatString();
|
||||||
|
if (pattern.empty())
|
||||||
|
{
|
||||||
|
/// Do not filter messages without a format string
|
||||||
|
if (auto * channel = logger->getChannel())
|
||||||
|
channel->log(message);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!accepted)
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (!debug_message.empty())
|
||||||
|
{
|
||||||
|
message.appendText(debug_message);
|
||||||
|
debug_message.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (auto * channel = logger->getChannel())
|
||||||
|
channel->log(message);
|
||||||
|
}
|
||||||
|
@@ -191,6 +191,41 @@ public:
     Poco::Logger * getLogger() { return logger; }
 };

+/// This wrapper helps to avoid too noisy log messages from similar objects.
+/// Once an instance of LogSeriesLimiter is created, the decision is made:
+/// all subsequent messages that use this instance are either printed or muted altogether.
+/// LogSeriesLimiter differs from LogFrequencyLimiterIml in that LogSeriesLimiter
+/// accepts or mutes a series of logs as a whole, while LogFrequencyLimiterIml works on each line independently.
+class LogSeriesLimiter
+{
+    static std::mutex mutex;
+    static time_t last_cleanup;
+
+    /// Hash(logger_name) -> (last_logged_time_s, accepted_count, total_count)
+    using SeriesRecords = std::unordered_map<UInt64, std::tuple<time_t, size_t, size_t>>;
+
+    static SeriesRecords & getSeriesRecords() TSA_REQUIRES(mutex)
+    {
+        static SeriesRecords records;
+        return records;
+    }
+
+    Poco::Logger * logger = nullptr;
+    bool accepted = false;
+    String debug_message;
+public:
+    LogSeriesLimiter(Poco::Logger * logger_, size_t allowed_count_, time_t interval_s_);
+
+    LogSeriesLimiter & operator -> () { return *this; }
+    bool is(Poco::Message::Priority priority) { return logger->is(priority); }
+    LogSeriesLimiter * getChannel() { return this; }
+    const String & name() const { return logger->name(); }
+
+    void log(Poco::Message & message);
+
+    Poco::Logger * getLogger() { return logger; }
+};
+
 /// This wrapper is useful to save formatted message into a String before sending it to a logger
 class LogToStrImpl
 {
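A sketch of the intended usage, assuming the getLogger() overload for LogSeriesLimiterPtr added near the end of this commit: at most allowed_count messages per interval are accepted for a given logger's series, and the whole series is printed or muted together.

#include <Common/logger_useful.h>

void pollForWork(Poco::Logger * log)
{
    /// Accept at most one message per 60-second interval for this series.
    LogSeriesLimiterPtr limited_log = std::make_shared<LogSeriesLimiter>(log, 1, 60);

    while (true)
    {
        /// Routed through LogSeriesLimiter::log(); muted messages are dropped silently.
        LOG_TRACE(limited_log, "Nothing to do, sleeping");
        break;   /// sketch only
    }
}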
src/Common/MemoryTrackerSwitcher.h (new file, 42 lines)
@@ -0,0 +1,42 @@
+#pragma once
+
+#include <Common/CurrentThread.h>
+#include <Common/MemoryTracker.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int LOGICAL_ERROR;
+}
+
+struct MemoryTrackerSwitcher
+{
+    explicit MemoryTrackerSwitcher(MemoryTracker * new_tracker)
+    {
+        if (!current_thread)
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "current_thread is not initialized");
+
+        auto * thread_tracker = CurrentThread::getMemoryTracker();
+        prev_untracked_memory = current_thread->untracked_memory;
+        prev_memory_tracker_parent = thread_tracker->getParent();
+
+        current_thread->untracked_memory = 0;
+        thread_tracker->setParent(new_tracker);
+    }
+
+    ~MemoryTrackerSwitcher()
+    {
+        CurrentThread::flushUntrackedMemory();
+        auto * thread_tracker = CurrentThread::getMemoryTracker();
+
+        current_thread->untracked_memory = prev_untracked_memory;
+        thread_tracker->setParent(prev_memory_tracker_parent);
+    }
+
+    MemoryTracker * prev_memory_tracker_parent = nullptr;
+    Int64 prev_untracked_memory = 0;
+};
+
+}
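MemoryTrackerSwitcher is a plain RAII guard: the constructor re-parents the current thread's MemoryTracker, and the destructor flushes untracked memory and restores the previous parent. A usage sketch (the tracker argument is a stand-in for whatever tracker the caller wants allocations attributed to):

#include <vector>
#include <Common/MemoryTrackerSwitcher.h>

void runDetachedWork(MemoryTracker & task_tracker)
{
    DB::MemoryTrackerSwitcher switcher(&task_tracker);
    /// Allocations in this scope are accounted to task_tracker.
    std::vector<char> buffer(1 << 20);
    (void)buffer;
}   /// switcher's destructor restores the previous tracker parent here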
@@ -540,7 +540,7 @@ bool OptimizedRegularExpressionImpl<thread_safe>::match(const char * subject, si
         }
     }

-    return re2->Match(StringPieceType(subject, subject_size), 0, subject_size, RegexType::UNANCHORED, nullptr, 0);
+    return re2->Match({subject, subject_size}, 0, subject_size, RegexType::UNANCHORED, nullptr, 0);
 }
 }

|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
StringPieceType piece;
|
std::string_view piece;
|
||||||
|
|
||||||
if (!RegexType::PartialMatch(StringPieceType(subject, subject_size), *re2, &piece))
|
if (!RegexType::PartialMatch({subject, subject_size}, *re2, &piece))
|
||||||
return false;
|
return false;
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
@ -652,10 +652,10 @@ unsigned OptimizedRegularExpressionImpl<thread_safe>::match(const char * subject
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
DB::PODArrayWithStackMemory<StringPieceType, 128> pieces(limit);
|
DB::PODArrayWithStackMemory<std::string_view, 128> pieces(limit);
|
||||||
|
|
||||||
if (!re2->Match(
|
if (!re2->Match(
|
||||||
StringPieceType(subject, subject_size),
|
{subject, subject_size},
|
||||||
0,
|
0,
|
||||||
subject_size,
|
subject_size,
|
||||||
RegexType::UNANCHORED,
|
RegexType::UNANCHORED,
|
||||||
|
@ -52,7 +52,6 @@ public:
|
|||||||
using MatchVec = std::vector<Match>;
|
using MatchVec = std::vector<Match>;
|
||||||
|
|
||||||
using RegexType = std::conditional_t<thread_safe, re2::RE2, re2_st::RE2>;
|
using RegexType = std::conditional_t<thread_safe, re2::RE2, re2_st::RE2>;
|
||||||
using StringPieceType = std::conditional_t<thread_safe, re2::StringPiece, re2_st::StringPiece>;
|
|
||||||
|
|
||||||
OptimizedRegularExpressionImpl(const std::string & regexp_, int options = 0); /// NOLINT
|
OptimizedRegularExpressionImpl(const std::string & regexp_, int options = 0); /// NOLINT
|
||||||
/// StringSearcher store pointers to required_substring, it must be updated on move.
|
/// StringSearcher store pointers to required_substring, it must be updated on move.
|
||||||
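The StringPiece-to-string_view replacement in these hunks relies on recent RE2 releases aliasing re2::StringPiece to absl::string_view, which under C++17 is std::string_view itself; brace-initialization such as {subject, subject_size} and capturing a submatch into a std::string_view then compile unchanged. A minimal sketch under that assumption:

#include <re2/re2.h>
#include <string_view>

/// Assumes an RE2 build where StringPiece == absl::string_view == std::string_view.
bool firstLowercaseWord(const char * subject, size_t subject_size, std::string_view & out)
{
    static const re2::RE2 re("([a-z]+)");
    return re2::RE2::PartialMatch({subject, subject_size}, re, &out);   /// out points into subject
}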
@@ -1,9 +1,11 @@
 #pragma once

-#include <mutex>
 #include <condition_variable>
-#include <Poco/Timespan.h>
+#include <mutex>
+#include <type_traits>
+#include <variant>
 #include <boost/noncopyable.hpp>
+#include <Poco/Timespan.h>

 #include <Common/logger_useful.h>
 #include <Common/Exception.h>
|
|||||||
extern const Event ConnectionPoolIsFullMicroseconds;
|
extern const Event ConnectionPoolIsFullMicroseconds;
|
||||||
}
|
}
|
||||||
|
|
||||||
namespace DB
|
|
||||||
{
|
|
||||||
namespace ErrorCodes
|
|
||||||
{
|
|
||||||
extern const int LOGICAL_ERROR;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/** A class from which you can inherit and get a pool of something. Used for database connection pools.
|
/** A class from which you can inherit and get a pool of something. Used for database connection pools.
|
||||||
* Descendant class must provide a method for creating a new object to place in the pool.
|
* Descendant class must provide a method for creating a new object to place in the pool.
|
||||||
*/
|
*/
|
||||||
@ -35,6 +29,22 @@ public:
|
|||||||
using ObjectPtr = std::shared_ptr<Object>;
|
using ObjectPtr = std::shared_ptr<Object>;
|
||||||
using Ptr = std::shared_ptr<PoolBase<TObject>>;
|
using Ptr = std::shared_ptr<PoolBase<TObject>>;
|
||||||
|
|
||||||
|
enum class BehaviourOnLimit
|
||||||
|
{
|
||||||
|
/**
|
||||||
|
* Default behaviour - when limit on pool size is reached, callers will wait until object will be returned back in pool.
|
||||||
|
*/
|
||||||
|
Wait,
|
||||||
|
|
||||||
|
/**
|
||||||
|
* If no free objects in pool - allocate a new object, but not store it in pool.
|
||||||
|
* This behaviour is needed when we simply don't want to waste time waiting or if we cannot guarantee that query could be processed using fixed amount of connections.
|
||||||
|
* For example, when we read from table on s3, one GetObject request corresponds to the whole FileSystemCache segment. This segments are shared between different
|
||||||
|
* reading tasks, so in general case connection could be taken from pool by one task and returned back by another one. And these tasks are processed completely independently.
|
||||||
|
*/
|
||||||
|
AllocateNewBypassingPool,
|
||||||
|
};
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
|
||||||
/** The object with the flag, whether it is currently used. */
|
/** The object with the flag, whether it is currently used. */
|
||||||
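A sketch of a descendant pool opting in to the new fallback; the connection type and allocObject() body are invented, and the three-argument constructor appears in a later hunk of this file:

struct MyConnection { /* ... */ };

class MyConnectionPool : public PoolBase<MyConnection>
{
public:
    explicit MyConnectionPool(unsigned max_items_)
        : PoolBase<MyConnection>(
            max_items_,
            &Poco::Logger::get("MyConnectionPool"),
            BehaviourOnLimit::AllocateNewBypassingPool)
    {}

private:
    /// Called by the pool whenever a fresh object is needed.
    ObjectPtr allocObject() override { return std::make_shared<MyConnection>(); }
};

/// MyConnectionPool pool(16);
/// auto entry = pool.get(/* timeout = */ -1);   /// over the limit: a bypassing, unpooled entry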
@@ -89,37 +99,53 @@ public:
         Object & operator*() && = delete;
         const Object & operator*() const && = delete;

-        Object * operator->() & { return &*data->data.object; }
-        const Object * operator->() const & { return &*data->data.object; }
-        Object & operator*() & { return *data->data.object; }
-        const Object & operator*() const & { return *data->data.object; }
+        Object * operator->() & { return castToObjectPtr(); }
+        const Object * operator->() const & { return castToObjectPtr(); }
+        Object & operator*() & { return *castToObjectPtr(); }
+        const Object & operator*() const & { return *castToObjectPtr(); }

         /**
          * Expire an object to make it reallocated later.
          */
         void expire()
         {
-            data->data.is_expired = true;
+            if (data.index() == 1)
+                std::get<1>(data)->data.is_expired = true;
         }

-        bool isNull() const { return data == nullptr; }
+        bool isNull() const { return data.index() == 0 ? !std::get<0>(data) : !std::get<1>(data); }

-        PoolBase * getPool() const
-        {
-            if (!data)
-                throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Attempt to get pool from uninitialized entry");
-            return &data->data.pool;
-        }
-
     private:
-        std::shared_ptr<PoolEntryHelper> data;
+        /**
+         * A plain object is stored instead of PoolEntryHelper if the fallback was taken in get() (see BehaviourOnLimit::AllocateNewBypassingPool).
+         */
+        std::variant<ObjectPtr, std::shared_ptr<PoolEntryHelper>> data;
+
+        explicit Entry(ObjectPtr && object) : data(std::move(object)) { }

         explicit Entry(PooledObject & object) : data(std::make_shared<PoolEntryHelper>(object)) { }

+        auto castToObjectPtr() const
+        {
+            return std::visit(
+                [](const auto & ptr)
+                {
+                    using T = std::decay_t<decltype(ptr)>;
+                    if constexpr (std::is_same_v<ObjectPtr, T>)
+                        return ptr.get();
+                    else
+                        return ptr->data.object.get();
+                },
+                data);
+        }
     };

     virtual ~PoolBase() = default;

-    /** Allocates the object. Wait for free object in pool for 'timeout'. With 'timeout' < 0, the timeout is infinite. */
+    /** Allocates the object.
+     * If 'behaviour_on_limit' is Wait, wait for a free object in the pool for 'timeout'. With 'timeout' < 0, the timeout is infinite.
+     * If 'behaviour_on_limit' is AllocateNewBypassingPool and there is no free object, a new object will be created but not stored in the pool.
+     */
     Entry get(Poco::Timespan::TimeDiff timeout)
     {
         std::unique_lock lock(mutex);
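The castToObjectPtr() helper above is the classic std::visit-with-if-constexpr pattern: one variant alternative holds the object directly (pool bypassed), the other holds the pool's helper wrapper, and both collapse to the same raw pointer. The same idea in a self-contained form:

#include <memory>
#include <type_traits>
#include <variant>

struct Object {};
struct Helper { std::shared_ptr<Object> object; };

Object * rawObject(const std::variant<std::shared_ptr<Object>, std::shared_ptr<Helper>> & data)
{
    return std::visit([](const auto & ptr) -> Object *
    {
        using T = std::decay_t<decltype(ptr)>;
        if constexpr (std::is_same_v<T, std::shared_ptr<Object>>)
            return ptr.get();          /// bypassing entry: plain object
        else
            return ptr->object.get();  /// pooled entry: unwrap the helper
    }, data);
}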
@@ -150,6 +176,9 @@ public:
             return Entry(*items.back());
         }

+        if (behaviour_on_limit == BehaviourOnLimit::AllocateNewBypassingPool)
+            return Entry(allocObject());
+
         Stopwatch blocked;
         if (timeout < 0)
         {
@@ -184,6 +213,8 @@ private:
     /** The maximum size of the pool. */
     unsigned max_items;

+    BehaviourOnLimit behaviour_on_limit;
+
     /** Pool. */
     Objects items;

|
|||||||
std::condition_variable available;
|
std::condition_variable available;
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
|
||||||
Poco::Logger * log;
|
Poco::Logger * log;
|
||||||
|
|
||||||
PoolBase(unsigned max_items_, Poco::Logger * log_)
|
PoolBase(unsigned max_items_, Poco::Logger * log_, BehaviourOnLimit behaviour_on_limit_ = BehaviourOnLimit::Wait)
|
||||||
: max_items(max_items_), log(log_)
|
: max_items(max_items_), behaviour_on_limit(behaviour_on_limit_), log(log_)
|
||||||
{
|
{
|
||||||
items.reserve(max_items);
|
items.reserve(max_items);
|
||||||
}
|
}
|
||||||
|
@@ -4,7 +4,7 @@


 /// Available events. Add something here as you wish.
-#define APPLY_FOR_EVENTS(M) \
+#define APPLY_FOR_BUILTIN_EVENTS(M) \
     M(Query, "Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries.") \
     M(SelectQuery, "Same as Query, but only for SELECT queries.") \
     M(InsertQuery, "Same as Query, but only for INSERT queries.") \
@@ -368,6 +368,10 @@ The server successfully detected this situation and will download merged part fr
     M(ReadBufferFromS3InitMicroseconds, "Time spent initializing connection to S3.") \
     M(ReadBufferFromS3Bytes, "Bytes read from S3.") \
     M(ReadBufferFromS3RequestsErrors, "Number of exceptions while reading from S3.") \
+    M(ReadBufferFromS3ResetSessions, "Number of HTTP sessions that were reset in ReadBufferFromS3.") \
+    M(ReadBufferFromS3PreservedSessions, "Number of HTTP sessions that were preserved in ReadBufferFromS3.") \
+    \
+    M(ReadWriteBufferFromHTTPPreservedSessions, "Number of HTTP sessions that were preserved in ReadWriteBufferFromHTTP.") \
     \
     M(WriteBufferFromS3Microseconds, "Time spent on writing to S3.") \
     M(WriteBufferFromS3Bytes, "Bytes written to S3.") \
@@ -536,6 +540,11 @@ The server successfully detected this situation and will download merged part fr
     M(LogError, "Number of log messages with level Error") \
     M(LogFatal, "Number of log messages with level Fatal") \

+#ifdef APPLY_FOR_EXTERNAL_EVENTS
+    #define APPLY_FOR_EVENTS(M) APPLY_FOR_BUILTIN_EVENTS(M) APPLY_FOR_EXTERNAL_EVENTS(M)
+#else
+    #define APPLY_FOR_EVENTS(M) APPLY_FOR_BUILTIN_EVENTS(M)
+#endif
+
 namespace ProfileEvents
 {
@@ -91,7 +91,7 @@ namespace ErrorCodes
     extern const int NOT_IMPLEMENTED;
 }

-#if USE_UNWIND
+#ifndef __APPLE__
 Timer::Timer()
     : log(&Poco::Logger::get("Timer"))
 {}
|
|||||||
throw Exception(ErrorCodes::CANNOT_CREATE_TIMER, "Failed to create thread timer. The function "
|
throw Exception(ErrorCodes::CANNOT_CREATE_TIMER, "Failed to create thread timer. The function "
|
||||||
"'timer_create' returned non-zero but didn't set errno. This is bug in your OS.");
|
"'timer_create' returned non-zero but didn't set errno. This is bug in your OS.");
|
||||||
|
|
||||||
|
/// For example, it cannot be created if the server is run under QEMU:
|
||||||
|
/// "Failed to create thread timer, errno: 11, strerror: Resource temporarily unavailable."
|
||||||
|
|
||||||
|
/// You could accidentally run the server under QEMU without being aware,
|
||||||
|
/// if you use Docker image for a different architecture,
|
||||||
|
/// and you have the "binfmt-misc" kernel module, and "qemu-user" tools.
|
||||||
|
|
||||||
|
/// Also, it cannot be created if the server has too many threads.
|
||||||
|
|
||||||
throwFromErrno("Failed to create thread timer", ErrorCodes::CANNOT_CREATE_TIMER);
|
throwFromErrno("Failed to create thread timer", ErrorCodes::CANNOT_CREATE_TIMER);
|
||||||
}
|
}
|
||||||
timer_id.emplace(local_timer_id);
|
timer_id.emplace(local_timer_id);
|
||||||
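For context, here is the per-thread profiling timer reduced to the bare POSIX calls; the signal number and clock are illustrative, not the values used above. Under QEMU-user, or when the process hits a kernel limit on timers or threads, timer_create() fails with EAGAIN ("Resource temporarily unavailable"), which is exactly the situation the new comments describe.

#include <errno.h>
#include <signal.h>
#include <time.h>

bool tryCreateProfilingTimer(timer_t & out_timer)
{
    sigevent sev{};
    sev.sigev_notify = SIGEV_SIGNAL;
    sev.sigev_signo = SIGALRM;   /// the real code uses a dedicated pause signal

    if (0 == timer_create(CLOCK_MONOTONIC, &sev, &out_timer))
        return true;
    /// errno == EAGAIN here typically means QEMU-user or a kernel limit on timers/threads.
    return false;
}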
@ -200,13 +209,13 @@ QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(UInt64 thread_id, int clock_t
|
|||||||
UNUSED(pause_signal);
|
UNUSED(pause_signal);
|
||||||
|
|
||||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler disabled because they cannot work under sanitizers");
|
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler disabled because they cannot work under sanitizers");
|
||||||
#elif !USE_UNWIND
|
#elif defined(__APPLE__)
|
||||||
UNUSED(thread_id);
|
UNUSED(thread_id);
|
||||||
UNUSED(clock_type);
|
UNUSED(clock_type);
|
||||||
UNUSED(period);
|
UNUSED(period);
|
||||||
UNUSED(pause_signal);
|
UNUSED(pause_signal);
|
||||||
|
|
||||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler cannot work with stock libunwind");
|
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler cannot work on OSX");
|
||||||
#else
|
#else
|
||||||
/// Sanity check.
|
/// Sanity check.
|
||||||
if (!hasPHDRCache())
|
if (!hasPHDRCache())
|
||||||
@ -255,7 +264,7 @@ QueryProfilerBase<ProfilerImpl>::~QueryProfilerBase()
|
|||||||
template <typename ProfilerImpl>
|
template <typename ProfilerImpl>
|
||||||
void QueryProfilerBase<ProfilerImpl>::cleanup()
|
void QueryProfilerBase<ProfilerImpl>::cleanup()
|
||||||
{
|
{
|
||||||
#if USE_UNWIND
|
#ifndef __APPLE__
|
||||||
timer.stop();
|
timer.stop();
|
||||||
signal_handler_disarmed = true;
|
signal_handler_disarmed = true;
|
||||||
#endif
|
#endif
|
||||||
|
@@ -28,7 +28,7 @@ namespace DB
  * Note that signal handler implementation is defined by template parameter. See QueryProfilerReal and QueryProfilerCPU.
  */

-#if USE_UNWIND
+#ifndef __APPLE__
 class Timer
 {
 public:
|
|||||||
|
|
||||||
Poco::Logger * log;
|
Poco::Logger * log;
|
||||||
|
|
||||||
#if USE_UNWIND
|
#ifndef __APPLE__
|
||||||
inline static thread_local Timer timer = Timer();
|
inline static thread_local Timer timer = Timer();
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@@ -5,7 +5,6 @@
 #include <atomic>

 #include <re2/re2.h>
-#include <re2/stringpiece.h>

 #include <Poco/Util/AbstractConfiguration.h>

@@ -44,7 +43,7 @@ private:
     const std::string regexp_string;

     const RE2 regexp;
-    const re2::StringPiece replacement;
+    const std::string_view replacement;

 #ifndef NDEBUG
     mutable std::atomic<std::uint64_t> matches_count = 0;
@@ -20,13 +20,10 @@
 #include <sstream>
 #include <unordered_map>
 #include <fmt/format.h>
+#include <libunwind.h>

 #include "config.h"

-#if USE_UNWIND
-#    include <libunwind.h>
-#endif
-
 namespace
 {
     /// Currently this variable is set up once on server startup.
|
|||||||
const StackTrace::FramePointers & frame_pointers, [[maybe_unused]] size_t offset, size_t size, StackTrace::Frames & frames)
|
const StackTrace::FramePointers & frame_pointers, [[maybe_unused]] size_t offset, size_t size, StackTrace::Frames & frames)
|
||||||
{
|
{
|
||||||
#if defined(__ELF__) && !defined(OS_FREEBSD)
|
#if defined(__ELF__) && !defined(OS_FREEBSD)
|
||||||
auto symbol_index_ptr = DB::SymbolIndex::instance();
|
const DB::SymbolIndex & symbol_index = DB::SymbolIndex::instance();
|
||||||
const DB::SymbolIndex & symbol_index = *symbol_index_ptr;
|
|
||||||
std::unordered_map<std::string, DB::Dwarf> dwarfs;
|
std::unordered_map<std::string, DB::Dwarf> dwarfs;
|
||||||
|
|
||||||
for (size_t i = 0; i < offset; ++i)
|
for (size_t i = 0; i < offset; ++i)
|
||||||
@ -287,12 +283,8 @@ StackTrace::StackTrace(const ucontext_t & signal_context)
|
|||||||
|
|
||||||
void StackTrace::tryCapture()
|
void StackTrace::tryCapture()
|
||||||
{
|
{
|
||||||
#if USE_UNWIND
|
|
||||||
size = unw_backtrace(frame_pointers.data(), capacity);
|
size = unw_backtrace(frame_pointers.data(), capacity);
|
||||||
__msan_unpoison(frame_pointers.data(), size * sizeof(frame_pointers[0]));
|
__msan_unpoison(frame_pointers.data(), size * sizeof(frame_pointers[0]));
|
||||||
#else
|
|
||||||
size = 0;
|
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// ClickHouse uses bundled libc++ so type names will be the same on every system thus it's safe to hardcode them
|
/// ClickHouse uses bundled libc++ so type names will be the same on every system thus it's safe to hardcode them
|
||||||
@ -348,8 +340,7 @@ toStringEveryLineImpl([[maybe_unused]] bool fatal, const StackTraceRefTriple & s
|
|||||||
using enum DB::Dwarf::LocationInfoMode;
|
using enum DB::Dwarf::LocationInfoMode;
|
||||||
const auto mode = fatal ? FULL_WITH_INLINE : FAST;
|
const auto mode = fatal ? FULL_WITH_INLINE : FAST;
|
||||||
|
|
||||||
auto symbol_index_ptr = DB::SymbolIndex::instance();
|
const DB::SymbolIndex & symbol_index = DB::SymbolIndex::instance();
|
||||||
const DB::SymbolIndex & symbol_index = *symbol_index_ptr;
|
|
||||||
std::unordered_map<String, DB::Dwarf> dwarfs;
|
std::unordered_map<String, DB::Dwarf> dwarfs;
|
||||||
|
|
||||||
for (size_t i = stack_trace.offset; i < stack_trace.size; ++i)
|
for (size_t i = stack_trace.offset; i < stack_trace.size; ++i)
|
||||||
|
@@ -793,88 +793,6 @@ public:
     }
 };


-// Searches for needle surrounded by token-separators.
-// Separators are anything inside ASCII (0-128) and not alphanum.
-// Any value outside of basic ASCII (>=128) is considered a non-separator symbol, hence UTF-8 strings
-// should work just fine. But any Unicode whitespace is not considered a token separtor.
-template <typename StringSearcher>
-class TokenSearcher : public StringSearcherBase
-{
-    StringSearcher searcher;
-    size_t needle_size;
-
-public:
-
-    template <typename CharT>
-    requires (sizeof(CharT) == 1)
-    static bool isValidNeedle(const CharT * needle_, size_t needle_size_)
-    {
-        return std::none_of(needle_, needle_ + needle_size_, isTokenSeparator);
-    }
-
-    template <typename CharT>
-    requires (sizeof(CharT) == 1)
-    TokenSearcher(const CharT * needle_, size_t needle_size_)
-        : searcher(needle_, needle_size_)
-        , needle_size(needle_size_)
-    {
-        /// The caller is responsible for calling isValidNeedle()
-        chassert(isValidNeedle(needle_, needle_size_));
-    }
-
-    template <typename CharT>
-    requires (sizeof(CharT) == 1)
-    ALWAYS_INLINE bool compare(const CharT * haystack, const CharT * haystack_end, const CharT * pos) const
-    {
-        // use searcher only if pos is in the beginning of token and pos + searcher.needle_size is end of token.
-        if (isToken(haystack, haystack_end, pos))
-            return searcher.compare(haystack, haystack_end, pos);
-
-        return false;
-    }
-
-    template <typename CharT>
-    requires (sizeof(CharT) == 1)
-    const CharT * search(const CharT * haystack, const CharT * const haystack_end) const
-    {
-        // use searcher.search(), then verify that returned value is a token
-        // if it is not, skip it and re-run
-
-        const auto * pos = haystack;
-        while (pos < haystack_end)
-        {
-            pos = searcher.search(pos, haystack_end);
-            if (pos == haystack_end || isToken(haystack, haystack_end, pos))
-                return pos;
-
-            // assuming that heendle does not contain any token separators.
-            pos += needle_size;
-        }
-        return haystack_end;
-    }
-
-    template <typename CharT>
-    requires (sizeof(CharT) == 1)
-    const CharT * search(const CharT * haystack, size_t haystack_size) const
-    {
-        return search(haystack, haystack + haystack_size);
-    }
-
-    template <typename CharT>
-    requires (sizeof(CharT) == 1)
-    ALWAYS_INLINE bool isToken(const CharT * haystack, const CharT * const haystack_end, const CharT* p) const
-    {
-        return (p == haystack || isTokenSeparator(*(p - 1)))
-            && (p + needle_size >= haystack_end || isTokenSeparator(*(p + needle_size)));
-    }
-
-    ALWAYS_INLINE static bool isTokenSeparator(const uint8_t c)
-    {
-        return !(isAlphaNumericASCII(c) || !isASCII(c));
-    }
-};
-
 }

 using ASCIICaseSensitiveStringSearcher = impl::StringSearcher<true, true>;
|
|||||||
using UTF8CaseSensitiveStringSearcher = impl::StringSearcher<true, false>;
|
using UTF8CaseSensitiveStringSearcher = impl::StringSearcher<true, false>;
|
||||||
using UTF8CaseInsensitiveStringSearcher = impl::StringSearcher<false, false>;
|
using UTF8CaseInsensitiveStringSearcher = impl::StringSearcher<false, false>;
|
||||||
|
|
||||||
using ASCIICaseSensitiveTokenSearcher = impl::TokenSearcher<ASCIICaseSensitiveStringSearcher>;
|
|
||||||
using ASCIICaseInsensitiveTokenSearcher = impl::TokenSearcher<ASCIICaseInsensitiveStringSearcher>;
|
|
||||||
|
|
||||||
/// Use only with short haystacks where cheap initialization is required.
|
/// Use only with short haystacks where cheap initialization is required.
|
||||||
template <bool CaseInsensitive>
|
template <bool CaseInsensitive>
|
||||||
struct StdLibASCIIStringSearcher
|
struct StdLibASCIIStringSearcher
|
||||||
|
@@ -9,7 +9,6 @@

 #include <link.h>

-//#include <iostream>
 #include <filesystem>

 #include <base/sort.h>
@@ -510,7 +509,7 @@ const T * find(const void * address, const std::vector<T> & vec)
 }


-void SymbolIndex::update()
+void SymbolIndex::load()
 {
     dl_iterate_phdr(collectSymbols, &data);

@@ -550,24 +549,12 @@ String SymbolIndex::getBuildIDHex() const
     return build_id_hex;
 }

-MultiVersion<SymbolIndex> & SymbolIndex::instanceImpl()
+const SymbolIndex & SymbolIndex::instance()
 {
-    static MultiVersion<SymbolIndex> instance(std::unique_ptr<SymbolIndex>(new SymbolIndex));
+    static SymbolIndex instance;
     return instance;
 }

-MultiVersion<SymbolIndex>::Version SymbolIndex::instance()
-{
-    return instanceImpl().get();
-}
-
-void SymbolIndex::reload()
-{
-    instanceImpl().set(std::unique_ptr<SymbolIndex>(new SymbolIndex));
-    /// Also drop stacktrace cache.
-    StackTrace::dropCache();
-}
-
 }

 #endif
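The shape of this change, in isolation: a reloadable MultiVersion singleton becomes a plain function-local static (a "Meyers singleton"), which the language guarantees is initialized exactly once, thread-safely, on first use. That is enough here because reload() is gone and the index is immutable after startup. A generic sketch:

class Registry
{
    Registry() { /* load once */ }
public:
    static const Registry & instance()
    {
        static Registry instance;   /// constructed on first call, never replaced
        return instance;
    }
};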
@@ -8,8 +8,6 @@
 #include <Common/Elf.h>
 #include <boost/noncopyable.hpp>

-#include <Common/MultiVersion.h>
-
 namespace DB
 {

|
|||||||
class SymbolIndex : private boost::noncopyable
|
class SymbolIndex : private boost::noncopyable
|
||||||
{
|
{
|
||||||
protected:
|
protected:
|
||||||
SymbolIndex() { update(); }
|
SymbolIndex() { load(); }
|
||||||
|
|
||||||
public:
|
public:
|
||||||
static MultiVersion<SymbolIndex>::Version instance();
|
static const SymbolIndex & instance();
|
||||||
static void reload();
|
|
||||||
|
|
||||||
struct Symbol
|
struct Symbol
|
||||||
{
|
{
|
||||||
@@ -90,8 +87,7 @@ public:
 private:
     Data data;

-    void update();
-    static MultiVersion<SymbolIndex> & instanceImpl();
+    void load();
 };

 }
@@ -67,8 +67,8 @@ ThreadGroup::ThreadGroup()
     : master_thread_id(CurrentThread::get().thread_id)
 {}

-ThreadStatus::ThreadStatus()
-    : thread_id{getThreadId()}
+ThreadStatus::ThreadStatus(bool check_current_thread_on_destruction_)
+    : thread_id{getThreadId()}, check_current_thread_on_destruction(check_current_thread_on_destruction_)
 {
     last_rusage = std::make_unique<RUsageCounters>();

|
|||||||
if (deleter)
|
if (deleter)
|
||||||
deleter();
|
deleter();
|
||||||
|
|
||||||
|
chassert(!check_current_thread_on_destruction || current_thread == this);
|
||||||
|
|
||||||
/// Only change current_thread if it's currently being used by this ThreadStatus
|
/// Only change current_thread if it's currently being used by this ThreadStatus
|
||||||
/// For example, PushingToViews chain creates and deletes ThreadStatus instances while running in the main query thread
|
/// For example, PushingToViews chain creates and deletes ThreadStatus instances while running in the main query thread
|
||||||
if (current_thread == this)
|
if (current_thread == this)
|
||||||
current_thread = nullptr;
|
current_thread = nullptr;
|
||||||
|
else if (check_current_thread_on_destruction)
|
||||||
|
LOG_ERROR(log, "current_thread contains invalid address");
|
||||||
}
|
}
|
||||||
|
|
||||||
void ThreadStatus::updatePerformanceCounters()
|
void ThreadStatus::updatePerformanceCounters()
|
||||||
|
@@ -224,8 +224,10 @@ private:

     Poco::Logger * log = nullptr;

+    bool check_current_thread_on_destruction;
+
 public:
-    ThreadStatus();
+    explicit ThreadStatus(bool check_current_thread_on_destruction_ = true);
     ~ThreadStatus();

     ThreadGroupPtr getThreadGroup() const;
@@ -730,9 +730,6 @@ using VolnitskyUTF8 = VolnitskyBase<true, false, UTF8CaseSensitiveStringSearcher
 using VolnitskyCaseInsensitive = VolnitskyBase<false, true, ASCIICaseInsensitiveStringSearcher>; /// ignores non-ASCII bytes
 using VolnitskyCaseInsensitiveUTF8 = VolnitskyBase<false, false, UTF8CaseInsensitiveStringSearcher>;

-using VolnitskyCaseSensitiveToken = VolnitskyBase<true, true, ASCIICaseSensitiveTokenSearcher>;
-using VolnitskyCaseInsensitiveToken = VolnitskyBase<false, true, ASCIICaseInsensitiveTokenSearcher>;
-
 using MultiVolnitsky = MultiVolnitskyBase<true, true, ASCIICaseSensitiveStringSearcher>;
 using MultiVolnitskyUTF8 = MultiVolnitskyBase<true, false, UTF8CaseSensitiveStringSearcher>;
 using MultiVolnitskyCaseInsensitive = MultiVolnitskyBase<false, true, ASCIICaseInsensitiveStringSearcher>;
@@ -27,7 +27,7 @@ static thread_local size_t max_stack_size = 0;
  * @param out_address - if not nullptr, here the address of the stack will be written.
  * @return stack size
  */
-size_t getStackSize(void ** out_address)
+static size_t getStackSize(void ** out_address)
 {
     using namespace DB;

|
|||||||
throwFromErrno("Cannot pthread_attr_get_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
|
throwFromErrno("Cannot pthread_attr_get_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
|
||||||
# else
|
# else
|
||||||
if (0 != pthread_getattr_np(pthread_self(), &attr))
|
if (0 != pthread_getattr_np(pthread_self(), &attr))
|
||||||
|
{
|
||||||
|
if (errno == ENOENT)
|
||||||
|
{
|
||||||
|
/// Most likely procfs is not mounted.
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
else
|
||||||
throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
|
throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
|
||||||
|
}
|
||||||
# endif
|
# endif
|
||||||
|
|
||||||
SCOPE_EXIT({ pthread_attr_destroy(&attr); });
|
SCOPE_EXIT({ pthread_attr_destroy(&attr); });
|
||||||
@ -83,6 +91,10 @@ __attribute__((__weak__)) void checkStackSize()
|
|||||||
if (!stack_address)
|
if (!stack_address)
|
||||||
max_stack_size = getStackSize(&stack_address);
|
max_stack_size = getStackSize(&stack_address);
|
||||||
|
|
||||||
|
/// The check is impossible.
|
||||||
|
if (!max_stack_size)
|
||||||
|
return;
|
||||||
|
|
||||||
const void * frame_address = __builtin_frame_address(0);
|
const void * frame_address = __builtin_frame_address(0);
|
||||||
uintptr_t int_frame_address = reinterpret_cast<uintptr_t>(frame_address);
|
uintptr_t int_frame_address = reinterpret_cast<uintptr_t>(frame_address);
|
||||||
uintptr_t int_stack_address = reinterpret_cast<uintptr_t>(stack_address);
|
uintptr_t int_stack_address = reinterpret_cast<uintptr_t>(stack_address);
|
||||||
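The added guard turns the whole check into a no-op when the stack size could not be determined, for example when procfs is not mounted and pthread_getattr_np() fails with ENOENT as handled in the previous hunk. A rough standalone sketch of such a check; the real function's bookkeeping differs, so treat the arithmetic as illustrative only:

#include <cstddef>
#include <cstdint>
#include <stdexcept>

/// stack_address / max_stack_size as produced by a getStackSize()-like helper.
void checkStackSizeSketch(void * stack_address, size_t max_stack_size)
{
    if (!max_stack_size)
        return;   /// the check is impossible

    const void * frame_address = __builtin_frame_address(0);
    /// On common platforms the stack grows down, so the distance from the
    /// stack's base to the current frame approximates the bytes in use.
    uintptr_t used = reinterpret_cast<uintptr_t>(stack_address) - reinterpret_cast<uintptr_t>(frame_address);
    if (used > max_stack_size / 2)
        throw std::runtime_error("Stack is more than half full, refusing to go deeper");
}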
@@ -9,7 +9,6 @@
 #cmakedefine01 USE_AWS_S3
 #cmakedefine01 USE_AZURE_BLOB_STORAGE
 #cmakedefine01 USE_BROTLI
-#cmakedefine01 USE_UNWIND
 #cmakedefine01 USE_CASSANDRA
 #cmakedefine01 USE_SENTRY
 #cmakedefine01 USE_GRPC
@@ -22,8 +22,7 @@ int main(int argc, char ** argv)
         return 1;
     }

-    auto symbol_index_ptr = SymbolIndex::instance();
-    const SymbolIndex & symbol_index = *symbol_index_ptr;
+    const SymbolIndex & symbol_index = SymbolIndex::instance();

     for (const auto & elem : symbol_index.symbols())
         std::cout << elem.name << ": " << elem.address_begin << " ... " << elem.address_end << "\n";
@ -16,7 +16,7 @@ std::string_view getResource(std::string_view name)
|
|||||||
|
|
||||||
#if defined USE_MUSL
|
#if defined USE_MUSL
|
||||||
/// If static linking is used, we cannot use dlsym and have to parse ELF symbol table by ourself.
|
/// If static linking is used, we cannot use dlsym and have to parse ELF symbol table by ourself.
|
||||||
return DB::SymbolIndex::instance()->getResource(name_replaced);
|
return DB::SymbolIndex::instance().getResource(name_replaced);
|
||||||
|
|
||||||
#else
|
#else
|
||||||
// In most `dlsym(3)` APIs, one passes the symbol name as it appears via
|
// In most `dlsym(3)` APIs, one passes the symbol name as it appears via
|
||||||
@ -15,12 +15,15 @@ namespace Poco { class Logger; }
 #define LogToStr(x, y) std::make_unique<LogToStrImpl>(x, y)
 #define LogFrequencyLimiter(x, y) std::make_unique<LogFrequencyLimiterIml>(x, y)
 
+using LogSeriesLimiterPtr = std::shared_ptr<LogSeriesLimiter>;
+
 namespace
 {
     [[maybe_unused]] const ::Poco::Logger * getLogger(const ::Poco::Logger * logger) { return logger; }
     [[maybe_unused]] const ::Poco::Logger * getLogger(const std::atomic<::Poco::Logger *> & logger) { return logger.load(); }
     [[maybe_unused]] std::unique_ptr<LogToStrImpl> getLogger(std::unique_ptr<LogToStrImpl> && logger) { return logger; }
     [[maybe_unused]] std::unique_ptr<LogFrequencyLimiterIml> getLogger(std::unique_ptr<LogFrequencyLimiterIml> && logger) { return logger; }
+    [[maybe_unused]] LogSeriesLimiterPtr getLogger(LogSeriesLimiterPtr & logger) { return logger; }
 }
 
 #define LOG_IMPL_FIRST_ARG(X, ...) X
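The dispatch trick above is worth spelling out: the LOG_* macros accept several "logger-ish" types, and an overload set named getLogger() normalizes each one, so adding LogSeriesLimiterPtr support is just one more overload with no macro changes. A simplified, self-contained sketch (all names here are ours):

#include <atomic>
#include <iostream>
#include <memory>

struct Logger { const char * name; };
using LoggerPtr = std::shared_ptr<Logger>;

[[maybe_unused]] static const Logger * getLogger(const Logger * logger) { return logger; }
[[maybe_unused]] static const Logger * getLogger(const std::atomic<Logger *> & logger) { return logger.load(); }
[[maybe_unused]] static const Logger * getLogger(const LoggerPtr & logger) { return logger.get(); }

/// The macro body stays type-agnostic; overload resolution does the work.
#define LOG_NAME(lg) (std::cout << getLogger(lg)->name << '\n')

int main()
{
    Logger raw{"raw"};
    std::atomic<Logger *> atomic_logger{&raw};
    LoggerPtr shared = std::make_shared<Logger>(Logger{"shared"});

    LOG_NAME(&raw);          /// plain pointer
    LOG_NAME(atomic_logger); /// atomic slot
    LOG_NAME(shared);        /// shared ownership, like LogSeriesLimiterPtr above
}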
@ -3,7 +3,6 @@
 #include <IO/ReadBufferFromString.h>
 #include <IO/Operators.h>
 #include <re2/re2.h>
-#include <re2/stringpiece.h>
 #include <algorithm>
 #include <sstream>
 #include <iomanip>
@ -33,14 +32,14 @@ std::string makeRegexpPatternFromGlobs(const std::string & initial_str_with_glob
     std::string escaped_with_globs = buf_for_escaping.str();
 
     static const re2::RE2 enum_or_range(R"({([\d]+\.\.[\d]+|[^{}*,]+,[^{}*]*[^{}*,])})"); /// regexp for {expr1,expr2,expr3} or {M..N}, where M and N - non-negative integers, expr's should be without "{", "}", "*" and ","
-    re2::StringPiece input(escaped_with_globs);
-    re2::StringPiece matched;
+    std::string_view input(escaped_with_globs);
+    std::string_view matched;
     std::ostringstream oss_for_replacing; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
     oss_for_replacing.exceptions(std::ios::failbit);
     size_t current_index = 0;
     while (RE2::FindAndConsume(&input, enum_or_range, &matched))
     {
-        std::string buffer{matched};
+        std::string buffer(matched);
         oss_for_replacing << escaped_with_globs.substr(current_index, matched.data() - escaped_with_globs.data() - current_index - 1) << '(';
 
         if (buffer.find(',') == std::string::npos)
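The swap above works because recent re2 releases alias re2::StringPiece to a string view, so the consume loop keeps its shape. A minimal sketch of that loop, under the assumption that the re2 build in use accepts std::string_view for input and captures (as the change above relies on):

#include <re2/re2.h>
#include <iostream>
#include <string>
#include <string_view>

int main()
{
    static const re2::RE2 number(R"((\d+))");
    std::string s = "a1b22c333";
    std::string_view input(s);
    std::string_view matched;

    /// FindAndConsume advances `input` past each match, so the loop
    /// walks the string left to right without manual offsets.
    while (re2::RE2::FindAndConsume(&input, number, &matched))
        std::cout << matched << '\n'; /// prints 1, 22, 333
}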
@ -42,7 +42,6 @@ private:
     UInt32 getMaxCompressedDataSize(UInt32 uncompressed_size) const override;
 
     mutable LZ4::PerformanceStatistics lz4_stat;
-    ASTPtr codec_desc;
 };
 
 
@ -220,7 +220,7 @@ KeeperContext::Storage KeeperContext::getLogsPathFromConfig(const Poco::Util::Ab
         if (!fs::exists(path))
             fs::create_directories(path);
 
-        return std::make_shared<DiskLocal>("LocalLogDisk", path, 0);
+        return std::make_shared<DiskLocal>("LocalLogDisk", path);
     };
 
     /// the most specialized path
@ -246,7 +246,7 @@ KeeperContext::Storage KeeperContext::getSnapshotsPathFromConfig(const Poco::Uti
         if (!fs::exists(path))
             fs::create_directories(path);
 
-        return std::make_shared<DiskLocal>("LocalSnapshotDisk", path, 0);
+        return std::make_shared<DiskLocal>("LocalSnapshotDisk", path);
     };
 
     /// the most specialized path
@ -272,7 +272,7 @@ KeeperContext::Storage KeeperContext::getStatePathFromConfig(const Poco::Util::A
         if (!fs::exists(path))
             fs::create_directories(path);
 
-        return std::make_shared<DiskLocal>("LocalStateFileDisk", path, 0);
+        return std::make_shared<DiskLocal>("LocalStateFileDisk", path);
     };
 
     if (config.has("keeper_server.state_storage_disk"))
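All three hunks follow the same shape: ensure the directory exists, then construct a local disk for it, now without the trailing numeric argument (presumably a keep-free-space limit; that reading is an assumption based only on the hunks). A generic sketch of the pattern with a stand-in type, since DiskLocal itself is ClickHouse-internal:

#include <filesystem>
#include <memory>
#include <string>

namespace fs = std::filesystem;

/// "LocalDisk" is a hypothetical stand-in for DB::DiskLocal.
struct LocalDisk
{
    std::string name;
    fs::path path;
};

static std::shared_ptr<LocalDisk> makeLocalDisk(std::string name, const fs::path & path)
{
    /// Same guard as the lambdas above: create the directory on first use.
    if (!fs::exists(path))
        fs::create_directories(path);
    return std::make_shared<LocalDisk>(LocalDisk{std::move(name), path});
}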
Some files were not shown because too many files have changed in this diff.