Commit 5a89e03125: Merge branch 'master' into more-changes
.github/workflows/master.yml (vendored, 216 lines changed)

@@ -2870,6 +2870,216 @@ jobs:
           docker ps --quiet | xargs --no-run-if-empty docker kill ||:
           docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan0:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=0
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan1:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=1
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan2:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=2
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan3:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan4:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=4
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAnalyzerAsan5:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan, analyzer)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=5
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
   IntegrationTestsTsan0:
     needs: [BuilderDebTsan]
     runs-on: [self-hosted, stress-tester]
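Note: the six jobs differ only in RUN_BY_HASH_NUM; each runner executes the shard of integration tests whose hash maps to its number. A minimal sketch of how hash-based sharding of this kind typically works (Python; the real selection lives in tests/ci/integration_test_check.py and the test runner, and may differ in detail):

```python
import zlib

RUN_BY_HASH_NUM = 0    # this runner's shard, 0..5 across the six jobs
RUN_BY_HASH_TOTAL = 6  # total number of shards

def is_mine(test_name: str) -> bool:
    # A stable (non-randomized) hash, so every runner computes the same partition.
    return zlib.crc32(test_name.encode()) % RUN_BY_HASH_TOTAL == RUN_BY_HASH_NUM

tests = ["test_storage_s3", "test_merge_tree", "test_replication"]
print([t for t in tests if is_mine(t)])  # the subset this shard would run
```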
@@ -3963,6 +4173,12 @@ jobs:
       - IntegrationTestsAsan3
       - IntegrationTestsAsan4
       - IntegrationTestsAsan5
+      - IntegrationTestsAnalyzerAsan0
+      - IntegrationTestsAnalyzerAsan1
+      - IntegrationTestsAnalyzerAsan2
+      - IntegrationTestsAnalyzerAsan3
+      - IntegrationTestsAnalyzerAsan4
+      - IntegrationTestsAnalyzerAsan5
       - IntegrationTestsRelease0
       - IntegrationTestsRelease1
       - IntegrationTestsRelease2
.github/workflows/pull_request.yml (vendored, 6 lines changed)

@@ -5099,6 +5099,12 @@ jobs:
       - IntegrationTestsAsan3
       - IntegrationTestsAsan4
       - IntegrationTestsAsan5
+      - IntegrationTestsAnalyzerAsan0
+      - IntegrationTestsAnalyzerAsan1
+      - IntegrationTestsAnalyzerAsan2
+      - IntegrationTestsAnalyzerAsan3
+      - IntegrationTestsAnalyzerAsan4
+      - IntegrationTestsAnalyzerAsan5
       - IntegrationTestsRelease0
       - IntegrationTestsRelease1
       - IntegrationTestsRelease2
cmake/limit_jobs.cmake

@@ -1,43 +1,38 @@
-# Usage:
-# set (MAX_COMPILER_MEMORY 2000 CACHE INTERNAL "") # In megabytes
-# set (MAX_LINKER_MEMORY 3500 CACHE INTERNAL "")
-# include (cmake/limit_jobs.cmake)
+# Limit compiler/linker job concurrency to avoid OOMs on subtrees where compilation/linking is memory-intensive.
+#
+# Usage from CMake:
+# set (MAX_COMPILER_MEMORY 2000 CACHE INTERNAL "") # megabyte
+# set (MAX_LINKER_MEMORY 3500 CACHE INTERNAL "") # megabyte
+# include (cmake/limit_jobs.cmake)
+#
+# (bigger values mean fewer jobs)

-cmake_host_system_information(RESULT TOTAL_PHYSICAL_MEMORY QUERY TOTAL_PHYSICAL_MEMORY) # Not available under freebsd
+cmake_host_system_information(RESULT TOTAL_PHYSICAL_MEMORY QUERY TOTAL_PHYSICAL_MEMORY)
 cmake_host_system_information(RESULT NUMBER_OF_LOGICAL_CORES QUERY NUMBER_OF_LOGICAL_CORES)

-# 1 if not set
-option(PARALLEL_COMPILE_JOBS "Maximum number of concurrent compilation jobs" "")
-
-# 1 if not set
-option(PARALLEL_LINK_JOBS "Maximum number of concurrent link jobs" "")
+# Set to disable the automatic job-limiting
+option(PARALLEL_COMPILE_JOBS "Maximum number of concurrent compilation jobs" OFF)
+option(PARALLEL_LINK_JOBS "Maximum number of concurrent link jobs" OFF)

-if (NOT PARALLEL_COMPILE_JOBS AND TOTAL_PHYSICAL_MEMORY AND MAX_COMPILER_MEMORY)
+if (NOT PARALLEL_COMPILE_JOBS AND MAX_COMPILER_MEMORY)
     math(EXPR PARALLEL_COMPILE_JOBS ${TOTAL_PHYSICAL_MEMORY}/${MAX_COMPILER_MEMORY})
     if (NOT PARALLEL_COMPILE_JOBS)
         set (PARALLEL_COMPILE_JOBS 1)
     endif ()
-    if (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES)
-        set (PARALLEL_COMPILE_JOBS_LESS TRUE)
+    if (PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+        message(WARNING "The auto-calculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.")
     endif()
 endif ()

-if (PARALLEL_COMPILE_JOBS AND (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES))
-    set(CMAKE_JOB_POOL_COMPILE compile_job_pool${CMAKE_CURRENT_SOURCE_DIR})
-    string (REGEX REPLACE "[^a-zA-Z0-9]+" "_" CMAKE_JOB_POOL_COMPILE ${CMAKE_JOB_POOL_COMPILE})
-    set_property(GLOBAL APPEND PROPERTY JOB_POOLS ${CMAKE_JOB_POOL_COMPILE}=${PARALLEL_COMPILE_JOBS})
-endif ()
-
-if (NOT PARALLEL_LINK_JOBS AND TOTAL_PHYSICAL_MEMORY AND MAX_LINKER_MEMORY)
+if (NOT PARALLEL_LINK_JOBS AND MAX_LINKER_MEMORY)
     math(EXPR PARALLEL_LINK_JOBS ${TOTAL_PHYSICAL_MEMORY}/${MAX_LINKER_MEMORY})
     if (NOT PARALLEL_LINK_JOBS)
         set (PARALLEL_LINK_JOBS 1)
     endif ()
-    if (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES)
-        set (PARALLEL_LINK_JOBS_LESS TRUE)
+    if (PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+        message(WARNING "The auto-calculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.")
     endif()
 endif ()

@@ -52,20 +47,16 @@ if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLE
     set (PARALLEL_LINK_JOBS 2)
 endif()

-if (PARALLEL_LINK_JOBS AND (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES))
+message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB DRAM, 'OFF' means the native core count).")
+
+if (PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+    set(CMAKE_JOB_POOL_COMPILE compile_job_pool${CMAKE_CURRENT_SOURCE_DIR})
+    string (REGEX REPLACE "[^a-zA-Z0-9]+" "_" CMAKE_JOB_POOL_COMPILE ${CMAKE_JOB_POOL_COMPILE})
+    set_property(GLOBAL APPEND PROPERTY JOB_POOLS ${CMAKE_JOB_POOL_COMPILE}=${PARALLEL_COMPILE_JOBS})
+endif ()
+
+if (PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES)
     set(CMAKE_JOB_POOL_LINK link_job_pool${CMAKE_CURRENT_SOURCE_DIR})
     string (REGEX REPLACE "[^a-zA-Z0-9]+" "_" CMAKE_JOB_POOL_LINK ${CMAKE_JOB_POOL_LINK})
     set_property(GLOBAL APPEND PROPERTY JOB_POOLS ${CMAKE_JOB_POOL_LINK}=${PARALLEL_LINK_JOBS})
 endif ()
-
-if (PARALLEL_COMPILE_JOBS OR PARALLEL_LINK_JOBS)
-    message(STATUS
-        "${CMAKE_CURRENT_SOURCE_DIR}: Have ${TOTAL_PHYSICAL_MEMORY} megabytes of memory.
-        Limiting concurrent linkers jobs to ${PARALLEL_LINK_JOBS} and compiler jobs to ${PARALLEL_COMPILE_JOBS} (system has ${NUMBER_OF_LOGICAL_CORES} logical cores)")
-    if (PARALLEL_COMPILE_JOBS_LESS)
-        message(WARNING "The autocalculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.")
-    endif()
-    if (PARALLEL_LINK_JOBS_LESS)
-        message(WARNING "The autocalculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.")
-    endif()
-endif ()
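Note: the job limit above is just integer division of physical memory by the per-job memory budget, clamped to at least 1. A minimal sketch of the arithmetic (Python, illustrative values only; not part of the build):

```python
def job_limit(total_physical_memory_mb: int, max_job_memory_mb: int, logical_cores: int) -> int:
    # math(EXPR ... ${TOTAL_PHYSICAL_MEMORY}/${MAX_..._MEMORY}) is integer division
    jobs = total_physical_memory_mb // max_job_memory_mb
    jobs = max(jobs, 1)  # the cmake code clamps to at least one job
    if jobs < logical_cores:
        print(f"warning: limit of {jobs} jobs underutilizes {logical_cores} cores")
    return jobs

# e.g. 32 GB of RAM with MAX_LINKER_MEMORY=3500 allows 9 concurrent link jobs
print(job_limit(32768, 3500, 16))  # -> 9, with the warning
```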
contrib/cctz (vendored submodule)

@@ -1 +1 @@
-Subproject commit 5e05432420f9692418e2e12aff09859e420b14a2
+Subproject commit 8529bcef5cd996b7c0f4d7475286b76b5d126c4c
docs/en/operations/server-configuration-parameters/settings.md

@@ -1,11 +1,11 @@
 ---
 slug: /en/operations/server-configuration-parameters/settings
 sidebar_position: 57
-sidebar_label: Server Settings
+sidebar_label: Global Server Settings
 description: This section contains descriptions of server settings that cannot be changed at the session or query level.
 ---

-# Server Settings
+# Global Server Settings

 This section contains descriptions of server settings that cannot be changed at the session or query level.
@@ -7,90 +7,16 @@ pagination_next: en/operations/settings/settings

 # Settings Overview

-There are multiple ways to define ClickHouse settings. Settings are configured in layers, and each subsequent layer redefines the previous values of a setting.
+There are two main groups of ClickHouse settings:

-The order of priority for defining a setting is:
+- Global server settings
+- Query-level settings

-1. Settings in the `users.xml` server configuration file
+The main distinction between global server settings and query-level settings is that
+global server settings must be set in configuration files while query-level settings
+can be set in configuration files or with SQL queries.

-   - Set in the element `<profiles>`.
+Read about [global server settings](/docs/en/operations/server-configuration-parameters/settings.md) to learn more about configuring your ClickHouse server at the global server level.

-2. Session settings
+Read about [query-level settings](/docs/en/operations/settings/settings-query-level.md) to learn more about configuring your ClickHouse server at the query-level.

-   - Send `SET setting=value` from the ClickHouse console client in interactive mode.
-     Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, you need to specify the `session_id` HTTP parameter.
-
-3. Query settings
-
-   - When starting the ClickHouse console client in non-interactive mode, set the startup parameter `--setting=value`.
-   - When using the HTTP API, pass CGI parameters (`URL?setting_1=value&setting_2=value...`).
-   - Define settings in the [SETTINGS](../../sql-reference/statements/select/index.md#settings-in-select-query) clause of the SELECT query. The setting value is applied only to that query and is reset to the default or previous value after the query is executed.
-
-View the [Settings](./settings.md) page for a description of the ClickHouse settings.
-
-## Converting a Setting to its Default Value
-
-If you change a setting and would like to revert it back to its default value, set the value to `DEFAULT`. The syntax looks like:
-
-```sql
-SET setting_name = DEFAULT
-```
-
-For example, the default value of `max_insert_block_size` is 1048449. Suppose you change its value to 100000:
-
-```sql
-SET max_insert_block_size=100000;
-
-SELECT value FROM system.settings where name='max_insert_block_size';
-```
-
-The response is:
-
-```response
-┌─value──┐
-│ 100000 │
-└────────┘
-```
-
-The following command sets its value back to 1048449:
-
-```sql
-SET max_insert_block_size=DEFAULT;
-
-SELECT value FROM system.settings where name='max_insert_block_size';
-```
-
-The setting is now back to its default:
-
-```response
-┌─value───┐
-│ 1048449 │
-└─────────┘
-```
-
-## Custom Settings {#custom_settings}
-
-In addition to the common [settings](../../operations/settings/settings.md), users can define custom settings.
-
-A custom setting name must begin with one of predefined prefixes. The list of these prefixes must be declared in the [custom_settings_prefixes](../../operations/server-configuration-parameters/settings.md#custom_settings_prefixes) parameter in the server configuration file.
-
-```xml
-<custom_settings_prefixes>custom_</custom_settings_prefixes>
-```
-
-To define a custom setting use `SET` command:
-
-```sql
-SET custom_a = 123;
-```
-
-To get the current value of a custom setting use `getSetting()` function:
-
-```sql
-SELECT getSetting('custom_a');
-```
-
-**See Also**
-
-- [Server Configuration Settings](../../operations/server-configuration-parameters/settings.md)
@@ -242,6 +242,26 @@ See also:

 - [DateTime data type.](../../sql-reference/data-types/datetime.md)
 - [Functions for working with dates and times.](../../sql-reference/functions/date-time-functions.md)

+## interval_output_format {#interval_output_format}
+
+Allows choosing different output formats of the text representation of interval types.
+
+Possible values:
+
+- `kusto` - KQL-style output format.
+
+  ClickHouse outputs intervals in [KQL format](https://learn.microsoft.com/en-us/dotnet/standard/base-types/standard-timespan-format-strings#the-constant-c-format-specifier). For example, `toIntervalDay(2)` would be formatted as `2.00:00:00`. Please note that for interval types of varying length (i.e. `IntervalMonth` and `IntervalYear`) the average number of seconds per interval is taken into account.
+
+- `numeric` - Numeric output format.
+
+  ClickHouse outputs intervals as their underlying numeric representation. For example, `toIntervalDay(2)` would be formatted as `2`.
+
+Default value: `numeric`.
+
+See also:
+
+- [Interval](../../sql-reference/data-types/special-data-types/interval.md)
+
 ## input_format_ipv4_default_on_conversion_error {#input_format_ipv4_default_on_conversion_error}

 Deserialization of IPv4 will use default values instead of throwing exception on conversion error.
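The KQL "constant c" format referenced above renders an interval as days.hours:minutes:seconds. A rough sketch of that rendering for a seconds-valued interval (Python, illustrative only; not ClickHouse code):

```python
def to_kusto(total_seconds: int) -> str:
    # KQL constant "c" format: d.hh:mm:ss
    days, rest = divmod(total_seconds, 86400)
    hours, rest = divmod(rest, 3600)
    minutes, seconds = divmod(rest, 60)
    return f"{days}.{hours:02}:{minutes:02}:{seconds:02}"

print(to_kusto(2 * 86400))  # toIntervalDay(2) -> "2.00:00:00"
```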
docs/en/operations/settings/settings-query-level.md (new file, 217 lines)

@@ -0,0 +1,217 @@
+---
+sidebar_label: Query-level Settings
+title: Query-level Settings
+slug: /en/operations/settings/query-level
+---
+
+There are multiple ways to set ClickHouse query-level settings. Settings are configured in layers, and each subsequent layer redefines the previous values of a setting.
+
+The order of priority for defining a setting is:
+
+1. Applying a setting to a user directly, or within a settings profile
+
+   - SQL (recommended)
+   - adding one or more XML or YAML files to `/etc/clickhouse-server/users.d`
+
+2. Session settings
+
+   - Send `SET setting=value` from the ClickHouse Cloud SQL console or
+     `clickhouse client` in interactive mode. Similarly, you can use ClickHouse
+     sessions in the HTTP protocol. To do this, you need to specify the
+     `session_id` HTTP parameter.
+
+3. Query settings
+
+   - When starting `clickhouse client` in non-interactive mode, set the startup
+     parameter `--setting=value`.
+   - When using the HTTP API, pass CGI parameters (`URL?setting_1=value&setting_2=value...`).
+   - Define settings in the
+     [SETTINGS](../../sql-reference/statements/select/index.md#settings-in-select-query)
+     clause of the SELECT query. The setting value is applied only to that query
+     and is reset to the default or previous value after the query is executed.
+
+## Examples
+
+These examples all set the value of the `async_insert` setting to `1`, and
+show how to examine the settings in a running system.
+
+### Using SQL to apply a setting to a user directly
+
+This creates the user `ingester` with the setting `async_insert = 1`:
+
+```sql
+CREATE USER ingester
+IDENTIFIED WITH sha256_hash BY '7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3'
+# highlight-next-line
+SETTINGS async_insert = 1
+```
+
+#### Examine the settings profile and assignment
+
+```sql
+SHOW ACCESS
+```
+
+```response
+┌─ACCESS─────────────────────────────────────────────────────────────────────────────┐
+│ ...                                                                                │
+# highlight-next-line
+│ CREATE USER ingester IDENTIFIED WITH sha256_password SETTINGS async_insert = true │
+│ ...                                                                                │
+└────────────────────────────────────────────────────────────────────────────────────┘
+```
+
+### Using SQL to create a settings profile and assign to a user
+
+This creates the profile `log_ingest` with the setting `async_insert = 1`:
+
+```sql
+CREATE
+SETTINGS PROFILE log_ingest SETTINGS async_insert = 1
+```
+
+This creates the user `ingester` and assigns the user the settings profile `log_ingest`:
+
+```sql
+CREATE USER ingester
+IDENTIFIED WITH sha256_hash BY '7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3'
+# highlight-next-line
+SETTINGS PROFILE log_ingest
+```
+
+### Using XML to create a settings profile and user
+
+```xml title=/etc/clickhouse-server/users.d/users.xml
+<clickhouse>
+# highlight-start
+    <profiles>
+        <log_ingest>
+            <async_insert>1</async_insert>
+        </log_ingest>
+    </profiles>
+# highlight-end
+
+    <users>
+        <ingester>
+            <password_sha256_hex>7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3</password_sha256_hex>
+# highlight-start
+            <profile>log_ingest</profile>
+# highlight-end
+        </ingester>
+        <default replace="true">
+            <password_sha256_hex>7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3</password_sha256_hex>
+            <access_management>1</access_management>
+            <named_collection_control>1</named_collection_control>
+        </default>
+    </users>
+</clickhouse>
+```
+
+#### Examine the settings profile and assignment
+
+```sql
+SHOW ACCESS
+```
+
+```response
+┌─ACCESS─────────────────────────────────────────────────────────────────────────────┐
+│ CREATE USER default IDENTIFIED WITH sha256_password                                │
+# highlight-next-line
+│ CREATE USER ingester IDENTIFIED WITH sha256_password SETTINGS PROFILE log_ingest   │
+│ CREATE SETTINGS PROFILE default                                                    │
+# highlight-next-line
+│ CREATE SETTINGS PROFILE log_ingest SETTINGS async_insert = true                    │
+│ CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1                             │
+│ ...                                                                                │
+└────────────────────────────────────────────────────────────────────────────────────┘
+```
+
+### Assign a setting to a session
+
+```sql
+SET async_insert = 1;
+SELECT value FROM system.settings where name='async_insert';
+```
+
+```response
+┌─value──┐
+│ 1      │
+└────────┘
+```
+
+### Assign a setting during a query
+
+```sql
+INSERT INTO YourTable
+# highlight-next-line
+SETTINGS async_insert=1
+VALUES (...)
+```
+
+## Converting a Setting to its Default Value
+
+If you change a setting and would like to revert it back to its default value, set the value to `DEFAULT`. The syntax looks like:
+
+```sql
+SET setting_name = DEFAULT
+```
+
+For example, the default value of `async_insert` is `0`. Suppose you change its value to `1`:
+
+```sql
+SET async_insert = 1;
+
+SELECT value FROM system.settings where name='async_insert';
+```
+
+The response is:
+
+```response
+┌─value──┐
+│ 1      │
+└────────┘
+```
+
+The following command sets its value back to 0:
+
+```sql
+SET async_insert = DEFAULT;
+
+SELECT value FROM system.settings where name='async_insert';
+```
+
+The setting is now back to its default:
+
+```response
+┌─value───┐
+│ 0       │
+└─────────┘
+```
+
+## Custom Settings {#custom_settings}
+
+In addition to the common [settings](../../operations/settings/settings.md), users can define custom settings.
+
+A custom setting name must begin with one of predefined prefixes. The list of these prefixes must be declared in the [custom_settings_prefixes](../../operations/server-configuration-parameters/settings.md#custom_settings_prefixes) parameter in the server configuration file.
+
+```xml
+<custom_settings_prefixes>custom_</custom_settings_prefixes>
+```
+
+To define a custom setting use `SET` command:
+
+```sql
+SET custom_a = 123;
+```
+
+To get the current value of a custom setting use `getSetting()` function:
+
+```sql
+SELECT getSetting('custom_a');
+```
+
+**See Also**
+
+- View the [Settings](./settings.md) page for a description of the ClickHouse settings.
+- [Global server settings](../../operations/server-configuration-parameters/settings.md)
@@ -4524,6 +4524,7 @@ This setting allows to specify renaming pattern for files processed by `file` table function

 ### Placeholders

+- `%a` — Full original filename (e.g., "sample.csv").
 - `%f` — Original filename without extension (e.g., "sample").
 - `%e` — Original file extension with dot (e.g., ".csv").
 - `%t` — Timestamp (in microseconds).
@@ -39,6 +39,8 @@ Columns:

 - `data_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included.

+- `primary_key_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The amount of memory (in bytes) used by primary key values in the primary.idx/cidx file on disk.
+
 - `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The size of the file with marks.

 - `secondary_indices_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of compressed data for secondary indices in the data part. All the auxiliary files (for example, files with marks) are not included.
docs/en/sql-reference/aggregate-functions/reference/array_concat_agg.md (new file, 32 lines)

@@ -0,0 +1,32 @@
+---
+slug: /en/sql-reference/aggregate-functions/reference/array_concat_agg
+sidebar_position: 110
+---
+
+# array_concat_agg
+- Alias of `groupArrayArray`. The function is case insensitive.
+
+**Example**
+
+```text
+SELECT *
+FROM t
+
+┌─a───────┐
+│ [1,2,3] │
+│ [4,5]   │
+│ [6]     │
+└─────────┘
+
+```
+
+Query:
+
+```sql
+SELECT array_concat_agg(a) AS a
+FROM t
+
+┌─a─────────────┐
+│ [1,2,3,4,5,6] │
+└───────────────┘
+```
@@ -102,6 +102,8 @@ The function also works for strings.

 Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only [size0](../../sql-reference/data-types/array.md#array-size) subcolumn instead of reading and processing the whole array column. The query `SELECT length(arr) FROM table` transforms to `SELECT arr.size0 FROM TABLE`.

+Alias: `OCTET_LENGTH`
+
 ## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64

 ## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64

@@ -142,6 +144,7 @@ range([start, ] end [, step])

 - All arguments `start`, `end`, `step` must be below data types: `UInt8`, `UInt16`, `UInt32`, `UInt64`,`Int8`, `Int16`, `Int32`, `Int64`, as well as elements of the returned array, which's type is a super type of all arguments.
 - An exception is thrown if query results in arrays with a total length of more than number of elements specified by the [function_range_max_elements_in_block](../../operations/settings/settings.md#settings-function_range_max_elements_in_block) setting.
+- Returns Null if any argument has Nullable(Nothing) type. An exception is thrown if any argument has Null value (Nullable(T) type).

 **Examples**

@@ -878,7 +881,7 @@ A special function. See the section ["ArrayJoin function"](../../sql-reference/)

 ## arrayDifference

 Calculates an array of differences between adjacent array elements. The first element of the result array will be 0, the second `a[1] - a[0]`, the third `a[2] - a[1]`, etc. The type of elements in the result array is determined by the type inference rules for subtraction (e.g. `UInt8` - `UInt8` = `Int16`).

 **Syntax**

@@ -996,6 +999,24 @@ SELECT
 └──────────────┴───────────┘
 ```

+## arrayJaccardIndex
+
+Returns the [Jaccard index](https://en.wikipedia.org/wiki/Jaccard_index) of two arrays.
+
+**Example**
+
+Query:
+``` sql
+SELECT arrayJaccardIndex([1, 2], [2, 3]) AS res
+```
+
+Result:
+``` text
+┌─res────────────────┐
+│ 0.3333333333333333 │
+└────────────────────┘
+```
+
 ## arrayReduce

 Applies an aggregate function to array elements and returns its result. The name of the aggregation function is passed as a string in single quotes `'max'`, `'sum'`. When using parametric aggregate functions, the parameter is indicated after the function name in parentheses `'uniqUpTo(6)'`.
@@ -90,6 +90,8 @@ Returns the length of a string in bytes (not: in characters or Unicode code points).

 The function also works for arrays.

+Alias: `OCTET_LENGTH`
+
 ## lengthUTF8

 Returns the length of a string in Unicode code points (not: in bytes or characters). It assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
@@ -1253,3 +1255,15 @@ Result:
 │ A240             │
 └──────────────────┘
 ```
+
+## initcap
+
+Convert the first letter of each word to upper case and the rest to lower case. Words are sequences of alphanumeric characters separated by non-alphanumeric characters.
+
+## initcapUTF8
+
+Like [initcap](#initcap), assuming that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
+
+Does not detect the language, e.g. for Turkish the result might not be exactly correct (i/İ vs. i/I).
+
+If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point.
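For intuition, the documented word rule ("sequences of alphanumeric characters separated by non-alphanumeric characters") behaves roughly like this sketch; it models the spec, not ClickHouse's actual implementation:

```python
import re

def initcap(s: str) -> str:
    # Uppercase the first character of each alphanumeric run, lowercase the rest.
    return re.sub(r"[A-Za-z0-9]+",
                  lambda m: m.group(0)[0].upper() + m.group(0)[1:].lower(),
                  s)

print(initcap("hello WORLD-foo_bar"))  # -> "Hello World-Foo_Bar"
```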
@@ -5,7 +5,27 @@ sidebar_label: WITH

 # WITH Clause

-ClickHouse supports Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), that is provides to use results of `WITH` clause in the rest of `SELECT` query. Named subqueries can be included to the current and child query context in places where table objects are allowed. Recursion is prevented by hiding the current level CTEs from the WITH expression.
+ClickHouse supports Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)) and substitutes the code defined in the `WITH` clause in all places of use for the rest of `SELECT` query. Named subqueries can be included to the current and child query context in places where table objects are allowed. Recursion is prevented by hiding the current level CTEs from the WITH expression.
+
+Please note that CTEs do not guarantee the same results in all places they are called because the query will be re-executed for each use case.
+
+An example of such behavior is below:
+``` sql
+with cte_numbers as
+(
+    select
+        num
+    from generateRandom('num UInt64', NULL)
+    limit 1000000
+)
+select
+    count()
+from cte_numbers
+where num in (select num from cte_numbers)
+```
+If CTEs were to pass exactly the results and not just a piece of code, you would always see `1000000`.
+
+However, because we are referring to `cte_numbers` twice, random numbers are generated each time and, accordingly, we see different random results: `280501, 392454, 261636, 196227` and so on.

 ## Syntax
(Russian documentation, translated to English)

@@ -4201,6 +4201,7 @@ SELECT *, timezone() FROM test_tz WHERE d = '2000-01-01 00:00:00' SETTINGS sessi

 ### Template

 The template supports the following kinds of placeholders:

+- `%a` — Full original filename (e.g. "sample.csv").
 - `%f` — Original filename without extension (e.g. "sample").
 - `%e` — Original file extension with dot (e.g. ".csv").
 - `%t` — Current time (in microseconds).
(Russian documentation, translated to English)

@@ -145,6 +145,8 @@ range([start, ] end [, step])

 - If the query produces arrays with a total length greater than the number of elements specified by the [function_range_max_elements_in_block](../../operations/settings/settings.md#settings-function_range_max_elements_in_block) setting, an exception is thrown.

+- Returns Null if any argument is of type Nullable(Nothing). An exception is thrown if any argument has a Null value (Nullable(T) type).
+
 **Examples**

 Query:
(Russian documentation, translated to English)

@@ -1113,3 +1113,14 @@ A text with tags .
 The content within <b>CDATA</b>
 Do Nothing for 2 Minutes 2:00
 ```
+
+## initcap {#initcap}
+
+Converts the first letter of each word in a string to upper case and the rest to lower case. Words are sequences of alphanumeric characters separated by any other characters.
+
+## initcapUTF8 {#initcapUTF8}
+
+Like [initcap](#initcap), assuming the string contains a byte sequence representing UTF-8 encoded text.
+Does not take the language into account, so for Turkish the result might not be entirely correct.
+If the length of the UTF-8 byte sequence differs between the upper and lower case of a code point, the result for that code point may be incorrect.
+If the string contains bytes that are not valid UTF-8, the behavior is undefined.
@@ -67,29 +67,38 @@ struct AggregateFunctionBoundingRatioData
         }
     }

-    void serialize(WriteBuffer & buf) const
-    {
-        writeBinary(empty, buf);
-
-        if (!empty)
-        {
-            writePODBinary(left, buf);
-            writePODBinary(right, buf);
-        }
-    }
-
-    void deserialize(ReadBuffer & buf)
-    {
-        readBinary(empty, buf);
-
-        if (!empty)
-        {
-            readPODBinary(left, buf);
-            readPODBinary(right, buf);
-        }
-    }
+    void serialize(WriteBuffer & buf) const;
+    void deserialize(ReadBuffer & buf);
 };

+template <std::endian endian>
+inline void transformEndianness(AggregateFunctionBoundingRatioData::Point & p)
+{
+    transformEndianness<endian>(p.x);
+    transformEndianness<endian>(p.y);
+}
+
+void AggregateFunctionBoundingRatioData::serialize(WriteBuffer & buf) const
+{
+    writeBinaryLittleEndian(empty, buf);
+
+    if (!empty)
+    {
+        writeBinaryLittleEndian(left, buf);
+        writeBinaryLittleEndian(right, buf);
+    }
+}
+
+void AggregateFunctionBoundingRatioData::deserialize(ReadBuffer & buf)
+{
+    readBinaryLittleEndian(empty, buf);
+
+    if (!empty)
+    {
+        readBinaryLittleEndian(left, buf);
+        readBinaryLittleEndian(right, buf);
+    }
+}
+
 class AggregateFunctionBoundingRatio final : public IAggregateFunctionDataHelper<AggregateFunctionBoundingRatioData, AggregateFunctionBoundingRatio>
 {
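This hunk and the ones that follow replace native-byte-order helpers (writeBinary, writeIntBinary, writePODBinary and their read counterparts) with explicit little-endian ones, so serialized aggregation state is byte-identical across big- and little-endian hosts. A minimal sketch of why pinning the byte order matters (Python's struct module, illustrative only):

```python
import struct

value = 0x0102030405060708

native = struct.pack("=Q", value)  # byte order depends on the host CPU
little = struct.pack("<Q", value)  # b'\x08\x07...\x01' on every host

# A reader that always decodes little-endian recovers the value anywhere:
assert struct.unpack("<Q", little)[0] == value
```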
@@ -103,18 +103,18 @@ public:

     void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
-        writeIntBinary(this->data(place).sum, buf);
-        writeIntBinary(this->data(place).first, buf);
-        writeIntBinary(this->data(place).last, buf);
-        writePODBinary<bool>(this->data(place).seen, buf);
+        writeBinaryLittleEndian(this->data(place).sum, buf);
+        writeBinaryLittleEndian(this->data(place).first, buf);
+        writeBinaryLittleEndian(this->data(place).last, buf);
+        writeBinaryLittleEndian(this->data(place).seen, buf);
     }

     void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
-        readIntBinary(this->data(place).sum, buf);
-        readIntBinary(this->data(place).first, buf);
-        readIntBinary(this->data(place).last, buf);
-        readPODBinary<bool>(this->data(place).seen, buf);
+        readBinaryLittleEndian(this->data(place).sum, buf);
+        readBinaryLittleEndian(this->data(place).first, buf);
+        readBinaryLittleEndian(this->data(place).last, buf);
+        readBinaryLittleEndian(this->data(place).seen, buf);
     }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
@@ -144,22 +144,22 @@ public:

     void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
-        writeIntBinary(this->data(place).sum, buf);
-        writeIntBinary(this->data(place).first, buf);
-        writeIntBinary(this->data(place).first_ts, buf);
-        writeIntBinary(this->data(place).last, buf);
-        writeIntBinary(this->data(place).last_ts, buf);
-        writePODBinary<bool>(this->data(place).seen, buf);
+        writeBinaryLittleEndian(this->data(place).sum, buf);
+        writeBinaryLittleEndian(this->data(place).first, buf);
+        writeBinaryLittleEndian(this->data(place).first_ts, buf);
+        writeBinaryLittleEndian(this->data(place).last, buf);
+        writeBinaryLittleEndian(this->data(place).last_ts, buf);
+        writeBinaryLittleEndian(this->data(place).seen, buf);
     }

     void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
-        readIntBinary(this->data(place).sum, buf);
-        readIntBinary(this->data(place).first, buf);
-        readIntBinary(this->data(place).first_ts, buf);
-        readIntBinary(this->data(place).last, buf);
-        readIntBinary(this->data(place).last_ts, buf);
-        readPODBinary<bool>(this->data(place).seen, buf);
+        readBinaryLittleEndian(this->data(place).sum, buf);
+        readBinaryLittleEndian(this->data(place).first, buf);
+        readBinaryLittleEndian(this->data(place).first_ts, buf);
+        readBinaryLittleEndian(this->data(place).last, buf);
+        readBinaryLittleEndian(this->data(place).last_ts, buf);
+        readBinaryLittleEndian(this->data(place).seen, buf);
     }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
@@ -222,7 +222,6 @@ AggregateFunctionPtr AggregateFunctionFactory::tryGet(
         : nullptr;
 }

-
 std::optional<AggregateFunctionProperties> AggregateFunctionFactory::tryGetProperties(String name) const
 {
     if (name.size() > MAX_AGGREGATE_FUNCTION_NAME_LENGTH)
@@ -126,6 +126,7 @@ void registerAggregateFunctionGroupArray(AggregateFunctionFactory & factory)

     factory.registerFunction("groupArray", { createAggregateFunctionGroupArray<false>, properties });
     factory.registerAlias("array_agg", "groupArray", AggregateFunctionFactory::CaseInsensitive);
+    factory.registerAliasUnchecked("array_concat_agg", "groupArrayArray", AggregateFunctionFactory::CaseInsensitive);
     factory.registerFunction("groupArraySample", { createAggregateFunctionGroupArraySample, properties });
     factory.registerFunction("groupArrayLast", { createAggregateFunctionGroupArray<true>, properties });
 }
@@ -266,19 +266,20 @@ public:
     void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         const auto & value = this->data(place).value;
-        size_t size = value.size();
+        const size_t size = value.size();
         writeVarUInt(size, buf);
-        buf.write(reinterpret_cast<const char *>(value.data()), size * sizeof(value[0]));
+        for (const auto & element : value)
+            writeBinaryLittleEndian(element, buf);

         if constexpr (Trait::last)
-            DB::writeIntBinary<size_t>(this->data(place).total_values, buf);
+            writeBinaryLittleEndian(this->data(place).total_values, buf);

         if constexpr (Trait::sampler == Sampler::RNG)
         {
-            DB::writeIntBinary<size_t>(this->data(place).total_values, buf);
+            writeBinaryLittleEndian(this->data(place).total_values, buf);
             WriteBufferFromOwnString rng_buf;
             rng_buf << this->data(place).rng;
-            DB::writeStringBinary(rng_buf.str(), buf);
+            writeStringBinary(rng_buf.str(), buf);
         }
     }

@@ -297,16 +298,17 @@ public:
         auto & value = this->data(place).value;

         value.resize_exact(size, arena);
-        buf.readStrict(reinterpret_cast<char *>(value.data()), size * sizeof(value[0]));
+        for (auto & element : value)
+            readBinaryLittleEndian(element, buf);

         if constexpr (Trait::last)
-            DB::readIntBinary<size_t>(this->data(place).total_values, buf);
+            readBinaryLittleEndian(this->data(place).total_values, buf);

         if constexpr (Trait::sampler == Sampler::RNG)
         {
-            DB::readIntBinary<size_t>(this->data(place).total_values, buf);
+            readBinaryLittleEndian(this->data(place).total_values, buf);
             std::string rng_string;
-            DB::readStringBinary(rng_string, buf);
+            readStringBinary(rng_string, buf);
             ReadBufferFromString rng_buf(rng_string);
             rng_buf >> this->data(place).rng;
         }
@@ -603,14 +605,14 @@ public:
             node->write(buf);

         if constexpr (Trait::last)
-            DB::writeIntBinary<size_t>(data(place).total_values, buf);
+            writeBinaryLittleEndian(data(place).total_values, buf);

         if constexpr (Trait::sampler == Sampler::RNG)
         {
-            DB::writeIntBinary<size_t>(data(place).total_values, buf);
+            writeBinaryLittleEndian(data(place).total_values, buf);
             WriteBufferFromOwnString rng_buf;
             rng_buf << data(place).rng;
-            DB::writeStringBinary(rng_buf.str(), buf);
+            writeStringBinary(rng_buf.str(), buf);
         }
     }

@@ -636,13 +638,13 @@ public:
             value[i] = Node::read(buf, arena);

         if constexpr (Trait::last)
-            DB::readIntBinary<size_t>(data(place).total_values, buf);
+            readBinaryLittleEndian(data(place).total_values, buf);

         if constexpr (Trait::sampler == Sampler::RNG)
         {
-            DB::readIntBinary<size_t>(data(place).total_values, buf);
+            readBinaryLittleEndian(data(place).total_values, buf);
             std::string rng_string;
-            DB::readStringBinary(rng_string, buf);
+            readStringBinary(rng_string, buf);
             ReadBufferFromString rng_buf(rng_string);
             rng_buf >> data(place).rng;
         }
@@ -233,35 +233,35 @@ public:

     void write(WriteBuffer & buf) const
     {
-        writeIntBinary<size_t>(compress_threshold, buf);
-        writeFloatBinary<double>(relative_error, buf);
-        writeIntBinary<size_t>(count, buf);
-        writeIntBinary<size_t>(sampled.size(), buf);
+        writeBinaryLittleEndian(compress_threshold, buf);
+        writeBinaryLittleEndian(relative_error, buf);
+        writeBinaryLittleEndian(count, buf);
+        writeBinaryLittleEndian(sampled.size(), buf);

         for (const auto & stats : sampled)
         {
-            writeFloatBinary<T>(stats.value, buf);
-            writeIntBinary<Int64>(stats.g, buf);
-            writeIntBinary<Int64>(stats.delta, buf);
+            writeBinaryLittleEndian(stats.value, buf);
+            writeBinaryLittleEndian(stats.g, buf);
+            writeBinaryLittleEndian(stats.delta, buf);
         }
     }

     void read(ReadBuffer & buf)
     {
-        readIntBinary<size_t>(compress_threshold, buf);
-        readFloatBinary<double>(relative_error, buf);
-        readIntBinary<size_t>(count, buf);
+        readBinaryLittleEndian(compress_threshold, buf);
+        readBinaryLittleEndian(relative_error, buf);
+        readBinaryLittleEndian(count, buf);

         size_t sampled_len = 0;
-        readIntBinary<size_t>(sampled_len, buf);
+        readBinaryLittleEndian(sampled_len, buf);
         sampled.resize(sampled_len);

         for (size_t i = 0; i < sampled_len; ++i)
         {
             auto stats = sampled[i];
-            readFloatBinary<T>(stats.value, buf);
-            readIntBinary<Int64>(stats.g, buf);
-            readIntBinary<Int64>(stats.delta, buf);
+            readBinaryLittleEndian(stats.value, buf);
+            readBinaryLittleEndian(stats.g, buf);
+            readBinaryLittleEndian(stats.delta, buf);
         }
     }

@@ -207,8 +207,8 @@ public:

     void read(DB::ReadBuffer & buf)
     {
-        DB::readIntBinary<size_t>(sample_count, buf);
-        DB::readIntBinary<size_t>(total_values, buf);
+        DB::readBinaryLittleEndian(sample_count, buf);
+        DB::readBinaryLittleEndian(total_values, buf);

         size_t size = std::min(total_values, sample_count);
         static constexpr size_t MAX_RESERVOIR_SIZE = 1_GiB;

@@ -224,22 +224,22 @@ public:
         rng_buf >> rng;

         for (size_t i = 0; i < samples.size(); ++i)
-            DB::readBinary(samples[i], buf);
+            DB::readBinaryLittleEndian(samples[i], buf);

         sorted = false;
     }

     void write(DB::WriteBuffer & buf) const
     {
-        DB::writeIntBinary<size_t>(sample_count, buf);
-        DB::writeIntBinary<size_t>(total_values, buf);
+        DB::writeBinaryLittleEndian(sample_count, buf);
+        DB::writeBinaryLittleEndian(total_values, buf);

         DB::WriteBufferFromOwnString rng_buf;
         rng_buf << rng;
         DB::writeStringBinary(rng_buf.str(), buf);

         for (size_t i = 0; i < std::min(sample_count, total_values); ++i)
-            DB::writeBinary(samples[i], buf);
+            DB::writeBinaryLittleEndian(samples[i], buf);
     }

 private:
@@ -6223,7 +6223,11 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node,
     const auto & insertion_table = scope_context->getInsertionTable();
     if (!insertion_table.empty())
     {
-        const auto & insert_structure = DatabaseCatalog::instance().getTable(insertion_table, scope_context)->getInMemoryMetadataPtr()->getColumns();
+        const auto & insert_structure = DatabaseCatalog::instance()
+                                            .getTable(insertion_table, scope_context)
+                                            ->getInMemoryMetadataPtr()
+                                            ->getColumns()
+                                            .getInsertable();
         DB::ColumnsDescription structure_hint;

         bool use_columns_from_insert_query = true;
@@ -848,6 +848,9 @@ ASTs QueryFuzzer::getDropQueriesForFuzzedTables(const ASTDropQuery & drop_query)

 void QueryFuzzer::notifyQueryFailed(ASTPtr ast)
 {
+    if (ast == nullptr)
+        return;
+
     auto remove_fuzzed_table = [this](const auto & table_name)
     {
         auto pos = table_name.find("__fuzz_");
@@ -10,7 +10,6 @@
 #include <cassert>
 #include <chrono>
 #include <cstring>
-#include <iostream>
 #include <memory>


@@ -47,6 +47,7 @@ String FileRenamer::generateNewFilename(const String & filename) const
     // Define placeholders and their corresponding values
     std::map<String, String> placeholders =
     {
+        {"%a", filename},
         {"%f", file_base},
         {"%e", file_ext},
         {"%t", timestamp},

@@ -69,16 +70,17 @@ bool FileRenamer::isEmpty() const
 bool FileRenamer::validateRenamingRule(const String & rule, bool throw_on_error)
 {
     // Check if the rule contains invalid placeholders
-    re2::RE2 invalid_placeholder_pattern("^([^%]|%[fet%])*$");
+    re2::RE2 invalid_placeholder_pattern("^([^%]|%[afet%])*$");
     if (!re2::RE2::FullMatch(rule, invalid_placeholder_pattern))
     {
         if (throw_on_error)
-            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid renaming rule: Allowed placeholders only %f, %e, %t, and %%");
+            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid renaming rule: Allowed placeholders only %a, %f, %e, %t, and %%");
         return false;
     }

     // Replace valid placeholders with empty strings and count remaining percentage signs.
     String replaced_rule = rule;
+    boost::replace_all(replaced_rule, "%a", "");
     boost::replace_all(replaced_rule, "%f", "");
     boost::replace_all(replaced_rule, "%e", "");
     boost::replace_all(replaced_rule, "%t", "");
@@ -9,6 +9,7 @@ namespace DB
 /**
  * The FileRenamer class provides functionality for renaming files based on given pattern with placeholders
  * The supported placeholders are:
+ * %a - Full original file name ("sample.csv")
  * %f - Original filename without extension ("sample")
  * %e - Original file extension with dot (".csv")
  * %t - Timestamp (in microseconds)
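With the new %a placeholder, a rule such as "done_%a" keeps the full original file name. A self-contained sketch of the substitution (illustrative helper names and values; the real implementation uses boost::replace_all and the re2 validation shown above, and also handles the %% escape, which this sketch omits):

    #include <iostream>
    #include <map>
    #include <string>

    std::string expand(std::string rule, const std::map<std::string, std::string> & placeholders)
    {
        for (const auto & [key, value] : placeholders)
            for (size_t pos = rule.find(key); pos != std::string::npos; pos = rule.find(key, pos + value.size()))
                rule.replace(pos, key.size(), value);
        return rule;
    }

    int main()
    {
        const std::map<std::string, std::string> placeholders =
        {
            {"%a", "sample.csv"},           // full original file name
            {"%f", "sample"},               // base name without extension
            {"%e", ".csv"},                 // extension with dot
            {"%t", "1689000000000000"},     // timestamp in microseconds (example value)
        };
        std::cout << expand("%f_%t%e", placeholders) << '\n';  // sample_1689000000000000.csv
        std::cout << expand("done_%a", placeholders) << '\n';  // done_sample.csv
    }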
@@ -52,35 +52,38 @@ public:
     {
         const auto & creator_map = getMap();
         const auto & case_insensitive_creator_map = getCaseInsensitiveMap();
-        const String factory_name = getFactoryName();

-        String real_dict_name;
-        if (creator_map.count(real_name))
-            real_dict_name = real_name;
-        else if (auto real_name_lowercase = Poco::toLower(real_name); case_insensitive_creator_map.count(real_name_lowercase))
-            real_dict_name = real_name_lowercase;
-        else
-            throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: can't create alias '{}', the real name '{}' is not registered",
-                            factory_name, alias_name, real_name);
+        auto real_name_lowercase = Poco::toLower(real_name);
+        if (!creator_map.contains(real_name) && !case_insensitive_creator_map.contains(real_name_lowercase))
+            throw Exception(
+                ErrorCodes::LOGICAL_ERROR,
+                "{}: can't create alias '{}', the real name '{}' is not registered",
+                getFactoryName(),
+                alias_name,
+                real_name);

+        registerAliasUnchecked(alias_name, real_name, case_sensitiveness);
+    }
+
+    /// The caller must make sure the real_name exists when calling this function directly.
+    void registerAliasUnchecked(const String & alias_name, const String & real_name, CaseSensitiveness case_sensitiveness = CaseSensitive)
+    {
         String alias_name_lowercase = Poco::toLower(alias_name);
-        if (creator_map.count(alias_name) || case_insensitive_creator_map.count(alias_name_lowercase))
-            throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: the alias name '{}' is already registered as real name",
-                            factory_name, alias_name);
+        String real_name_lowercase = Poco::toLower(real_name);
+        const String factory_name = getFactoryName();

         if (case_sensitiveness == CaseInsensitive)
         {
-            if (!case_insensitive_aliases.emplace(alias_name_lowercase, real_dict_name).second)
-                throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: case insensitive alias name '{}' is not unique",
-                                factory_name, alias_name);
+            if (!case_insensitive_aliases.emplace(alias_name_lowercase, real_name).second)
+                throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: case insensitive alias name '{}' is not unique", factory_name, alias_name);
             case_insensitive_name_mapping[alias_name_lowercase] = real_name;
         }

-        if (!aliases.emplace(alias_name, real_dict_name).second)
+        if (!aliases.emplace(alias_name, real_name).second)
             throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: alias name '{}' is not unique", factory_name, alias_name);
     }


     std::vector<String> getAllRegisteredNames() const override
     {
         std::vector<String> result;
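The refactoring splits validation from bookkeeping: registerAlias() checks that the real name is registered and then delegates to registerAliasUnchecked(), which callers may invoke directly when existence is already guaranteed. A condensed sketch of that shape (simplified types, not the ClickHouse factory):

    #include <stdexcept>
    #include <string>
    #include <unordered_map>

    struct TinyFactory
    {
        std::unordered_map<std::string, int> creators;         // real name -> creator
        std::unordered_map<std::string, std::string> aliases;  // alias -> real name

        void registerAlias(const std::string & alias, const std::string & real)
        {
            if (!creators.contains(real))
                throw std::logic_error("real name '" + real + "' is not registered");
            registerAliasUnchecked(alias, real);
        }

        // Skips the existence check; callers must guarantee `real` is registered.
        void registerAliasUnchecked(const std::string & alias, const std::string & real)
        {
            if (!aliases.emplace(alias, real).second)
                throw std::logic_error("alias '" + alias + "' is not unique");
        }
    };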
@@ -93,7 +96,7 @@ public:
     bool isCaseInsensitive(const String & name) const
     {
         String name_lowercase = Poco::toLower(name);
-        return getCaseInsensitiveMap().count(name_lowercase) || case_insensitive_aliases.count(name_lowercase);
+        return getCaseInsensitiveMap().contains(name_lowercase) || case_insensitive_aliases.contains(name_lowercase);
     }

     const String & aliasTo(const String & name) const

@@ -106,14 +109,11 @@ public:
         throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: name '{}' is not alias", getFactoryName(), name);
     }

-    bool isAlias(const String & name) const
-    {
-        return aliases.count(name) || case_insensitive_aliases.contains(name);
-    }
+    bool isAlias(const String & name) const { return aliases.contains(name) || case_insensitive_aliases.contains(name); }

     bool hasNameOrAlias(const String & name) const
     {
-        return getMap().count(name) || getCaseInsensitiveMap().count(name) || isAlias(name);
+        return getMap().contains(name) || getCaseInsensitiveMap().contains(name) || isAlias(name);
     }

     /// Return the canonical name (the name used in registration) if it's different from `name`.

@@ -129,7 +129,7 @@ public:

 private:
     using InnerMap = std::unordered_map<String, Value>; // name -> creator
-    using AliasMap = std::unordered_map<String, String>; // alias -> original type
+    using AliasMap = std::unordered_map<String, String>; // alias -> original name

     virtual const InnerMap & getMap() const = 0;
     virtual const InnerMap & getCaseInsensitiveMap() const = 0;
@@ -10,6 +10,27 @@ namespace ErrorCodes
     extern const int BAD_ARGUMENTS;
 }

+Int64 IntervalKind::toAvgNanoseconds() const
+{
+    static constexpr Int64 NANOSECONDS_PER_MICROSECOND = 1000;
+    static constexpr auto NANOSECONDS_PER_MILLISECOND = NANOSECONDS_PER_MICROSECOND * 1000;
+    static constexpr auto NANOSECONDS_PER_SECOND = NANOSECONDS_PER_MILLISECOND * 1000;
+
+    switch (kind)
+    {
+        case IntervalKind::Millisecond:
+            return NANOSECONDS_PER_MILLISECOND;
+        case IntervalKind::Microsecond:
+            return NANOSECONDS_PER_MICROSECOND;
+        case IntervalKind::Nanosecond:
+            return 1;
+        default:
+            return toAvgSeconds() * NANOSECONDS_PER_SECOND;
+    }
+
+    UNREACHABLE();
+}
+
 Int32 IntervalKind::toAvgSeconds() const
 {
     switch (kind)
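A quick sanity check of the unit arithmetic above, assuming toAvgSeconds() returns 60 for Minute as it does elsewhere in this file:

    #include <cstdint>

    // Illustrative constants mirroring toAvgNanoseconds().
    constexpr int64_t NS_PER_US = 1000;
    constexpr int64_t NS_PER_MS = NS_PER_US * 1000;
    constexpr int64_t NS_PER_S  = NS_PER_MS * 1000;

    static_assert(NS_PER_MS == 1'000'000);
    static_assert(NS_PER_S  == 1'000'000'000);
    static_assert(60 * NS_PER_S == 60'000'000'000);  // one Minute, via toAvgSeconds() * NS_PER_S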
@@ -29,6 +29,10 @@ struct IntervalKind

     constexpr std::string_view toString() const { return magic_enum::enum_name(kind); }

+    /// Returns number of nanoseconds in one interval.
+    /// For `Month`, `Quarter` and `Year` the function returns an average number of nanoseconds.
+    Int64 toAvgNanoseconds() const;
+
     /// Returns number of seconds in one interval.
     /// For `Month`, `Quarter` and `Year` the function returns an average number of seconds.
     Int32 toAvgSeconds() const;
@@ -59,4 +59,10 @@ inline void transformEndianness(std::pair<A, B> & pair)
     transformEndianness<endian>(pair.first);
     transformEndianness<endian>(pair.second);
 }
+
+template <std::endian endian, typename T, typename Tag>
+inline void transformEndianness(StrongTypedef<T, Tag> & x)
+{
+    transformEndianness<endian>(x.toUnderType());
+}
 }
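The new overload lets the endianness helpers see through strong typedefs (UUID-like wrappers) to the underlying integer. A minimal sketch with a simplified StrongTypedef (std::byteswap is C++23 and stands in for the real byte-swapping machinery; the names here are assumptions):

    #include <bit>
    #include <cstdint>

    template <typename T, typename Tag>
    struct StrongTypedef   // simplified stand-in for base/strong_typedef.h
    {
        T value;
        T & toUnderType() { return value; }
    };

    template <std::endian endian, typename T>
    void transformEndianness(T & x)
    {
        if constexpr (endian != std::endian::native)
            x = std::byteswap(x);  // illustrative only
    }

    // Forwarding to the underlying type lets a wrapped integer be swapped transparently.
    template <std::endian endian, typename T, typename Tag>
    void transformEndianness(StrongTypedef<T, Tag> & x)
    {
        transformEndianness<endian>(x.toUnderType());
    }

    using UuidLike = StrongTypedef<uint64_t, struct UuidTag>;   // hypothetical example type

    void demo()
    {
        UuidLike id{0x0123456789abcdefULL};
        transformEndianness<std::endian::big>(id);  // byte-swaps on little-endian hosts
    }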
@@ -25,8 +25,6 @@ void Pool::Entry::incrementRefCount()
     /// First reference, initialize thread
     if (data->ref_count.fetch_add(1) == 0)
         mysql_thread_init();
-
-    chassert(!data->removed_from_pool);
 }


@@ -43,7 +41,10 @@ void Pool::Entry::decrementRefCount()
     /// In Pool::Entry::disconnect() we remove connection from the list of pool's connections.
     /// So now we must deallocate the memory.
     if (data->removed_from_pool)
+    {
+        data->conn.disconnect();
         ::delete data;
+    }
 }
 }

@@ -230,8 +231,6 @@ void Pool::removeConnection(Connection* connection)
     std::lock_guard lock(mutex);
     if (connection)
     {
-        if (!connection->removed_from_pool)
-            connection->conn.disconnect();
         connections.remove(connection);
         connection->removed_from_pool = true;
     }

@@ -240,6 +239,7 @@ void Pool::removeConnection(Connection* connection)

 void Pool::Entry::disconnect()
 {
+    // Remove the Entry from the Pool. Actual disconnection is delayed until refcount == 0.
     pool->removeConnection(data);
 }

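Taken together, these hunks move the socket teardown from removeConnection() to the release of the last reference: disconnect() only unlinks the entry, and decrementRefCount() closes and frees it once the refcount reaches zero. A sketch of that lifetime rule (simplified names, not the mysqlxx API):

    #include <atomic>

    struct ConnData
    {
        std::atomic<int> ref_count{0};
        bool removed_from_pool = false;
        void close_socket() { /* close the MySQL connection here */ }
    };

    void release(ConnData * data)
    {
        // fetch_sub returns the previous value, so 1 means this was the last reference.
        if (data->ref_count.fetch_sub(1) == 1 && data->removed_from_pool)
        {
            data->close_socket();  // deferred until the final reference drops
            delete data;
        }
    }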
@@ -18,6 +18,20 @@
 #include <unistd.h>
 #include <bit>

+namespace
+{
+
+String formatZxid(int64_t zxid)
+{
+    /// ZooKeeper prints zxid in hex, without leading zeros.
+    String hex = getHexUIntLowercase(zxid);
+    trimLeft(hex, '0');
+    return "0x" + hex;
+}
+
+}
+
+
 namespace DB
 {
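formatZxid() mirrors ZooKeeper's convention of printing zxids as lowercase hex without leading zeros. An illustrative re-implementation (getHexUIntLowercase and trimLeft are the real helpers used above; this sketch is independent of them):

    #include <cstdint>
    #include <string>

    std::string format_zxid(int64_t zxid)
    {
        static const char * digits = "0123456789abcdef";
        std::string hex;
        for (int shift = 60; shift >= 0; shift -= 4)           // 16 nibbles of a 64-bit value
            hex += digits[(static_cast<uint64_t>(zxid) >> shift) & 0xF];
        hex.erase(0, hex.find_first_not_of('0'));              // like trimLeft(hex, '0')
        return "0x" + hex;  // e.g. 255 -> "0xff"; note that zxid == 0 yields just "0x"
    }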
@@ -348,7 +362,7 @@ String ServerStatCommand::run()
     write("Sent", toString(stats.getPacketsSent()));
     write("Connections", toString(keeper_info.alive_connections_count));
     write("Outstanding", toString(keeper_info.outstanding_requests_count));
-    write("Zxid", toString(keeper_info.last_zxid));
+    write("Zxid", formatZxid(keeper_info.last_zxid));
     write("Mode", keeper_info.getRole());
     write("Node count", toString(keeper_info.total_nodes_count));

@@ -381,7 +395,7 @@ String StatCommand::run()
     write("Sent", toString(stats.getPacketsSent()));
     write("Connections", toString(keeper_info.alive_connections_count));
     write("Outstanding", toString(keeper_info.outstanding_requests_count));
-    write("Zxid", toString(keeper_info.last_zxid));
+    write("Zxid", formatZxid(keeper_info.last_zxid));
     write("Mode", keeper_info.getRole());
     write("Node count", toString(keeper_info.total_nodes_count));

@@ -577,6 +577,7 @@ class IColumn;
     M(Bool, optimize_skip_merged_partitions, false, "Skip partitions with one part with level > 0 in optimize final", 0) \
     M(Bool, optimize_on_insert, true, "Do the same transformation for inserted block of data as if merge was done on this block.", 0) \
     M(Bool, optimize_use_projections, true, "Automatically choose projections to perform SELECT query", 0) ALIAS(allow_experimental_projection_optimization) \
+    M(Bool, optimize_use_implicit_projections, false, "Automatically choose implicit projections to perform SELECT query", 0) \
     M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \
     M(Bool, async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \
     M(Bool, async_query_sending_for_remote, true, "Asynchronously create connections and send query to shards in remote query", 0) \

@@ -736,7 +737,7 @@ class IColumn;
     M(String, workload, "default", "Name of workload to be used to access resources", 0) \
     M(Milliseconds, storage_system_stack_trace_pipe_read_timeout_ms, 100, "Maximum time to read from a pipe for receiving information from the threads when querying the `system.stack_trace` table. This setting is used for testing purposes and not meant to be changed by users.", 0) \
     \
-    M(String, rename_files_after_processing, "", "Rename successfully processed files according to the specified pattern; Pattern can include the following placeholders: `%f` (original filename without extension), `%e` (file extension with dot), `%t` (current timestamp in µs), and `%%` (% sign)", 0) \
+    M(String, rename_files_after_processing, "", "Rename successfully processed files according to the specified pattern; Pattern can include the following placeholders: `%a` (full original file name), `%f` (original filename without extension), `%e` (file extension with dot), `%t` (current timestamp in µs), and `%%` (% sign)", 0) \
     \
     M(Bool, parallelize_output_from_storages, true, "Parallelize output for reading step from storage. It allows parallelizing query processing right after reading from storage if possible", 0) \
     M(String, insert_deduplication_token, "", "If not empty, used for duplicate detection instead of data digest", 0) \

@@ -774,6 +775,7 @@ class IColumn;
     M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \
     M(UInt64, extract_kvp_max_pairs_per_row, 1000, "Max number pairs that can be produced by extractKeyValuePairs function. Used to safeguard against consuming too much memory.", 0) \
     M(Timezone, session_timezone, "", "The default timezone for current session or query. The server default timezone if empty.", 0) \
+    M(Bool, allow_create_index_without_type, false, "Allow CREATE INDEX query without TYPE. Query will be ignored. Made for SQL compatibility tests.", 0)\
     // End of COMMON_SETTINGS
     // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS.

@@ -906,6 +908,7 @@ class IColumn;
     \
     M(DateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic', 'best_effort' and 'best_effort_us'.", 0) \
     M(DateTimeOutputFormat, date_time_output_format, FormatSettings::DateTimeOutputFormat::Simple, "Method to write DateTime to text output. Possible values: 'simple', 'iso', 'unix_timestamp'.", 0) \
+    M(IntervalOutputFormat, interval_output_format, FormatSettings::IntervalOutputFormat::Numeric, "Textual representation of Interval. Possible values: 'kusto', 'numeric'.", 0) \
     \
     M(Bool, input_format_ipv4_default_on_conversion_error, false, "Deserialization of IPv4 will use default values instead of throwing exception on conversion error.", 0) \
     M(Bool, input_format_ipv6_default_on_conversion_error, false, "Deserialization of IPV6 will use default values instead of throwing exception on conversion error.", 0) \
@@ -80,6 +80,10 @@ namespace SettingsChangesHistory
 /// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
 static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
 {
+    {"23.7", {{"optimize_use_implicit_projections", true, false, "Disable implicit projections due to unexpected results."}}},
     {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."},
               {"http_receive_timeout", 180, 30, "See http_send_timeout."}}},
     {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."},
@@ -79,6 +79,10 @@ IMPLEMENT_SETTING_ENUM(DateTimeOutputFormat, ErrorCodes::BAD_ARGUMENTS,
      {"iso", FormatSettings::DateTimeOutputFormat::ISO},
      {"unix_timestamp", FormatSettings::DateTimeOutputFormat::UnixTimestamp}})

+IMPLEMENT_SETTING_ENUM(IntervalOutputFormat, ErrorCodes::BAD_ARGUMENTS,
+    {{"kusto", FormatSettings::IntervalOutputFormat::Kusto},
+     {"numeric", FormatSettings::IntervalOutputFormat::Numeric}})
+
 IMPLEMENT_SETTING_AUTO_ENUM(LogsLevel, ErrorCodes::BAD_ARGUMENTS)

 IMPLEMENT_SETTING_AUTO_ENUM(LogQueriesType, ErrorCodes::BAD_ARGUMENTS)

@@ -72,6 +72,8 @@ DECLARE_SETTING_ENUM_WITH_RENAME(DateTimeInputFormat, FormatSettings::DateTimeInputFormat)

 DECLARE_SETTING_ENUM_WITH_RENAME(DateTimeOutputFormat, FormatSettings::DateTimeOutputFormat)

+DECLARE_SETTING_ENUM_WITH_RENAME(IntervalOutputFormat, FormatSettings::IntervalOutputFormat)
+
 DECLARE_SETTING_ENUM_WITH_RENAME(ParquetVersion, FormatSettings::ParquetVersion)

 enum class LogsLevel
@@ -1,16 +1,18 @@
 #include <DataTypes/DataTypeInterval.h>
 #include <DataTypes/DataTypeFactory.h>
+#include <DataTypes/Serializations/SerializationInterval.h>


 namespace DB
 {

+SerializationPtr DataTypeInterval::doGetDefaultSerialization() const { return std::make_shared<SerializationInterval>(kind); }
+
 bool DataTypeInterval::equals(const IDataType & rhs) const
 {
     return typeid(rhs) == typeid(*this) && kind == static_cast<const DataTypeInterval &>(rhs).kind;
 }


 void registerDataTypeInterval(DataTypeFactory & factory)
 {
     factory.registerSimpleDataType("IntervalNanosecond", [] { return DataTypePtr(std::make_shared<DataTypeInterval>(IntervalKind::Nanosecond)); });

@@ -24,6 +24,7 @@ public:

     explicit DataTypeInterval(IntervalKind kind_) : kind(kind_) {}

+    SerializationPtr doGetDefaultSerialization() const override;
     std::string doGetName() const override { return fmt::format("Interval{}", kind.toString()); }
     const char * getFamilyName() const override { return "Interval"; }
     String getSQLCompatibleName() const override { return "TEXT"; }
@@ -410,21 +410,29 @@ inline bool isDateTime(const T & data_type)
 template <typename T>
 inline bool isDateTime64(const T & data_type) { return WhichDataType(data_type).isDateTime64(); }

-inline bool isEnum(const DataTypePtr & data_type) { return WhichDataType(data_type).isEnum(); }
-inline bool isDecimal(const DataTypePtr & data_type) { return WhichDataType(data_type).isDecimal(); }
-inline bool isTuple(const DataTypePtr & data_type) { return WhichDataType(data_type).isTuple(); }
-inline bool isArray(const DataTypePtr & data_type) { return WhichDataType(data_type).isArray(); }
-inline bool isMap(const DataTypePtr & data_type) {return WhichDataType(data_type).isMap(); }
-inline bool isInterval(const DataTypePtr & data_type) {return WhichDataType(data_type).isInterval(); }
-inline bool isNothing(const DataTypePtr & data_type) { return WhichDataType(data_type).isNothing(); }
-inline bool isUUID(const DataTypePtr & data_type) { return WhichDataType(data_type).isUUID(); }
-inline bool isIPv4(const DataTypePtr & data_type) { return WhichDataType(data_type).isIPv4(); }
-inline bool isIPv6(const DataTypePtr & data_type) { return WhichDataType(data_type).isIPv6(); }
+template <typename T>
+inline bool isEnum(const T & data_type) { return WhichDataType(data_type).isEnum(); }
+template <typename T>
+inline bool isDecimal(const T & data_type) { return WhichDataType(data_type).isDecimal(); }
+template <typename T>
+inline bool isTuple(const T & data_type) { return WhichDataType(data_type).isTuple(); }
+template <typename T>
+inline bool isArray(const T & data_type) { return WhichDataType(data_type).isArray(); }
+template <typename T>
+inline bool isMap(const T & data_type) {return WhichDataType(data_type).isMap(); }
+template <typename T>
+inline bool isInterval(const T & data_type) {return WhichDataType(data_type).isInterval(); }
+template <typename T>
+inline bool isNothing(const T & data_type) { return WhichDataType(data_type).isNothing(); }
+template <typename T>
+inline bool isUUID(const T & data_type) { return WhichDataType(data_type).isUUID(); }
+template <typename T>
+inline bool isIPv4(const T & data_type) { return WhichDataType(data_type).isIPv4(); }
+template <typename T>
+inline bool isIPv6(const T & data_type) { return WhichDataType(data_type).isIPv6(); }

 template <typename T>
-inline bool isObject(const T & data_type)
-{
-    return WhichDataType(data_type).isObject();
+inline bool isObject(const T & data_type) { return WhichDataType(data_type).isObject();
 }

 template <typename T>
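Templating the predicates on T lets them accept anything WhichDataType can be constructed from, instead of pinning each overload to DataTypePtr. A condensed sketch of the mechanism (simplified stand-in types):

    #include <memory>

    struct IDataType { /* ... */ };
    using DataTypePtr = std::shared_ptr<const IDataType>;

    struct WhichDataType
    {
        explicit WhichDataType(const IDataType &) { /* inspect the type id */ }
        explicit WhichDataType(const DataTypePtr &) { /* dereference, then inspect */ }
        bool isTuple() const { return false; /* illustration only */ }
    };

    template <typename T>
    bool isTuple(const T & data_type) { return WhichDataType(data_type).isTuple(); }
    // Now both isTuple(*type_ref) and isTuple(type_ptr) compile with one definition.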
 209  src/DataTypes/Serializations/SerializationInterval.cpp  Normal file
@@ -0,0 +1,209 @@
+#include "SerializationInterval.h"
+
+#include <Columns/ColumnsNumber.h>
+#include <IO/WriteBuffer.h>
+#include <Parsers/Kusto/Formatters.h>
+
+namespace DB
+{
+using ColumnInterval = DataTypeInterval::ColumnType;
+
+namespace ErrorCodes
+{
+    extern const int ILLEGAL_COLUMN;
+    extern const int NOT_IMPLEMENTED;
+}
+
+void SerializationKustoInterval::serializeText(
+    const IColumn & column, const size_t row, WriteBuffer & ostr, const FormatSettings &) const
+{
+    const auto * interval_column = checkAndGetColumn<ColumnInterval>(column);
+    if (!interval_column)
+        throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Expected column of underlying type of Interval");
+
+    const auto & value = interval_column->getData()[row];
+    const auto ticks = kind.toAvgNanoseconds() * value / 100;
+    const auto interval_as_string = formatKQLTimespan(ticks);
+    ostr.write(interval_as_string.c_str(), interval_as_string.length());
+}
+
+void SerializationKustoInterval::deserializeText(
+    [[maybe_unused]] IColumn & column,
+    [[maybe_unused]] ReadBuffer & istr,
+    [[maybe_unused]] const FormatSettings & settings,
+    [[maybe_unused]] const bool whole) const
+{
+    throw Exception(
+        ErrorCodes::NOT_IMPLEMENTED, "Deserialization is not implemented for {}", kind.toNameOfFunctionToIntervalDataType());
+}
+
+SerializationInterval::SerializationInterval(IntervalKind interval_kind_) : interval_kind(std::move(interval_kind_))
+{
+}
+
+void SerializationInterval::deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    dispatch(
+        static_cast<void (ISerialization::*)(Field &, ReadBuffer &, const FormatSettings &) const>(&ISerialization::deserializeBinary),
+        settings.interval.output_format,
+        field,
+        istr,
+        settings);
+}
+
+void SerializationInterval::deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    dispatch(
+        static_cast<void (ISerialization::*)(IColumn &, ReadBuffer &, const FormatSettings &) const>(&ISerialization::deserializeBinary),
+        settings.interval.output_format,
+        column,
+        istr,
+        settings);
+}
+
+void SerializationInterval::deserializeBinaryBulk(IColumn & column, ReadBuffer & istr, size_t limit, double avg_value_size_hint) const
+{
+    dispatch(
+        &ISerialization::deserializeBinaryBulk, FormatSettings::IntervalOutputFormat::Numeric, column, istr, limit, avg_value_size_hint);
+}
+
+void SerializationInterval::deserializeBinaryBulkStatePrefix(
+    DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state) const
+{
+    dispatch(&ISerialization::deserializeBinaryBulkStatePrefix, FormatSettings::IntervalOutputFormat::Numeric, settings, state);
+}
+
+
+void SerializationInterval::deserializeBinaryBulkWithMultipleStreams(
+    ColumnPtr & column,
+    size_t limit,
+    DeserializeBinaryBulkSettings & settings,
+    DeserializeBinaryBulkStatePtr & state,
+    SubstreamsCache * cache) const
+{
+    dispatch(
+        &ISerialization::deserializeBinaryBulkWithMultipleStreams,
+        FormatSettings::IntervalOutputFormat::Numeric,
+        column,
+        limit,
+        settings,
+        state,
+        cache);
+}
+
+
+void SerializationInterval::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    dispatch(&ISerialization::deserializeTextCSV, settings.interval.output_format, column, istr, settings);
+}
+
+void SerializationInterval::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    dispatch(&ISerialization::deserializeTextEscaped, settings.interval.output_format, column, istr, settings);
+}
+
+void SerializationInterval::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    dispatch(&ISerialization::deserializeTextJSON, settings.interval.output_format, column, istr, settings);
+}
+
+void SerializationInterval::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    dispatch(&ISerialization::deserializeTextQuoted, settings.interval.output_format, column, istr, settings);
+}
+
+void SerializationInterval::deserializeTextRaw(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    dispatch(&ISerialization::deserializeTextRaw, settings.interval.output_format, column, istr, settings);
+}
+
+
+void SerializationInterval::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    dispatch(&ISerialization::deserializeWholeText, settings.interval.output_format, column, istr, settings);
+}
+
+void SerializationInterval::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const
+{
+    dispatch(
+        static_cast<void (ISerialization::*)(const Field &, WriteBuffer &, const FormatSettings &) const>(&ISerialization::serializeBinary),
+        settings.interval.output_format,
+        field,
+        ostr,
+        settings);
+}
+
+void SerializationInterval::serializeBinary(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const
+{
+    dispatch(
+        static_cast<void (ISerialization::*)(const IColumn &, size_t, WriteBuffer &, const FormatSettings &) const>(
+            &ISerialization::serializeBinary),
+        settings.interval.output_format,
+        column,
+        row,
+        ostr,
+        settings);
+}
+
+void SerializationInterval::serializeBinaryBulk(const IColumn & column, WriteBuffer & ostr, size_t offset, size_t limit) const
+{
+    dispatch(&ISerialization::serializeBinaryBulk, FormatSettings::IntervalOutputFormat::Numeric, column, ostr, offset, limit);
+}
+
+void SerializationInterval::serializeBinaryBulkStatePrefix(
+    const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const
+{
+    dispatch(&ISerialization::serializeBinaryBulkStatePrefix, FormatSettings::IntervalOutputFormat::Numeric, column, settings, state);
+}
+
+void SerializationInterval::serializeBinaryBulkStateSuffix(
+    SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const
+{
+    dispatch(&ISerialization::serializeBinaryBulkStateSuffix, FormatSettings::IntervalOutputFormat::Numeric, settings, state);
+}
+
+void SerializationInterval::serializeBinaryBulkWithMultipleStreams(
+    const IColumn & column, size_t offset, size_t limit, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const
+{
+    dispatch(
+        &ISerialization::serializeBinaryBulkWithMultipleStreams,
+        FormatSettings::IntervalOutputFormat::Numeric,
+        column,
+        offset,
+        limit,
+        settings,
+        state);
+}
+
+void SerializationInterval::serializeText(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const
+{
+    dispatch(&ISerialization::serializeText, settings.interval.output_format, column, row, ostr, settings);
+}
+
+void SerializationInterval::serializeTextCSV(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const
+{
+    dispatch(&ISerialization::serializeTextCSV, settings.interval.output_format, column, row, ostr, settings);
+}
+
+void SerializationInterval::serializeTextEscaped(
+    const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const
+{
+    dispatch(&ISerialization::serializeTextEscaped, settings.interval.output_format, column, row, ostr, settings);
+}
+
+void SerializationInterval::serializeTextJSON(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const
+{
+    dispatch(&ISerialization::serializeTextJSON, settings.interval.output_format, column, row, ostr, settings);
+}
+
+void SerializationInterval::serializeTextQuoted(
+    const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const
+{
+    dispatch(&ISerialization::serializeTextQuoted, settings.interval.output_format, column, row, ostr, settings);
+}
+
+void SerializationInterval::serializeTextRaw(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const
+{
+    dispatch(&ISerialization::serializeTextRaw, settings.interval.output_format, column, row, ostr, settings);
+}
+}
 90  src/DataTypes/Serializations/SerializationInterval.h  Normal file
@@ -0,0 +1,90 @@
+#pragma once
+
+#include "ISerialization.h"
+#include "SerializationCustomSimpleText.h"
+
+#include <DataTypes/DataTypeInterval.h>
+#include <Formats/FormatSettings.h>
+#include <Common/IntervalKind.h>
+
+namespace DB
+{
+namespace ErrorCodes
+{
+    extern const int NOT_IMPLEMENTED;
+}
+
+class SerializationKustoInterval : public SerializationCustomSimpleText
+{
+public:
+    explicit SerializationKustoInterval(IntervalKind kind_) : SerializationCustomSimpleText(nullptr), kind(kind_) { }
+
+    void serializeText(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
+    void deserializeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings, bool whole) const override;
+
+private:
+    IntervalKind kind;
+};
+
+class SerializationInterval : public ISerialization
+{
+public:
+    explicit SerializationInterval(IntervalKind kind_);
+
+    void deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings & settings) const override;
+    void deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
+    void deserializeBinaryBulk(IColumn & column, ReadBuffer & istr, size_t limit, double avg_value_size_hint) const override;
+    void deserializeBinaryBulkStatePrefix(DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state) const override;
+    void deserializeBinaryBulkWithMultipleStreams(
+        ColumnPtr & column,
+        size_t limit,
+        DeserializeBinaryBulkSettings & settings,
+        DeserializeBinaryBulkStatePtr & state,
+        SubstreamsCache * cache) const override;
+    void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
+    void deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
+    void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
+    void deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
+    void deserializeTextRaw(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
+    void deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
+
+    void serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const override;
+    void serializeBinary(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
+    void serializeBinaryBulk(const IColumn & column, WriteBuffer & ostr, size_t offset, size_t limit) const override;
+    void serializeBinaryBulkStatePrefix(
+        const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override;
+    void serializeBinaryBulkStateSuffix(SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override;
+    void serializeBinaryBulkWithMultipleStreams(
+        const IColumn & column,
+        size_t offset,
+        size_t limit,
+        SerializeBinaryBulkSettings & settings,
+        SerializeBinaryBulkStatePtr & state) const override;
+    void serializeText(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
+    void serializeTextCSV(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
+    void serializeTextEscaped(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
+    void serializeTextJSON(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
+    void serializeTextQuoted(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
+    void serializeTextRaw(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
+
+private:
+    template <typename... Args, std::invocable<const ISerialization *, Args...> Method>
+    void dispatch(const Method method, const FormatSettings::IntervalOutputFormat format, Args &&... args) const
+    {
+        const ISerialization * serialization = nullptr;
+        if (format == FormatSettings::IntervalOutputFormat::Kusto)
+            serialization = &serialization_kusto;
+        else if (format == FormatSettings::IntervalOutputFormat::Numeric)
+            serialization = &serialization_numeric;
+
+        if (!serialization)
+            throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Option {} is not implemented", magic_enum::enum_name(format));
+
+        (serialization->*method)(std::forward<Args>(args)...);
+    }
+
+    IntervalKind interval_kind;
+    SerializationKustoInterval serialization_kusto{interval_kind};
+    SerializationNumber<typename DataTypeInterval::FieldType> serialization_numeric;
+};
+}
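The private dispatch() template is the core of the new serialization: it picks the Kusto or numeric inner serialization at run time and forwards a pointer-to-member plus its arguments. A self-contained illustration of the same pattern (simplified, no ClickHouse types; the real code templates the method type separately via std::invocable, which also permits lvalue arguments):

    #include <iostream>
    #include <utility>

    struct Impl
    {
        virtual void print(int x) const = 0;
        virtual ~Impl() = default;
    };

    struct Numeric : Impl { void print(int x) const override { std::cout << x << '\n'; } };
    struct Kusto   : Impl { void print(int x) const override { std::cout << "timespan(" << x << ")\n"; } };

    struct Facade
    {
        Numeric numeric;
        Kusto kusto;

        template <typename... Args>
        void dispatch(void (Impl::*method)(Args...) const, bool use_kusto, Args &&... args) const
        {
            const Impl * impl = use_kusto ? static_cast<const Impl *>(&kusto) : &numeric;
            (impl->*method)(std::forward<Args>(args)...);  // virtual dispatch still applies
        }
    };

    int main()
    {
        Facade f;
        f.dispatch(&Impl::print, false, 42);  // prints "42"
        f.dispatch(&Impl::print, true, 42);   // prints "timespan(42)"
    }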
@@ -2,7 +2,6 @@
 #include <Core/Field.h>
 #include <DataTypes/DataTypeFactory.h>
 #include <DataTypes/IDataType.h>
-#include <DataTypes/getLeastSupertype.h>
 #include <DataTypes/getMostSubtype.h>
 #include <Formats/FormatSettings.h>
 #include <IO/ReadBuffer.h>
@@ -87,6 +87,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
     format_settings.custom.skip_trailing_empty_lines = settings.input_format_custom_skip_trailing_empty_lines;
     format_settings.date_time_input_format = settings.date_time_input_format;
     format_settings.date_time_output_format = settings.date_time_output_format;
+    format_settings.interval.output_format = settings.interval_output_format;
     format_settings.input_format_ipv4_default_on_conversion_error = settings.input_format_ipv4_default_on_conversion_error;
     format_settings.input_format_ipv6_default_on_conversion_error = settings.input_format_ipv6_default_on_conversion_error;
     format_settings.bool_true_representation = settings.bool_true_representation;
@@ -77,6 +77,17 @@ struct FormatSettings

     DateTimeOutputFormat date_time_output_format = DateTimeOutputFormat::Simple;

+    enum class IntervalOutputFormat
+    {
+        Kusto,
+        Numeric
+    };
+
+    struct
+    {
+        IntervalOutputFormat output_format = IntervalOutputFormat::Numeric;
+    } interval;
+
     bool input_format_ipv4_default_on_conversion_error = false;
     bool input_format_ipv6_default_on_conversion_error = false;

@@ -1112,6 +1112,11 @@ private:
         bool c0_const = isColumnConst(*c0);
         bool c1_const = isColumnConst(*c1);

+        /// This is a paranoid check to protect from a broken query analysis.
+        if (c0->isNullable() != c1->isNullable())
+            throw Exception(ErrorCodes::LOGICAL_ERROR,
+                "Logical error: columns are assumed to be of identical types, but they are different in Nullable");
+
         if (c0_const && c1_const)
         {
             UInt8 res = 0;
@@ -203,6 +203,21 @@ struct ConvertImpl
                 }
             }

+            if constexpr (std::is_same_v<FromDataType, DataTypeUUID> && std::is_same_v<ToDataType, DataTypeUInt128>)
+            {
+                static_assert(std::is_same_v<DataTypeUInt128::FieldType, DataTypeUUID::FieldType::UnderlyingType>, "UInt128 and UUID types must be same");
+
+                if constexpr (std::endian::native == std::endian::little)
+                {
+                    vec_to[i].items[1] = vec_from[i].toUnderType().items[0];
+                    vec_to[i].items[0] = vec_from[i].toUnderType().items[1];
+                }
+                else
+                {
+                    vec_to[i] = vec_from[i].toUnderType();
+                }
+
+                continue;
+            }
+
             if constexpr (std::is_same_v<FromDataType, DataTypeUUID> != std::is_same_v<ToDataType, DataTypeUUID>)
             {
                 throw Exception(ErrorCodes::NOT_IMPLEMENTED,
@ -39,6 +39,9 @@ struct HasTokenImpl
|
|||||||
if (start_pos != nullptr)
|
if (start_pos != nullptr)
|
||||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Function '{}' does not support start_pos argument", name);
|
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Function '{}' does not support start_pos argument", name);
|
||||||
|
|
||||||
|
if (pattern.empty())
|
||||||
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Needle cannot be empty, because empty string isn't a token");
|
||||||
|
|
||||||
if (haystack_offsets.empty())
|
if (haystack_offsets.empty())
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
@ -133,8 +133,6 @@ struct LowerUpperUTF8Impl
|
|||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
static const Poco::UTF8Encoding utf8;
|
|
||||||
|
|
||||||
size_t src_sequence_length = UTF8::seqLength(*src);
|
size_t src_sequence_length = UTF8::seqLength(*src);
|
||||||
/// In case partial buffer was passed (due to SSE optimization)
|
/// In case partial buffer was passed (due to SSE optimization)
|
||||||
/// we cannot convert it with current src_end, but we may have more
|
/// we cannot convert it with current src_end, but we may have more
|
||||||
|
@ -1,6 +1,5 @@
|
|||||||
#include <DataTypes/DataTypesNumber.h>
|
#include <DataTypes/DataTypesNumber.h>
|
||||||
#include <Functions/FunctionFactory.h>
|
#include <Functions/FunctionFactory.h>
|
||||||
#include <DataTypes/getLeastSupertype.h>
|
|
||||||
#include <Core/Types_fwd.h>
|
#include <Core/Types_fwd.h>
|
||||||
#include <DataTypes/Serializations/ISerialization.h>
|
#include <DataTypes/Serializations/ISerialization.h>
|
||||||
#include <Functions/castTypeToEither.h>
|
#include <Functions/castTypeToEither.h>
|
||||||
|
161
src/Functions/array/arrayJaccardIndex.cpp
Normal file
161
src/Functions/array/arrayJaccardIndex.cpp
Normal file
@ -0,0 +1,161 @@
|
|||||||
|
#include <Columns/ColumnArray.h>
|
||||||
|
#include <Columns/ColumnsNumber.h>
|
||||||
|
#include <Columns/IColumn.h>
|
||||||
|
#include <DataTypes/DataTypeArray.h>
|
||||||
|
#include <DataTypes/DataTypesNumber.h>
|
||||||
|
#include <DataTypes/IDataType.h>
|
||||||
|
#include <Functions/FunctionFactory.h>
|
||||||
|
#include <Functions/FunctionHelpers.h>
|
||||||
|
#include <DataTypes/DataTypeNothing.h>
|
||||||
|
#include <DataTypes/getMostSubtype.h>
|
||||||
|
#include <Core/ColumnsWithTypeAndName.h>
|
||||||
|
#include <Core/ColumnWithTypeAndName.h>
|
||||||
|
#include <Interpreters/Context_fwd.h>
|
||||||
|
#include <base/types.h>
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
namespace ErrorCodes
|
||||||
|
{
|
||||||
|
extern const int ILLEGAL_COLUMN;
|
||||||
|
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||||
|
extern const int LOGICAL_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
class FunctionArrayJaccardIndex : public IFunction
|
||||||
|
{
|
||||||
|
private:
|
||||||
|
using ResultType = Float64;
|
||||||
|
|
||||||
|
struct LeftAndRightSizes
|
||||||
|
{
|
||||||
|
size_t left_size;
|
||||||
|
size_t right_size;
|
||||||
|
};
|
||||||
|
|
||||||
|
template <bool left_is_const, bool right_is_const>
|
||||||
|
static LeftAndRightSizes getArraySizes(const ColumnArray::Offsets & left_offsets, const ColumnArray::Offsets & right_offsets, size_t i)
|
||||||
|
{
|
||||||
|
size_t left_size;
|
||||||
|
size_t right_size;
|
||||||
|
|
||||||
|
if constexpr (left_is_const)
|
||||||
|
left_size = left_offsets[0];
|
||||||
|
else
|
||||||
|
left_size = left_offsets[i] - left_offsets[i - 1];
|
||||||
|
|
||||||
|
if constexpr (right_is_const)
|
||||||
|
right_size = right_offsets[0];
|
||||||
|
else
|
||||||
|
right_size = right_offsets[i] - right_offsets[i - 1];
|
||||||
|
|
||||||
|
return {left_size, right_size};
|
||||||
|
}
|
||||||
|
|
||||||
|
template <bool left_is_const, bool right_is_const>
|
||||||
|
static void vector(const ColumnArray::Offsets & intersect_offsets, const ColumnArray::Offsets & left_offsets, const ColumnArray::Offsets & right_offsets, PaddedPODArray<ResultType> & res)
|
||||||
|
{
|
||||||
|
for (size_t i = 0; i < res.size(); ++i)
|
||||||
|
{
|
||||||
|
LeftAndRightSizes sizes = getArraySizes<left_is_const, right_is_const>(left_offsets, right_offsets, i);
|
||||||
|
size_t intersect_size = intersect_offsets[i] - intersect_offsets[i - 1];
|
||||||
|
res[i] = static_cast<ResultType>(intersect_size) / (sizes.left_size + sizes.right_size - intersect_size);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <bool left_is_const, bool right_is_const>
|
||||||
|
static void vectorWithEmptyIntersect(const ColumnArray::Offsets & left_offsets, const ColumnArray::Offsets & right_offsets, PaddedPODArray<ResultType> & res)
|
||||||
|
{
|
||||||
|
for (size_t i = 0; i < res.size(); ++i)
|
||||||
|
{
|
||||||
|
LeftAndRightSizes sizes = getArraySizes<left_is_const, right_is_const>(left_offsets, right_offsets, i);
|
||||||
|
if (sizes.left_size == 0 && sizes.right_size == 0)
|
||||||
|
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "array aggregate functions cannot be performed on two empty arrays");
|
||||||
|
res[i] = 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
static constexpr auto name = "arrayJaccardIndex";
|
||||||
|
String getName() const override { return name; }
|
||||||
|
static FunctionPtr create(ContextPtr context_) { return std::make_shared<FunctionArrayJaccardIndex>(context_); }
|
||||||
|
explicit FunctionArrayJaccardIndex(ContextPtr context_) : context(context_) {}
|
||||||
|
size_t getNumberOfArguments() const override { return 2; }
|
||||||
|
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo &) const override { return true; }
|
||||||
|
bool useDefaultImplementationForConstants() const override { return true; }
|
||||||
|
|
||||||
|
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
|
||||||
|
{
|
||||||
|
FunctionArgumentDescriptors args{
|
||||||
|
{"array_1", &isArray<IDataType>, nullptr, "Array"},
|
||||||
|
{"array_2", &isArray<IDataType>, nullptr, "Array"},
|
||||||
|
};
|
||||||
|
validateFunctionArgumentTypes(*this, arguments, args);
|
||||||
|
return std::make_shared<DataTypeNumber<ResultType>>();
|
||||||
|
}
|
||||||
|
|
||||||
|
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
|
||||||
|
{
|
||||||
|
auto cast_to_array = [&](const ColumnWithTypeAndName & col) -> std::pair<const ColumnArray *, bool>
|
||||||
|
{
|
||||||
|
if (const ColumnConst * col_const = typeid_cast<const ColumnConst *>(col.column.get()))
|
||||||
|
{
|
||||||
|
const ColumnArray * col_const_array = checkAndGetColumn<ColumnArray>(col_const->getDataColumnPtr().get());
|
||||||
|
return {col_const_array, true};
|
||||||
|
}
|
||||||
|
else if (const ColumnArray * col_non_const_array = checkAndGetColumn<ColumnArray>(col.column.get()))
|
||||||
|
return {col_non_const_array, false};
|
||||||
|
else
|
||||||
|
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Argument for function {} must be array but it has type {}.", col.column->getName(), getName());
|
||||||
|
};
|
||||||
|
|
||||||
|
const auto & [left_array, left_is_const] = cast_to_array(arguments[0]);
|
||||||
|
const auto & [right_array, right_is_const] = cast_to_array(arguments[1]);
|
||||||
|
|
||||||
|
auto intersect_array = FunctionFactory::instance().get("arrayIntersect", context)->build(arguments);
|
||||||
|
|
||||||
|
ColumnWithTypeAndName intersect_column;
|
||||||
|
intersect_column.type = intersect_array->getResultType();
|
||||||
|
intersect_column.column = intersect_array->execute(arguments, intersect_column.type, input_rows_count);
|
||||||
|
|
||||||
|
const auto * intersect_column_type = checkAndGetDataType<DataTypeArray>(intersect_column.type.get());
|
||||||
|
if (!intersect_column_type)
|
||||||
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected return type for function arrayIntersect");
|
||||||
|
|
||||||
|
auto col_res = ColumnVector<ResultType>::create();
|
||||||
|
typename ColumnVector<ResultType>::Container & vec_res = col_res->getData();
|
||||||
|
vec_res.resize(input_rows_count);
|
||||||
|
|
||||||
|
#define EXECUTE_VECTOR(left_is_const, right_is_const) \
|
||||||
|
if (typeid_cast<const DataTypeNothing *>(intersect_column_type->getNestedType().get())) \
|
||||||
|
vectorWithEmptyIntersect<left_is_const, right_is_const>(left_array->getOffsets(), right_array->getOffsets(), vec_res); \
|
||||||
|
else \
|
||||||
|
{ \
|
||||||
|
const ColumnArray * intersect_column_array = checkAndGetColumn<ColumnArray>(intersect_column.column.get()); \
|
||||||
|
vector<left_is_const, right_is_const>(intersect_column_array->getOffsets(), left_array->getOffsets(), right_array->getOffsets(), vec_res); \
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!left_is_const && !right_is_const)
|
||||||
|
EXECUTE_VECTOR(false, false)
|
||||||
|
else if (!left_is_const && right_is_const)
|
||||||
|
EXECUTE_VECTOR(false, true)
|
||||||
|
else if (left_is_const && !right_is_const)
|
||||||
|
EXECUTE_VECTOR(true, false)
|
||||||
|
else
|
||||||
|
EXECUTE_VECTOR(true, true)
|
||||||
|
|
||||||
|
#undef EXECUTE_VECTOR
|
||||||
|
|
||||||
|
return col_res;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
ContextPtr context;
|
||||||
|
};
|
||||||
|
|
||||||
|
REGISTER_FUNCTION(ArrayJaccardIndex)
|
||||||
|
{
|
||||||
|
factory.registerFunction<FunctionArrayJaccardIndex>();
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
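The new `arrayJaccardIndex` returns J(A, B) = |A ∩ B| / (|A| + |B| − |A ∩ B|) as a Float64, reusing `arrayIntersect` for the numerator and raw array sizes for the denominator. A usage sketch, not part of the patch:

```sql
SELECT arrayJaccardIndex([1, 2], [2, 3]) AS j;  -- one shared element, sizes 2 + 2 - 1 = 3, so j = 0.333...
```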
@@ -5,7 +5,6 @@
 #include <DataTypes/DataTypeArray.h>
 #include <DataTypes/DataTypesNumber.h>
 #include <DataTypes/IDataType.h>
-#include <DataTypes/getLeastSupertype.h>
 #include <Functions/FunctionFactory.h>
 #include <Functions/FunctionHelpers.h>

@@ -101,6 +101,7 @@ It is ok to have ASCII NUL bytes in strings, and they will be counted as well.
         .categories{"String", "Array"}
         },
         FunctionFactory::CaseInsensitive);
+    factory.registerAlias("OCTET_LENGTH", "length", FunctionFactory::CaseInsensitive);
 }
 
 }
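The alias above lets the standard-SQL spelling resolve to `length`, which counts bytes rather than code points, and the registration is case-insensitive. A usage sketch, not part of the patch:

```sql
SELECT OCTET_LENGTH('hello') AS a, octet_length('héllo') AS b;  -- 5 and 6: 'é' occupies two bytes in UTF-8
```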
@@ -3,9 +3,12 @@
 #include <Functions/FunctionHelpers.h>
 #include <DataTypes/DataTypesNumber.h>
 #include <DataTypes/DataTypeArray.h>
+#include <DataTypes/DataTypeNothing.h>
 #include <DataTypes/getLeastSupertype.h>
 #include <Columns/ColumnArray.h>
+#include <Columns/ColumnNullable.h>
 #include <Columns/ColumnVector.h>
+#include <Columns/ColumnsCommon.h>
 #include <Interpreters/castColumn.h>
 #include <Interpreters/Context.h>
 #include <numeric>

@@ -21,6 +24,7 @@ namespace ErrorCodes
     extern const int ILLEGAL_COLUMN;
     extern const int ILLEGAL_TYPE_OF_ARGUMENT;
     extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
+    extern const int BAD_ARGUMENTS;
 }
 
 

@@ -43,6 +47,7 @@ private:
 
     size_t getNumberOfArguments() const override { return 0; }
     bool isVariadic() const override { return true; }
+    bool useDefaultImplementationForNulls() const override { return false; }
    bool useDefaultImplementationForConstants() const override { return true; }
     bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
 

@@ -55,13 +60,18 @@ private:
             getName(), arguments.size());
         }
 
+        if (std::find_if (arguments.cbegin(), arguments.cend(), [](const auto & arg) { return arg->onlyNull(); }) != arguments.cend())
+            return makeNullable(std::make_shared<DataTypeNothing>());
+
         DataTypes arg_types;
         for (size_t i = 0, size = arguments.size(); i < size; ++i)
         {
-            if (i < 2 && WhichDataType(arguments[i]).isIPv4())
+            DataTypePtr type_no_nullable = removeNullable(arguments[i]);
+
+            if (i < 2 && WhichDataType(type_no_nullable).isIPv4())
                 arg_types.emplace_back(std::make_shared<DataTypeUInt32>());
-            else if (isInteger(arguments[i]))
-                arg_types.push_back(arguments[i]);
+            else if (isInteger(type_no_nullable))
+                arg_types.push_back(type_no_nullable);
             else
                 throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}",
                     arguments[i]->getName(), getName());

@@ -376,6 +386,10 @@ private:
 
     ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
     {
+        NullPresence null_presence = getNullPresense(arguments);
+        if (null_presence.has_null_constant)
+            return result_type->createColumnConstWithDefaultValue(input_rows_count);
+
         DataTypePtr elem_type = checkAndGetDataType<DataTypeArray>(result_type.get())->getNestedType();
         WhichDataType which(elem_type);
 

@@ -386,10 +400,31 @@ private:
                 "for unsigned/signed integers up to 64 bit", getName());
         }
 
+        auto throwIfNullValue = [&](const ColumnWithTypeAndName & col)
+        {
+            if (!col.type->isNullable())
+                return;
+            const ColumnNullable * nullable_col = checkAndGetColumn<ColumnNullable>(*col.column);
+            if (!nullable_col)
+                nullable_col = checkAndGetColumnConstData<ColumnNullable>(col.column.get());
+            if (!nullable_col)
+                return;
+            const auto & null_map = nullable_col->getNullMapData();
+            if (!memoryIsZero(null_map.data(), 0, null_map.size()))
+                throw Exception(ErrorCodes::BAD_ARGUMENTS, "Illegal (null) value column {} of argument of function {}", col.column->getName(), getName());
+        };
+
         ColumnPtr res;
         if (arguments.size() == 1)
         {
+            throwIfNullValue(arguments[0]);
             const auto * col = arguments[0].column.get();
+            if (arguments[0].type->isNullable())
+            {
+                const auto * nullable = checkAndGetColumn<ColumnNullable>(*arguments[0].column);
+                col = nullable->getNestedColumnPtr().get();
+            }
+
             if (!((res = executeInternal<UInt8>(col)) || (res = executeInternal<UInt16>(col)) || (res = executeInternal<UInt32>(col))
                 || (res = executeInternal<UInt64>(col)) || (res = executeInternal<Int8>(col)) || (res = executeInternal<Int16>(col))
                 || (res = executeInternal<Int32>(col)) || (res = executeInternal<Int64>(col))))

@@ -404,6 +439,7 @@ private:
 
         for (size_t i = 0; i < arguments.size(); ++i)
         {
+            throwIfNullValue(arguments[i]);
             if (i == 1)
                 columns_holder[i] = castColumn(arguments[i], elem_type)->convertToFullColumnIfConst();
             else
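Taken together, the `range` hunks above add Nullable handling: an all-NULL constant argument short-circuits to a NULL result of type `Nullable(Nothing)`, non-null values inside Nullable columns are unwrapped, and a real NULL value in a data column is rejected with `BAD_ARGUMENTS`. A behaviour sketch, not part of the patch:

```sql
SELECT range(CAST(3 AS Nullable(UInt8)));  -- [0, 1, 2]: the Nullable wrapper is stripped before execution
SELECT range(NULL);                        -- NULL: a NULL constant yields a constant NULL result
```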
@@ -1,6 +1,5 @@
 #include <Columns/ColumnString.h>
 #include <DataTypes/DataTypeString.h>
-#include <DataTypes/getLeastSupertype.h>
 #include <Functions/FunctionFactory.h>
 #include <Functions/FunctionHelpers.h>
 #include <Functions/GatherUtils/Algorithms.h>

@@ -2,7 +2,6 @@
 #include <Functions/FunctionHelpers.h>
 #include <Functions/FunctionFactory.h>
 #include <DataTypes/DataTypesNumber.h>
-#include <DataTypes/getLeastSupertype.h>
 #include <Core/ColumnNumbers.h>
 
 

src/Functions/initcap.cpp  (new file, 66 lines)
@@ -0,0 +1,66 @@
+#include <Functions/FunctionFactory.h>
+#include <Functions/FunctionStringToString.h>
+#include <Common/StringUtils/StringUtils.h>
+
+namespace DB
+{
+namespace
+{
+
+struct InitcapImpl
+{
+    static void vector(const ColumnString::Chars & data,
+        const ColumnString::Offsets & offsets,
+        ColumnString::Chars & res_data,
+        ColumnString::Offsets & res_offsets)
+    {
+        if (data.empty())
+            return;
+        res_data.resize(data.size());
+        res_offsets.assign(offsets);
+        array(data.data(), data.data() + data.size(), res_data.data());
+    }
+
+    static void vectorFixed(const ColumnString::Chars & data, size_t /*n*/, ColumnString::Chars & res_data)
+    {
+        res_data.resize(data.size());
+        array(data.data(), data.data() + data.size(), res_data.data());
+    }
+
+private:
+    static void array(const UInt8 * src, const UInt8 * src_end, UInt8 * dst)
+    {
+        bool prev_alphanum = false;
+
+        for (; src < src_end; ++src, ++dst)
+        {
+            char c = *src;
+            bool alphanum = isAlphaNumericASCII(c);
+            if (alphanum && !prev_alphanum)
+                if (isAlphaASCII(c))
+                    *dst = toUpperIfAlphaASCII(c);
+                else
+                    *dst = c;
+            else if (isAlphaASCII(c))
+                *dst = toLowerIfAlphaASCII(c);
+            else
+                *dst = c;
+            prev_alphanum = alphanum;
+        }
+    }
+};
+
+struct NameInitcap
+{
+    static constexpr auto name = "initcap";
+};
+using FunctionInitcap = FunctionStringToString<InitcapImpl, NameInitcap>;
+
+}
+
+REGISTER_FUNCTION(Initcap)
+{
+    factory.registerFunction<FunctionInitcap>({}, FunctionFactory::CaseInsensitive);
+}
+
+}
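`initcap` upper-cases the first ASCII letter of each alphanumeric run and lower-cases the rest; digits count as word characters, so a letter directly after a digit is not capitalised. A usage sketch, not part of the patch:

```sql
SELECT initcap('hello WORLD 42abc');  -- 'Hello World 42abc'
```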
src/Functions/initcapUTF8.cpp  (new file, 114 lines)
@@ -0,0 +1,114 @@
+#include <DataTypes/DataTypeString.h>
+#include <Functions/FunctionStringToString.h>
+#include <Functions/LowerUpperUTF8Impl.h>
+#include <Functions/FunctionFactory.h>
+#include <Poco/Unicode.h>
+
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int BAD_ARGUMENTS;
+}
+
+namespace
+{
+
+struct InitcapUTF8Impl
+{
+    static void vector(
+        const ColumnString::Chars & data,
+        const ColumnString::Offsets & offsets,
+        ColumnString::Chars & res_data,
+        ColumnString::Offsets & res_offsets)
+    {
+        if (data.empty())
+            return;
+        res_data.resize(data.size());
+        res_offsets.assign(offsets);
+        array(data.data(), data.data() + data.size(), offsets, res_data.data());
+    }
+
+    [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
+    {
+        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function initcapUTF8 cannot work with FixedString argument");
+    }
+
+    static void processCodePoint(const UInt8 *& src, const UInt8 * src_end, UInt8 *& dst, bool & prev_alphanum)
+    {
+        size_t src_sequence_length = UTF8::seqLength(*src);
+        auto src_code_point = UTF8::convertUTF8ToCodePoint(src, src_end - src);
+
+        if (src_code_point)
+        {
+            bool alpha = Poco::Unicode::isAlpha(*src_code_point);
+            bool alphanum = alpha || Poco::Unicode::isDigit(*src_code_point);
+
+            int dst_code_point = *src_code_point;
+            if (alphanum && !prev_alphanum)
+            {
+                if (alpha)
+                    dst_code_point = Poco::Unicode::toUpper(*src_code_point);
+            }
+            else if (alpha)
+            {
+                dst_code_point = Poco::Unicode::toLower(*src_code_point);
+            }
+            prev_alphanum = alphanum;
+            if (dst_code_point > 0)
+            {
+                size_t dst_sequence_length = UTF8::convertCodePointToUTF8(dst_code_point, dst, src_end - src);
+                assert(dst_sequence_length <= 4);
+
+                if (dst_sequence_length == src_sequence_length)
+                {
+                    src += dst_sequence_length;
+                    dst += dst_sequence_length;
+                    return;
+                }
+            }
+        }
+
+        *dst = *src;
+        ++dst;
+        ++src;
+        prev_alphanum = false;
+    }
+
+private:
+
+    static void array(const UInt8 * src, const UInt8 * src_end, const ColumnString::Offsets & offsets, UInt8 * dst)
+    {
+        const auto * offset_it = offsets.begin();
+        const UInt8 * begin = src;
+
+        /// handle remaining symbols, row by row (to avoid influence of bad UTF8 symbols from one row, to another)
+        while (src < src_end)
+        {
+            const UInt8 * row_end = begin + *offset_it;
+            chassert(row_end >= src);
+            bool prev_alphanum = false;
+            while (src < row_end)
+                processCodePoint(src, row_end, dst, prev_alphanum);
+            ++offset_it;
+        }
+    }
+};
+
+struct NameInitcapUTF8
+{
+    static constexpr auto name = "initcapUTF8";
+};
+
+using FunctionInitcapUTF8 = FunctionStringToString<InitcapUTF8Impl, NameInitcapUTF8>;
+
+}
+
+REGISTER_FUNCTION(InitcapUTF8)
+{
+    factory.registerFunction<FunctionInitcapUTF8>();
+}
+
+}
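The UTF-8 variant applies the same rule per code point through Poco's Unicode tables, and copies bytes verbatim whenever a sequence fails to decode or would change length under case mapping. A usage sketch, not part of the patch:

```sql
SELECT initcapUTF8('привет мир');  -- 'Привет Мир'
```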
@@ -1005,8 +1005,8 @@ inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, Re
             }
         }
     }
-    /// 9908870400 is time_t value for 2184-01-01 UTC (a bit over the last year supported by DateTime64)
-    else if (whole >= 9908870400LL)
+    /// 10413792000 is time_t value for 2300-01-01 UTC (a bit over the last year supported by DateTime64)
+    else if (whole >= 10413792000LL)
     {
         /// Unix timestamp with subsecond precision, already scaled to integer.
         /// For disambiguation we support only time since 2001-09-09 01:46:40 UTC and less than 30 000 years in future.
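A quick arithmetic check of the new constant against its comment (average Gregorian year of 365.2425 days):

```latex
\frac{10413792000}{86400 \times 365.2425} \approx 330 \ \text{years}, \qquad 1970 + 330 = 2300
```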
@@ -16,19 +16,15 @@
 
 #include <DataTypes/DataTypeSet.h>
 #include <DataTypes/DataTypeFunction.h>
-#include <DataTypes/DataTypeFixedString.h>
 #include <DataTypes/DataTypeString.h>
 #include <DataTypes/DataTypeTuple.h>
 #include <DataTypes/DataTypeArray.h>
 #include <DataTypes/DataTypeLowCardinality.h>
-#include <DataTypes/DataTypesNumber.h>
 #include <DataTypes/FieldToDataType.h>
 #include <DataTypes/DataTypesDecimal.h>
 #include <DataTypes/DataTypeFactory.h>
 
-#include <Columns/ColumnArray.h>
 #include <Columns/ColumnConst.h>
-#include <Columns/ColumnFixedString.h>
 #include <Columns/ColumnSet.h>
 
 #include <Storages/StorageSet.h>

@@ -47,7 +43,6 @@
 #include <Interpreters/ExpressionActions.h>
 #include <Interpreters/misc.h>
 #include <Interpreters/ActionsVisitor.h>
-#include <Interpreters/InterpreterSelectWithUnionQuery.h>
 #include <Interpreters/Set.h>
 #include <Interpreters/evaluateConstantExpression.h>
 #include <Interpreters/convertFieldToType.h>

@@ -61,6 +56,7 @@
 #include <Interpreters/InterpreterSelectQueryAnalyzer.h>
 #include <Parsers/queryToString.h>
+
 
 namespace DB
 {
 

@@ -715,7 +711,7 @@ bool ActionsMatcher::needChildVisit(const ASTPtr & node, const ASTPtr & child)
         node->as<ASTExpressionList>())
         return false;
 
-    /// Do not go to FROM, JOIN, UNION.
+    /// Do not go to FROM, JOIN, UNION
     if (child->as<ASTTableExpression>() ||
         child->as<ASTSelectQuery>())
         return false;

@@ -55,21 +55,10 @@ void AsynchronousInsertLogElement::appendToBlock(MutableColumns & columns) const
     columns[i++]->insert(event_time);
     columns[i++]->insert(event_time_microseconds);
 
-    const auto & insert_query = assert_cast<const ASTInsertQuery &>(*query);
-    columns[i++]->insert(queryToString(insert_query));
-
-    if (insert_query.table_id)
-    {
-        columns[i++]->insert(insert_query.table_id.getDatabaseName());
-        columns[i++]->insert(insert_query.table_id.getTableName());
-    }
-    else
-    {
-        columns[i++]->insertDefault();
-        columns[i++]->insertDefault();
-    }
-
-    columns[i++]->insert(insert_query.format);
+    columns[i++]->insert(query_for_logging);
+    columns[i++]->insert(database);
+    columns[i++]->insert(table);
+    columns[i++]->insert(format);
     columns[i++]->insert(query_id);
     columns[i++]->insert(bytes);
     columns[i++]->insert(rows);

@@ -21,8 +21,11 @@ struct AsynchronousInsertLogElement
     time_t event_time{};
     Decimal64 event_time_microseconds{};
 
-    ASTPtr query;
     String query_id;
+    String query_for_logging;
+    String database;
+    String table;
+    String format;
     UInt64 bytes{};
     UInt64 rows{};
     String exception;
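With the AST no longer stored on the log element, flushed entries carry pre-rendered strings; based on the `appendToBlock` order above, the corresponding system table columns can be queried directly. A sketch, not part of the patch:

```sql
SELECT query, database, table, format, query_id, bytes, rows
FROM system.asynchronous_insert_log
ORDER BY event_time DESC
LIMIT 5;
```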
@@ -1,33 +1,37 @@
 #include <Interpreters/AsynchronousInsertQueue.h>
 
-#include <Core/Settings.h>
-#include <QueryPipeline/BlockIO.h>
-#include <Interpreters/InterpreterInsertQuery.h>
-#include <Interpreters/Context.h>
-#include <Interpreters/AsynchronousInsertLog.h>
-#include <Processors/Transforms/getSourceFromASTInsertQuery.h>
-#include <Processors/Sources/SourceFromSingleChunk.h>
-#include <Processors/Executors/StreamingFormatExecutor.h>
-#include <Processors/Executors/CompletedPipelineExecutor.h>
-#include <Processors/Transforms/AddingDefaultsTransform.h>
-#include <IO/ConcatReadBuffer.h>
-#include <IO/ReadBufferFromMemory.h>
-#include <IO/ReadBufferFromString.h>
-#include <IO/LimitReadBuffer.h>
-#include <IO/copyData.h>
-#include <Parsers/ASTInsertQuery.h>
-#include <Parsers/queryToString.h>
-#include <Storages/IStorage.h>
-#include <Common/CurrentThread.h>
-#include <Common/SipHash.h>
-#include <Common/FieldVisitorHash.h>
-#include <Common/DateLUT.h>
 #include <Access/Common/AccessFlags.h>
 #include <Access/EnabledQuota.h>
+#include <Core/Settings.h>
 #include <Formats/FormatFactory.h>
-#include <Common/logger_useful.h>
+#include <IO/ConcatReadBuffer.h>
+#include <IO/LimitReadBuffer.h>
+#include <IO/ReadBufferFromMemory.h>
+#include <IO/ReadBufferFromString.h>
+#include <IO/copyData.h>
+#include <Interpreters/AsynchronousInsertLog.h>
+#include <Interpreters/Context.h>
+#include <Interpreters/InterpreterInsertQuery.h>
+#include <Interpreters/ProcessList.h>
+#include <Interpreters/executeQuery.h>
+#include <Parsers/ASTInsertQuery.h>
+#include <Parsers/formatAST.h>
+#include <Parsers/queryToString.h>
+#include <Processors/Executors/CompletedPipelineExecutor.h>
+#include <Processors/Executors/StreamingFormatExecutor.h>
+#include <Processors/Sources/SourceFromSingleChunk.h>
+#include <Processors/Transforms/AddingDefaultsTransform.h>
+#include <Processors/Transforms/getSourceFromASTInsertQuery.h>
+#include <QueryPipeline/BlockIO.h>
 #include <QueryPipeline/Pipe.h>
 #include <QueryPipeline/QueryPipeline.h>
+#include <Storages/IStorage.h>
+#include <Common/CurrentThread.h>
+#include <Common/DateLUT.h>
+#include <Common/FieldVisitorHash.h>
+#include <Common/SensitiveDataMasker.h>
+#include <Common/SipHash.h>
+#include <Common/logger_useful.h>
 
 
 namespace CurrentMetrics

@@ -202,6 +206,7 @@ AsynchronousInsertQueue::push(ASTPtr query, ContextPtr query_context)
     query = query->clone();
     const auto & settings = query_context->getSettingsRef();
     auto & insert_query = query->as<ASTInsertQuery &>();
+    insert_query.async_insert_flush = true;
 
     InterpreterInsertQuery interpreter(query, query_context, settings.insert_allow_materialized_columns);
     auto table = interpreter.getTable(insert_query);

@@ -398,6 +403,12 @@ try
     const auto * log = &Poco::Logger::get("AsynchronousInsertQueue");
     const auto & insert_query = assert_cast<const ASTInsertQuery &>(*key.query);
     auto insert_context = Context::createCopy(global_context);
+    DB::CurrentThread::QueryScope query_scope_holder(insert_context);
+    bool internal = false; // To enable logging this query
+    bool async_insert = true;
+
+    /// Disabled query spans. Could be activated by initializing this to a SpanHolder
+    std::shared_ptr<OpenTelemetry::SpanHolder> query_span{nullptr};
 
     /// 'resetParser' doesn't work for parallel parsing.
     key.settings.set("input_format_parallel_parsing", false);

@@ -405,12 +416,67 @@ try
     insert_context->setSettings(key.settings);
 
     /// Set initial_query_id, because it's used in InterpreterInsertQuery for table lock.
-    insert_context->getClientInfo().query_kind = ClientInfo::QueryKind::INITIAL_QUERY;
     insert_context->setCurrentQueryId("");
 
-    InterpreterInsertQuery interpreter(key.query, insert_context, key.settings.insert_allow_materialized_columns, false, false, true);
-    auto pipeline = interpreter.execute().pipeline;
-    assert(pipeline.pushing());
+    auto insert_query_id = insert_context->getCurrentQueryId();
+    auto query_start_time = std::chrono::system_clock::now();
+    Stopwatch start_watch{CLOCK_MONOTONIC};
+    ClientInfo & client_info = insert_context->getClientInfo();
+    client_info.query_kind = ClientInfo::QueryKind::INITIAL_QUERY;
+    client_info.initial_query_start_time = timeInSeconds(query_start_time);
+    client_info.initial_query_start_time_microseconds = timeInMicroseconds(query_start_time);
+    client_info.current_query_id = insert_query_id;
+    client_info.initial_query_id = insert_query_id;
+    size_t log_queries_cut_to_length = insert_context->getSettingsRef().log_queries_cut_to_length;
+    String query_for_logging = insert_query.hasSecretParts()
+        ? insert_query.formatForLogging(log_queries_cut_to_length)
+        : wipeSensitiveDataAndCutToLength(serializeAST(insert_query), log_queries_cut_to_length);
+
+    /// We add it to the process list so
+    /// a) it appears in system.processes
+    /// b) can be cancelled if we want to
+    /// c) has an associated process list element where runtime metrics are stored
+    auto process_list_entry
+        = insert_context->getProcessList().insert(query_for_logging, key.query.get(), insert_context, start_watch.getStart());
+    auto query_status = process_list_entry->getQueryStatus();
+    insert_context->setProcessListElement(std::move(query_status));
+
+    String query_database{};
+    String query_table{};
+    if (insert_query.table_id)
+    {
+        query_database = insert_query.table_id.getDatabaseName();
+        query_table = insert_query.table_id.getTableName();
+        insert_context->setInsertionTable(insert_query.table_id);
+    }
+    std::unique_ptr<DB::IInterpreter> interpreter;
+    QueryPipeline pipeline;
+    QueryLogElement query_log_elem;
+
+    try
+    {
+        interpreter = std::make_unique<InterpreterInsertQuery>(
+            key.query, insert_context, key.settings.insert_allow_materialized_columns, false, false, true);
+        pipeline = interpreter->execute().pipeline;
+        chassert(pipeline.pushing());
+
+        query_log_elem = logQueryStart(
+            query_start_time,
+            insert_context,
+            query_for_logging,
+            key.query,
+            pipeline,
+            interpreter,
+            internal,
+            query_database,
+            query_table,
+            async_insert);
+    }
+    catch (...)
+    {
+        logExceptionBeforeStart(query_for_logging, insert_context, key.query, query_span, start_watch.elapsedMilliseconds());
+        throw;
+    }
 
     auto header = pipeline.getHeader();
     auto format = getInputFormatFromASTInsertQuery(key.query, false, header, insert_context, nullptr);

@@ -470,7 +536,10 @@ try
         AsynchronousInsertLogElement elem;
         elem.event_time = timeInSeconds(entry->create_time);
         elem.event_time_microseconds = timeInMicroseconds(entry->create_time);
-        elem.query = key.query;
+        elem.query_for_logging = query_for_logging;
+        elem.database = query_database;
+        elem.table = query_table;
+        elem.format = insert_query.format;
         elem.query_id = entry->query_id;
         elem.bytes = bytes_size;
         elem.rows = num_rows;

@@ -493,7 +562,6 @@ try
     }
 
     format->addBuffer(std::move(last_buffer));
-    auto insert_query_id = insert_context->getCurrentQueryId();
     ProfileEvents::increment(ProfileEvents::AsyncInsertRows, total_rows);
 
     auto finish_entries = [&]

@@ -531,9 +599,14 @@ try
 
         LOG_INFO(log, "Flushed {} rows, {} bytes for query '{}'",
             total_rows, total_bytes, key.query_str);
+
+        bool pulling_pipeline = false;
+        logQueryFinish(query_log_elem, insert_context, key.query, pipeline, pulling_pipeline, query_span, internal);
     }
     catch (...)
     {
+        bool log_error = true;
+        logQueryException(query_log_elem, insert_context, start_watch, key.query, query_span, internal, log_error);
         if (!log_elements.empty())
         {
             auto exception = getCurrentExceptionMessage(false);

@@ -1524,7 +1524,11 @@ StoragePtr Context::executeTableFunction(const ASTPtr & table_expression, const
     uint64_t use_structure_from_insertion_table_in_table_functions = getSettingsRef().use_structure_from_insertion_table_in_table_functions;
     if (use_structure_from_insertion_table_in_table_functions && table_function_ptr->needStructureHint() && hasInsertionTable())
     {
-        const auto & insert_structure = DatabaseCatalog::instance().getTable(getInsertionTable(), shared_from_this())->getInMemoryMetadataPtr()->getColumns();
+        const auto & insert_structure = DatabaseCatalog::instance()
+                                            .getTable(getInsertionTable(), shared_from_this())
+                                            ->getInMemoryMetadataPtr()
+                                            ->getColumns()
+                                            .getInsertable();
         DB::ColumnsDescription structure_hint;
 
         bool use_columns_from_insert_query = true;

@@ -15,6 +15,7 @@ namespace DB
 namespace ErrorCodes
 {
     extern const int TABLE_IS_READ_ONLY;
+    extern const int INCORRECT_QUERY;
 }
 
 

@@ -23,6 +24,21 @@ BlockIO InterpreterCreateIndexQuery::execute()
     auto current_context = getContext();
     const auto & create_index = query_ptr->as<ASTCreateIndexQuery &>();
 
+    // Noop if allow_create_index_without_type = true. throw otherwise
+    if (!create_index.index_decl->as<ASTIndexDeclaration>()->type)
+    {
+        if (!current_context->getSettingsRef().allow_create_index_without_type)
+        {
+            throw Exception(ErrorCodes::INCORRECT_QUERY, "CREATE INDEX without TYPE is forbidden."
+                " SET allow_create_index_without_type=1 to ignore this statements.");
+        }
+        else
+        {
+            // Nothing to do
+            return {};
+        }
+    }
+
     AccessRightsElements required_access;
     required_access.emplace_back(AccessType::ALTER_ADD_INDEX, create_index.getDatabase(), create_index.getTable());
 
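The guard above rejects a type-less `CREATE INDEX` unless the compatibility setting named in the message is enabled, in which case the statement is accepted and ignored. A sketch, not part of the patch, assuming some table `t` with a column `c`:

```sql
CREATE INDEX idx ON t (c);               -- now fails with INCORRECT_QUERY by default
SET allow_create_index_without_type = 1;
CREATE INDEX idx ON t (c);               -- accepted as a no-op, for compatibility with generic SQL tooling
```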
@@ -3181,7 +3181,7 @@ void InterpreterSelectQuery::initSettings()
 {
     auto & query = getSelectQuery();
     if (query.settings())
-        InterpreterSetQuery(query.settings(), context).executeForCurrentContext();
+        InterpreterSetQuery(query.settings(), context).executeForCurrentContext(options.ignore_setting_constraints);
 
     auto & client_info = context->getClientInfo();
     auto min_major = DBMS_MIN_MAJOR_VERSION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD;

@@ -24,10 +24,11 @@ BlockIO InterpreterSetQuery::execute()
 }
 
 
-void InterpreterSetQuery::executeForCurrentContext()
+void InterpreterSetQuery::executeForCurrentContext(bool ignore_setting_constraints)
 {
     const auto & ast = query_ptr->as<ASTSetQuery &>();
-    getContext()->checkSettingsConstraints(ast.changes);
+    if (!ignore_setting_constraints)
+        getContext()->checkSettingsConstraints(ast.changes);
     getContext()->applySettingsChanges(ast.changes);
     getContext()->resetSettingsToDefaultValue(ast.default_settings);
 }

@@ -23,7 +23,7 @@ public:
     /** Set setting for current context (query context).
       * It is used for interpretation of SETTINGS clause in SELECT query.
       */
-    void executeForCurrentContext();
+    void executeForCurrentContext(bool ignore_setting_constraints = false);
 
     bool supportsTransactions() const override { return true; }
 

@@ -37,8 +37,8 @@ static bool isUnlimitedQuery(const IAST * ast)
     if (!ast)
         return false;
 
-    /// It is KILL QUERY
-    if (ast->as<ASTKillQueryQuery>())
+    /// It is KILL QUERY or an async insert flush query
+    if (ast->as<ASTKillQueryQuery>() || ast->getQueryKind() == IAST::QueryKind::AsyncInsertFlush)
         return true;
 
     /// It is SELECT FROM system.processes

@@ -393,7 +393,7 @@ public:
     /** Register running query. Returns refcounted object, that will remove element from list in destructor.
       * If too many running queries - wait for not more than specified (see settings) amount of time.
      * If timeout is passed - throw an exception.
-      * Don't count KILL QUERY queries.
+      * Don't count KILL QUERY queries or async insert flush queries
       */
     EntryPtr insert(const String & query_, const IAST * ast, ContextMutablePtr query_context, UInt64 watch_start_nanoseconds);
 

@@ -51,6 +51,8 @@ struct SelectQueryOptions
     bool settings_limit_offset_done = false;
     bool is_explain = false; /// The value is true if it's explain statement.
     bool is_create_parameterized_view = false;
+    /// Bypass setting constraints for some internal queries such as projection ASTs.
+    bool ignore_setting_constraints = false;
 
     /// These two fields are used to evaluate shardNum() and shardCount() function when
     /// prefer_localhost_replica == 1 and local instance is selected. They are needed because local

@@ -141,6 +143,12 @@ struct SelectQueryOptions
         return *this;
     }
 
+    SelectQueryOptions & ignoreSettingConstraints(bool value = true)
+    {
+        ignore_setting_constraints = value;
+        return *this;
+    }
+
     SelectQueryOptions & setInternal(bool value = false)
     {
         is_internal = value;

@@ -1,27 +1,24 @@
 #include <Interpreters/evaluateConstantExpression.h>
 
 #include <Columns/ColumnConst.h>
-#include <Columns/ColumnsNumber.h>
 #include <Core/Block.h>
 #include <DataTypes/DataTypesNumber.h>
 #include <DataTypes/FieldToDataType.h>
 #include <Interpreters/Context.h>
 #include <Interpreters/convertFieldToType.h>
-#include <Interpreters/ExpressionActions.h>
 #include <Interpreters/ExpressionAnalyzer.h>
 #include <Interpreters/TreeRewriter.h>
 #include <Parsers/ASTFunction.h>
 #include <Parsers/ASTIdentifier.h>
 #include <Parsers/ASTLiteral.h>
 #include <Parsers/ASTSubquery.h>
-#include <Parsers/ExpressionElementParsers.h>
 #include <TableFunctions/TableFunctionFactory.h>
 #include <Common/typeid_cast.h>
 #include <Interpreters/FunctionNameNormalizer.h>
 #include <Interpreters/ReplaceQueryParameterVisitor.h>
-#include <Poco/Util/AbstractConfiguration.h>
 #include <unordered_map>
 
+
 namespace DB
 {
 

@@ -94,18 +91,18 @@ std::pair<Field, std::shared_ptr<const IDataType>> evaluateConstantExpression(co
 
     if (!result_column)
         throw Exception(ErrorCodes::BAD_ARGUMENTS,
-            "Element of set in IN, VALUES or LIMIT or aggregate function parameter "
+            "Element of set in IN, VALUES, or LIMIT, or aggregate function parameter, or a table function argument "
            "is not a constant expression (result column not found): {}", result_name);
 
     if (result_column->empty())
         throw Exception(ErrorCodes::LOGICAL_ERROR,
             "Logical error: empty result column after evaluation "
-            "of constant expression for IN, VALUES or LIMIT or aggregate function parameter");
+            "of constant expression for IN, VALUES, or LIMIT, or aggregate function parameter, or a table function argument");
 
     /// Expressions like rand() or now() are not constant
     if (!isColumnConst(*result_column))
         throw Exception(ErrorCodes::BAD_ARGUMENTS,
-            "Element of set in IN, VALUES or LIMIT or aggregate function parameter "
+            "Element of set in IN, VALUES, or LIMIT, or aggregate function parameter, or a table function argument "
             "is not a constant expression (result column is not const): {}", result_name);
 
     return std::make_pair((*result_column)[0], result_type);

@@ -155,7 +155,6 @@ static void logQuery(const String & query, ContextPtr context, bool internal, Qu
     }
 }
 
-
 /// Call this inside catch block.
 static void setExceptionStackTrace(QueryLogElement & elem)
 {

@@ -208,7 +207,332 @@ static void logException(ContextPtr context, QueryLogElement & elem, bool log_er
     LOG_INFO(&Poco::Logger::get("executeQuery"), message);
 }
 
-static void onExceptionBeforeStart(
+static void
+addStatusInfoToQueryElement(QueryLogElement & element, const QueryStatusInfo & info, const ASTPtr query_ast, const ContextPtr context_ptr)
+{
+    const auto time_now = std::chrono::system_clock::now();
+    UInt64 elapsed_microseconds = info.elapsed_microseconds;
+    element.event_time = timeInSeconds(time_now);
+    element.event_time_microseconds = timeInMicroseconds(time_now);
+    element.query_duration_ms = elapsed_microseconds / 1000;
+
+    ProfileEvents::increment(ProfileEvents::QueryTimeMicroseconds, elapsed_microseconds);
+    if (query_ast->as<ASTSelectQuery>() || query_ast->as<ASTSelectWithUnionQuery>())
+    {
+        ProfileEvents::increment(ProfileEvents::SelectQueryTimeMicroseconds, elapsed_microseconds);
+    }
+    else if (query_ast->as<ASTInsertQuery>())
+    {
+        ProfileEvents::increment(ProfileEvents::InsertQueryTimeMicroseconds, elapsed_microseconds);
+    }
+    else
+    {
+        ProfileEvents::increment(ProfileEvents::OtherQueryTimeMicroseconds, elapsed_microseconds);
+    }
+
+    element.read_rows = info.read_rows;
+    element.read_bytes = info.read_bytes;
+
+    element.written_rows = info.written_rows;
+    element.written_bytes = info.written_bytes;
+
+    element.memory_usage = info.peak_memory_usage > 0 ? info.peak_memory_usage : 0;
+
+    element.thread_ids = info.thread_ids;
+    element.profile_counters = info.profile_counters;
+
+    /// We need to refresh the access info since dependent views might have added extra information, either during
+    /// creation of the view (PushingToViews chain) or while executing its internal SELECT
+    const auto & access_info = context_ptr->getQueryAccessInfo();
+    element.query_databases.insert(access_info.databases.begin(), access_info.databases.end());
+    element.query_tables.insert(access_info.tables.begin(), access_info.tables.end());
+    element.query_columns.insert(access_info.columns.begin(), access_info.columns.end());
+    element.query_partitions.insert(access_info.partitions.begin(), access_info.partitions.end());
+    element.query_projections.insert(access_info.projections.begin(), access_info.projections.end());
+    element.query_views.insert(access_info.views.begin(), access_info.views.end());
+
+    const auto & factories_info = context_ptr->getQueryFactoriesInfo();
+    element.used_aggregate_functions = factories_info.aggregate_functions;
+    element.used_aggregate_function_combinators = factories_info.aggregate_function_combinators;
+    element.used_database_engines = factories_info.database_engines;
+    element.used_data_type_families = factories_info.data_type_families;
+    element.used_dictionaries = factories_info.dictionaries;
+    element.used_formats = factories_info.formats;
+    element.used_functions = factories_info.functions;
+    element.used_storages = factories_info.storages;
+    element.used_table_functions = factories_info.table_functions;
+
+    element.async_read_counters = context_ptr->getAsyncReadCounters();
+}
+
+
+QueryLogElement logQueryStart(
+    const std::chrono::time_point<std::chrono::system_clock> & query_start_time,
+    const ContextMutablePtr & context,
+    const String & query_for_logging,
+    const ASTPtr & query_ast,
+    const QueryPipeline & pipeline,
+    const std::unique_ptr<IInterpreter> & interpreter,
+    bool internal,
+    const String & query_database,
+    const String & query_table,
+    bool async_insert)
+{
+    const Settings & settings = context->getSettingsRef();
+
+    QueryLogElement elem;
+
+    elem.type = QueryLogElementType::QUERY_START;
+    elem.event_time = timeInSeconds(query_start_time);
+    elem.event_time_microseconds = timeInMicroseconds(query_start_time);
+    elem.query_start_time = timeInSeconds(query_start_time);
+    elem.query_start_time_microseconds = timeInMicroseconds(query_start_time);
+
+    elem.current_database = context->getCurrentDatabase();
+    elem.query = query_for_logging;
+    if (settings.log_formatted_queries)
+        elem.formatted_query = queryToString(query_ast);
+    elem.normalized_query_hash = normalizedQueryHash<false>(query_for_logging);
+    elem.query_kind = query_ast->getQueryKind();
+
+    elem.client_info = context->getClientInfo();
+
+    if (auto txn = context->getCurrentTransaction())
+        elem.tid = txn->tid;
+
+    bool log_queries = settings.log_queries && !internal;
+
+    /// Log into system table start of query execution, if need.
+    if (log_queries)
+    {
+        /// This check is not obvious, but without it 01220_scalar_optimization_in_alter fails.
+        if (pipeline.initialized())
+        {
+            const auto & info = context->getQueryAccessInfo();
+            elem.query_databases = info.databases;
+            elem.query_tables = info.tables;
+            elem.query_columns = info.columns;
+            elem.query_partitions = info.partitions;
+            elem.query_projections = info.projections;
+            elem.query_views = info.views;
+        }
+
+        if (async_insert)
+            InterpreterInsertQuery::extendQueryLogElemImpl(elem, context);
+        else if (interpreter)
+            interpreter->extendQueryLogElem(elem, query_ast, context, query_database, query_table);
+
+        if (settings.log_query_settings)
+            elem.query_settings = std::make_shared<Settings>(context->getSettingsRef());
+
+        elem.log_comment = settings.log_comment;
+        if (elem.log_comment.size() > settings.max_query_size)
+            elem.log_comment.resize(settings.max_query_size);
+
+        if (elem.type >= settings.log_queries_min_type && !settings.log_queries_min_query_duration_ms.totalMilliseconds())
+        {
+            if (auto query_log = context->getQueryLog())
+                query_log->add(elem);
+        }
+    }
+
+    return elem;
+}
+
+void logQueryFinish(
+    QueryLogElement & elem,
+    const ContextMutablePtr & context,
+    const ASTPtr & query_ast,
+    const QueryPipeline & query_pipeline,
+    bool pulling_pipeline,
+    std::shared_ptr<OpenTelemetry::SpanHolder> query_span,
+    bool internal)
+{
+    const Settings & settings = context->getSettingsRef();
|
||||||
|
auto log_queries = settings.log_queries && !internal;
|
||||||
|
auto log_queries_min_type = settings.log_queries_min_type;
|
||||||
|
auto log_queries_min_query_duration_ms = settings.log_queries_min_query_duration_ms.totalMilliseconds();
|
||||||
|
auto log_processors_profiles = settings.log_processors_profiles;
|
||||||
|
|
||||||
|
QueryStatusPtr process_list_elem = context->getProcessListElement();
|
||||||
|
if (process_list_elem)
|
||||||
|
{
|
||||||
|
/// Update performance counters before logging to query_log
|
||||||
|
CurrentThread::finalizePerformanceCounters();
|
||||||
|
|
||||||
|
QueryStatusInfo info = process_list_elem->getInfo(true, context->getSettingsRef().log_profile_events);
|
||||||
|
elem.type = QueryLogElementType::QUERY_FINISH;
|
||||||
|
|
||||||
|
addStatusInfoToQueryElement(elem, info, query_ast, context);
|
||||||
|
|
||||||
|
if (pulling_pipeline)
|
||||||
|
{
|
||||||
|
query_pipeline.tryGetResultRowsAndBytes(elem.result_rows, elem.result_bytes);
|
||||||
|
}
|
||||||
|
else /// will be used only for ordinary INSERT queries
|
||||||
|
{
|
||||||
|
auto progress_out = process_list_elem->getProgressOut();
|
||||||
|
elem.result_rows = progress_out.written_rows;
|
||||||
|
elem.result_bytes = progress_out.written_bytes;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto progress_callback = context->getProgressCallback();
|
||||||
|
if (progress_callback)
|
||||||
|
{
|
||||||
|
Progress p;
|
||||||
|
p.incrementPiecewiseAtomically(Progress{ResultProgress{elem.result_rows, elem.result_bytes}});
|
||||||
|
progress_callback(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (elem.read_rows != 0)
|
||||||
|
{
|
||||||
|
double elapsed_seconds = static_cast<double>(info.elapsed_microseconds) / 1000000.0;
|
||||||
|
double rows_per_second = static_cast<double>(elem.read_rows) / elapsed_seconds;
|
||||||
|
LOG_DEBUG(
|
||||||
|
&Poco::Logger::get("executeQuery"),
|
||||||
|
"Read {} rows, {} in {} sec., {} rows/sec., {}/sec.",
|
||||||
|
elem.read_rows,
|
||||||
|
ReadableSize(elem.read_bytes),
|
||||||
|
elapsed_seconds,
|
||||||
|
rows_per_second,
|
||||||
|
ReadableSize(elem.read_bytes / elapsed_seconds));
|
||||||
|
}
|
||||||
|
|
||||||
|
if (log_queries && elem.type >= log_queries_min_type
|
||||||
|
&& static_cast<Int64>(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
|
||||||
|
{
|
||||||
|
if (auto query_log = context->getQueryLog())
|
||||||
|
query_log->add(elem);
|
||||||
|
}
|
||||||
|
if (log_processors_profiles)
|
||||||
|
{
|
||||||
|
if (auto processors_profile_log = context->getProcessorsProfileLog())
|
||||||
|
{
|
||||||
|
ProcessorProfileLogElement processor_elem;
|
||||||
|
processor_elem.event_time = elem.event_time;
|
||||||
|
processor_elem.event_time_microseconds = elem.event_time_microseconds;
|
||||||
|
processor_elem.initial_query_id = elem.client_info.initial_query_id;
|
||||||
|
processor_elem.query_id = elem.client_info.current_query_id;
|
||||||
|
|
||||||
|
auto get_proc_id = [](const IProcessor & proc) -> UInt64 { return reinterpret_cast<std::uintptr_t>(&proc); };
|
||||||
|
|
||||||
|
for (const auto & processor : query_pipeline.getProcessors())
|
||||||
|
{
|
||||||
|
std::vector<UInt64> parents;
|
||||||
|
for (const auto & port : processor->getOutputs())
|
||||||
|
{
|
||||||
|
if (!port.isConnected())
|
||||||
|
continue;
|
||||||
|
const IProcessor & next = port.getInputPort().getProcessor();
|
||||||
|
parents.push_back(get_proc_id(next));
|
||||||
|
}
|
||||||
|
|
||||||
|
processor_elem.id = get_proc_id(*processor);
|
||||||
|
processor_elem.parent_ids = std::move(parents);
|
||||||
|
|
||||||
|
processor_elem.plan_step = reinterpret_cast<std::uintptr_t>(processor->getQueryPlanStep());
|
||||||
|
processor_elem.plan_group = processor->getQueryPlanStepGroup();
|
||||||
|
|
||||||
|
processor_elem.processor_name = processor->getName();
|
||||||
|
|
||||||
|
/// NOTE: convert this to UInt64
|
||||||
|
processor_elem.elapsed_us = static_cast<UInt32>(processor->getElapsedUs());
|
||||||
|
processor_elem.input_wait_elapsed_us = static_cast<UInt32>(processor->getInputWaitElapsedUs());
|
||||||
|
processor_elem.output_wait_elapsed_us = static_cast<UInt32>(processor->getOutputWaitElapsedUs());
|
||||||
|
|
||||||
|
auto stats = processor->getProcessorDataStats();
|
||||||
|
processor_elem.input_rows = stats.input_rows;
|
||||||
|
processor_elem.input_bytes = stats.input_bytes;
|
||||||
|
processor_elem.output_rows = stats.output_rows;
|
||||||
|
processor_elem.output_bytes = stats.output_bytes;
|
||||||
|
|
||||||
|
processors_profile_log->add(processor_elem);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (query_span)
|
||||||
|
{
|
||||||
|
query_span->addAttribute("db.statement", elem.query);
|
||||||
|
query_span->addAttribute("clickhouse.query_id", elem.client_info.current_query_id);
|
||||||
|
query_span->addAttribute("clickhouse.query_status", "QueryFinish");
|
||||||
|
query_span->addAttributeIfNotEmpty("clickhouse.tracestate", OpenTelemetry::CurrentContext().tracestate);
|
||||||
|
query_span->addAttributeIfNotZero("clickhouse.read_rows", elem.read_rows);
|
||||||
|
query_span->addAttributeIfNotZero("clickhouse.read_bytes", elem.read_bytes);
|
||||||
|
query_span->addAttributeIfNotZero("clickhouse.written_rows", elem.written_rows);
|
||||||
|
query_span->addAttributeIfNotZero("clickhouse.written_bytes", elem.written_bytes);
|
||||||
|
query_span->addAttributeIfNotZero("clickhouse.memory_usage", elem.memory_usage);
|
||||||
|
query_span->finish();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void logQueryException(
|
||||||
|
QueryLogElement & elem,
|
||||||
|
const ContextMutablePtr & context,
|
||||||
|
const Stopwatch & start_watch,
|
||||||
|
const ASTPtr & query_ast,
|
||||||
|
std::shared_ptr<OpenTelemetry::SpanHolder> query_span,
|
||||||
|
bool internal,
|
||||||
|
bool log_error)
|
||||||
|
{
|
||||||
|
const Settings & settings = context->getSettingsRef();
|
||||||
|
auto log_queries = settings.log_queries && !internal;
|
||||||
|
auto log_queries_min_type = settings.log_queries_min_type;
|
||||||
|
auto log_queries_min_query_duration_ms = settings.log_queries_min_query_duration_ms.totalMilliseconds();
|
||||||
|
|
||||||
|
elem.type = QueryLogElementType::EXCEPTION_WHILE_PROCESSING;
|
||||||
|
elem.exception_code = getCurrentExceptionCode();
|
||||||
|
auto exception_message = getCurrentExceptionMessageAndPattern(/* with_stacktrace */ false);
|
||||||
|
elem.exception = std::move(exception_message.text);
|
||||||
|
elem.exception_format_string = exception_message.format_string;
|
||||||
|
|
||||||
|
QueryStatusPtr process_list_elem = context->getProcessListElement();
|
||||||
|
|
||||||
|
/// Update performance counters before logging to query_log
|
||||||
|
CurrentThread::finalizePerformanceCounters();
|
||||||
|
const auto time_now = std::chrono::system_clock::now();
|
||||||
|
elem.event_time = timeInSeconds(time_now);
|
||||||
|
elem.event_time_microseconds = timeInMicroseconds(time_now);
|
||||||
|
|
||||||
|
if (process_list_elem)
|
||||||
|
{
|
||||||
|
QueryStatusInfo info = process_list_elem->getInfo(true, settings.log_profile_events, false);
|
||||||
|
addStatusInfoToQueryElement(elem, info, query_ast, context);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
elem.query_duration_ms = start_watch.elapsedMilliseconds();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (settings.calculate_text_stack_trace && log_error)
|
||||||
|
setExceptionStackTrace(elem);
|
||||||
|
logException(context, elem, log_error);
|
||||||
|
|
||||||
|
/// In case of exception we log internal queries also
|
||||||
|
if (log_queries && elem.type >= log_queries_min_type && static_cast<Int64>(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
|
||||||
|
{
|
||||||
|
if (auto query_log = context->getQueryLog())
|
||||||
|
query_log->add(elem);
|
||||||
|
}
|
||||||
|
|
||||||
|
ProfileEvents::increment(ProfileEvents::FailedQuery);
|
||||||
|
if (query_ast->as<ASTSelectQuery>() || query_ast->as<ASTSelectWithUnionQuery>())
|
||||||
|
ProfileEvents::increment(ProfileEvents::FailedSelectQuery);
|
||||||
|
else if (query_ast->as<ASTInsertQuery>())
|
||||||
|
ProfileEvents::increment(ProfileEvents::FailedInsertQuery);
|
||||||
|
|
||||||
|
if (query_span)
|
||||||
|
{
|
||||||
|
query_span->addAttribute("db.statement", elem.query);
|
||||||
|
query_span->addAttribute("clickhouse.query_id", elem.client_info.current_query_id);
|
||||||
|
query_span->addAttribute("clickhouse.exception", elem.exception);
|
||||||
|
query_span->addAttribute("clickhouse.exception_code", elem.exception_code);
|
||||||
|
query_span->finish();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void logExceptionBeforeStart(
|
||||||
const String & query_for_logging,
|
const String & query_for_logging,
|
||||||
ContextPtr context,
|
ContextPtr context,
|
||||||
ASTPtr ast,
|
ASTPtr ast,
|
||||||
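
Note: the helpers above extract what previously lived only inside executeQueryImpl, so callers elsewhere (for example an async-insert flush path) can produce the same query_log records. A minimal sketch of the intended pairing, assuming the caller has already prepared the context, AST, pipeline, and span (the variable names merely mirror the parameters above):

    QueryLogElement elem = logQueryStart(
        query_start_time, context, query_for_logging, query_ast,
        pipeline, interpreter, /* internal */ false,
        query_database, query_table, /* async_insert */ true);
    try
    {
        /// ... execute the pipeline ...
        logQueryFinish(elem, context, query_ast, pipeline, /* pulling_pipeline */ false, query_span, /* internal */ false);
    }
    catch (...)
    {
        logQueryException(elem, context, start_watch, query_ast, query_span, /* internal */ false, /* log_error */ true);
        throw;
    }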
@@ -431,7 +755,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
         logQuery(query_for_logging, context, internal, stage);

         if (!internal)
-            onExceptionBeforeStart(query_for_logging, context, ast, query_span, start_watch.elapsedMilliseconds());
+            logExceptionBeforeStart(query_for_logging, context, ast, query_span, start_watch.elapsedMilliseconds());

         throw;
     }

@@ -804,132 +1128,23 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(

         /// Everything related to query log.
         {
-            QueryLogElement elem;
-
-            elem.type = QueryLogElementType::QUERY_START;
-            elem.event_time = timeInSeconds(query_start_time);
-            elem.event_time_microseconds = timeInMicroseconds(query_start_time);
-            elem.query_start_time = timeInSeconds(query_start_time);
-            elem.query_start_time_microseconds = timeInMicroseconds(query_start_time);
-
-            elem.current_database = context->getCurrentDatabase();
-            elem.query = query_for_logging;
-            if (settings.log_formatted_queries)
-                elem.formatted_query = queryToString(ast);
-            elem.normalized_query_hash = normalizedQueryHash<false>(query_for_logging);
-            elem.query_kind = ast->getQueryKind();
-
-            elem.client_info = client_info;
-
-            if (auto txn = context->getCurrentTransaction())
-                elem.tid = txn->tid;
-
-            bool log_queries = settings.log_queries && !internal;
-
-            /// Log into system table start of query execution, if need.
-            if (log_queries)
-            {
-                /// This check is not obvious, but without it 01220_scalar_optimization_in_alter fails.
-                if (pipeline.initialized())
-                {
-                    const auto & info = context->getQueryAccessInfo();
-                    elem.query_databases = info.databases;
-                    elem.query_tables = info.tables;
-                    elem.query_columns = info.columns;
-                    elem.query_partitions = info.partitions;
-                    elem.query_projections = info.projections;
-                    elem.query_views = info.views;
-                }
-
-                if (async_insert)
-                    InterpreterInsertQuery::extendQueryLogElemImpl(elem, context);
-                else if (interpreter)
-                    interpreter->extendQueryLogElem(elem, ast, context, query_database, query_table);
-
-                if (settings.log_query_settings)
-                    elem.query_settings = std::make_shared<Settings>(context->getSettingsRef());
-
-                elem.log_comment = settings.log_comment;
-                if (elem.log_comment.size() > settings.max_query_size)
-                    elem.log_comment.resize(settings.max_query_size);
-
-                if (elem.type >= settings.log_queries_min_type && !settings.log_queries_min_query_duration_ms.totalMilliseconds())
-                {
-                    if (auto query_log = context->getQueryLog())
-                        query_log->add(elem);
-                }
-            }
-
-            /// Common code for finish and exception callbacks
-            auto status_info_to_query_log
-                = [](QueryLogElement & element, const QueryStatusInfo & info, const ASTPtr query_ast, const ContextPtr context_ptr) mutable
-            {
-                const auto time_now = std::chrono::system_clock::now();
-                UInt64 elapsed_microseconds = info.elapsed_microseconds;
-                element.event_time = timeInSeconds(time_now);
-                element.event_time_microseconds = timeInMicroseconds(time_now);
-                element.query_duration_ms = elapsed_microseconds / 1000;
-
-                ProfileEvents::increment(ProfileEvents::QueryTimeMicroseconds, elapsed_microseconds);
-                if (query_ast->as<ASTSelectQuery>() || query_ast->as<ASTSelectWithUnionQuery>())
-                {
-                    ProfileEvents::increment(ProfileEvents::SelectQueryTimeMicroseconds, elapsed_microseconds);
-                }
-                else if (query_ast->as<ASTInsertQuery>())
-                {
-                    ProfileEvents::increment(ProfileEvents::InsertQueryTimeMicroseconds, elapsed_microseconds);
-                }
-                else
-                {
-                    ProfileEvents::increment(ProfileEvents::OtherQueryTimeMicroseconds, elapsed_microseconds);
-                }
-
-                element.read_rows = info.read_rows;
-                element.read_bytes = info.read_bytes;
-
-                element.written_rows = info.written_rows;
-                element.written_bytes = info.written_bytes;
-
-                element.memory_usage = info.peak_memory_usage > 0 ? info.peak_memory_usage : 0;
-
-                element.thread_ids = info.thread_ids;
-                element.profile_counters = info.profile_counters;
-
-                /// We need to refresh the access info since dependent views might have added extra information, either during
-                /// creation of the view (PushingToViews chain) or while executing its internal SELECT
-                const auto & access_info = context_ptr->getQueryAccessInfo();
-                element.query_databases.insert(access_info.databases.begin(), access_info.databases.end());
-                element.query_tables.insert(access_info.tables.begin(), access_info.tables.end());
-                element.query_columns.insert(access_info.columns.begin(), access_info.columns.end());
-                element.query_partitions.insert(access_info.partitions.begin(), access_info.partitions.end());
-                element.query_projections.insert(access_info.projections.begin(), access_info.projections.end());
-                element.query_views.insert(access_info.views.begin(), access_info.views.end());
-
-                const auto & factories_info = context_ptr->getQueryFactoriesInfo();
-                element.used_aggregate_functions = factories_info.aggregate_functions;
-                element.used_aggregate_function_combinators = factories_info.aggregate_function_combinators;
-                element.used_database_engines = factories_info.database_engines;
-                element.used_data_type_families = factories_info.data_type_families;
-                element.used_dictionaries = factories_info.dictionaries;
-                element.used_formats = factories_info.formats;
-                element.used_functions = factories_info.functions;
-                element.used_storages = factories_info.storages;
-                element.used_table_functions = factories_info.table_functions;
-
-                element.async_read_counters = context_ptr->getAsyncReadCounters();
-            };
+            QueryLogElement elem = logQueryStart(
+                query_start_time,
+                context,
+                query_for_logging,
+                ast,
+                pipeline,
+                interpreter,
+                internal,
+                query_database,
+                query_table,
+                async_insert);

             /// Also make possible for caller to log successful query finish and exception during execution.
             auto finish_callback = [elem,
                                     context,
                                     ast,
                                     write_into_query_cache,
-                                    log_queries,
-                                    log_queries_min_type = settings.log_queries_min_type,
-                                    log_queries_min_query_duration_ms = settings.log_queries_min_query_duration_ms.totalMilliseconds(),
-                                    log_processors_profiles = settings.log_processors_profiles,
-                                    status_info_to_query_log,
+                                    internal,
                                     implicit_txn_control,
                                     execute_implicit_tcl_query,
                                     pulling_pipeline = pipeline.pulling(),
@@ -940,137 +1155,15 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
                 /// partial/garbage results in case of exceptions during query execution.
                 query_pipeline.finalizeWriteInQueryCache();

-            QueryStatusPtr process_list_elem = context->getProcessListElement();
-
-            if (process_list_elem)
-            {
-                /// Update performance counters before logging to query_log
-                CurrentThread::finalizePerformanceCounters();
-
-                QueryStatusInfo info = process_list_elem->getInfo(true, context->getSettingsRef().log_profile_events);
-                elem.type = QueryLogElementType::QUERY_FINISH;
-
-                status_info_to_query_log(elem, info, ast, context);
-
-                if (pulling_pipeline)
-                {
-                    query_pipeline.tryGetResultRowsAndBytes(elem.result_rows, elem.result_bytes);
-                }
-                else /// will be used only for ordinary INSERT queries
-                {
-                    auto progress_out = process_list_elem->getProgressOut();
-                    elem.result_rows = progress_out.written_rows;
-                    elem.result_bytes = progress_out.written_bytes;
-                }
-
-                auto progress_callback = context->getProgressCallback();
-                if (progress_callback)
-                {
-                    Progress p;
-                    p.incrementPiecewiseAtomically(Progress{ResultProgress{elem.result_rows, elem.result_bytes}});
-                    progress_callback(p);
-                }
-
-                if (elem.read_rows != 0)
-                {
-                    double elapsed_seconds = static_cast<double>(info.elapsed_microseconds) / 1000000.0;
-                    double rows_per_second = static_cast<double>(elem.read_rows) / elapsed_seconds;
-                    LOG_DEBUG(
-                        &Poco::Logger::get("executeQuery"),
-                        "Read {} rows, {} in {} sec., {} rows/sec., {}/sec.",
-                        elem.read_rows,
-                        ReadableSize(elem.read_bytes),
-                        elapsed_seconds,
-                        rows_per_second,
-                        ReadableSize(elem.read_bytes / elapsed_seconds));
-                }
-
-                if (log_queries && elem.type >= log_queries_min_type && static_cast<Int64>(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
-                {
-                    if (auto query_log = context->getQueryLog())
-                        query_log->add(elem);
-                }
-                if (log_processors_profiles)
-                {
-                    if (auto processors_profile_log = context->getProcessorsProfileLog())
-                    {
-                        ProcessorProfileLogElement processor_elem;
-                        processor_elem.event_time = elem.event_time;
-                        processor_elem.event_time_microseconds = elem.event_time_microseconds;
-                        processor_elem.initial_query_id = elem.client_info.initial_query_id;
-                        processor_elem.query_id = elem.client_info.current_query_id;
-
-                        auto get_proc_id = [](const IProcessor & proc) -> UInt64
-                        {
-                            return reinterpret_cast<std::uintptr_t>(&proc);
-                        };
-
-                        for (const auto & processor : query_pipeline.getProcessors())
-                        {
-                            std::vector<UInt64> parents;
-                            for (const auto & port : processor->getOutputs())
-                            {
-                                if (!port.isConnected())
-                                    continue;
-                                const IProcessor & next = port.getInputPort().getProcessor();
-                                parents.push_back(get_proc_id(next));
-                            }
-
-                            processor_elem.id = get_proc_id(*processor);
-                            processor_elem.parent_ids = std::move(parents);
-
-                            processor_elem.plan_step = reinterpret_cast<std::uintptr_t>(processor->getQueryPlanStep());
-                            processor_elem.plan_group = processor->getQueryPlanStepGroup();
-
-                            processor_elem.processor_name = processor->getName();
-
-                            /// NOTE: convert this to UInt64
-                            processor_elem.elapsed_us = static_cast<UInt32>(processor->getElapsedUs());
-                            processor_elem.input_wait_elapsed_us = static_cast<UInt32>(processor->getInputWaitElapsedUs());
-                            processor_elem.output_wait_elapsed_us = static_cast<UInt32>(processor->getOutputWaitElapsedUs());
-
-                            auto stats = processor->getProcessorDataStats();
-                            processor_elem.input_rows = stats.input_rows;
-                            processor_elem.input_bytes = stats.input_bytes;
-                            processor_elem.output_rows = stats.output_rows;
-                            processor_elem.output_bytes = stats.output_bytes;
-
-                            processors_profile_log->add(processor_elem);
-                        }
-                    }
-                }
-
-                if (*implicit_txn_control)
-                    execute_implicit_tcl_query(context, ASTTransactionControl::COMMIT);
-            }
-
-            if (query_span)
-            {
-                query_span->addAttribute("db.statement", elem.query);
-                query_span->addAttribute("clickhouse.query_id", elem.client_info.current_query_id);
-                query_span->addAttribute("clickhouse.query_status", "QueryFinish");
-                query_span->addAttributeIfNotEmpty("clickhouse.tracestate", OpenTelemetry::CurrentContext().tracestate);
-                query_span->addAttributeIfNotZero("clickhouse.read_rows", elem.read_rows);
-                query_span->addAttributeIfNotZero("clickhouse.read_bytes", elem.read_bytes);
-                query_span->addAttributeIfNotZero("clickhouse.written_rows", elem.written_rows);
-                query_span->addAttributeIfNotZero("clickhouse.written_bytes", elem.written_bytes);
-                query_span->addAttributeIfNotZero("clickhouse.memory_usage", elem.memory_usage);
-                query_span->finish();
-            }
+            logQueryFinish(elem, context, ast, query_pipeline, pulling_pipeline, query_span, internal);
+
+            if (*implicit_txn_control)
+                execute_implicit_tcl_query(context, ASTTransactionControl::COMMIT);
         };

-        auto exception_callback = [start_watch,
-                                   elem,
-                                   context,
-                                   ast,
-                                   log_queries,
-                                   log_queries_min_type = settings.log_queries_min_type,
-                                   log_queries_min_query_duration_ms = settings.log_queries_min_query_duration_ms.totalMilliseconds(),
-                                   my_quota(quota),
-                                   status_info_to_query_log,
-                                   implicit_txn_control,
-                                   execute_implicit_tcl_query,
-                                   query_span](bool log_error) mutable
+        auto exception_callback =
+            [start_watch, elem, context, ast, internal, my_quota(quota), implicit_txn_control, execute_implicit_tcl_query, query_span](
+                bool log_error) mutable
         {
             if (*implicit_txn_control)
                 execute_implicit_tcl_query(context, ASTTransactionControl::ROLLBACK);
@@ -1080,60 +1173,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
             if (my_quota)
                 my_quota->used(QuotaType::ERRORS, 1, /* check_exceeded = */ false);

-            elem.type = QueryLogElementType::EXCEPTION_WHILE_PROCESSING;
-            elem.exception_code = getCurrentExceptionCode();
-            auto exception_message = getCurrentExceptionMessageAndPattern(/* with_stacktrace */ false);
-            elem.exception = std::move(exception_message.text);
-            elem.exception_format_string = exception_message.format_string;
-
-            QueryStatusPtr process_list_elem = context->getProcessListElement();
-            const Settings & current_settings = context->getSettingsRef();
-
-            /// Update performance counters before logging to query_log
-            CurrentThread::finalizePerformanceCounters();
-            const auto time_now = std::chrono::system_clock::now();
-            elem.event_time = timeInSeconds(time_now);
-            elem.event_time_microseconds = timeInMicroseconds(time_now);
-
-            if (process_list_elem)
-            {
-                QueryStatusInfo info = process_list_elem->getInfo(true, current_settings.log_profile_events, false);
-                status_info_to_query_log(elem, info, ast, context);
-            }
-            else
-            {
-                elem.query_duration_ms = start_watch.elapsedMilliseconds();
-            }
-
-            if (current_settings.calculate_text_stack_trace && log_error)
-                setExceptionStackTrace(elem);
-            logException(context, elem, log_error);
-
-            /// In case of exception we log internal queries also
-            if (log_queries && elem.type >= log_queries_min_type && static_cast<Int64>(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
-            {
-                if (auto query_log = context->getQueryLog())
-                    query_log->add(elem);
-            }
-
-            ProfileEvents::increment(ProfileEvents::FailedQuery);
-            if (ast->as<ASTSelectQuery>() || ast->as<ASTSelectWithUnionQuery>())
-            {
-                ProfileEvents::increment(ProfileEvents::FailedSelectQuery);
-            }
-            else if (ast->as<ASTInsertQuery>())
-            {
-                ProfileEvents::increment(ProfileEvents::FailedInsertQuery);
-            }
-
-            if (query_span)
-            {
-                query_span->addAttribute("db.statement", elem.query);
-                query_span->addAttribute("clickhouse.query_id", elem.client_info.current_query_id);
-                query_span->addAttribute("clickhouse.exception", elem.exception);
-                query_span->addAttribute("clickhouse.exception_code", elem.exception_code);
-                query_span->finish();
-            }
+            logQueryException(elem, context, start_watch, ast, query_span, internal, log_error);
         };

         res.finish_callback = std::move(finish_callback);
@@ -1148,7 +1188,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
             txn->onException();

         if (!internal)
-            onExceptionBeforeStart(query_for_logging, context, ast, query_span, start_watch.elapsedMilliseconds());
+            logExceptionBeforeStart(query_for_logging, context, ast, query_span, start_watch.elapsedMilliseconds());

         throw;
     }

@@ -1,15 +1,21 @@
 #pragma once

 #include <Core/QueryProcessingStage.h>
-#include <QueryPipeline/BlockIO.h>
-#include <Interpreters/Context_fwd.h>
 #include <Formats/FormatSettings.h>
+#include <Interpreters/Context_fwd.h>
+#include <Interpreters/QueryLog.h>
+#include <QueryPipeline/BlockIO.h>
+
+#include <memory>
+#include <optional>

 namespace DB
 {

+class IInterpreter;
 class ReadBuffer;
 class WriteBuffer;
+struct QueryStatusInfo;

 struct QueryResultDetails
 {
@@ -66,4 +72,41 @@ BlockIO executeQuery(
 /// if built pipeline does not require any input and does not produce any output.
 void executeTrivialBlockIO(BlockIO & streams, ContextPtr context);

+/// Prepares a QueryLogElement and, if enabled, logs it to system.query_log
+QueryLogElement logQueryStart(
+    const std::chrono::time_point<std::chrono::system_clock> & query_start_time,
+    const ContextMutablePtr & context,
+    const String & query_for_logging,
+    const ASTPtr & query_ast,
+    const QueryPipeline & pipeline,
+    const std::unique_ptr<IInterpreter> & interpreter,
+    bool internal,
+    const String & query_database,
+    const String & query_table,
+    bool async_insert);
+
+void logQueryFinish(
+    QueryLogElement & elem,
+    const ContextMutablePtr & context,
+    const ASTPtr & query_ast,
+    const QueryPipeline & query_pipeline,
+    bool pulling_pipeline,
+    std::shared_ptr<OpenTelemetry::SpanHolder> query_span,
+    bool internal);
+
+void logQueryException(
+    QueryLogElement & elem,
+    const ContextMutablePtr & context,
+    const Stopwatch & start_watch,
+    const ASTPtr & query_ast,
+    std::shared_ptr<OpenTelemetry::SpanHolder> query_span,
+    bool internal,
+    bool log_error);
+
+void logExceptionBeforeStart(
+    const String & query_for_logging,
+    ContextPtr context,
+    ASTPtr ast,
+    const std::shared_ptr<OpenTelemetry::SpanHolder> & query_span,
+    UInt64 elapsed_millliseconds);
+
 }

@@ -56,8 +56,7 @@ void ASTCreateIndexQuery::formatQueryImpl(const FormatSettings & settings, Forma
     formatOnCluster(settings);

-    if (!cluster.empty())
-        settings.ostr << " ";
+    settings.ostr << " ";

     index_decl->formatImpl(settings, state, frame);
 }

@@ -13,8 +13,8 @@ ASTPtr ASTIndexDeclaration::clone() const
     auto res = std::make_shared<ASTIndexDeclaration>();

     res->name = name;
-    res->granularity = granularity;
+    if (granularity)
+        res->granularity = granularity;
     if (expr)
         res->set(res->expr, expr->clone());
     if (type)
@@ -25,23 +25,37 @@ ASTPtr ASTIndexDeclaration::clone() const

 void ASTIndexDeclaration::formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const
 {
-    if (part_of_create_index_query)
-    {
-        s.ostr << "(";
-        expr->formatImpl(s, state, frame);
-        s.ostr << ")";
-    }
-    else
-    {
-        s.ostr << backQuoteIfNeed(name);
-        s.ostr << " ";
-        expr->formatImpl(s, state, frame);
-    }
+    if (expr)
+    {
+        if (part_of_create_index_query)
+        {
+            if (expr->as<ASTExpressionList>())
+            {
+                s.ostr << "(";
+                expr->formatImpl(s, state, frame);
+                s.ostr << ")";
+            }
+            else
+                expr->formatImpl(s, state, frame);
+        }
+        else
+        {
+            s.ostr << backQuoteIfNeed(name);
+            s.ostr << " ";
+            expr->formatImpl(s, state, frame);
+        }
+    }

-    s.ostr << (s.hilite ? hilite_keyword : "") << " TYPE " << (s.hilite ? hilite_none : "");
-    type->formatImpl(s, state, frame);
-    s.ostr << (s.hilite ? hilite_keyword : "") << " GRANULARITY " << (s.hilite ? hilite_none : "");
-    s.ostr << granularity;
+    if (type)
+    {
+        s.ostr << (s.hilite ? hilite_keyword : "") << " TYPE " << (s.hilite ? hilite_none : "");
+        type->formatImpl(s, state, frame);
+    }
+    if (granularity)
+    {
+        s.ostr << (s.hilite ? hilite_keyword : "") << " GRANULARITY " << (s.hilite ? hilite_none : "");
+        s.ostr << granularity;
+    }
 }

 }

@@ -35,6 +35,8 @@ public:
     /// Data from buffer to insert after inlined one - may be nullptr.
     ReadBuffer * tail = nullptr;

+    bool async_insert_flush = false;
+
     String getDatabase() const;
     String getTable() const;

@@ -66,7 +68,7 @@ public:
         return res;
     }

-    QueryKind getQueryKind() const override { return QueryKind::Insert; }
+    QueryKind getQueryKind() const override { return async_insert_flush ? QueryKind::AsyncInsertFlush : QueryKind::Insert; }

 protected:
     void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
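
Note: the kind is derived from a flag rather than a dedicated AST node, so a flushed async insert reuses the ordinary INSERT AST. A hedged sketch of the observable behavior (illustrative, not part of the diff):

    auto insert = std::make_shared<ASTInsertQuery>();
    assert(insert->getQueryKind() == IAST::QueryKind::Insert);
    insert->async_insert_flush = true;   /// intended for the async-insert flush path
    assert(insert->getQueryKind() == IAST::QueryKind::AsyncInsertFlush);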
@@ -142,6 +142,14 @@ ASTPtr ASTProjectionSelectQuery::cloneToASTSelect() const
     }
     if (groupBy())
         select_query->setExpression(ASTSelectQuery::Expression::GROUP_BY, groupBy()->clone());
+
+    auto settings_query = std::make_shared<ASTSetQuery>();
+    SettingsChanges settings_changes;
+    settings_changes.insertSetting("optimize_aggregators_of_group_by_keys", false);
+    settings_changes.insertSetting("optimize_group_by_function_keys", false);
+    settings_query->changes = std::move(settings_changes);
+    settings_query->is_standalone = false;
+    select_query->setExpression(ASTSelectQuery::Expression::SETTINGS, std::move(settings_query));
     return node;
 }

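
Note: attaching these two settings (with is_standalone = false, so they render as a SETTINGS clause rather than a standalone SET statement) pins down how the projection's SELECT is analyzed; otherwise GROUP BY-key rewrites could produce an aggregate list that no longer matches what the projection stores. Illustratively, the cloned query behaves like:

    SELECT ... GROUP BY ... SETTINGS optimize_aggregators_of_group_by_keys = 0, optimize_group_by_function_keys = 0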
@@ -64,4 +64,14 @@ void ASTSetQuery::formatImpl(const FormatSettings & format, FormatState &, Forma
     }
 }

+void ASTSetQuery::appendColumnName(WriteBuffer & ostr) const
+{
+    Hash hash = getTreeHash();
+
+    writeCString("__settings_", ostr);
+    writeText(hash.first, ostr);
+    ostr.write('_');
+    writeText(hash.second, ostr);
+}
+
 }

@@ -37,6 +37,9 @@ public:
     void updateTreeHashImpl(SipHash & hash_state) const override;

     QueryKind getQueryKind() const override { return QueryKind::Set; }
+
+    void appendColumnName(WriteBuffer & ostr) const override;
+    void appendColumnNameWithoutAlias(WriteBuffer & ostr) const override { return appendColumnName(ostr); }
 };

 }
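
Note: with appendColumnName defined, a SETTINGS clause contributes a stable pseudo-column name, so expressions that differ only in their settings no longer collide. A minimal sketch of the resulting name (WriteBufferFromOwnString is the usual in-tree string buffer):

    DB::ASTSetQuery set_query;
    DB::WriteBufferFromOwnString out;
    set_query.appendColumnName(out);
    /// out.str() has the form "__settings_<hash.first>_<hash.second>",
    /// where the two numbers are the halves of the AST's 128-bit tree hash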
@@ -305,6 +305,7 @@ public:
         Commit,
         Rollback,
         SetTransactionSnapshot,
+        AsyncInsertFlush
     };
     /// Return QueryKind of this AST query.
     virtual QueryKind getQueryKind() const { return QueryKind::None; }
src/Parsers/Kusto/Formatters.cpp (new file, 27 lines)
@@ -0,0 +1,27 @@
+#include "Formatters.h"
+
+#include <format>
+
+namespace DB
+{
+std::string formatKQLTimespan(const Int64 ticks)
+{
+    static constexpr Int64 TICKS_PER_SECOND = 10000000;
+    static constexpr auto TICKS_PER_MINUTE = TICKS_PER_SECOND * 60;
+    static constexpr auto TICKS_PER_HOUR = TICKS_PER_MINUTE * 60;
+    static constexpr auto TICKS_PER_DAY = TICKS_PER_HOUR * 24;
+
+    const auto abs_ticks = std::abs(ticks);
+    std::string result = ticks < 0 ? "-" : "";
+    if (abs_ticks >= TICKS_PER_DAY)
+        result.append(std::format("{}.", abs_ticks / TICKS_PER_DAY));
+
+    result.append(std::format(
+        "{:02}:{:02}:{:02}", (abs_ticks / TICKS_PER_HOUR) % 24, (abs_ticks / TICKS_PER_MINUTE) % 60, (abs_ticks / TICKS_PER_SECOND) % 60));
+
+    if (const auto fractional_second = abs_ticks % TICKS_PER_SECOND)
+        result.append(std::format(".{:07}", fractional_second));
+
+    return result;
+}
+}
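
Note: KQL timespans count 100-nanosecond ticks, so TICKS_PER_SECOND = 10000000. A worked example of the format produced above (expected outputs derived by hand from the constants):

    formatKQLTimespan(937845000000);  /// 1 d + 2 h + 3 min + 4.5 s -> "1.02:03:04.5000000"
    formatKQLTimespan(10000000);      /// exactly one second        -> "00:00:01"
    formatKQLTimespan(5000000);       /// half a second             -> "00:00:00.5000000"
    formatKQLTimespan(-10000000);     /// negative values get a leading "-" -> "-00:00:01"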
src/Parsers/Kusto/Formatters.h (new file, 10 lines)
@@ -0,0 +1,10 @@
+#pragma once
+
+#include <base/types.h>
+
+#include <string>
+
+namespace DB
+{
+std::string formatKQLTimespan(Int64 ticks);
+}
@@ -17,24 +17,36 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected
 {
     ParserKeyword s_type("TYPE");
     ParserKeyword s_granularity("GRANULARITY");
+    ParserToken open(TokenType::OpeningRoundBracket);
+    ParserToken close(TokenType::ClosingRoundBracket);
+    ParserOrderByExpressionList order_list;
     ParserDataType data_type_p;
     ParserExpression expression_p;
     ParserUnsignedInteger granularity_p;

     ASTPtr expr;
+    ASTPtr order;
     ASTPtr type;
     ASTPtr granularity;

     /// Skip name parser for SQL-standard CREATE INDEX
-    if (!expression_p.parse(pos, expr, expected))
-        return false;
-
-    if (!s_type.ignore(pos, expected))
-        return false;
-
-    if (!data_type_p.parse(pos, type, expected))
-        return false;
+    if (expression_p.parse(pos, expr, expected))
+    {
+    }
+    else if (open.ignore(pos, expected))
+    {
+        if (!order_list.parse(pos, order, expected))
+            return false;
+
+        if (!close.ignore(pos, expected))
+            return false;
+    }
+
+    if (s_type.ignore(pos, expected))
+    {
+        if (!data_type_p.parse(pos, type, expected))
+            return false;
+    }

     if (s_granularity.ignore(pos, expected))
     {
@@ -45,13 +57,14 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected
     auto index = std::make_shared<ASTIndexDeclaration>();
     index->part_of_create_index_query = true;
     index->set(index->expr, expr);
-    index->set(index->type, type);
+    if (type)
+        index->set(index->type, type);

     if (granularity)
         index->granularity = granularity->as<ASTLiteral &>().value.safeGet<UInt64>();
     else
     {
-        if (index->type->name == "annoy")
+        if (index->type && index->type->name == "annoy")
             index->granularity = ASTIndexDeclaration::DEFAULT_ANNOY_INDEX_GRANULARITY;
         else
             index->granularity = ASTIndexDeclaration::DEFAULT_INDEX_GRANULARITY;
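
Note: with the bracket branch parsing a parenthesized ORDER BY-style list and TYPE made optional, the SQL-standard spelling of CREATE INDEX should now parse as well; illustrative statements (not taken from the diff):

    CREATE INDEX idx ON tab (col1, col2) TYPE minmax GRANULARITY 1
    CREATE INDEX idx2 ON tab (col3)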
@@ -111,7 +111,7 @@ void optimizePrimaryKeyCondition(const Stack & stack);
 void optimizePrewhere(Stack & stack, QueryPlan::Nodes & nodes);
 void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes);
 void optimizeAggregationInOrder(QueryPlan::Node & node, QueryPlan::Nodes &);
-bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & nodes);
+bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & nodes, bool allow_implicit_projections);
 bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes);
 bool addPlansForSets(QueryPlan::Node & node, QueryPlan::Nodes & nodes);

@@ -19,6 +19,7 @@ QueryPlanOptimizationSettings QueryPlanOptimizationSettings::fromSettings(const
     settings.remove_redundant_distinct = from.query_plan_remove_redundant_distinct;
     settings.optimize_projection = from.optimize_use_projections && from.query_plan_optimize_projection;
     settings.force_use_projection = settings.optimize_projection && from.force_optimize_projection;
+    settings.optimize_use_implicit_projections = settings.optimize_projection && from.optimize_use_implicit_projections;
     return settings;
 }

@@ -41,6 +41,7 @@ struct QueryPlanOptimizationSettings
     /// If reading from projection can be applied
     bool optimize_projection = false;
     bool force_use_projection = false;
+    bool optimize_use_implicit_projections = false;

     static QueryPlanOptimizationSettings fromSettings(const Settings & from);
     static QueryPlanOptimizationSettings fromContext(ContextPtr from);
@@ -4,6 +4,7 @@
 #include <Processors/QueryPlan/Optimizations/Optimizations.h>
 #include <Processors/QueryPlan/SortingStep.h>
 #include <Common/Exception.h>
+#include <DataTypes/IDataType.h>

 namespace DB
 {
@@ -28,6 +29,20 @@ const DB::DataStream & getChildOutputStream(DB::QueryPlan::Node & node)
 namespace DB::QueryPlanOptimizations
 {

+/// This is a check that output columns does not have the same name
+/// This is ok for DAG, but may introduce a bug in a SotringStep cause columns are selected by name.
+static bool areOutputsConvertableToBlock(const ActionsDAG::NodeRawConstPtrs & outputs)
+{
+    std::unordered_set<std::string_view> names;
+    for (const auto & output : outputs)
+    {
+        if (!names.emplace(output->result_name).second)
+            return false;
+    }
+
+    return true;
+}
+
 size_t tryExecuteFunctionsAfterSorting(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes)
 {
     if (parent_node->children.size() != 1)
@@ -57,6 +72,9 @@ size_t tryExecuteFunctionsAfterSorting(QueryPlan::Node * parent_node, QueryPlan:
     if (unneeded_for_sorting->trivial())
         return 0;

+    if (!areOutputsConvertableToBlock(needed_for_sorting->getOutputs()))
+        return 0;
+
     // Sorting (parent_node) -> Expression (child_node)
     auto & node_with_needed = nodes.emplace_back();
     std::swap(node_with_needed.children, child_node->children);
@@ -126,7 +126,8 @@ void optimizeTreeSecondPass(const QueryPlanOptimizationSettings & optimization_s
             optimizeReadInOrder(*frame.node, nodes);

             if (optimization_settings.optimize_projection)
-                num_applied_projection += optimizeUseAggregateProjections(*frame.node, nodes);
+                num_applied_projection
+                    += optimizeUseAggregateProjections(*frame.node, nodes, optimization_settings.optimize_use_implicit_projections);

             if (optimization_settings.aggregation_in_order)
                 optimizeAggregationInOrder(*frame.node, nodes);
@@ -69,7 +69,7 @@ static AggregateProjectionInfo getAggregatingProjectionInfo(
         projection.query_ast,
         context,
         Pipe(std::make_shared<SourceFromSingleChunk>(metadata_snapshot->getSampleBlock())),
-        SelectQueryOptions{QueryProcessingStage::WithMergeableState}.ignoreASTOptimizations());
+        SelectQueryOptions{QueryProcessingStage::WithMergeableState}.ignoreASTOptimizations().ignoreSettingConstraints());

     const auto & analysis_result = interpreter.getAnalysisResult();
     const auto & query_analyzer = interpreter.getQueryAnalyzer();
@@ -433,7 +433,8 @@ AggregateProjectionCandidates getAggregateProjectionCandidates(
     QueryPlan::Node & node,
     AggregatingStep & aggregating,
     ReadFromMergeTree & reading,
-    const std::shared_ptr<PartitionIdToMaxBlock> & max_added_blocks)
+    const std::shared_ptr<PartitionIdToMaxBlock> & max_added_blocks,
+    bool allow_implicit_projections)
 {
     const auto & keys = aggregating.getParams().keys;
     const auto & aggregates = aggregating.getParams().aggregates;
@@ -453,7 +454,8 @@ AggregateProjectionCandidates getAggregateProjectionCandidates(
         if (projection.type == ProjectionDescription::Type::Aggregate)
             agg_projections.push_back(&projection);

-    bool can_use_minmax_projection = metadata->minmax_count_projection && !reading.getMergeTreeData().has_lightweight_delete_parts.load();
+    bool can_use_minmax_projection = allow_implicit_projections && metadata->minmax_count_projection
+        && !reading.getMergeTreeData().has_lightweight_delete_parts.load();

     if (!can_use_minmax_projection && agg_projections.empty())
         return candidates;
@@ -543,7 +545,7 @@ static QueryPlan::Node * findReadingStep(QueryPlan::Node & node)
     return nullptr;
 }

-bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & nodes)
+bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & nodes, bool allow_implicit_projections)
 {
     if (node.children.size() != 1)
         return false;
@@ -568,7 +570,7 @@ bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes &

     std::shared_ptr<PartitionIdToMaxBlock> max_added_blocks = getMaxAddedBlocks(reading);

-    auto candidates = getAggregateProjectionCandidates(node, *aggregating, *reading, max_added_blocks);
+    auto candidates = getAggregateProjectionCandidates(node, *aggregating, *reading, max_added_blocks, allow_implicit_projections);

     AggregateProjectionCandidate * best_candidate = nullptr;
     if (candidates.minmax_projection)
@@ -71,15 +71,12 @@ TableLockHolder IStorage::tryLockForShare(const String & query_id, const std::ch
     return result;
 }

-IStorage::AlterLockHolder IStorage::lockForAlter(const std::chrono::milliseconds & acquire_timeout)
+std::optional<IStorage::AlterLockHolder> IStorage::tryLockForAlter(const std::chrono::milliseconds & acquire_timeout)
 {
     AlterLockHolder lock{alter_lock, std::defer_lock};

     if (!lock.try_lock_for(acquire_timeout))
-        throw Exception(ErrorCodes::DEADLOCK_AVOIDED,
-                        "Locking attempt for ALTER on \"{}\" has timed out! ({} ms) "
-                        "Possible deadlock avoided. Client should retry.",
-                        getStorageID().getFullTableName(), acquire_timeout.count());
+        return {};

     if (is_dropped || is_detached)
         throw Exception(ErrorCodes::TABLE_IS_DROPPED, "Table {} is dropped or detached", getStorageID());
@@ -87,6 +84,18 @@ IStorage::AlterLockHolder IStorage::lockForAlter(const std::chrono::milliseconds
     return lock;
 }

+IStorage::AlterLockHolder IStorage::lockForAlter(const std::chrono::milliseconds & acquire_timeout)
+{
+    if (auto lock = tryLockForAlter(acquire_timeout); lock == std::nullopt)
+        throw Exception(ErrorCodes::DEADLOCK_AVOIDED,
+                        "Locking attempt for ALTER on \"{}\" has timed out! ({} ms) "
+                        "Possible deadlock avoided. Client should retry.",
+                        getStorageID().getFullTableName(), acquire_timeout.count());
+    else
+        return std::move(*lock);
+}
+
 TableExclusiveLockHolder IStorage::lockExclusively(const String & query_id, const std::chrono::milliseconds & acquire_timeout)
 {

@@ -283,6 +283,7 @@ public:
     /// sure, that we execute only one simultaneous alter. Doesn't affect share lock.
     using AlterLockHolder = std::unique_lock<std::timed_mutex>;
     AlterLockHolder lockForAlter(const std::chrono::milliseconds & acquire_timeout);
+    std::optional<AlterLockHolder> tryLockForAlter(const std::chrono::milliseconds & acquire_timeout);

     /// Lock table exclusively. This lock must be acquired if you want to be
     /// sure, that no other thread (SELECT, merge, ALTER, etc.) doing something
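
Note: tryLockForAlter turns the lock timeout into a recoverable condition instead of a DEADLOCK_AVOIDED exception, while lockForAlter keeps the old throwing contract on top of it. A hedged usage sketch (do_alter and on_timeout are placeholders, not part of the diff):

    if (auto alter_lock = table->tryLockForAlter(std::chrono::milliseconds(1000)))
        do_alter(*alter_lock);   /// proceed under the held lock
    else
        on_timeout();            /// e.g. re-enqueue the ALTER and retry later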
@ -11,6 +11,7 @@
|
|||||||
#include <Storages/extractKeyExpressionList.h>
|
#include <Storages/extractKeyExpressionList.h>
|
||||||
|
|
||||||
#include <Core/Defines.h>
|
#include <Core/Defines.h>
|
||||||
|
#include "Common/Exception.h"
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
@@ -89,8 +90,16 @@ IndexDescription IndexDescription::getIndexFromAST(const ASTPtr & definition_ast
     result.type = Poco::toLower(index_definition->type->name);
     result.granularity = index_definition->granularity;
 
-    ASTPtr expr_list = extractKeyExpressionList(index_definition->expr->clone());
-    result.expression_list_ast = expr_list->clone();
+    ASTPtr expr_list;
+    if (index_definition->expr)
+    {
+        expr_list = extractKeyExpressionList(index_definition->expr->clone());
+        result.expression_list_ast = expr_list->clone();
+    }
+    else
+    {
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Expression is not set");
+    }
 
     auto syntax = TreeRewriter(context).analyze(expr_list, columns.getAllPhysical());
     result.expression = ExpressionAnalyzer(expr_list, syntax, context).getActions(true);
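This hunk replaces an unconditional dereference of index_definition->expr with an explicit guard that fails loudly on a missing expression. A small standalone sketch of the same fail-fast pattern (the AST type and function name are illustrative stand-ins, not ClickHouse's):

    #include <memory>
    #include <stdexcept>

    struct AST { /* expression tree */ };
    using ASTPtr = std::shared_ptr<AST>;

    ASTPtr cloneExpressionList(const ASTPtr & expr)
    {
        /// Dereference only after the null check; a missing expression is a
        /// programming error, so throw (LOGICAL_ERROR in the real code)
        /// instead of crashing on a null pointer.
        if (!expr)
            throw std::logic_error("Expression is not set");
        return std::make_shared<AST>(*expr);
    }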
@@ -1,5 +1,6 @@
 #include "IMergeTreeDataPart.h"
-#include "Storages/MergeTree/IDataPartStorage.h"
+#include <Storages/MergeTree/IDataPartStorage.h>
+#include <base/types.h>
 
 #include <optional>
 #include <boost/algorithm/string/join.hpp>
@@ -1816,6 +1817,22 @@ MutableDataPartStoragePtr IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & di
     return getDataPartStorage().clonePart(path_to_clone, getDataPartStorage().getPartDirectory(), disk, storage.log);
 }
 
+UInt64 IMergeTreeDataPart::getIndexSizeFromFile() const
+{
+    auto metadata_snapshot = storage.getInMemoryMetadataPtr();
+    if (parent_part)
+        metadata_snapshot = metadata_snapshot->projections.get(name).metadata;
+    const auto & pk = metadata_snapshot->getPrimaryKey();
+    if (!pk.column_names.empty())
+    {
+        String file = "primary" + getIndexExtension(false);
+        if (checksums.files.contains("primary" + getIndexExtension(true)))
+            file = "primary" + getIndexExtension(true);
+        return getFileSizeOrZero(file);
+    }
+    return 0;
+}
+
 void IMergeTreeDataPart::checkConsistencyBase() const
 {
     auto metadata_snapshot = storage.getInMemoryMetadataPtr();
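The new getIndexSizeFromFile() reports the on-disk size of the primary index, preferring the compressed variant when the part's checksums list one, and returning 0 when the part has no primary key. A standalone sketch of that lookup order, assuming the ".idx"/".cidx" file names correspond to getIndexExtension(false)/getIndexExtension(true) as in the hunk:

    #include <cstdint>
    #include <filesystem>
    #include <set>
    #include <string>

    /// Prefer the compressed index file if the checksums list it, else the
    /// plain one; return 0 when the file is absent (getFileSizeOrZero analogue).
    uint64_t indexSizeFromFile(const std::filesystem::path & part_dir,
                               const std::set<std::string> & checksum_files)
    {
        std::string file = "primary.idx";
        if (checksum_files.count("primary.cidx"))
            file = "primary.cidx";

        std::error_code ec;
        auto size = std::filesystem::file_size(part_dir / file, ec);
        return ec ? 0 : size;
    }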
@@ -353,6 +353,7 @@ public:
     UInt64 getIndexSizeInBytes() const;
     UInt64 getIndexSizeInAllocatedBytes() const;
     UInt64 getMarksCount() const;
+    UInt64 getIndexSizeFromFile() const;
 
     UInt64 getBytesOnDisk() const { return bytes_on_disk; }
     void setBytesOnDisk(UInt64 bytes_on_disk_) { bytes_on_disk = bytes_on_disk_; }
@@ -564,7 +564,17 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown(
         }
         case (ActionsDAG::ActionType::COLUMN):
        {
-            res = &inverted_dag.addColumn({node.column, node.result_type, node.result_name});
+            String name;
+            if (const auto * column_const = typeid_cast<const ColumnConst *>(node.column.get()))
+                /// Re-generate column name for constant.
+                /// DAG from query (with enabled analyzer) uses suffixes for constants, like 1_UInt8.
+                /// DAG from PK does not use it. This sometimes breaks matching by column name.
+                /// Ideally, we should not compare names, but DAG subtrees instead.
+                name = ASTLiteral(column_const->getDataColumn()[0]).getColumnName();
+            else
+                name = node.result_name;
+
+            res = &inverted_dag.addColumn({node.column, node.result_type, name});
             break;
         }
         case (ActionsDAG::ActionType::ALIAS):
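The hunk above exists because the same literal can carry different result names in the two DAGs being matched. A runnable toy model of the mismatch and the fix (the ConstNode struct and canonicalName() are illustrative stand-ins for ActionsDAG column nodes and ASTLiteral::getColumnName()):

    #include <cassert>
    #include <string>

    /// Minimal model of a DAG column node: just a value and a result name.
    struct ConstNode { int value; std::string name; };

    /// Canonical name derived from the literal value alone, so it is the same
    /// no matter which DAG produced the node.
    std::string canonicalName(const ConstNode & n) { return std::to_string(n.value); }

    int main()
    {
        ConstNode from_query{1, "1_UInt8"};  /// the analyzer suffixes constants
        ConstNode from_pk{1, "1"};           /// the PK DAG uses the bare literal

        assert(from_query.name != from_pk.name);                     /// raw-name match fails
        assert(canonicalName(from_query) == canonicalName(from_pk)); /// canonical names agree
        return 0;
    }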
@@ -6994,7 +6994,8 @@ std::optional<ProjectionCandidate> MergeTreeData::getQueryProcessingStageWithAgg
 
     ProjectionCandidate * selected_candidate = nullptr;
     size_t min_sum_marks = std::numeric_limits<size_t>::max();
-    if (metadata_snapshot->minmax_count_projection && !has_lightweight_delete_parts.load(std::memory_order_relaxed)) /// Disable ReadFromStorage for parts with lightweight.
+    if (settings.optimize_use_implicit_projections && metadata_snapshot->minmax_count_projection
+        && !has_lightweight_delete_parts.load(std::memory_order_relaxed)) /// Disable ReadFromStorage for parts with lightweight.
         add_projection_candidate(*metadata_snapshot->minmax_count_projection, true);
     std::optional<ProjectionCandidate> minmax_count_projection_candidate;
     if (!candidates.empty())
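The last hunk gates the implicit minmax_count projection behind the optimize_use_implicit_projections setting, placed first in the condition so the cheap flag short-circuits before the metadata lookup and the atomic load. A sketch of that evaluation order (plain bools standing in for the real settings object and metadata snapshot):

    #include <atomic>

    struct Settings { bool optimize_use_implicit_projections = true; };

    bool shouldAddImplicitProjection(const Settings & settings,
                                     bool has_minmax_count_projection,
                                     const std::atomic<bool> & has_lightweight_delete_parts)
    {
        /// Cheapest check first; the atomic load only runs when the setting
        /// is enabled and the projection exists.
        return settings.optimize_use_implicit_projections
            && has_minmax_count_projection
            && !has_lightweight_delete_parts.load(std::memory_order_relaxed);
    }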
Some files were not shown because too many files have changed in this diff.