Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 23:21:59 +00:00)

Commit c3b8bc0267: Merge remote-tracking branch 'upstream/master' into allowed-caches-dir-for-dynamic-disks
.github/workflows/master.yml (vendored): 43 changed lines

@@ -895,6 +895,48 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinS390X:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_s390x
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################

@@ -978,6 +1020,7 @@ jobs:
- BuilderBinFreeBSD
- BuilderBinPPC64
- BuilderBinRISCV64
- BuilderBinS390X
- BuilderBinAmd64Compat
- BuilderBinAarch64V80Compat
- BuilderBinClangTidy
.github/workflows/pull_request.yml (vendored): 78 changed lines

@@ -955,6 +955,47 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinS390X:
needs: [DockerHubPush, FastTest, StyleCheck]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_s390x
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################

@@ -1037,6 +1078,7 @@ jobs:
- BuilderBinFreeBSD
- BuilderBinPPC64
- BuilderBinRISCV64
- BuilderBinS390X
- BuilderBinAmd64Compat
- BuilderBinAarch64V80Compat
- BuilderBinClangTidy

@@ -5185,3 +5227,39 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
##################################### SQL TEST ###############################################
##############################################################################################
SQLTest:
needs: [BuilderDebRelease]
runs-on: [self-hosted, fuzzer-unit-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/sqltest
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=SQLTest
REPO_COPY=${{runner.temp}}/sqltest/ClickHouse
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: SQLTest
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 sqltest.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
@@ -3,6 +3,7 @@
#include <magic_enum.hpp>
#include <fmt/format.h>

template <class T> concept is_enum = std::is_enum_v<T>;

namespace detail

@@ -97,7 +97,7 @@ namespace Data
///
/// static void extract(std::size_t pos, Person& obj, const Person& defVal, AbstractExtractor::Ptr pExt)
/// {
/// // defVal is the default person we should use if we encunter NULL entries, so we take the individual fields
/// // defVal is the default person we should use if we encounter NULL entries, so we take the individual fields
/// // as defaults. You can do more complex checking, ie return defVal if only one single entry of the fields is null etc...
/// poco_assert_dbg (!pExt.isNull());
/// std::string lastName;

@@ -146,7 +146,7 @@ namespace Net
std::string cipherList;
/// Specifies the supported ciphers in OpenSSL notation.
/// Defaults to "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH".
/// Defaults to "ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH".

std::string dhParamsFile;
/// Specifies a file containing Diffie-Hellman parameters.

@@ -172,7 +172,7 @@ namespace Net
VerificationMode verificationMode = VERIFY_RELAXED,
int verificationDepth = 9,
bool loadDefaultCAs = false,
const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH");
/// Creates a Context.
///
/// * usage specifies whether the context is used by a client or server.

@@ -200,7 +200,7 @@ namespace Net
VerificationMode verificationMode = VERIFY_RELAXED,
int verificationDepth = 9,
bool loadDefaultCAs = false,
const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH");
/// Creates a Context.
///
/// * usage specifies whether the context is used by a client or server.

@@ -76,7 +76,7 @@ namespace Net
/// <verificationMode>none|relaxed|strict|once</verificationMode>
/// <verificationDepth>1..9</verificationDepth>
/// <loadDefaultCAFile>true|false</loadDefaultCAFile>
/// <cipherList>ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH</cipherList>
/// <cipherList>ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH</cipherList>
/// <preferServerCiphers>true|false</preferServerCiphers>
/// <privateKeyPassphraseHandler>
/// <name>KeyFileHandler</name>

@@ -41,7 +41,7 @@ Context::Params::Params():
verificationMode(VERIFY_RELAXED),
verificationDepth(9),
loadDefaultCAs(false),
cipherList("ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH")
cipherList("ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH")
{
}

@@ -20,6 +20,9 @@ set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/s390x-linux-gnu/libc")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")

set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

@@ -47,7 +47,7 @@ if (CMAKE_CROSSCOMPILING)
set (ENABLE_RUST OFF CACHE INTERNAL "")
elseif (ARCH_S390X)
set (ENABLE_GRPC OFF CACHE INTERNAL "")
set (ENABLE_SENTRY OFF CACHE INTERNAL "")
set (ENABLE_RUST OFF CACHE INTERNAL "")
endif ()
elseif (OS_FREEBSD)
# FIXME: broken dependencies

@@ -2,4 +2,4 @@
This directory contain Dockerfiles for `clickhouse-server`. They are updated in each release.

Also there is bunch of images for testing and CI. They are listed in `images.json` file and updated on each commit to master. If you need to add another image, place information about it into `images.json`.
Also, there is a bunch of images for testing and CI. They are listed in `images.json` file and updated on each commit to master. If you need to add another image, place information about it into `images.json`.
@@ -125,6 +125,7 @@
"docker/test/keeper-jepsen",
"docker/test/server-jepsen",
"docker/test/sqllogic",
"docker/test/sqltest",
"docker/test/stateless"
]
},

@@ -155,13 +156,16 @@
},
"docker/docs/builder": {
"name": "clickhouse/docs-builder",
"dependent": [
]
"dependent": []
},
"docker/test/sqllogic": {
"name": "clickhouse/sqllogic-test",
"dependent": []
},
"docker/test/sqltest": {
"name": "clickhouse/sqltest",
"dependent": []
},
"docker/test/integration/nginx_dav": {
"name": "clickhouse/nginx-dav",
"dependent": []

@@ -80,6 +80,14 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
# Download toolchain and SDK for Darwin
RUN curl -sL -O https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacOSX11.0.sdk.tar.xz

# Download and install mold 2.0 for s390x build
RUN curl -Lo /tmp/mold.tar.gz "https://github.com/rui314/mold/releases/download/v2.0.0/mold-2.0.0-x86_64-linux.tar.gz" \
&& mkdir /tmp/mold \
&& tar -xzf /tmp/mold.tar.gz -C /tmp/mold \
&& cp -r /tmp/mold/mold*/* /usr \
&& rm -rf /tmp/mold \
&& rm /tmp/mold.tar.gz

# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
ARG NFPM_VERSION=2.20.0

@@ -143,6 +143,7 @@ def parse_env_variables(
FREEBSD_SUFFIX = "-freebsd"
PPC_SUFFIX = "-ppc64le"
RISCV_SUFFIX = "-riscv64"
S390X_SUFFIX = "-s390x"
AMD64_COMPAT_SUFFIX = "-amd64-compat"

result = []

@@ -156,6 +157,7 @@ def parse_env_variables(
is_cross_arm_v80compat = compiler.endswith(ARM_V80COMPAT_SUFFIX)
is_cross_ppc = compiler.endswith(PPC_SUFFIX)
is_cross_riscv = compiler.endswith(RISCV_SUFFIX)
is_cross_s390x = compiler.endswith(S390X_SUFFIX)
is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
is_amd64_compat = compiler.endswith(AMD64_COMPAT_SUFFIX)

@@ -217,6 +219,11 @@ def parse_env_variables(
cmake_flags.append(
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-riscv64.cmake"
)
elif is_cross_s390x:
cc = compiler[: -len(S390X_SUFFIX)]
cmake_flags.append(
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-s390x.cmake"
)
elif is_amd64_compat:
cc = compiler[: -len(AMD64_COMPAT_SUFFIX)]
result.append("DEB_ARCH=amd64")

@@ -380,6 +387,7 @@ def parse_args() -> argparse.Namespace:
"clang-16-aarch64-v80compat",
"clang-16-ppc64le",
"clang-16-riscv64",
"clang-16-s390x",
"clang-16-amd64-compat",
"clang-16-freebsd",
),

@@ -35,4 +35,7 @@ ENV LC_ALL en_US.UTF-8
ENV TZ=Europe/Amsterdam
RUN ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone

# This script is used to setup realtime export of server logs from the CI into external ClickHouse cluster:
COPY setup_export_logs.sh /

CMD sleep 1
docker/test/base/setup_export_logs.sh (new executable file, 61 lines)

@@ -0,0 +1,61 @@
#!/bin/bash

# This script sets up export of system log tables to a remote server.
# Remote tables are created if not exist, and augmented with extra columns,
# and their names will contain a hash of the table structure,
# which allows exporting tables from servers of different versions.

# Pre-configured destination cluster, where to export the data
CLUSTER=${CLUSTER:=system_logs_export}

EXTRA_COLUMNS=${EXTRA_COLUMNS:="pull_request_number UInt32, commit_sha String, check_start_time DateTime, check_name LowCardinality(String), instance_type LowCardinality(String), "}
EXTRA_COLUMNS_EXPRESSION=${EXTRA_COLUMNS_EXPRESSION:="0 AS pull_request_number, '' AS commit_sha, now() AS check_start_time, '' AS check_name, '' AS instance_type"}
EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:="check_name, "}

CONNECTION_PARAMETERS=${CONNECTION_PARAMETERS:=""}

# Create all configured system logs:
clickhouse-client --query "SYSTEM FLUSH LOGS"

# For each system log table:
clickhouse-client --query "SHOW TABLES FROM system LIKE '%\\_log'" | while read -r table
do
# Calculate hash of its structure:
hash=$(clickhouse-client --query "
SELECT sipHash64(groupArray((name, type)))
FROM (SELECT name, type FROM system.columns
WHERE database = 'system' AND table = '$table'
ORDER BY position)
")

# Create the destination table with adapted name and structure:
statement=$(clickhouse-client --format TSVRaw --query "SHOW CREATE TABLE system.${table}" | sed -r -e '
s/^\($/('"$EXTRA_COLUMNS"'/;
s/ORDER BY \(/ORDER BY ('"$EXTRA_ORDER_BY_COLUMNS"'/;
s/^CREATE TABLE system\.\w+_log$/CREATE TABLE IF NOT EXISTS '"$table"'_'"$hash"'/;
/^TTL /d
')

echo "Creating destination table ${table}_${hash}" >&2

echo "$statement" | clickhouse-client $CONNECTION_PARAMETERS

echo "Creating table system.${table}_sender" >&2

# Create Distributed table and materialized view to watch on the original table:
clickhouse-client --query "
CREATE TABLE system.${table}_sender
ENGINE = Distributed(${CLUSTER}, default, ${table}_${hash})
EMPTY AS
SELECT ${EXTRA_COLUMNS_EXPRESSION}, *
FROM system.${table}
"

echo "Creating materialized view system.${table}_watcher" >&2

clickhouse-client --query "
CREATE MATERIALIZED VIEW system.${table}_watcher TO system.${table}_sender AS
SELECT ${EXTRA_COLUMNS_EXPRESSION}, *
FROM system.${table}
"
done
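For illustration only, here is a sketch of the objects the script above ends up creating for a single log table. It uses `system.query_log`, a made-up structure hash, a reduced column list, and the default extra columns; the real name suffix, column list, and ORDER BY are derived at runtime from `SHOW CREATE TABLE`:

```sql
-- Hypothetical destination table on the remote cluster; the suffix is a placeholder
-- for the sipHash64 of (name, type) pairs computed by the script.
CREATE TABLE IF NOT EXISTS query_log_12345678901234567890
(
    pull_request_number UInt32,
    commit_sha String,
    check_start_time DateTime,
    check_name LowCardinality(String),
    instance_type LowCardinality(String),
    event_date Date,
    event_time DateTime
)
ENGINE = MergeTree
ORDER BY (check_name, event_date, event_time);

-- Local Distributed "sender" table pointing at the destination cluster.
CREATE TABLE system.query_log_sender
ENGINE = Distributed(system_logs_export, default, query_log_12345678901234567890)
EMPTY AS
SELECT 0 AS pull_request_number, '' AS commit_sha, now() AS check_start_time,
    '' AS check_name, '' AS instance_type, *
FROM system.query_log;

-- Materialized view that forwards every new row of the local log table to the sender.
CREATE MATERIALIZED VIEW system.query_log_watcher TO system.query_log_sender AS
SELECT 0 AS pull_request_number, '' AS commit_sha, now() AS check_start_time,
    '' AS check_name, '' AS instance_type, *
FROM system.query_log;
```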
@@ -148,6 +148,7 @@ function clone_submodules
contrib/liburing
contrib/libfiu
contrib/incbin
contrib/yaml-cpp
)

git submodule sync

@@ -170,6 +171,7 @@ function run_cmake
"-DENABLE_SIMDJSON=1"
"-DENABLE_JEMALLOC=1"
"-DENABLE_LIBURING=1"
"-DENABLE_YAML_CPP=1"
)

export CCACHE_DIR="$FASTTEST_WORKSPACE/ccache"

@@ -2,7 +2,7 @@ version: "2.3"
services:
coredns:
image: coredns/coredns:latest
image: coredns/coredns:1.9.3 # :latest broke this test
restart: always
volumes:
- ${COREDNS_CONFIG_DIR}/example.com:/example.com

@@ -1,4 +1,5 @@
#!/bin/bash

set -exu
trap "exit" INT TERM
docker/test/sqltest/Dockerfile (new file, 30 lines)

@@ -0,0 +1,30 @@
# docker build -t clickhouse/sqltest .
ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG

RUN apt-get update --yes \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get install --yes --no-install-recommends \
wget \
git \
python3 \
python3-dev \
python3-pip \
sudo \
&& apt-get clean

RUN pip3 install \
pyyaml \
clickhouse-driver

ARG sqltest_repo="https://github.com/elliotchance/sqltest/"

RUN git clone ${sqltest_repo}

ENV TZ=UTC
ENV MAX_RUN_TIME=900
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

COPY run.sh /
COPY test.py /
CMD ["/bin/bash", "/run.sh"]
docker/test/sqltest/run.sh (new executable file, 51 lines)

@@ -0,0 +1,51 @@
#!/bin/bash
# shellcheck disable=SC2015

set -x
set -e
set -u
set -o pipefail

BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-16_debug_none_unsplitted_disable_False_binary"}
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}

function wget_with_retry
{
for _ in 1 2 3 4; do
if wget -nv -nd -c "$1";then
return 0
else
sleep 0.5
fi
done
return 1
}

wget_with_retry "$BINARY_URL_TO_DOWNLOAD"
chmod +x clickhouse
./clickhouse install --noninteractive

echo "
users:
default:
access_management: 1" > /etc/clickhouse-server/users.d/access_management.yaml

clickhouse start

# Wait for start
for _ in {1..100}
do
clickhouse-client --query "SELECT 1" && break ||:
sleep 1
done

# Run the test
pushd sqltest/standards/2016/
/test.py
mv report.html test.log /workspace
popd

zstd --threads=0 /var/log/clickhouse-server/clickhouse-server.log
zstd --threads=0 /var/log/clickhouse-server/clickhouse-server.err.log

mv /var/log/clickhouse-server/clickhouse-server.log.zst /var/log/clickhouse-server/clickhouse-server.err.log.zst /workspace
docker/test/sqltest/test.py (new executable file, 148 lines)

@@ -0,0 +1,148 @@
#!/usr/bin/env python3

import os
import yaml
import html
import random
import string
from clickhouse_driver import Client

client = Client(host="localhost", port=9000)
settings = {
"default_table_engine": "Memory",
"union_default_mode": "DISTINCT",
"calculate_text_stack_trace": 0,
}

database_name = "sqltest_" + "".join(
random.choice(string.ascii_lowercase) for _ in range(10)
)

client.execute(f"DROP DATABASE IF EXISTS {database_name}", settings=settings)
client.execute(f"CREATE DATABASE {database_name}", settings=settings)

client = Client(host="localhost", port=9000, database=database_name)

summary = {"success": 0, "total": 0, "results": {}}

log_file = open("test.log", "w")
report_html_file = open("report.html", "w")

with open("features.yml", "r") as file:
yaml_content = yaml.safe_load(file)

for category in yaml_content:
log_file.write(category.capitalize() + " features:\n")
summary["results"][category] = {"success": 0, "total": 0, "results": {}}

for test in yaml_content[category]:
log_file.write(test + ": " + yaml_content[category][test] + "\n")
summary["results"][category]["results"][test] = {
"success": 0,
"total": 0,
"description": yaml_content[category][test],
}

test_path = test[0] + "/" + test + ".tests.yml"
if os.path.exists(test_path):
with open(test_path, "r") as test_file:
test_yaml_content = yaml.load_all(test_file, Loader=yaml.FullLoader)

for test_case in test_yaml_content:
queries = test_case["sql"]
if not isinstance(queries, list):
queries = [queries]

for query in queries:
# Example: E011-01
test_group = ""
if "-" in test:
test_group = test.split("-", 1)[0]
summary["results"][category]["results"][test_group][
"total"
] += 1
summary["results"][category]["results"][test]["total"] += 1
summary["results"][category]["total"] += 1
summary["total"] += 1

log_file.write(query + "\n")

try:
result = client.execute(query, settings=settings)
log_file.write(str(result) + "\n")

if test_group:
summary["results"][category]["results"][test_group][
"success"
] += 1
summary["results"][category]["results"][test][
"success"
] += 1
summary["results"][category]["success"] += 1
summary["success"] += 1

except Exception as e:
log_file.write(f"Error occurred: {str(e)}\n")

client.execute(f"DROP DATABASE {database_name}", settings=settings)

def enable_color(ratio):
if ratio == 0:
return "<b style='color: red;'>"
elif ratio < 0.5:
return "<b style='color: orange;'>"
elif ratio < 1:
return "<b style='color: gray;'>"
else:
return "<b style='color: green;'>"

reset_color = "</b>"

def print_ratio(indent, name, success, total, description):
report_html_file.write(
"{}{}: {}{} / {} ({:.1%}){}{}\n".format(
" " * indent,
name.capitalize(),
enable_color(success / total),
success,
total,
success / total,
reset_color,
f" - " + html.escape(description) if description else "",
)
)

report_html_file.write(
"<html><body><pre style='font-size: 16pt; padding: 1em; line-height: 1.25;'>\n"
)

print_ratio(0, "Total", summary["success"], summary["total"], "")

for category in summary["results"]:
cat_summary = summary["results"][category]

if cat_summary["total"] == 0:
continue

print_ratio(2, category, cat_summary["success"], cat_summary["total"], "")

for test in summary["results"][category]["results"]:
test_summary = summary["results"][category]["results"][test]

if test_summary["total"] == 0:
continue

print_ratio(
6 if "-" in test else 4,
test,
test_summary["success"],
test_summary["total"],
test_summary["description"],
)

report_html_file.write("</pre></body></html>\n")
@@ -20,6 +20,22 @@ ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
./setup_minio.sh stateful

# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
echo "
remote_servers:
system_logs_export:
shard:
replica:
secure: 1
user: ci
host: '${CLICKHOUSE_CI_LOGS_HOST}'
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
fi

function start()
{
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then

@@ -65,6 +81,22 @@ function start()
}

start

# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
# TODO: Check if the password will appear in the logs.
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"

./setup_export_logs.sh

# Unset variables after use
export CONNECTION_PARAMETERS=''
export CLICKHOUSE_CI_LOGS_HOST=''
export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi

# shellcheck disable=SC2086 # No quotes because I want to split it into words.
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
chmod 777 -R /var/lib/clickhouse

@@ -87,4 +87,5 @@ RUN npm install -g azurite \
COPY run.sh /
COPY setup_minio.sh /
COPY setup_hdfs_minicluster.sh /

CMD ["/bin/bash", "/run.sh"]

@@ -36,6 +36,22 @@ fi
./setup_minio.sh stateless
./setup_hdfs_minicluster.sh

# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
echo "
remote_servers:
system_logs_export:
shard:
replica:
secure: 1
user: ci
host: '${CLICKHOUSE_CI_LOGS_HOST}'
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
fi

# For flaky check we also enable thread fuzzer
if [ "$NUM_TRIES" -gt "1" ]; then
export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000

@@ -92,7 +108,28 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
fi

sleep 5

# Wait for the server to start, but not for too long.
for _ in {1..100}
do
clickhouse-client --query "SELECT 1" && break
sleep 1
done

# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
# TODO: Check if the password will appear in the logs.
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"

./setup_export_logs.sh

# Unset variables after use
export CONNECTION_PARAMETERS=''
export CLICKHOUSE_CI_LOGS_HOST=''
export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi

attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01

@@ -51,8 +51,39 @@ configure
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
./setup_minio.sh stateless # to have a proper environment

# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
echo "
remote_servers:
system_logs_export:
shard:
replica:
secure: 1
user: ci
host: '${CLICKHOUSE_CI_LOGS_HOST}'
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
fi

start

# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
# TODO: Check if the password will appear in the logs.
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"

./setup_export_logs.sh

# Unset variables after use
export CONNECTION_PARAMETERS=''
export CLICKHOUSE_CI_LOGS_HOST=''
export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi

# shellcheck disable=SC2086 # No quotes because I want to split it into words.
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
chmod 777 -R /var/lib/clickhouse
@@ -323,9 +323,9 @@ clickhouse-client clickhouse://192.168.1.15,192.168.1.25
`clickhouse-client` uses the first existing file of the following:

- Defined in the `--config-file` parameter.
- `./clickhouse-client.xml`
- `~/.clickhouse-client/config.xml`
- `/etc/clickhouse-client/config.xml`
- `./clickhouse-client.xml`, `.yaml`, `.yml`
- `~/.clickhouse-client/config.xml`, `.yaml`, `.yml`
- `/etc/clickhouse-client/config.xml`, `.yaml`, `.yml`

Example of a config file:

@@ -342,6 +342,17 @@ Example of a config file:
</config>
```

Or the same config in a YAML format:

```yaml
user: username
password: 'password'
secure: true
openSSL:
  client:
    caConfig: '/etc/ssl/cert.pem'
```

### Query ID Format {#query-id-format}

In interactive mode `clickhouse-client` shows query ID for every query. By default, the ID is formatted like this:

@@ -169,7 +169,6 @@ host = '127.0.0.1',
port = 3306,
database = 'test',
connection_pool_size = 8,
on_duplicate_clause = 1,
replace_query = 1
```

@@ -185,7 +184,6 @@ replace_query = 1
<port>3306</port>
<database>test</database>
<connection_pool_size>8</connection_pool_size>
<on_duplicate_clause>1</on_duplicate_clause>
<replace_query>1</replace_query>
</mymysql>
</named_collections>

@@ -1640,7 +1640,7 @@ Keys for server/client settings:
- verificationMode (default: relaxed) – The method for checking the node’s certificates. Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) class. Possible values: `none`, `relaxed`, `strict`, `once`.
- verificationDepth (default: 9) – The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value.
- loadDefaultCAFile (default: true) – Wether built-in CA certificates for OpenSSL will be used. ClickHouse assumes that builtin CA certificates are in the file `/etc/ssl/cert.pem` (resp. the directory `/etc/ssl/certs`) or in file (resp. directory) specified by the environment variable `SSL_CERT_FILE` (resp. `SSL_CERT_DIR`).
- cipherList (default: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`) - Supported OpenSSL encryptions.
- cipherList (default: `ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH`) - Supported OpenSSL encryptions.
- cacheSessions (default: false) – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. Acceptable values: `true`, `false`.
- sessionIdContext (default: `${application.name}`) – A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. This parameter is always recommended since it helps avoid problems both if the server caches the session and if the client requested caching. Default value: `${application.name}`.
- sessionCacheSize (default: [1024\*20](https://github.com/ClickHouse/boringssl/blob/master/include/openssl/ssl.h#L1978)) – The maximum number of sessions that the server caches. A value of 0 means unlimited sessions.

@@ -4,7 +4,7 @@ sidebar_position: 54
sidebar_label: Tuple(T1, T2, ...)
---

# Tuple(t1, T2, …)
# Tuple(T1, T2, …)

A tuple of elements, each having an individual [type](../../sql-reference/data-types/index.md#data_types). Tuple must contain at least one element.
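As a quick illustration of the type (a sketch using the standard `tuple()` and `toTypeName()` functions):

```sql
-- The element types are inferred independently; the result type here is Tuple(UInt8, String, DateTime).
SELECT tuple(1, 'a', now()) AS t, toTypeName(t);
```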
@@ -2476,52 +2476,3 @@ Dictionary updates (other than loading at first use) do not block queries. Durin
We recommend periodically updating the dictionaries with the geobase. During an update, generate new files and write them to a separate location. When everything is ready, rename them to the files used by the server.

There are also functions for working with OS identifiers and search engines, but they shouldn’t be used.

## Embedded Dictionaries

<SelfManaged />

ClickHouse contains a built-in feature for working with a geobase.

This allows you to:

- Use a region’s ID to get its name in the desired language.
- Use a region’s ID to get the ID of a city, area, federal district, country, or continent.
- Check whether a region is part of another region.
- Get a chain of parent regions.

All the functions support “translocality,” the ability to simultaneously use different perspectives on region ownership. For more information, see the section “Functions for working with web analytics dictionaries”.

The internal dictionaries are disabled in the default package.
To enable them, uncomment the parameters `path_to_regions_hierarchy_file` and `path_to_regions_names_files` in the server configuration file.

The geobase is loaded from text files.

Place the `regions_hierarchy*.txt` files into the `path_to_regions_hierarchy_file` directory. This configuration parameter must contain the path to the `regions_hierarchy.txt` file (the default regional hierarchy), and the other files (`regions_hierarchy_ua.txt`) must be located in the same directory.

Put the `regions_names_*.txt` files in the `path_to_regions_names_files` directory.

You can also create these files yourself. The file format is as follows:

`regions_hierarchy*.txt`: TabSeparated (no header), columns:

- region ID (`UInt32`)
- parent region ID (`UInt32`)
- region type (`UInt8`): 1 - continent, 3 - country, 4 - federal district, 5 - region, 6 - city; other types do not have values
- population (`UInt32`) — optional column

`regions_names_*.txt`: TabSeparated (no header), columns:

- region ID (`UInt32`)
- region name (`String`) — Can’t contain tabs or line feeds, even escaped ones.

A flat array is used for storing in RAM. For this reason, IDs shouldn’t be more than a million.

Dictionaries can be updated without restarting the server. However, the set of available dictionaries is not updated.
For updates, the file modification times are checked. If a file has changed, the dictionary is updated.
The interval to check for changes is configured in the `builtin_dictionaries_reload_interval` parameter.
Dictionary updates (other than loading at first use) do not block queries. During updates, queries use the old versions of dictionaries. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries.

We recommend periodically updating the dictionaries with the geobase. During an update, generate new files and write them to a separate location. When everything is ready, rename them to the files used by the server.

There are also functions for working with OS identifiers and search engines, but they shouldn’t be used.
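For reference, the embedded geobase functions that the passage above describes are used along these lines (a sketch only; the region IDs are arbitrary examples and resolve only after the geobase files are configured):

```sql
-- Region name in a chosen language.
SELECT regionToName(toUInt32(213), 'en');

-- Walk up the hierarchy: the country of a region, and its full chain of parents.
SELECT regionToCountry(toUInt32(213));
SELECT regionHierarchy(toUInt32(213));

-- Check whether one region belongs to another.
SELECT regionIn(toUInt32(213), toUInt32(225));
```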
@@ -183,9 +183,8 @@ arrayConcat(arrays)
**Arguments**

- `arrays` – Arbitrary number of arguments of [Array](../../sql-reference/data-types/array.md) type.
**Example**

<!-- -->
**Example**

``` sql
SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res

@@ -559,6 +559,29 @@ Result:
└────────────────────────────┘
```

## tupleConcat

Combines tuples passed as arguments.

``` sql
tupleConcat(tuples)
```

**Arguments**

- `tuples` – Arbitrary number of arguments of [Tuple](../../sql-reference/data-types/tuple.md) type.

**Example**

``` sql
SELECT tupleConcat((1, 2), (3, 4), (true, false)) AS res
```

``` text
┌─res──────────────────┐
│ (1,2,3,4,true,false) │
└──────────────────────┘
```

## Distance functions

@@ -88,7 +88,6 @@ SELECT * FROM s3_engine_table LIMIT 3;
<port>3306</port>
<database>test</database>
<connection_pool_size>8</connection_pool_size>
<on_duplicate_clause>1</on_duplicate_clause>
<replace_query>1</replace_query>
</mymysql>
</named_collections>
@@ -1106,7 +1106,7 @@ ClickHouse uses threads from the global thread pool
- verificationMode – The method for checking the node's certificates. Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) class. Possible values: `none`, `relaxed`, `strict`, `once`.
- verificationDepth – The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value.
- loadDefaultCAFile – Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`. \|
- cipherList – Supported OpenSSL ciphers. For example, `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`.
- cipherList – Supported OpenSSL ciphers. For example, `ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH`.
- cacheSessions – Enables or disables session caching. Must be used together with `sessionIdContext`. Acceptable values: `true`, `false`.
- sessionIdContext – A unique set of arbitrary characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. This parameter is always recommended, since it helps avoid problems both if the server caches the session and if the client requested caching. Default value: `${application.name}`.
- sessionCacheSize – The maximum number of sessions that the server caches. Default value: 1024\*20. 0 means unlimited sessions.
@@ -346,9 +346,7 @@ UserID.bin, URL.bin, and EventTime.bin are the <font face = "monospace">UserID</font>

- We mark some of the values of the primary key columns (<font face = "monospace">UserID</font>, <font face = "monospace">URL</font>) in orange.

These orange-marked column values are the minimum value of each primary key column within each granule. The exception is the last granule (granule 1082 in the diagram above), for which we mark the maximum values.

As we will see below, these orange-marked column values will become the entries of the table's primary index.
These orange-marked column values are the primary key column values of the first row of each granule. As we will see below, these orange-marked column values will become the entries of the table's primary index.

- We number rows starting from 0, in order to align with the internal ClickHouse row numbering scheme, which is also used for log messages.
:::

@@ -1071,13 +1069,6 @@ The corresponding trace log in the ClickHouse server log file confirms that ClickHouse
## Using the compound primary key index via projections

<a href="https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#projections" target="_blank">Projections</a> are currently an experimental feature, so we need to tell ClickHouse:

```sql
SET optimize_use_projections = 1;
```

Create a projection on the original table:
```sql
ALTER TABLE hits_UserID_URL

@@ -1096,10 +1087,12 @@ ALTER TABLE hits_UserID_URL

:::note
- The projection creates a hidden table whose row order and primary index are based on the projection's given ORDER BY clause
- We use the MATERIALIZE keyword so that the hidden table is immediately populated with all 8.87 million rows of the source table hits_UserID_URL
- The hidden table is not listed by the `SHOW TABLES` query
- We use the `MATERIALIZE` keyword so that the hidden table is immediately populated with all 8.87 million rows of the source table <a href="https://clickhouse.com/docs/zh/guides/best-practices#%E5%8C%85%E5%90%AB%E4%B8%BB%E9%94%AE%E7%9A%84%E8%A1%A8" target="_blank">hits_UserID_URL</a>
- If new rows are inserted into the source table hits_UserID_URL, those rows are automatically inserted into the hidden table as well
- A query always (syntactically) targets the source table hits_UserID_URL, but the hidden table is used whenever its row order and primary index allow the query to be executed more efficiently
- Effectively, the implicitly created hidden table has the same row order and primary index as the secondary table we created explicitly:
- Note that projections do not make queries that use `ORDER BY` more efficient, even if the `ORDER BY` matches the projection's `ORDER BY` statement (see https://github.com/ClickHouse/ClickHouse/issues/47333)
- Effectively, the implicitly created hidden table has the same row order and primary index as the <a href="https://clickhouse.com/docs/zh/guides/best-practices#%E9%80%9A%E8%BF%87%E8%BE%85%E5%8A%A9%E8%A1%A8%E4%BD%BF%E7%94%A8%E8%81%94%E5%90%88%E4%B8%BB%E9%94%AE%E7%B4%A2%E5%BC%95" target="_blank">secondary table</a> we created explicitly:

<img src={require('../../../en/guides/best-practices/images/sparse-primary-indexes-12c-1.png').default} class="image"/>

@@ -1163,7 +1156,7 @@ The trace log in the ClickHouse server log file confirms that ClickHouse is operating on the index
```

## Removing ineffective primary key columns
## Summary

The primary index of a table with a compound primary key (UserID, URL) was very useful for speeding up queries that filter on UserID. However, even though the URL column is part of the compound primary key, that index does not provide significant help for speeding up queries that filter on URL.

@@ -1176,4 +1169,12 @@ The trace log in the ClickHouse server log file confirms that ClickHouse is operating on the index

However, if the key columns of a compound primary key differ greatly in cardinality, it is beneficial for queries to order the primary key columns by cardinality in ascending order.

The larger the cardinality difference between the key columns, the more important the order of those columns. We will demonstrate this in a future article. Stay tuned.
The larger the cardinality difference between the key columns, the more important the order of the columns in the primary key. We will demonstrate this in the next section.

# Ordering key columns efficiently

TODO

# Identifying single rows efficiently

TODO
@@ -455,7 +455,7 @@ SSL client/server configuration.
- verificationMode – The method for checking the node’s certificates. Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) class. Possible values: `none`, `relaxed`, `strict`, `once`.
- verificationDepth – The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value.
- loadDefaultCAFile – Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`. \|
- cipherList – Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`.
- cipherList – Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH`.
- cacheSessions – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. Acceptable values: `true`, `false`.
- sessionIdContext – A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. This parameter is always recommended since it helps avoid problems both if the server caches the session and if the client requested caching. Default value: `${application.name}`.
- sessionCacheSize – The maximum number of sessions that the server caches. Default value: 1024\*20. 0 – Unlimited sessions.
@@ -1,8 +1,6 @@
#include <unistd.h>
#include <cstdlib>
#include <fcntl.h>
#include <csignal>
#include <ctime>
#include <iostream>
#include <fstream>
#include <iomanip>

@@ -18,9 +16,7 @@
#include <Common/Exception.h>
#include <Common/randomSeed.h>
#include <Common/clearPasswordFromCommandLine.h>
#include <Core/Types.h>
#include <IO/ReadBufferFromFileDescriptor.h>
#include <IO/WriteBufferFromFileDescriptor.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>

@@ -38,8 +34,6 @@
#include <filesystem>

namespace fs = std::filesystem;

/** A tool for evaluating ClickHouse performance.
* The tool emulates a case with fixed amount of simultaneously executing queries.
*/

@@ -79,7 +73,6 @@ public:
bool randomize_,
size_t max_iterations_,
double max_time_,
const String & json_path_,
size_t confidence_,
const String & query_id_,
const String & query_to_execute_,

@@ -98,7 +91,6 @@ public:
cumulative(cumulative_),
max_iterations(max_iterations_),
max_time(max_time_),
json_path(json_path_),
confidence(confidence_),
query_id(query_id_),
query_to_execute(query_to_execute_),

@@ -165,9 +157,6 @@ public:

int main(const std::vector<std::string> &) override
{
if (!json_path.empty() && fs::exists(json_path)) /// Clear file with previous results
fs::remove(json_path);

readQueries();
runBenchmark();
return 0;

@@ -197,7 +186,6 @@ private:
bool cumulative;
size_t max_iterations;
double max_time;
String json_path;
size_t confidence;
String query_id;
String query_to_execute;

@@ -226,26 +214,23 @@ private:
size_t read_bytes = 0;
size_t result_rows = 0;
size_t result_bytes = 0;
double work_time = 0;

using Sampler = ReservoirSampler<double>;
Sampler sampler {1 << 16};

void add(double seconds, size_t read_rows_inc, size_t read_bytes_inc, size_t result_rows_inc, size_t result_bytes_inc)
void add(double duration, size_t read_rows_inc, size_t read_bytes_inc, size_t result_rows_inc, size_t result_bytes_inc)
{
++queries;
work_time += seconds;
read_rows += read_rows_inc;
read_bytes += read_bytes_inc;
result_rows += result_rows_inc;
result_bytes += result_bytes_inc;
sampler.insert(seconds);
sampler.insert(duration);
}

void clear()
{
queries = 0;
work_time = 0;
read_rows = 0;
read_bytes = 0;
result_rows = 0;

@@ -331,10 +316,13 @@ private:
return false;
}

if (delay > 0 && delay_watch.elapsedSeconds() > delay)
double seconds = delay_watch.elapsedSeconds();
if (delay > 0 && seconds > delay)
{
printNumberOfQueriesExecuted(queries_executed);
cumulative ? report(comparison_info_total) : report(comparison_info_per_interval);
cumulative
? report(comparison_info_total, total_watch.elapsedSeconds())
: report(comparison_info_per_interval, seconds);
delay_watch.restart();
}
}

@@ -350,16 +338,7 @@ private:
try
{
for (size_t i = 0; i < concurrency; ++i)
{
EntryPtrs connection_entries;
connection_entries.reserve(connections.size());

for (const auto & connection : connections)
connection_entries.emplace_back(std::make_shared<Entry>(
connection->get(ConnectionTimeouts::getTCPTimeoutsWithoutFailover(settings))));

pool.scheduleOrThrowOnError([this, connection_entries]() mutable { thread(connection_entries); });
}
pool.scheduleOrThrowOnError([this]() mutable { thread(); });
}
catch (...)
{

@@ -389,21 +368,18 @@ private:
pool.wait();
total_watch.stop();

if (!json_path.empty())
reportJSON(comparison_info_total, json_path);

printNumberOfQueriesExecuted(queries_executed);
report(comparison_info_total);
report(comparison_info_total, total_watch.elapsedSeconds());
}

void thread(EntryPtrs & connection_entries)
void thread()
{
Query query;

/// Randomly choosing connection index
pcg64 generator(randomSeed());
std::uniform_int_distribution<size_t> distribution(0, connection_entries.size() - 1);
std::uniform_int_distribution<size_t> distribution(0, connections.size() - 1);

/// In these threads we do not accept INT signal.
sigset_t sig_set;

@@ -423,15 +399,13 @@ private:
extracted = queue.tryPop(query, 100);

if (shutdown || (max_iterations && queries_executed == max_iterations))
{
return;
}
}

const auto connection_index = distribution(generator);
try
{
execute(connection_entries, query, connection_index);
execute(query, connection_index);
consecutive_errors = 0;
}
catch (...)

@@ -460,17 +434,18 @@ private:
}
}

void execute(EntryPtrs & connection_entries, Query & query, size_t connection_index)
void execute(Query & query, size_t connection_index)
{
Stopwatch watch;

Connection & connection = **connection_entries[connection_index];
ConnectionPool::Entry entry = connections[connection_index]->get(
ConnectionTimeouts::getTCPTimeoutsWithoutFailover(settings));

if (reconnect)
connection.disconnect();
entry->disconnect();

RemoteQueryExecutor executor(
connection, query, {}, global_context, nullptr, Scalars(), Tables(), query_processing_stage);
*entry, query, {}, global_context, nullptr, Scalars(), Tables(), query_processing_stage);
if (!query_id.empty())
executor.setQueryId(query_id);

@@ -485,19 +460,19 @@ private:

executor.finish();

double seconds = (display_client_side_time || progress.elapsed_ns == 0)
double duration = (display_client_side_time || progress.elapsed_ns == 0)
? watch.elapsedSeconds()
: progress.elapsed_ns / 1e9;

std::lock_guard lock(mutex);

size_t info_index = round_robin ? 0 : connection_index;
comparison_info_per_interval[info_index]->add(seconds, progress.read_rows, progress.read_bytes, info.rows, info.bytes);
comparison_info_total[info_index]->add(seconds, progress.read_rows, progress.read_bytes, info.rows, info.bytes);
t_test.add(info_index, seconds);
comparison_info_per_interval[info_index]->add(duration, progress.read_rows, progress.read_bytes, info.rows, info.bytes);
comparison_info_total[info_index]->add(duration, progress.read_rows, progress.read_bytes, info.rows, info.bytes);
t_test.add(info_index, duration);
}

void report(MultiStats & infos)
void report(MultiStats & infos, double seconds)
{
std::lock_guard lock(mutex);

@@ -510,8 +485,6 @@ private:
if (0 == info->queries)
return;

double seconds = info->work_time / concurrency;

std::string connection_description = connections[i]->getDescription();
if (round_robin)
{

@@ -525,10 +498,10 @@ private:
}
std::cerr
<< connection_description << ", "
<< "queries " << info->queries << ", ";
<< "queries: " << info->queries << ", ";
if (info->errors)
{
std::cerr << "errors " << info->errors << ", ";
std::cerr << "errors: " << info->errors << ", ";
}
std::cerr
<< "QPS: " << (info->queries / seconds) << ", "

@@ -567,62 +540,6 @@ private:
}
}

void reportJSON(MultiStats & infos, const std::string & filename)
{
WriteBufferFromFile json_out(filename);

std::lock_guard lock(mutex);

auto print_key_value = [&](auto key, auto value, bool with_comma = true)
{
json_out << double_quote << key << ": " << value << (with_comma ? ",\n" : "\n");
};

auto print_percentile = [&json_out](Stats & info, auto percent, bool with_comma = true)
{
json_out << "\"" << percent << "\": " << info.sampler.quantileNearest(percent / 100.0) << (with_comma ? ",\n" : "\n");
};

json_out << "{\n";

for (size_t i = 0; i < infos.size(); ++i)
{
const auto & info = infos[i];

json_out << double_quote << connections[i]->getDescription() << ": {\n";
json_out << double_quote << "statistics" << ": {\n";

double seconds = info->work_time / concurrency;

print_key_value("QPS", info->queries.load() / seconds);
print_key_value("RPS", info->read_rows / seconds);
print_key_value("MiBPS", info->read_bytes / seconds / 1048576);
print_key_value("RPS_result", info->result_rows / seconds);
print_key_value("MiBPS_result", info->result_bytes / seconds / 1048576);
print_key_value("num_queries", info->queries.load());
print_key_value("num_errors", info->errors, false);

json_out << "},\n";
json_out << double_quote << "query_time_percentiles" << ": {\n";

if (info->queries != 0)
{
for (int percent = 0; percent <= 90; percent += 10)
print_percentile(*info, percent);

print_percentile(*info, 95);
print_percentile(*info, 99);
print_percentile(*info, 99.9);
print_percentile(*info, 99.99, false);
}

json_out << "}\n";
json_out << (i == infos.size() - 1 ? "}\n" : "},\n");
}

json_out << "}\n";
}

public:

~Benchmark() override

@@ -675,7 +592,6 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
("iterations,i", value<size_t>()->default_value(0), "amount of queries to be executed")
("timelimit,t", value<double>()->default_value(0.), "stop launch of queries after specified time limit")
("randomize,r", "randomize order of execution")
("json", value<std::string>()->default_value(""), "write final report to specified file in JSON format")
("host,h", value<Strings>()->multitoken(), "list of hosts")
("port", value<Ports>()->multitoken(), "list of ports")
("roundrobin", "Instead of comparing queries for different --host/--port just pick one random --host/--port for every query and send query to it.")

@@ -739,7 +655,6 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
options.count("randomize"),
options["iterations"].as<size_t>(),
options["timelimit"].as<double>(),
options["json"].as<std::string>(),
options["confidence"].as<size_t>(),
options["query_id"].as<std::string>(),
options["query"].as<std::string>(),

@@ -608,6 +608,8 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
ss << "ALTER TABLE " << getQuotedTable(original_table) << ((partition_name == "'all'") ? " DROP PARTITION ID " : " DROP PARTITION ") << partition_name;

UInt64 num_shards_drop_partition = executeQueryOnCluster(task_table.cluster_push, ss.str(), task_cluster->settings_push, ClusterExecutionMode::ON_EACH_SHARD);
if (num_shards_drop_partition != task_table.cluster_push->getShardCount())
return TaskStatus::Error;

LOG_INFO(log, "Drop partition {} in original table {} have been executed successfully on {} shards of {}",
partition_name, getQuotedTable(original_table), num_shards_drop_partition, task_table.cluster_push->getShardCount());

@@ -163,13 +163,15 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
{
ASTPtr res = parseQueryAndMovePosition(
parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth);
/// For insert query with data(INSERT INTO ... VALUES ...), will lead to format fail,
/// should throw exception early and make exception message more readable.

/// For insert query with data(INSERT INTO ... VALUES ...), that will lead to the formatting failure,
/// we should throw an exception early, and make exception message more readable.
if (const auto * insert_query = res->as<ASTInsertQuery>(); insert_query && insert_query->data)
{
throw Exception(DB::ErrorCodes::INVALID_FORMAT_INSERT_QUERY_WITH_DATA,
"Can't format ASTInsertQuery with data, since data will be lost");
}

if (!quiet)
{
if (!backslash)

@@ -1655,6 +1655,9 @@ try
database_catalog.initializeAndLoadTemporaryDatabase();
loadMetadataSystem(global_context);
maybeConvertSystemDatabase(global_context);
/// This has to be done before the initialization of system logs,
/// otherwise there is a race condition between the system database initialization
/// and creation of new tables in the database.
startupSystemTables();
/// After attaching system databases we can initialize system log.
global_context->initializeSystemLogs();

@@ -550,12 +550,12 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
return access_denied(ErrorCodes::ACCESS_DENIED,
"{}: Not enough privileges. "
"The required privileges have been granted, but without grant option. "
"To execute this query it's necessary to have grant {} WITH GRANT OPTION",
"To execute this query, it's necessary to have the grant {} WITH GRANT OPTION",
AccessRightsElement{flags, args...}.toStringWithoutOptions());
}

return access_denied(ErrorCodes::ACCESS_DENIED,
"{}: Not enough privileges. To execute this query it's necessary to have grant {}",
"{}: Not enough privileges. To execute this query, it's necessary to have the grant {}",
AccessRightsElement{flags, args...}.toStringWithoutOptions() + (grant_option ? " WITH GRANT OPTION" : ""));
}

@@ -756,11 +756,11 @@ bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const
show_error(ErrorCodes::ACCESS_DENIED,
"Not enough privileges. "
"Role {} is granted, but without ADMIN option. "
"To execute this query it's necessary to have the role {} granted with ADMIN option.",
"To execute this query, it's necessary to have the role {} granted with ADMIN option.",
backQuote(*role_name), backQuoteIfNeed(*role_name));
else
show_error(ErrorCodes::ACCESS_DENIED, "Not enough privileges. "
"To execute this query it's necessary to have the role {} granted with ADMIN option.",
"To execute this query, it's necessary to have the role {} granted with ADMIN option.",
backQuoteIfNeed(*role_name));
}

@@ -240,7 +240,7 @@ public:
|
||||
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
|
||||
{
|
||||
const AggregateFunctionForEachData & state = data(place);
|
||||
writeBinary(state.dynamic_array_size, buf);
|
||||
writeBinaryLittleEndian(state.dynamic_array_size, buf);
|
||||
|
||||
const char * nested_state = state.array_of_aggregate_datas;
|
||||
for (size_t i = 0; i < state.dynamic_array_size; ++i)
|
||||
@ -255,7 +255,7 @@ public:
|
||||
AggregateFunctionForEachData & state = data(place);
|
||||
|
||||
size_t new_size = 0;
|
||||
readBinary(new_size, buf);
|
||||
readBinaryLittleEndian(new_size, buf);
|
||||
|
||||
ensureAggregateData(place, new_size, *arena);
|
||||
|
||||
|
@ -108,7 +108,14 @@ private:
    inline size_t buf_size() const { return 1ULL << size_degree; } /// NOLINT
    inline size_t max_fill() const { return 1ULL << (size_degree - 1); } /// NOLINT
    inline size_t mask() const { return buf_size() - 1; }
    inline size_t place(HashValue x) const { return (x >> UNIQUES_HASH_BITS_FOR_SKIP) & mask(); }

    inline size_t place(HashValue x) const
    {
        if constexpr (std::endian::native == std::endian::little)
            return (x >> UNIQUES_HASH_BITS_FOR_SKIP) & mask();
        else
            return (std::byteswap(x) >> UNIQUES_HASH_BITS_FOR_SKIP) & mask();
    }

    /// The value is divided by 2 ^ skip_degree
    inline bool good(HashValue hash) const
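The new place() overload picks the bucket from the high bits of the hash and byte-swaps first on big-endian hosts, presumably so serialized states remain compatible between little- and big-endian machines (this commit adds an s390x build). A minimal standalone sketch of the same idea, not the ClickHouse code; BITS_FOR_SKIP and the 8-bit mask are illustrative, and std::byteswap assumes C++23:

#include <bit>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr unsigned BITS_FOR_SKIP = 23;   /// hypothetical stand-in for UNIQUES_HASH_BITS_FOR_SKIP

std::size_t place(std::uint32_t hash, std::size_t mask)
{
    if constexpr (std::endian::native == std::endian::little)
        return (hash >> BITS_FOR_SKIP) & mask;                  /// little-endian hosts use the value as is
    else
        return (std::byteswap(hash) >> BITS_FOR_SKIP) & mask;   /// big-endian hosts normalize first
}

int main()
{
    std::printf("bucket = %zu\n", place(0xDEADBEEFu, 0xFF));
}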
@ -5,36 +5,57 @@
#include <filesystem>
#include <base/types.h>


namespace fs = std::filesystem;

namespace DB
{

bool safeFsExists(const String & path)
{
    std::error_code ec;
    return fs::exists(path, ec);
}

bool configReadClient(Poco::Util::LayeredConfiguration & config, const std::string & home_path)
{
    std::string config_path;
    if (config.has("config-file"))
        config_path = config.getString("config-file");
    else if (safeFsExists("./clickhouse-client.xml"))
        config_path = "./clickhouse-client.xml";
    else if (!home_path.empty() && safeFsExists(home_path + "/.clickhouse-client/config.xml"))
        config_path = home_path + "/.clickhouse-client/config.xml";
    else if (safeFsExists("/etc/clickhouse-client/config.xml"))
        config_path = "/etc/clickhouse-client/config.xml";

    if (!config_path.empty())
    bool found = false;
    if (config.has("config-file"))
    {
        found = true;
        config_path = config.getString("config-file");
    }
    else
    {
        std::vector<std::string> names;
        names.emplace_back("./clickhouse-client");
        if (!home_path.empty())
            names.emplace_back(home_path + "/.clickhouse-client/config");
        names.emplace_back("/etc/clickhouse-client/config");

        for (const auto & name : names)
        {
            for (const auto & extension : {".xml", ".yaml", ".yml"})
            {
                config_path = name + extension;

                std::error_code ec;
                if (fs::exists(config_path, ec))
                {
                    found = true;
                    break;
                }
            }
            if (found)
                break;
        }
    }

    if (found)
    {
        ConfigProcessor config_processor(config_path);
        auto loaded_config = config_processor.loadConfig();
        config.add(loaded_config.configuration);
        return true;
    }

    return false;
}

}
@ -1,4 +1,5 @@
|
||||
#pragma once
|
||||
|
||||
#include <base/defines.h>
|
||||
#include <base/types.h>
|
||||
#include <fmt/format.h>
|
||||
@ -6,6 +7,8 @@
|
||||
#include <unordered_map>
|
||||
#include <Poco/Logger.h>
|
||||
#include <Poco/Message.h>
|
||||
#include <base/EnumReflection.h>
|
||||
|
||||
|
||||
struct PreformattedMessage;
|
||||
consteval void formatStringCheckArgsNumImpl(std::string_view str, size_t nargs);
|
||||
|
@ -3,6 +3,8 @@
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/SettingsChanges.h>
|
||||
#include <Common/FieldVisitorToString.h>
|
||||
#include <magic_enum.hpp>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
@ -33,13 +33,17 @@
|
||||
|
||||
namespace Poco
|
||||
{
|
||||
|
||||
class Logger;
|
||||
|
||||
namespace Util
|
||||
{
|
||||
class AbstractConfiguration;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
|
@ -11,8 +11,11 @@
|
||||
#include <Common/FieldVisitorWriteBinary.h>
|
||||
|
||||
|
||||
using namespace std::literals;
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int CANNOT_RESTORE_FROM_FIELD_DUMP;
|
||||
@ -582,34 +585,41 @@ String toString(const Field & x)
|
||||
x);
|
||||
}
|
||||
|
||||
String fieldTypeToString(Field::Types::Which type)
|
||||
std::string_view fieldTypeToString(Field::Types::Which type)
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
case Field::Types::Which::Null: return "Null";
|
||||
case Field::Types::Which::Array: return "Array";
|
||||
case Field::Types::Which::Tuple: return "Tuple";
|
||||
case Field::Types::Which::Map: return "Map";
|
||||
case Field::Types::Which::Object: return "Object";
|
||||
case Field::Types::Which::AggregateFunctionState: return "AggregateFunctionState";
|
||||
case Field::Types::Which::Bool: return "Bool";
|
||||
case Field::Types::Which::String: return "String";
|
||||
case Field::Types::Which::Decimal32: return "Decimal32";
|
||||
case Field::Types::Which::Decimal64: return "Decimal64";
|
||||
case Field::Types::Which::Decimal128: return "Decimal128";
|
||||
case Field::Types::Which::Decimal256: return "Decimal256";
|
||||
case Field::Types::Which::Float64: return "Float64";
|
||||
case Field::Types::Which::Int64: return "Int64";
|
||||
case Field::Types::Which::Int128: return "Int128";
|
||||
case Field::Types::Which::Int256: return "Int256";
|
||||
case Field::Types::Which::UInt64: return "UInt64";
|
||||
case Field::Types::Which::UInt128: return "UInt128";
|
||||
case Field::Types::Which::UInt256: return "UInt256";
|
||||
case Field::Types::Which::UUID: return "UUID";
|
||||
case Field::Types::Which::IPv4: return "IPv4";
|
||||
case Field::Types::Which::IPv6: return "IPv6";
|
||||
case Field::Types::Which::CustomType: return "CustomType";
|
||||
case Field::Types::Which::Null: return "Null"sv;
|
||||
case Field::Types::Which::Array: return "Array"sv;
|
||||
case Field::Types::Which::Tuple: return "Tuple"sv;
|
||||
case Field::Types::Which::Map: return "Map"sv;
|
||||
case Field::Types::Which::Object: return "Object"sv;
|
||||
case Field::Types::Which::AggregateFunctionState: return "AggregateFunctionState"sv;
|
||||
case Field::Types::Which::Bool: return "Bool"sv;
|
||||
case Field::Types::Which::String: return "String"sv;
|
||||
case Field::Types::Which::Decimal32: return "Decimal32"sv;
|
||||
case Field::Types::Which::Decimal64: return "Decimal64"sv;
|
||||
case Field::Types::Which::Decimal128: return "Decimal128"sv;
|
||||
case Field::Types::Which::Decimal256: return "Decimal256"sv;
|
||||
case Field::Types::Which::Float64: return "Float64"sv;
|
||||
case Field::Types::Which::Int64: return "Int64"sv;
|
||||
case Field::Types::Which::Int128: return "Int128"sv;
|
||||
case Field::Types::Which::Int256: return "Int256"sv;
|
||||
case Field::Types::Which::UInt64: return "UInt64"sv;
|
||||
case Field::Types::Which::UInt128: return "UInt128"sv;
|
||||
case Field::Types::Which::UInt256: return "UInt256"sv;
|
||||
case Field::Types::Which::UUID: return "UUID"sv;
|
||||
case Field::Types::Which::IPv4: return "IPv4"sv;
|
||||
case Field::Types::Which::IPv6: return "IPv6"sv;
|
||||
case Field::Types::Which::CustomType: return "CustomType"sv;
|
||||
}
|
||||
}
|
||||
|
||||
/// Keep in mind, that "magic_enum" is very expensive for compiler, that's why we don't use it.
|
||||
std::string_view Field::getTypeName() const
|
||||
{
|
||||
return fieldTypeToString(which);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
@ -15,8 +15,7 @@
|
||||
#include <Core/UUID.h>
|
||||
#include <base/IPv4andIPv6.h>
|
||||
#include <base/DayNum.h>
|
||||
#include <base/strong_typedef.h>
|
||||
#include <base/EnumReflection.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -449,7 +448,7 @@ public:
|
||||
|
||||
Types::Which getType() const { return which; }
|
||||
|
||||
constexpr std::string_view getTypeName() const { return magic_enum::enum_name(which); }
|
||||
std::string_view getTypeName() const;
|
||||
|
||||
bool isNull() const { return which == Types::Null; }
|
||||
template <typename T>
|
||||
@ -1005,7 +1004,7 @@ void writeFieldText(const Field & x, WriteBuffer & buf);
|
||||
|
||||
String toString(const Field & x);
|
||||
|
||||
String fieldTypeToString(Field::Types::Which type);
|
||||
std::string_view fieldTypeToString(Field::Types::Which type);
|
||||
|
||||
}
|
||||
|
||||
|
@ -816,7 +816,6 @@ class IColumn;
|
||||
MAKE_OBSOLETE(M, UInt64, merge_tree_clear_old_parts_interval_seconds, 1) \
|
||||
MAKE_OBSOLETE(M, UInt64, partial_merge_join_optimizations, 0) \
|
||||
MAKE_OBSOLETE(M, MaxThreads, max_alter_threads, 0) \
|
||||
MAKE_OBSOLETE(M, Bool, allow_experimental_query_cache, true) \
|
||||
/* moved to config.xml: see also src/Core/ServerSettings.h */ \
|
||||
MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, background_buffer_flush_schedule_pool_size, 16) \
|
||||
MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, background_pool_size, 16) \
|
||||
|
@ -10,6 +10,8 @@
|
||||
#include <chrono>
|
||||
#include <unordered_map>
|
||||
#include <string_view>
|
||||
#include <magic_enum.hpp>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
@ -72,12 +72,6 @@ void applySettingsQuirks(Settings & settings, Poco::Logger * log)
|
||||
}
|
||||
}
|
||||
|
||||
#if defined(THREAD_SANITIZER)
|
||||
settings.use_hedged_requests.value = false;
|
||||
if (log)
|
||||
LOG_WARNING(log, "use_hedged_requests has been disabled for the build with Thread Sanitizer, because they are using fibers, leading to a failed assertion inside TSan");
|
||||
#endif
|
||||
|
||||
if (!queryProfilerWorks())
|
||||
{
|
||||
if (settings.query_profiler_real_time_period_ns)
|
||||
|
@ -9,6 +9,3 @@ target_link_libraries (string_ref_hash PRIVATE clickhouse_common_io)
|
||||
|
||||
clickhouse_add_executable (mysql_protocol mysql_protocol.cpp)
|
||||
target_link_libraries (mysql_protocol PRIVATE dbms)
|
||||
|
||||
clickhouse_add_executable (coro coro.cpp)
|
||||
target_link_libraries (coro PRIVATE clickhouse_common_io)
|
||||
|
@ -1,194 +0,0 @@
|
||||
#include <cassert>
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
#include <optional>
|
||||
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Poco/ConsoleChannel.h>
|
||||
#include <Poco/Logger.h>
|
||||
#include <Poco/AutoPtr.h>
|
||||
|
||||
#if defined(__clang__)
|
||||
#include <experimental/coroutine>
|
||||
|
||||
namespace std // NOLINT(cert-dcl58-cpp)
|
||||
{
|
||||
using namespace experimental::coroutines_v1; // NOLINT(cert-dcl58-cpp)
|
||||
}
|
||||
|
||||
#if __has_warning("-Wdeprecated-experimental-coroutine")
|
||||
#pragma clang diagnostic push
|
||||
#pragma clang diagnostic ignored "-Wdeprecated-experimental-coroutine"
|
||||
#endif
|
||||
|
||||
#else
|
||||
#include <coroutine>
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant"
|
||||
#endif
|
||||
|
||||
|
||||
template <typename T>
|
||||
struct suspend_value // NOLINT(readability-identifier-naming)
|
||||
{
|
||||
constexpr bool await_ready() const noexcept { return true; } // NOLINT(readability-identifier-naming)
|
||||
constexpr void await_suspend(std::coroutine_handle<>) const noexcept {} // NOLINT(readability-identifier-naming)
|
||||
constexpr T await_resume() const noexcept // NOLINT(readability-identifier-naming)
|
||||
{
|
||||
std::cout << " ret " << val << std::endl;
|
||||
return val;
|
||||
}
|
||||
|
||||
T val;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct Task
|
||||
{
|
||||
struct promise_type // NOLINT(readability-identifier-naming)
|
||||
{
|
||||
using coro_handle = std::coroutine_handle<promise_type>;
|
||||
auto get_return_object() { return coro_handle::from_promise(*this); } // NOLINT(readability-identifier-naming)
|
||||
auto initial_suspend() { return std::suspend_never(); } // NOLINT(readability-identifier-naming)
|
||||
auto final_suspend() noexcept { return suspend_value<T>{*r->value}; } // NOLINT(readability-identifier-naming)
|
||||
//void return_void() {}
|
||||
void return_value(T value_) { r->value = value_; } // NOLINT(readability-identifier-naming)
|
||||
void unhandled_exception() // NOLINT(readability-identifier-naming)
|
||||
{
|
||||
DB::tryLogCurrentException("Logger");
|
||||
r->exception = std::current_exception(); // NOLINT(bugprone-throw-keyword-missing)
|
||||
}
|
||||
|
||||
explicit promise_type(std::string tag_) : tag(tag_) {}
|
||||
~promise_type() { std::cout << "~promise_type " << tag << std::endl; }
|
||||
std::string tag;
|
||||
coro_handle next;
|
||||
Task * r = nullptr;
|
||||
};
|
||||
|
||||
using coro_handle = std::coroutine_handle<promise_type>;
|
||||
|
||||
bool await_ready() const noexcept { return false; } // NOLINT(readability-identifier-naming)
|
||||
void await_suspend(coro_handle g) noexcept // NOLINT(readability-identifier-naming)
|
||||
{
|
||||
std::cout << " await_suspend " << my.promise().tag << std::endl;
|
||||
std::cout << " g tag " << g.promise().tag << std::endl;
|
||||
g.promise().next = my;
|
||||
}
|
||||
T await_resume() noexcept // NOLINT(readability-identifier-naming)
|
||||
{
|
||||
std::cout << " await_res " << my.promise().tag << std::endl;
|
||||
return *value;
|
||||
}
|
||||
|
||||
Task(coro_handle handle) : my(handle), tag(handle.promise().tag) // NOLINT(google-explicit-constructor)
|
||||
{
|
||||
assert(handle);
|
||||
my.promise().r = this;
|
||||
std::cout << " Task " << tag << std::endl;
|
||||
}
|
||||
Task(Task &) = delete;
|
||||
Task(Task &&rhs) noexcept : my(rhs.my), tag(rhs.tag)
|
||||
{
|
||||
rhs.my = {};
|
||||
std::cout << " Task&& " << tag << std::endl;
|
||||
}
|
||||
static bool resumeImpl(Task *r)
|
||||
{
|
||||
if (r->value)
|
||||
return false;
|
||||
|
||||
auto & next = r->my.promise().next;
|
||||
|
||||
if (next)
|
||||
{
|
||||
if (resumeImpl(next.promise().r))
|
||||
return true;
|
||||
next = {};
|
||||
}
|
||||
|
||||
if (!r->value)
|
||||
{
|
||||
r->my.resume();
|
||||
if (r->exception)
|
||||
std::rethrow_exception(r->exception);
|
||||
}
|
||||
return !r->value;
|
||||
}
|
||||
|
||||
bool resume()
|
||||
{
|
||||
return resumeImpl(this);
|
||||
}
|
||||
|
||||
T res()
|
||||
{
|
||||
return *value;
|
||||
}
|
||||
|
||||
~Task()
|
||||
{
|
||||
std::cout << " ~Task " << tag << std::endl;
|
||||
}
|
||||
|
||||
private:
|
||||
coro_handle my;
|
||||
std::string tag;
|
||||
std::optional<T> value;
|
||||
std::exception_ptr exception;
|
||||
};
|
||||
|
||||
Task<int> boo([[maybe_unused]] std::string tag)
|
||||
{
|
||||
std::cout << "x" << std::endl;
|
||||
co_await std::suspend_always();
|
||||
std::cout << StackTrace().toString();
|
||||
std::cout << "y" << std::endl;
|
||||
co_return 1;
|
||||
}
|
||||
|
||||
Task<int> bar([[maybe_unused]] std::string tag)
|
||||
{
|
||||
std::cout << "a" << std::endl;
|
||||
int res1 = co_await boo("boo1");
|
||||
std::cout << "b " << res1 << std::endl;
|
||||
int res2 = co_await boo("boo2");
|
||||
if (res2 == 1)
|
||||
throw DB::Exception(1, "hello");
|
||||
std::cout << "c " << res2 << std::endl;
|
||||
co_return res1 + res2; // 1 + 1 = 2
|
||||
}
|
||||
|
||||
Task<int> foo([[maybe_unused]] std::string tag)
|
||||
{
|
||||
std::cout << "Hello" << std::endl;
|
||||
auto res1 = co_await bar("bar1");
|
||||
std::cout << "Coro " << res1 << std::endl;
|
||||
auto res2 = co_await bar("bar2");
|
||||
std::cout << "World " << res2 << std::endl;
|
||||
co_return res1 * res2; // 2 * 2 = 4
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
Poco::AutoPtr<Poco::ConsoleChannel> app_channel(new Poco::ConsoleChannel(std::cerr));
|
||||
Poco::Logger::root().setChannel(app_channel);
|
||||
Poco::Logger::root().setLevel("trace");
|
||||
|
||||
LOG_INFO(&Poco::Logger::get(""), "Starting");
|
||||
|
||||
try
|
||||
{
|
||||
auto t = foo("foo");
|
||||
std::cout << ".. started" << std::endl;
|
||||
while (t.resume())
|
||||
std::cout << ".. yielded" << std::endl;
|
||||
std::cout << ".. done: " << t.res() << std::endl;
|
||||
}
|
||||
catch (DB::Exception & e)
|
||||
{
|
||||
std::cout << "Got exception " << e.what() << std::endl;
|
||||
std::cout << e.getStackTraceString() << std::endl;
|
||||
}
|
||||
}
|
@ -129,7 +129,7 @@ namespace
|
||||
for (size_t i = offset; i < end; ++i)
|
||||
{
|
||||
ColumnArray::Offset current_offset = offset_values[i];
|
||||
writeIntBinary(current_offset - prev_offset, ostr);
|
||||
writeBinaryLittleEndian(current_offset - prev_offset, ostr);
|
||||
prev_offset = current_offset;
|
||||
}
|
||||
}
|
||||
@ -145,7 +145,7 @@ namespace
|
||||
while (i < initial_size + limit && !istr.eof())
|
||||
{
|
||||
ColumnArray::Offset current_size = 0;
|
||||
readIntBinary(current_size, istr);
|
||||
readBinaryLittleEndian(current_size, istr);
|
||||
|
||||
if (unlikely(current_size > MAX_ARRAY_SIZE))
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Array size is too large: {}", current_size);
|
||||
|
@ -106,28 +106,28 @@ void SerializationNumber<T>::serializeBinary(const Field & field, WriteBuffer &
{
    /// ColumnVector<T>::ValueType is a narrower type. For example, UInt8, when the Field type is UInt64
    typename ColumnVector<T>::ValueType x = static_cast<typename ColumnVector<T>::ValueType>(field.get<FieldType>());
    writeBinary(x, ostr);
    writeBinaryLittleEndian(x, ostr);
}

template <typename T>
void SerializationNumber<T>::deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings &) const
{
    typename ColumnVector<T>::ValueType x;
    readBinary(x, istr);
    readBinaryLittleEndian(x, istr);
    field = NearestFieldType<FieldType>(x);
}

template <typename T>
void SerializationNumber<T>::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const
{
    writeBinary(assert_cast<const ColumnVector<T> &>(column).getData()[row_num], ostr);
    writeBinaryLittleEndian(assert_cast<const ColumnVector<T> &>(column).getData()[row_num], ostr);
}

template <typename T>
void SerializationNumber<T>::deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings &) const
{
    typename ColumnVector<T>::ValueType x;
    readBinary(x, istr);
    readBinaryLittleEndian(x, istr);
    assert_cast<ColumnVector<T> &>(column).getData().push_back(x);
}
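The serialization hunks above replace plain readBinary/writeBinary with their explicitly little-endian counterparts, so the binary dump format stays byte-identical on big-endian machines. A standalone sketch of what such a write boils down to; this is not the ClickHouse writeBinaryLittleEndian implementation, and it assumes C++23 for std::byteswap:

#include <bit>
#include <cstdint>
#include <cstring>
#include <vector>

/// Append the bytes of an integer in little-endian order regardless of the host architecture.
template <typename T>
void appendLittleEndian(std::vector<char> & out, T value)
{
    if constexpr (std::endian::native == std::endian::big)
        value = std::byteswap(value);          /// normalize on big-endian hosts
    char bytes[sizeof(T)];
    std::memcpy(bytes, &value, sizeof(T));
    out.insert(out.end(), bytes, bytes + sizeof(T));
}

int main()
{
    std::vector<char> buf;
    appendLittleEndian<std::uint32_t>(buf, 0x01020304);   /// bytes 04 03 02 01 on any host
    return buf.size() == 4 ? 0 : 1;
}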
@ -471,8 +471,16 @@ void DatabaseAtomic::tryCreateSymlink(const String & table_name, const String &
{
    String link = path_to_table_symlinks + escapeForFileName(table_name);
    fs::path data = fs::canonical(getContext()->getPath()) / actual_data_path;
    if (!if_data_path_exist || fs::exists(data))
        fs::create_directory_symlink(data, link);

    /// If it already points where needed.
    std::error_code ec;
    if (fs::equivalent(data, link, ec))
        return;

    if (if_data_path_exist && !fs::exists(data))
        return;

    fs::create_directory_symlink(data, link);
}
catch (...)
{
@ -755,6 +755,9 @@ class FunctionBinaryArithmetic : public IFunction
|
||||
static constexpr bool is_multiply = IsOperation<Op>::multiply;
|
||||
static constexpr bool is_division = IsOperation<Op>::division;
|
||||
static constexpr bool is_bit_hamming_distance = IsOperation<Op>::bit_hamming_distance;
|
||||
static constexpr bool is_modulo = IsOperation<Op>::modulo;
|
||||
static constexpr bool is_div_int = IsOperation<Op>::div_int;
|
||||
static constexpr bool is_div_int_or_zero = IsOperation<Op>::div_int_or_zero;
|
||||
|
||||
ContextPtr context;
|
||||
bool check_decimal_overflow = true;
|
||||
@ -964,13 +967,28 @@ class FunctionBinaryArithmetic : public IFunction
|
||||
"argument of numeric type cannot be first", name);
|
||||
|
||||
std::string function_name;
|
||||
if (is_multiply)
|
||||
if constexpr (is_multiply)
|
||||
{
|
||||
function_name = "tupleMultiplyByNumber";
|
||||
}
|
||||
else
|
||||
else // is_division
|
||||
{
|
||||
function_name = "tupleDivideByNumber";
|
||||
if constexpr (is_modulo)
|
||||
{
|
||||
function_name = "tupleModuloByNumber";
|
||||
}
|
||||
else if constexpr (is_div_int)
|
||||
{
|
||||
function_name = "tupleIntDivByNumber";
|
||||
}
|
||||
else if constexpr (is_div_int_or_zero)
|
||||
{
|
||||
function_name = "tupleIntDivOrZeroByNumber";
|
||||
}
|
||||
else
|
||||
{
|
||||
function_name = "tupleDivideByNumber";
|
||||
}
|
||||
}
|
||||
|
||||
return FunctionFactory::instance().get(function_name, context);
|
||||
|
@ -62,14 +62,13 @@ namespace ErrorCodes
|
||||
*/
|
||||
|
||||
|
||||
class FunctionDictHelper : WithContext
|
||||
class FunctionDictHelper
|
||||
{
|
||||
public:
|
||||
explicit FunctionDictHelper(ContextPtr context_) : WithContext(context_) {}
|
||||
explicit FunctionDictHelper(ContextPtr context_) : current_context(context_) {}
|
||||
|
||||
std::shared_ptr<const IDictionary> getDictionary(const String & dictionary_name)
|
||||
{
|
||||
auto current_context = getContext();
|
||||
auto dict = current_context->getExternalDictionariesLoader().getDictionary(dictionary_name, current_context);
|
||||
|
||||
if (!access_checked)
|
||||
@ -132,10 +131,12 @@ public:
|
||||
|
||||
DictionaryStructure getDictionaryStructure(const String & dictionary_name) const
|
||||
{
|
||||
return getContext()->getExternalDictionariesLoader().getDictionaryStructure(dictionary_name, getContext());
|
||||
return current_context->getExternalDictionariesLoader().getDictionaryStructure(dictionary_name, current_context);
|
||||
}
|
||||
|
||||
private:
|
||||
ContextPtr current_context;
|
||||
|
||||
/// Access cannot be not granted, since in this case checkAccess() will throw and access_checked will not be updated.
|
||||
std::atomic<bool> access_checked = false;
|
||||
|
||||
|
@ -13,6 +13,7 @@ namespace ErrorCodes
|
||||
extern const int BAD_ARGUMENTS;
|
||||
extern const int ILLEGAL_COLUMN;
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
/** Token search the string, means that needle must be surrounded by some separator chars, like whitespace or puctuation.
|
||||
@ -39,9 +40,6 @@ struct HasTokenImpl
|
||||
if (start_pos != nullptr)
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Function '{}' does not support start_pos argument", name);
|
||||
|
||||
if (pattern.empty())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Needle cannot be empty, because empty string isn't a token");
|
||||
|
||||
if (haystack_offsets.empty())
|
||||
return;
|
||||
|
||||
@ -49,7 +47,7 @@ struct HasTokenImpl
|
||||
const UInt8 * const end = haystack_data.data() + haystack_data.size();
|
||||
const UInt8 * pos = begin;
|
||||
|
||||
if (!std::none_of(pattern.begin(), pattern.end(), isTokenSeparator))
|
||||
if (const auto has_separator = std::any_of(pattern.cbegin(), pattern.cend(), isTokenSeparator); has_separator || pattern.empty())
|
||||
{
|
||||
if (res_null)
|
||||
{
|
||||
@ -57,8 +55,12 @@ struct HasTokenImpl
|
||||
std::ranges::fill(res_null->getData(), true);
|
||||
return;
|
||||
}
|
||||
else
|
||||
else if (has_separator)
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Needle must not contain whitespace or separator characters");
|
||||
else if (pattern.empty())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Needle cannot be empty, because empty string isn't a token");
|
||||
else
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected internal state");
|
||||
}
|
||||
|
||||
size_t pattern_size = pattern.size();
|
||||
|
@ -60,7 +60,7 @@ struct IsOperation
|
||||
|
||||
static constexpr bool bit_hamming_distance = IsSameOperation<Op, BitHammingDistanceImpl>::value;
|
||||
|
||||
static constexpr bool division = div_floating || div_int || div_int_or_zero;
|
||||
static constexpr bool division = div_floating || div_int || div_int_or_zero || modulo;
|
||||
|
||||
static constexpr bool allow_decimal = plus || minus || multiply || division || least || greatest;
|
||||
};
|
||||
|
@ -208,6 +208,10 @@ public:
|
||||
{
|
||||
return FunctionFactory::instance().getImpl("mapConcat", context)->build(arguments);
|
||||
}
|
||||
else if (isTuple(arguments.at(0).type))
|
||||
{
|
||||
return FunctionFactory::instance().getImpl("tupleConcat", context)->build(arguments);
|
||||
}
|
||||
else
|
||||
return std::make_unique<FunctionToFunctionBaseAdaptor>(
|
||||
FunctionConcat::create(context), collections::map<DataTypes>(arguments, [](const auto & elem) { return elem.type; }), return_type);
|
||||
|
@ -764,9 +764,8 @@ namespace
|
||||
}
|
||||
|
||||
/// Note: Doesn't check the duplicates in the `from` array.
|
||||
|
||||
WhichDataType which(from_type);
|
||||
if (isNativeNumber(which) || which.isDecimal32() || which.isDecimal64())
|
||||
/// Field may be of Float type, but for the purpose of bitwise equality we can treat them as UInt64
|
||||
if (WhichDataType which(from_type); isNativeNumber(which) || which.isDecimal32() || which.isDecimal64())
|
||||
{
|
||||
cache.table_num_to_idx = std::make_unique<Cache::NumToIdx>();
|
||||
auto & table = *cache.table_num_to_idx;
|
||||
@ -774,10 +773,13 @@ namespace
|
||||
{
|
||||
if (applyVisitor(FieldVisitorAccurateEquals(), (*cache.from_column)[i], (*from_column_uncasted)[i]))
|
||||
{
|
||||
/// Field may be of Float type, but for the purpose of bitwise equality we can treat them as UInt64
|
||||
StringRef ref = cache.from_column->getDataAt(i);
|
||||
UInt64 key = 0;
|
||||
memcpy(&key, ref.data, ref.size);
|
||||
auto * dst = reinterpret_cast<char *>(&key);
|
||||
const auto ref = cache.from_column->getDataAt(i);
|
||||
if constexpr (std::endian::native == std::endian::big)
|
||||
dst += sizeof(key) - ref.size;
|
||||
|
||||
memcpy(dst, ref.data, ref.size);
|
||||
table[key] = i;
|
||||
}
|
||||
}
|
||||
|
102
src/Functions/tupleConcat.cpp
Normal file
@ -0,0 +1,102 @@
|
||||
#include <Columns/ColumnTuple.h>
|
||||
#include <DataTypes/DataTypeTuple.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Functions/FunctionHelpers.h>
|
||||
#include <Functions/IFunction.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
extern const int ILLEGAL_COLUMN;
|
||||
}
|
||||
|
||||
/// tupleConcat(tup1, ...) - concatenate tuples.
|
||||
class FunctionTupleConcat : public IFunction
|
||||
{
|
||||
public:
|
||||
static constexpr auto name = "tupleConcat";
|
||||
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionTupleConcat>(); }
|
||||
|
||||
String getName() const override { return name; }
|
||||
|
||||
bool isVariadic() const override { return true; }
|
||||
|
||||
size_t getNumberOfArguments() const override { return 0; }
|
||||
|
||||
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
|
||||
|
||||
bool useDefaultImplementationForConstants() const override { return true; }
|
||||
|
||||
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
|
||||
{
|
||||
if (arguments.empty())
|
||||
throw Exception(
|
||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
|
||||
"Function {} requires at least one argument.",
|
||||
getName());
|
||||
|
||||
DataTypes tuple_arg_types;
|
||||
|
||||
for (const auto arg_idx : collections::range(0, arguments.size()))
|
||||
{
|
||||
const auto * arg = arguments[arg_idx].get();
|
||||
if (!isTuple(arg))
|
||||
throw Exception(
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Illegal type {} of argument {} of function {}",
|
||||
arg->getName(),
|
||||
arg_idx + 1,
|
||||
getName());
|
||||
|
||||
const auto * type = checkAndGetDataType<DataTypeTuple>(arg);
|
||||
for (const auto & elem : type->getElements())
|
||||
tuple_arg_types.push_back(elem);
|
||||
}
|
||||
|
||||
return std::make_shared<DataTypeTuple>(tuple_arg_types);
|
||||
}
|
||||
|
||||
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override
|
||||
{
|
||||
const size_t num_arguments = arguments.size();
|
||||
Columns columns;
|
||||
|
||||
for (size_t i = 0; i < num_arguments; i++)
|
||||
{
|
||||
const DataTypeTuple * arg_type = checkAndGetDataType<DataTypeTuple>(arguments[i].type.get());
|
||||
|
||||
if (!arg_type)
|
||||
throw Exception(
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Illegal type {} of argument {} of function {}",
|
||||
arguments[i].type->getName(),
|
||||
i + 1,
|
||||
getName());
|
||||
|
||||
ColumnPtr arg_col = arguments[i].column->convertToFullColumnIfConst();
|
||||
const ColumnTuple * tuple_col = checkAndGetColumn<ColumnTuple>(arg_col.get());
|
||||
|
||||
if (!tuple_col)
|
||||
throw Exception(
|
||||
ErrorCodes::ILLEGAL_COLUMN,
|
||||
"Illegal column {} of argument of function {}",
|
||||
arguments[i].column->getName(),
|
||||
getName());
|
||||
|
||||
for (const auto & inner_col : tuple_col->getColumns())
|
||||
columns.push_back(inner_col);
|
||||
}
|
||||
|
||||
return ColumnTuple::create(columns);
|
||||
}
|
||||
};
|
||||
|
||||
REGISTER_FUNCTION(TupleConcat)
|
||||
{
|
||||
factory.registerFunction<FunctionTupleConcat>();
|
||||
}
|
||||
|
||||
}
|
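The new tupleConcat glues the flattened element columns of all tuple arguments into one wider tuple, analogous to SELECT tupleConcat((1, 'a'), tuple(2.5)). The semantics mirror std::tuple_cat; a tiny standalone illustration, not ClickHouse code:

#include <iostream>
#include <string>
#include <tuple>

int main()
{
    auto t1 = std::make_tuple(1, std::string("a"));
    auto t2 = std::make_tuple(2.5);
    auto both = std::tuple_cat(t1, t2);   /// (1, "a", 2.5) - the same shape tupleConcat would produce
    std::cout << std::get<0>(both) << ' ' << std::get<1>(both) << ' ' << std::get<2>(both) << '\n';
}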
@ -23,6 +23,9 @@ struct PlusName { static constexpr auto name = "plus"; };
|
||||
struct MinusName { static constexpr auto name = "minus"; };
|
||||
struct MultiplyName { static constexpr auto name = "multiply"; };
|
||||
struct DivideName { static constexpr auto name = "divide"; };
|
||||
struct ModuloName { static constexpr auto name = "modulo"; };
|
||||
struct IntDivName { static constexpr auto name = "intDiv"; };
|
||||
struct IntDivOrZeroName { static constexpr auto name = "intDivOrZero"; };
|
||||
|
||||
struct L1Label { static constexpr auto name = "1"; };
|
||||
struct L2Label { static constexpr auto name = "2"; };
|
||||
@ -141,6 +144,12 @@ using FunctionTupleMultiply = FunctionTupleOperator<MultiplyName>;
|
||||
|
||||
using FunctionTupleDivide = FunctionTupleOperator<DivideName>;
|
||||
|
||||
using FunctionTupleModulo = FunctionTupleOperator<ModuloName>;
|
||||
|
||||
using FunctionTupleIntDiv = FunctionTupleOperator<IntDivName>;
|
||||
|
||||
using FunctionTupleIntDivOrZero = FunctionTupleOperator<IntDivOrZeroName>;
|
||||
|
||||
class FunctionTupleNegate : public ITupleFunction
|
||||
{
|
||||
public:
|
||||
@ -297,6 +306,12 @@ using FunctionTupleMultiplyByNumber = FunctionTupleOperatorByNumber<MultiplyName
|
||||
|
||||
using FunctionTupleDivideByNumber = FunctionTupleOperatorByNumber<DivideName>;
|
||||
|
||||
using FunctionTupleModuloByNumber = FunctionTupleOperatorByNumber<ModuloName>;
|
||||
|
||||
using FunctionTupleIntDivByNumber = FunctionTupleOperatorByNumber<IntDivName>;
|
||||
|
||||
using FunctionTupleIntDivOrZeroByNumber = FunctionTupleOperatorByNumber<IntDivOrZeroName>;
|
||||
|
||||
class FunctionDotProduct : public ITupleFunction
|
||||
{
|
||||
public:
|
||||
@ -1563,6 +1578,9 @@ REGISTER_FUNCTION(VectorFunctions)
|
||||
factory.registerAlias("vectorDifference", FunctionTupleMinus::name, FunctionFactory::CaseInsensitive);
|
||||
factory.registerFunction<FunctionTupleMultiply>();
|
||||
factory.registerFunction<FunctionTupleDivide>();
|
||||
factory.registerFunction<FunctionTupleModulo>();
|
||||
factory.registerFunction<FunctionTupleIntDiv>();
|
||||
factory.registerFunction<FunctionTupleIntDivOrZero>();
|
||||
factory.registerFunction<FunctionTupleNegate>();
|
||||
|
||||
factory.registerFunction<FunctionAddTupleOfIntervals>(FunctionDocumentation
|
||||
@ -1626,6 +1644,9 @@ If the types of the first interval (or the interval in the tuple) and the second
|
||||
|
||||
factory.registerFunction<FunctionTupleMultiplyByNumber>();
|
||||
factory.registerFunction<FunctionTupleDivideByNumber>();
|
||||
factory.registerFunction<FunctionTupleModuloByNumber>();
|
||||
factory.registerFunction<FunctionTupleIntDivByNumber>();
|
||||
factory.registerFunction<FunctionTupleIntDivOrZeroByNumber>();
|
||||
|
||||
factory.registerFunction<TupleOrArrayFunctionDotProduct>();
|
||||
factory.registerAlias("scalarProduct", TupleOrArrayFunctionDotProduct::name, FunctionFactory::CaseInsensitive);
|
||||
|
@ -14,26 +14,26 @@
|
||||
# include <Common/logger_useful.h>
|
||||
|
||||
# include <IO/S3/PocoHTTPClient.h>
|
||||
# include <IO/S3/PocoHTTPClientFactory.h>
|
||||
# include <IO/S3/Client.h>
|
||||
# include <IO/S3Common.h>
|
||||
|
||||
# include <fstream>
|
||||
# include <base/EnumReflection.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int AWS_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
namespace DB::S3
|
||||
namespace S3
|
||||
{
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
bool areCredentialsEmptyOrExpired(const Aws::Auth::AWSCredentials & credentials, uint64_t expiration_window_seconds)
|
||||
{
|
||||
if (credentials.IsEmpty())
|
||||
@ -569,4 +569,6 @@ S3CredentialsProviderChain::S3CredentialsProviderChain(
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -304,9 +304,10 @@ inline void writeJSONString(const char * begin, const char * end, WriteBuffer &
/** Will escape quote_character and a list of special characters('\b', '\f', '\n', '\r', '\t', '\0', '\\').
 * - when escape_quote_with_quote is true, use backslash to escape list of special characters,
 * and use quote_character to escape quote_character. such as: 'hello''world'
 * - otherwise use backslash to escape list of special characters and quote_character
 * otherwise use backslash to escape list of special characters and quote_character
 * - when escape_backslash_with_backslash is true, backslash is escaped with another backslash
 */
template <char quote_character, bool escape_quote_with_quote = false>
template <char quote_character, bool escape_quote_with_quote = false, bool escape_backslash_with_backslash = true>
void writeAnyEscapedString(const char * begin, const char * end, WriteBuffer & buf)
{
    const char * pos = begin;
@ -360,7 +361,8 @@ void writeAnyEscapedString(const char * begin, const char * end, WriteBuffer & b
                writeChar('0', buf);
                break;
            case '\\':
                writeChar('\\', buf);
                if constexpr (escape_backslash_with_backslash)
                    writeChar('\\', buf);
                writeChar('\\', buf);
                break;
            default:
@ -466,6 +468,13 @@ inline void writeQuotedString(std::string_view ref, WriteBuffer & buf)
    writeAnyQuotedString<'\''>(ref.data(), ref.data() + ref.size(), buf);
}

inline void writeQuotedStringPostgreSQL(std::string_view ref, WriteBuffer & buf)
{
    writeChar('\'', buf);
    writeAnyEscapedString<'\'', true, false>(ref.data(), ref.data() + ref.size(), buf);
    writeChar('\'', buf);
}

inline void writeDoubleQuotedString(const String & s, WriteBuffer & buf)
{
    writeAnyQuotedString<'"'>(s, buf);
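writeQuotedStringPostgreSQL instantiates writeAnyEscapedString with escape_quote_with_quote = true and escape_backslash_with_backslash = false, i.e. an embedded single quote is doubled and a backslash is written through unchanged. A simplified standalone sketch of that escaping style (control characters ignored; not the ClickHouse API):

#include <iostream>
#include <string>

/// Quote a string the PostgreSQL way: double embedded single quotes, leave backslashes as-is.
std::string quotePostgreSQL(const std::string & s)
{
    std::string out = "'";
    for (char c : s)
    {
        if (c == '\'')
            out += "''";   /// escape the quote with another quote
        else
            out += c;      /// backslash is NOT escaped, unlike the regular escaping style
    }
    out += '\'';
    return out;
}

int main()
{
    std::cout << quotePostgreSQL("it's a C:\\path") << '\n';   /// prints: 'it''s a C:\path'
}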
@ -139,7 +139,7 @@ namespace
|
||||
/// For example, to execute
|
||||
/// GRANT ALL ON mydb.* TO role1
|
||||
/// REVOKE ALL ON *.* FROM role1
|
||||
/// the current user needs to have grants only on the 'mydb' database.
|
||||
/// the current user needs to have the grants only on the 'mydb' database.
|
||||
AccessRights all_granted_access;
|
||||
for (const auto & id : grantees_from_query)
|
||||
{
|
||||
|
@ -36,6 +36,7 @@ public:
|
||||
explicit ConcurrentHashJoin(ContextPtr context_, std::shared_ptr<TableJoin> table_join_, size_t slots_, const Block & right_sample_block, bool any_take_last_row_ = false);
|
||||
~ConcurrentHashJoin() override = default;
|
||||
|
||||
std::string getName() const override { return "ConcurrentHashJoin"; }
|
||||
const TableJoin & getTableJoin() const override { return *table_join; }
|
||||
bool addBlockToJoin(const Block & block, bool check_limits) override;
|
||||
void checkTypesOfKeys(const Block & block) const override;
|
||||
|
@ -3180,7 +3180,12 @@ void Context::setCluster(const String & cluster_name, const std::shared_ptr<Clus
|
||||
|
||||
void Context::initializeSystemLogs()
|
||||
{
|
||||
/// It is required, because the initialization of system logs can be also
|
||||
/// triggered from another thread, that is launched while initializing the system logs,
|
||||
/// for example, system.filesystem_cache_log will be triggered by parts loading
|
||||
/// of any other table if it is stored on a disk with cache.
|
||||
auto lock = getLock();
|
||||
|
||||
shared->system_logs = std::make_unique<SystemLogs>(getGlobalContext(), getConfigRef());
|
||||
}
|
||||
|
||||
@ -3381,6 +3386,16 @@ std::shared_ptr<AsynchronousInsertLog> Context::getAsynchronousInsertLog() const
|
||||
return shared->system_logs->asynchronous_insert_log;
|
||||
}
|
||||
|
||||
std::vector<ISystemLog *> Context::getSystemLogs() const
|
||||
{
|
||||
auto lock = getLock();
|
||||
|
||||
if (!shared->system_logs)
|
||||
return {};
|
||||
|
||||
return shared->system_logs->logs;
|
||||
}
|
||||
|
||||
CompressionCodecPtr Context::chooseCompressionCodec(size_t part_size, double part_size_ratio) const
|
||||
{
|
||||
auto lock = getLock();
|
||||
|
@ -86,6 +86,7 @@ struct Progress;
|
||||
struct FileProgress;
|
||||
class Clusters;
|
||||
class QueryCache;
|
||||
class ISystemLog;
|
||||
class QueryLog;
|
||||
class QueryThreadLog;
|
||||
class QueryViewsLog;
|
||||
@ -1023,6 +1024,8 @@ public:
|
||||
std::shared_ptr<FilesystemReadPrefetchesLog> getFilesystemReadPrefetchesLog() const;
|
||||
std::shared_ptr<AsynchronousInsertLog> getAsynchronousInsertLog() const;
|
||||
|
||||
std::vector<ISystemLog *> getSystemLogs() const;
|
||||
|
||||
/// Returns an object used to log operations with parts if it possible.
|
||||
/// Provide table name to make required checks.
|
||||
std::shared_ptr<PartLog> getPartLog(const String & part_database) const;
|
||||
|
@ -30,6 +30,7 @@ public:
|
||||
std::shared_ptr<const IKeyValueEntity> storage_,
|
||||
const Block & right_sample_block_with_storage_column_names_);
|
||||
|
||||
std::string getName() const override { return "DirectKeyValueJoin"; }
|
||||
virtual const TableJoin & getTableJoin() const override { return *table_join; }
|
||||
|
||||
virtual bool addBlockToJoin(const Block &, bool) override;
|
||||
|
@ -28,6 +28,7 @@ public:
|
||||
LOG_TRACE(&Poco::Logger::get("FullSortingMergeJoin"), "Will use full sorting merge join");
|
||||
}
|
||||
|
||||
std::string getName() const override { return "FullSortingMergeJoin"; }
|
||||
const TableJoin & getTableJoin() const override { return *table_join; }
|
||||
|
||||
bool addBlockToJoin(const Block & /* block */, bool /* check_limits */) override
|
||||
|
@ -60,6 +60,7 @@ public:
|
||||
|
||||
~GraceHashJoin() override;
|
||||
|
||||
std::string getName() const override { return "GraceHashJoin"; }
|
||||
const TableJoin & getTableJoin() const override { return *table_join; }
|
||||
|
||||
void initialize(const Block & sample_block) override;
|
||||
|
@ -151,6 +151,7 @@ public:
|
||||
|
||||
~HashJoin() override;
|
||||
|
||||
std::string getName() const override { return "HashJoin"; }
|
||||
const TableJoin & getTableJoin() const override { return *table_join; }
|
||||
|
||||
/** Add block of data from right hand of JOIN to the map.
|
||||
|
@ -48,6 +48,8 @@ class IJoin
|
||||
public:
|
||||
virtual ~IJoin() = default;
|
||||
|
||||
virtual std::string getName() const = 0;
|
||||
|
||||
virtual const TableJoin & getTableJoin() const = 0;
|
||||
|
||||
/// Add block of data from right hand of JOIN.
|
||||
|
@ -295,7 +295,7 @@ BlockIO InterpreterKillQueryQuery::execute()
|
||||
|
||||
if (res_columns[0]->empty() && access_denied)
|
||||
throw Exception(ErrorCodes::ACCESS_DENIED, "Not allowed to kill mutation. "
|
||||
"To execute this query it's necessary to have the grant {}", required_access_rights.toString());
|
||||
"To execute this query, it's necessary to have the grant {}", required_access_rights.toString());
|
||||
|
||||
res_io.pipeline = QueryPipeline(Pipe(std::make_shared<SourceFromSingleChunk>(header.cloneWithColumns(std::move(res_columns)))));
|
||||
|
||||
@ -359,7 +359,7 @@ BlockIO InterpreterKillQueryQuery::execute()
|
||||
|
||||
if (res_columns[0]->empty() && access_denied)
|
||||
throw Exception(ErrorCodes::ACCESS_DENIED, "Not allowed to kill move partition. "
|
||||
"To execute this query it's necessary to have the grant {}", required_access_rights.toString());
|
||||
"To execute this query, it's necessary to have the grant {}", required_access_rights.toString());
|
||||
|
||||
res_io.pipeline = QueryPipeline(Pipe(std::make_shared<SourceFromSingleChunk>(header.cloneWithColumns(std::move(res_columns)))));
|
||||
|
||||
|
@ -10,7 +10,7 @@ namespace DB
|
||||
class AccessRightsElements;
|
||||
class DDLGuard;
|
||||
|
||||
/// To avoid deadlocks, we must acquire locks for tables in same order in any different RENAMES.
|
||||
/// To avoid deadlocks, we must acquire locks for tables in same order in any different RENAMEs.
|
||||
struct UniqueTableName
|
||||
{
|
||||
String database_name;
|
||||
|
@ -299,7 +299,7 @@ void checkAccessRightsForSelect(
|
||||
}
|
||||
throw Exception(
|
||||
ErrorCodes::ACCESS_DENIED,
|
||||
"{}: Not enough privileges. To execute this query it's necessary to have grant SELECT for at least one column on {}",
|
||||
"{}: Not enough privileges. To execute this query, it's necessary to have the grant SELECT for at least one column on {}",
|
||||
context->getUserName(),
|
||||
table_id.getFullTableName());
|
||||
}
|
||||
|
@ -103,40 +103,34 @@ namespace ActionLocks
|
||||
namespace
|
||||
{
|
||||
|
||||
ExecutionStatus getOverallExecutionStatusOfCommands()
|
||||
/// Sequentially tries to execute all commands and throws exception with info about failed commands
|
||||
void executeCommandsAndThrowIfError(std::vector<std::function<void()>> commands)
|
||||
{
|
||||
return ExecutionStatus(0);
|
||||
}
|
||||
ExecutionStatus result(0);
|
||||
for (auto & command : commands)
|
||||
{
|
||||
try
|
||||
{
|
||||
command();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
ExecutionStatus current_result = ExecutionStatus::fromCurrentException();
|
||||
|
||||
/// Consequently tries to execute all commands and generates final exception message for failed commands
|
||||
template <typename Callable, typename ... Callables>
|
||||
ExecutionStatus getOverallExecutionStatusOfCommands(Callable && command, Callables && ... commands)
|
||||
{
|
||||
ExecutionStatus status_head(0);
|
||||
try
|
||||
{
|
||||
command();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
status_head = ExecutionStatus::fromCurrentException();
|
||||
if (result.code == 0)
|
||||
result.code = current_result.code;
|
||||
|
||||
if (!current_result.message.empty())
|
||||
{
|
||||
if (!result.message.empty())
|
||||
result.message += '\n';
|
||||
result.message += current_result.message;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ExecutionStatus status_tail = getOverallExecutionStatusOfCommands(std::forward<Callables>(commands)...);
|
||||
|
||||
auto res_status = status_head.code != 0 ? status_head.code : status_tail.code;
|
||||
auto res_message = status_head.message + (status_tail.message.empty() ? "" : ("\n" + status_tail.message));
|
||||
|
||||
return ExecutionStatus(res_status, res_message);
|
||||
}
|
||||
|
||||
/// Consequently tries to execute all commands and throws exception with info about failed commands
|
||||
template <typename ... Callables>
|
||||
void executeCommandsAndThrowIfError(Callables && ... commands)
|
||||
{
|
||||
auto status = getOverallExecutionStatusOfCommands(std::forward<Callables>(commands)...);
|
||||
if (status.code != 0)
|
||||
throw Exception::createDeprecated(status.message, status.code);
|
||||
if (result.code != 0)
|
||||
throw Exception::createDeprecated(result.message, result.code);
|
||||
}
|
||||
|
||||
|
||||
@ -425,10 +419,10 @@ BlockIO InterpreterSystemQuery::execute()
|
||||
case Type::RELOAD_DICTIONARIES:
|
||||
{
|
||||
getContext()->checkAccess(AccessType::SYSTEM_RELOAD_DICTIONARY);
|
||||
executeCommandsAndThrowIfError(
|
||||
executeCommandsAndThrowIfError({
|
||||
[&] { system_context->getExternalDictionariesLoader().reloadAllTriedToLoad(); },
|
||||
[&] { system_context->getEmbeddedDictionaries().reload(); }
|
||||
);
|
||||
});
|
||||
ExternalDictionariesLoader::resetAll();
|
||||
break;
|
||||
}
|
||||
@ -557,23 +551,14 @@ BlockIO InterpreterSystemQuery::execute()
|
||||
case Type::FLUSH_LOGS:
|
||||
{
|
||||
getContext()->checkAccess(AccessType::SYSTEM_FLUSH_LOGS);
|
||||
executeCommandsAndThrowIfError(
|
||||
[&] { if (auto query_log = getContext()->getQueryLog()) query_log->flush(true); },
|
||||
[&] { if (auto part_log = getContext()->getPartLog("")) part_log->flush(true); },
|
||||
[&] { if (auto query_thread_log = getContext()->getQueryThreadLog()) query_thread_log->flush(true); },
|
||||
[&] { if (auto trace_log = getContext()->getTraceLog()) trace_log->flush(true); },
|
||||
[&] { if (auto text_log = getContext()->getTextLog()) text_log->flush(true); },
|
||||
[&] { if (auto metric_log = getContext()->getMetricLog()) metric_log->flush(true); },
|
||||
[&] { if (auto asynchronous_metric_log = getContext()->getAsynchronousMetricLog()) asynchronous_metric_log->flush(true); },
|
||||
[&] { if (auto opentelemetry_span_log = getContext()->getOpenTelemetrySpanLog()) opentelemetry_span_log->flush(true); },
|
||||
[&] { if (auto query_views_log = getContext()->getQueryViewsLog()) query_views_log->flush(true); },
|
||||
[&] { if (auto zookeeper_log = getContext()->getZooKeeperLog()) zookeeper_log->flush(true); },
|
||||
[&] { if (auto session_log = getContext()->getSessionLog()) session_log->flush(true); },
|
||||
[&] { if (auto transactions_info_log = getContext()->getTransactionsInfoLog()) transactions_info_log->flush(true); },
|
||||
[&] { if (auto processors_profile_log = getContext()->getProcessorsProfileLog()) processors_profile_log->flush(true); },
|
||||
[&] { if (auto cache_log = getContext()->getFilesystemCacheLog()) cache_log->flush(true); },
|
||||
[&] { if (auto asynchronous_insert_log = getContext()->getAsynchronousInsertLog()) asynchronous_insert_log->flush(true); }
|
||||
);
|
||||
|
||||
auto logs = getContext()->getSystemLogs();
|
||||
std::vector<std::function<void()>> commands;
|
||||
commands.reserve(logs.size());
|
||||
for (auto * system_log : logs)
|
||||
commands.emplace_back([system_log] { system_log->flush(true); });
|
||||
|
||||
executeCommandsAndThrowIfError(commands);
|
||||
break;
|
||||
}
|
||||
case Type::STOP_LISTEN:
|
||||
|
@ -18,6 +18,7 @@ class JoinSwitcher : public IJoin
|
||||
public:
|
||||
JoinSwitcher(std::shared_ptr<TableJoin> table_join_, const Block & right_sample_block_);
|
||||
|
||||
std::string getName() const override { return "JoinSwitcher"; }
|
||||
const TableJoin & getTableJoin() const override { return *table_join; }
|
||||
|
||||
/// Add block of data from right hand of JOIN into current join object.
|
||||
|
@ -22,6 +22,7 @@ class MergeJoin : public IJoin
|
||||
public:
|
||||
MergeJoin(std::shared_ptr<TableJoin> table_join_, const Block & right_sample_block);
|
||||
|
||||
std::string getName() const override { return "PartialMergeJoin"; }
|
||||
const TableJoin & getTableJoin() const override { return *table_join; }
|
||||
bool addBlockToJoin(const Block & block, bool check_limits) override;
|
||||
void checkTypesOfKeys(const Block & block) const override;
|
||||
|
@ -520,6 +520,8 @@ ContextMutablePtr Session::makeSessionContext()
|
||||
{},
|
||||
session_context->getSettingsRef().max_sessions_for_user);
|
||||
|
||||
recordLoginSucess(session_context);
|
||||
|
||||
return session_context;
|
||||
}
|
||||
|
||||
@ -582,6 +584,8 @@ ContextMutablePtr Session::makeSessionContext(const String & session_name_, std:
|
||||
{ session_name_ },
|
||||
max_sessions_for_user);
|
||||
|
||||
recordLoginSucess(session_context);
|
||||
|
||||
return session_context;
|
||||
}
|
||||
|
||||
@ -655,24 +659,38 @@ ContextMutablePtr Session::makeQueryContextImpl(const ClientInfo * client_info_t
|
||||
if (user_id)
|
||||
user = query_context->getUser();
|
||||
|
||||
if (!notified_session_log_about_login)
|
||||
{
|
||||
if (auto session_log = getSessionLog())
|
||||
{
|
||||
session_log->addLoginSuccess(
|
||||
auth_id,
|
||||
named_session ? std::optional<std::string>(named_session->key.second) : std::nullopt,
|
||||
*query_context,
|
||||
user);
|
||||
|
||||
notified_session_log_about_login = true;
|
||||
}
|
||||
}
|
||||
/// Interserver does not create session context
|
||||
recordLoginSucess(query_context);
|
||||
|
||||
return query_context;
|
||||
}
|
||||
|
||||
|
||||
void Session::recordLoginSucess(ContextPtr login_context) const
|
||||
{
|
||||
if (notified_session_log_about_login)
|
||||
return;
|
||||
|
||||
if (!login_context)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Session or query context must be created");
|
||||
|
||||
if (auto session_log = getSessionLog())
|
||||
{
|
||||
const auto & settings = login_context->getSettingsRef();
|
||||
const auto access = login_context->getAccess();
|
||||
|
||||
session_log->addLoginSuccess(auth_id,
|
||||
named_session ? named_session->key.second : "",
|
||||
settings,
|
||||
access,
|
||||
getClientInfo(),
|
||||
user);
|
||||
}
|
||||
|
||||
notified_session_log_about_login = true;
|
||||
}
|
||||
|
||||
|
||||
void Session::releaseSessionID()
|
||||
{
|
||||
if (!named_session)
|
||||
|
@ -97,6 +97,8 @@ public:
|
||||
private:
|
||||
std::shared_ptr<SessionLog> getSessionLog() const;
|
||||
ContextMutablePtr makeQueryContextImpl(const ClientInfo * client_info_to_copy, ClientInfo * client_info_to_move) const;
|
||||
void recordLoginSucess(ContextPtr login_context) const;
|
||||
|
||||
|
||||
mutable bool notified_session_log_about_login = false;
|
||||
const UUID auth_id;
|
||||
|
@ -199,12 +199,13 @@ void SessionLogElement::appendToBlock(MutableColumns & columns) const
|
||||
columns[i++]->insertData(auth_failure_reason.data(), auth_failure_reason.length());
|
||||
}
|
||||
|
||||
void SessionLog::addLoginSuccess(const UUID & auth_id, std::optional<String> session_id, const Context & login_context, const UserPtr & login_user)
|
||||
void SessionLog::addLoginSuccess(const UUID & auth_id,
|
||||
const String & session_id,
|
||||
const Settings & settings,
|
||||
const ContextAccessPtr & access,
|
||||
const ClientInfo & client_info,
|
||||
const UserPtr & login_user)
|
||||
{
|
||||
const auto access = login_context.getAccess();
|
||||
const auto & settings = login_context.getSettingsRef();
|
||||
const auto & client_info = login_context.getClientInfo();
|
||||
|
||||
DB::SessionLogElement log_entry(auth_id, SESSION_LOGIN_SUCCESS);
|
||||
log_entry.client_info = client_info;
|
||||
|
||||
@ -215,8 +216,7 @@ void SessionLog::addLoginSuccess(const UUID & auth_id, std::optional<String> ses
|
||||
}
|
||||
log_entry.external_auth_server = login_user ? login_user->auth_data.getLDAPServerName() : "";
|
||||
|
||||
if (session_id)
|
||||
log_entry.session_id = *session_id;
|
||||
log_entry.session_id = session_id;
|
||||
|
||||
if (const auto roles_info = access->getRolesInfo())
|
||||
log_entry.roles = roles_info->getCurrentRolesNames();
|
||||
|
@ -20,6 +20,7 @@ enum SessionLogElementType : int8_t
|
||||
class ContextAccess;
|
||||
struct User;
|
||||
using UserPtr = std::shared_ptr<const User>;
|
||||
using ContextAccessPtr = std::shared_ptr<const ContextAccess>;
|
||||
|
||||
/** A struct which will be inserted as row into session_log table.
|
||||
*
|
||||
@ -72,7 +73,13 @@ class SessionLog : public SystemLog<SessionLogElement>
|
||||
using SystemLog<SessionLogElement>::SystemLog;
|
||||
|
||||
public:
|
||||
void addLoginSuccess(const UUID & auth_id, std::optional<String> session_id, const Context & login_context, const UserPtr & login_user);
|
||||
void addLoginSuccess(const UUID & auth_id,
|
||||
const String & session_id,
|
||||
const Settings & settings,
|
||||
const ContextAccessPtr & access,
|
||||
const ClientInfo & client_info,
|
||||
const UserPtr & login_user);
|
||||
|
||||
void addLoginFailure(const UUID & auth_id, const ClientInfo & info, const std::optional<String> & user, const Exception & reason);
|
||||
void addLogOut(const UUID & auth_id, const UserPtr & login_user, const ClientInfo & client_info);
|
||||
};
@ -129,6 +129,7 @@ std::shared_ptr<TSystemLog> createSystemLog(
"Creating {}.{} from {}", default_database_name, default_table_name, config_prefix);

SystemLogSettings log_settings;

log_settings.queue_settings.database = config.getString(config_prefix + ".database", default_database_name);
log_settings.queue_settings.table = config.getString(config_prefix + ".table", default_table_name);
@ -330,7 +330,7 @@ public:
const ColumnsWithTypeAndName & right_sample_columns);

void setAsofInequality(ASOFJoinInequality inequality) { asof_inequality = inequality; }
ASOFJoinInequality getAsofInequality() { return asof_inequality; }
ASOFJoinInequality getAsofInequality() const { return asof_inequality; }

ASTPtr leftKeysList() const;
ASTPtr rightKeysList() const; /// For ON syntax only
@ -93,7 +93,7 @@ void ASTLiteral::appendColumnNameImpl(WriteBuffer & ostr) const

void ASTLiteral::appendColumnNameImplLegacy(WriteBuffer & ostr) const
{
/// 100 - just arbitrary value.
/// 100 - just arbitrary value.
constexpr auto min_elements_for_hashing = 100;

/// Special case for very large arrays. Instead of listing all elements, will use hash of them.
@ -118,9 +118,31 @@ void ASTLiteral::appendColumnNameImplLegacy(WriteBuffer & ostr) const
}
}

/// Use different rules for escaping backslashes and quotes
class FieldVisitorToStringPostgreSQL : public StaticVisitor<String>
{
public:
template<typename T>
String operator() (const T & x) const { return visitor(x); }

private:
FieldVisitorToString visitor;
};

template<>
String FieldVisitorToStringPostgreSQL::operator() (const String & x) const
{
WriteBufferFromOwnString wb;
writeQuotedStringPostgreSQL(x, wb);
return wb.str();
}

void ASTLiteral::formatImplWithoutAlias(const FormatSettings & settings, IAST::FormatState &, IAST::FormatStateStacked) const
{
settings.ostr << applyVisitor(FieldVisitorToString(), value);
if (settings.literal_escaping_style == LiteralEscapingStyle::Regular)
settings.ostr << applyVisitor(FieldVisitorToString(), value);
else
settings.ostr << applyVisitor(FieldVisitorToStringPostgreSQL(), value);
}

}
@ -73,11 +73,11 @@ void ASTProjectionSelectQuery::formatImpl(const FormatSettings & s, FormatState

if (orderBy())
{
/// Let's convert the ASTFunction into ASTExpressionList, which generates consistent format
/// Let's convert tuple ASTFunction into ASTExpressionList, which generates consistent format
/// between GROUP BY and ORDER BY projection definition.
s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << "ORDER BY " << (s.hilite ? hilite_none : "");
ASTPtr order_by;
if (auto * func = orderBy()->as<ASTFunction>())
if (auto * func = orderBy()->as<ASTFunction>(); func && func->name == "tuple")
order_by = func->arguments;
else
{
@ -3,6 +3,7 @@
#include <base/types.h>
#include <Parsers/IAST_fwd.h>
#include <Parsers/IdentifierQuotingStyle.h>
#include <Parsers/LiteralEscapingStyle.h>
#include <Common/Exception.h>
#include <Common/TypePromotion.h>
#include <IO/WriteBufferFromString.h>
@ -197,6 +198,7 @@ public:
IdentifierQuotingStyle identifier_quoting_style;
bool show_secrets; /// Show secret parts of the AST (e.g. passwords, encryption keys).
char nl_or_ws; /// Newline or whitespace.
LiteralEscapingStyle literal_escaping_style;

explicit FormatSettings(
WriteBuffer & ostr_,
@ -204,7 +206,8 @@ public:
bool hilite_ = false,
bool always_quote_identifiers_ = false,
IdentifierQuotingStyle identifier_quoting_style_ = IdentifierQuotingStyle::Backticks,
bool show_secrets_ = true)
bool show_secrets_ = true,
LiteralEscapingStyle literal_escaping_style_ = LiteralEscapingStyle::Regular)
: ostr(ostr_)
, one_line(one_line_)
, hilite(hilite_)
@ -212,6 +215,7 @@ public:
, identifier_quoting_style(identifier_quoting_style_)
, show_secrets(show_secrets_)
, nl_or_ws(one_line ? ' ' : '\n')
, literal_escaping_style(literal_escaping_style_)
{
}

@ -223,6 +227,7 @@ public:
, identifier_quoting_style(other.identifier_quoting_style)
, show_secrets(other.show_secrets)
, nl_or_ws(other.nl_or_ws)
, literal_escaping_style(other.literal_escaping_style)
{
}
14
src/Parsers/LiteralEscapingStyle.h
Normal file
14
src/Parsers/LiteralEscapingStyle.h
Normal file
@ -0,0 +1,14 @@
#pragma once


namespace DB
{

/// Method to escape single quotes.
enum class LiteralEscapingStyle
{
Regular, /// Escape backslashes with backslash (\\) and quotes with backslash (\')
PostgreSQL, /// Do not escape backslashes (\), escape quotes with quote ('')
};

}
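Note (not part of the diff): a minimal, self-contained sketch of what the two escaping styles are expected to produce for a literal that contains both a single quote and a backslash. It reuses writeQuotedString and the writeQuotedStringPostgreSQL helper referenced in the ASTLiteral.cpp hunk above; the exact rendered strings in the comments are illustrative, derived only from the enum comments.

#include <string>
#include <IO/WriteBufferFromString.h>
#include <IO/WriteHelpers.h>

// For the input  it's 50\% done  the two styles should roughly render:
//   Regular    -> 'it\'s 50\\% done'   (backslash-escapes quotes and backslashes)
//   PostgreSQL -> 'it''s 50\% done'    (doubles quotes, leaves backslashes untouched)
std::string quoteBothWays(const std::string & s)
{
    DB::WriteBufferFromOwnString regular;
    DB::WriteBufferFromOwnString pg;
    DB::writeQuotedString(s, regular);            // LiteralEscapingStyle::Regular
    DB::writeQuotedStringPostgreSQL(s, pg);       // LiteralEscapingStyle::PostgreSQL
    return regular.str() + "  vs  " + pg.str();
}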
@ -4,7 +4,7 @@ clickhouse_add_executable(lexer lexer.cpp ${SRCS})
target_link_libraries(lexer PRIVATE clickhouse_parsers)

clickhouse_add_executable(select_parser select_parser.cpp ${SRCS} "../../Server/ServerType.cpp")
target_link_libraries(select_parser PRIVATE clickhouse_parsers)
target_link_libraries(select_parser PRIVATE dbms)

clickhouse_add_executable(create_parser create_parser.cpp ${SRCS} "../../Server/ServerType.cpp")
target_link_libraries(create_parser PRIVATE clickhouse_parsers)
target_link_libraries(create_parser PRIVATE dbms)
@ -101,7 +101,7 @@ void checkAccessRights(const TableNode & table_node, const Names & column_names,
}

throw Exception(ErrorCodes::ACCESS_DENIED,
"{}: Not enough privileges. To execute this query it's necessary to have grant SELECT for at least one column on {}",
"{}: Not enough privileges. To execute this query, it's necessary to have the grant SELECT for at least one column on {}",
query_context->getUserName(),
storage_id.getFullTableName());
}
@ -2,6 +2,9 @@
#include <QueryPipeline/QueryPipelineBuilder.h>
#include <Processors/Transforms/JoiningTransform.h>
#include <Interpreters/IJoin.h>
#include <Interpreters/TableJoin.h>
#include <IO/Operators.h>
#include <Common/JSONBuilder.h>
#include <Common/typeid_cast.h>

namespace DB
@ -62,6 +65,36 @@ void JoinStep::describePipeline(FormatSettings & settings) const
IQueryPlanStep::describePipeline(processors, settings);
}

void JoinStep::describeActions(FormatSettings & settings) const
{
String prefix(settings.offset, ' ');

const auto & table_join = join->getTableJoin();
settings.out << prefix << "Type: " << toString(table_join.kind()) << '\n';
settings.out << prefix << "Strictness: " << toString(table_join.strictness()) << '\n';
settings.out << prefix << "Algorithm: " << join->getName() << '\n';

if (table_join.strictness() == JoinStrictness::Asof)
settings.out << prefix << "ASOF inequality: " << toString(table_join.getAsofInequality()) << '\n';

if (!table_join.getClauses().empty())
settings.out << prefix << "Clauses: " << table_join.formatClauses(table_join.getClauses(), true /*short_format*/) << '\n';
}

void JoinStep::describeActions(JSONBuilder::JSONMap & map) const
{
const auto & table_join = join->getTableJoin();
map.add("Type", toString(table_join.kind()));
map.add("Strictness", toString(table_join.strictness()));
map.add("Algorithm", join->getName());

if (table_join.strictness() == JoinStrictness::Asof)
map.add("ASOF inequality", toString(table_join.getAsofInequality()));

if (!table_join.getClauses().empty())
map.add("Clauses", table_join.formatClauses(table_join.getClauses(), true /*short_format*/));
}

void JoinStep::updateInputStream(const DataStream & new_input_stream_, size_t idx)
{
if (idx == 0)
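Note (illustrative, not part of the diff): with these describeActions() overloads, the text form of an EXPLAIN plan for a join step should gain lines of roughly the following shape. The concrete value strings depend on toString(JoinKind), toString(JoinStrictness), toString(ASOFJoinInequality) and on the IJoin implementation's getName(), so the values below are assumptions, not captured output.

Join
  Type: Left
  Strictness: Asof
  Algorithm: HashJoin
  ASOF inequality: GreaterOrEquals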
@ -27,6 +27,9 @@ public:

void describePipeline(FormatSettings & settings) const override;

void describeActions(JSONBuilder::JSONMap & map) const override;
void describeActions(FormatSettings & settings) const override;

const JoinPtr & getJoin() const { return join; }
bool allowPushDownToRight() const;
@ -561,8 +561,7 @@ void HTTPHandler::processQuery(
session->makeSessionContext();
}

auto client_info = session->getClientInfo();
auto context = session->makeQueryContext(std::move(client_info));
auto context = session->makeQueryContext();

/// This parameter is used to tune the behavior of output formats (such as Native) for compatibility.
if (params.has("client_protocol_version"))
@ -3166,6 +3166,10 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context
}
}

if (command.type == AlterCommand::MODIFY_QUERY)
throw Exception(ErrorCodes::NOT_IMPLEMENTED,
"ALTER MODIFY QUERY is not supported by MergeTree engines family");

if (command.type == AlterCommand::MODIFY_ORDER_BY && !is_custom_partitioned)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS,
@ -226,7 +226,7 @@ static bool isConditionGood(const RPNBuilderTreeNode & condition, const NameSet
return false;
}

void MergeTreeWhereOptimizer::analyzeImpl(Conditions & res, const RPNBuilderTreeNode & node, const WhereOptimizerContext & where_optimizer_context) const
void MergeTreeWhereOptimizer::analyzeImpl(Conditions & res, const RPNBuilderTreeNode & node, const WhereOptimizerContext & where_optimizer_context, std::set<Int64> & pk_positions) const
{
auto function_node_optional = node.toFunctionNodeOrNull();

@ -237,7 +237,7 @@ void MergeTreeWhereOptimizer::analyzeImpl(Conditions & res, const RPNBuilderTree
for (size_t i = 0; i < arguments_size; ++i)
{
auto argument = function_node_optional->getArgumentAt(i);
analyzeImpl(res, argument, where_optimizer_context);
analyzeImpl(res, argument, where_optimizer_context, pk_positions);
}
}
else
@ -270,6 +270,7 @@ void MergeTreeWhereOptimizer::analyzeImpl(Conditions & res, const RPNBuilderTree
cond.good = cond.viable;
/// Find min position in PK of any column that is used in this condition.
cond.min_position_in_primary_key = findMinPosition(cond.table_columns, primary_key_names_positions);
pk_positions.emplace(cond.min_position_in_primary_key);
}

res.emplace_back(std::move(cond));
@ -281,7 +282,29 @@ MergeTreeWhereOptimizer::Conditions MergeTreeWhereOptimizer::analyze(const RPNBu
const WhereOptimizerContext & where_optimizer_context) const
{
Conditions res;
analyzeImpl(res, node, where_optimizer_context);
std::set<Int64> pk_positions;
analyzeImpl(res, node, where_optimizer_context, pk_positions);

/// E.g., if the primary key is (a, b, c) but the condition is a = 1 and c = 1,
/// we should only put (a = 1) to the tail of PREWHERE,
/// and treat (c = 1) as a normal column.
if (where_optimizer_context.move_primary_key_columns_to_end_of_prewhere)
{
Int64 min_valid_pk_pos = -1;
for (auto pk_pos : pk_positions)
{
if (pk_pos != min_valid_pk_pos + 1)
break;
min_valid_pk_pos = pk_pos;
}
for (auto & cond : res)
{
if (cond.min_position_in_primary_key > min_valid_pk_pos)
cond.min_position_in_primary_key = std::numeric_limits<Int64>::max() - 1;
}
LOG_TRACE(log, "The min valid primary key position for moving to the tail of PREWHERE is {}", min_valid_pk_pos);
}

return res;
}
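Note (illustrative, not part of the diff): the prefix scan above keeps only the conditions that cover a contiguous primary-key prefix. A minimal standalone sketch of the same logic, using the assumed example from the code comment, a primary key (a, b, c) with the conditions a = 1 AND c = 1:

#include <cstdint>
#include <iostream>
#include <limits>
#include <set>

int main()
{
    // Primary-key positions referenced by the conditions: 'a' -> 0, 'c' -> 2 ('b' is missing).
    std::set<int64_t> pk_positions{0, 2};

    // Same scan as in MergeTreeWhereOptimizer::analyze(): grow the prefix while positions stay contiguous.
    int64_t min_valid_pk_pos = -1;
    for (auto pk_pos : pk_positions)
    {
        if (pk_pos != min_valid_pk_pos + 1)
            break;
        min_valid_pk_pos = pk_pos;
    }

    // min_valid_pk_pos == 0: only the condition on 'a' may move to the tail of PREWHERE;
    // the condition on 'c' gets min_position_in_primary_key pushed to the sentinel
    // std::numeric_limits<int64_t>::max() - 1 and is treated as an ordinary column.
    std::cout << "min_valid_pk_pos = " << min_valid_pk_pos << '\n';
}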
@ -108,7 +108,7 @@ private:

std::optional<OptimizeResult> optimizeImpl(const RPNBuilderTreeNode & node, const WhereOptimizerContext & where_optimizer_context) const;

void analyzeImpl(Conditions & res, const RPNBuilderTreeNode & node, const WhereOptimizerContext & where_optimizer_context) const;
void analyzeImpl(Conditions & res, const RPNBuilderTreeNode & node, const WhereOptimizerContext & where_optimizer_context, std::set<Int64> & pk_positions) const;

/// Transform conjunctions chain in WHERE expression to Conditions list.
Conditions analyze(const RPNBuilderTreeNode & node, const WhereOptimizerContext & where_optimizer_context) const;
Some files were not shown because too many files have changed in this diff.