Merge remote-tracking branch 'upstream/master' into better-stop-listen

Nikolay Degterinsky committed on 2023-08-16 23:06:18 +00:00
Commit a893f75884
629 changed files with 7324 additions and 3102 deletions

View File

@ -895,6 +895,48 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinS390X:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_s390x
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
@ -978,6 +1020,7 @@ jobs:
- BuilderBinFreeBSD
- BuilderBinPPC64
- BuilderBinRISCV64
- BuilderBinS390X
- BuilderBinAmd64Compat
- BuilderBinAarch64V80Compat
- BuilderBinClangTidy

View File

@ -955,6 +955,47 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinS390X:
needs: [DockerHubPush, FastTest, StyleCheck]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_s390x
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
@ -1037,6 +1078,7 @@ jobs:
- BuilderBinFreeBSD
- BuilderBinPPC64
- BuilderBinRISCV64
- BuilderBinS390X
- BuilderBinAmd64Compat
- BuilderBinAarch64V80Compat
- BuilderBinClangTidy
@ -5185,3 +5227,39 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
##################################### SQL TEST ###############################################
##############################################################################################
SQLTest:
needs: [BuilderDebRelease]
runs-on: [self-hosted, fuzzer-unit-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/sqltest
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=SQLTest
REPO_COPY=${{runner.temp}}/sqltest/ClickHouse
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: SQLTest
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 sqltest.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"

View File

@ -208,9 +208,6 @@ option(OMIT_HEAVY_DEBUG_SYMBOLS
"Do not generate debugger info for heavy modules (ClickHouse functions and dictionaries, some contrib)"
${OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT})
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
set(USE_DEBUG_HELPERS ON)
endif()
option(USE_DEBUG_HELPERS "Enable debug helpers" ${USE_DEBUG_HELPERS})
option(BUILD_STANDALONE_KEEPER "Build keeper as small standalone binary" OFF)

View File

@ -3,6 +3,7 @@
#include <magic_enum.hpp>
#include <fmt/format.h>
template <class T> concept is_enum = std::is_enum_v<T>;
namespace detail

View File

@ -7,8 +7,6 @@
#include <base/find_symbols.h>
#include <base/preciseExp10.h>
#include <iostream>
#define JSON_MAX_DEPTH 100

View File

@ -12,7 +12,6 @@
#include <tuple>
#include <limits>
#include <boost/multiprecision/cpp_bin_float.hpp>
#include <boost/math/special_functions/fpclassify.hpp>
// NOLINTBEGIN(*)
@ -22,6 +21,7 @@
#define CONSTEXPR_FROM_DOUBLE constexpr
using FromDoubleIntermediateType = long double;
#else
#include <boost/multiprecision/cpp_bin_float.hpp>
/// `wide_integer_from_builtin` can't be constexpr with non-literal `cpp_bin_float_double_extended`
#define CONSTEXPR_FROM_DOUBLE
using FromDoubleIntermediateType = boost::multiprecision::cpp_bin_float_double_extended;

View File

@ -19,7 +19,6 @@
#include "Poco/UTF16Encoding.h"
#include "Poco/Buffer.h"
#include "Poco/Exception.h"
#include <iostream>
using Poco::Buffer;

View File

@ -97,7 +97,7 @@ namespace Data
///
/// static void extract(std::size_t pos, Person& obj, const Person& defVal, AbstractExtractor::Ptr pExt)
/// {
/// // defVal is the default person we should use if we encunter NULL entries, so we take the individual fields
/// // defVal is the default person we should use if we encounter NULL entries, so we take the individual fields
/// // as defaults. You can do more complex checking, ie return defVal if only one single entry of the fields is null etc...
/// poco_assert_dbg (!pExt.isNull());
/// std::string lastName;

View File

@ -16,7 +16,6 @@
#include "Poco/TaskManager.h"
#include "Poco/Exception.h"
#include <iostream>
#include <array>

View File

@ -14,7 +14,6 @@
#include "Poco/JSON/Object.h"
#include <iostream>
#include <sstream>
using Poco::Dynamic::Var;

View File

@ -26,7 +26,6 @@
#include "Poco/CountingStream.h"
#include "Poco/RegularExpression.h"
#include <sstream>
#include <iostream>
using Poco::NumberFormatter;

View File

@ -146,7 +146,7 @@ namespace Net
std::string cipherList;
/// Specifies the supported ciphers in OpenSSL notation.
/// Defaults to "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH".
/// Defaults to "ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH".
std::string dhParamsFile;
/// Specifies a file containing Diffie-Hellman parameters.
@ -172,7 +172,7 @@ namespace Net
VerificationMode verificationMode = VERIFY_RELAXED,
int verificationDepth = 9,
bool loadDefaultCAs = false,
const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH");
/// Creates a Context.
///
/// * usage specifies whether the context is used by a client or server.
@ -200,7 +200,7 @@ namespace Net
VerificationMode verificationMode = VERIFY_RELAXED,
int verificationDepth = 9,
bool loadDefaultCAs = false,
const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH");
/// Creates a Context.
///
/// * usage specifies whether the context is used by a client or server.

View File

@ -76,7 +76,7 @@ namespace Net
/// <verificationMode>none|relaxed|strict|once</verificationMode>
/// <verificationDepth>1..9</verificationDepth>
/// <loadDefaultCAFile>true|false</loadDefaultCAFile>
/// <cipherList>ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH</cipherList>
/// <cipherList>ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH</cipherList>
/// <preferServerCiphers>true|false</preferServerCiphers>
/// <privateKeyPassphraseHandler>
/// <name>KeyFileHandler</name>

View File

@ -41,7 +41,7 @@ Context::Params::Params():
verificationMode(VERIFY_RELAXED),
verificationDepth(9),
loadDefaultCAs(false),
cipherList("ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH")
cipherList("ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH")
{
}

View File

@ -20,6 +20,9 @@ set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/s390x-linux-gnu/libc")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

View File

@ -47,7 +47,7 @@ if (CMAKE_CROSSCOMPILING)
set (ENABLE_RUST OFF CACHE INTERNAL "")
elseif (ARCH_S390X)
set (ENABLE_GRPC OFF CACHE INTERNAL "")
set (ENABLE_SENTRY OFF CACHE INTERNAL "")
set (ENABLE_RUST OFF CACHE INTERNAL "")
endif ()
elseif (OS_FREEBSD)
# FIXME: broken dependencies

View File

@ -1,5 +1,5 @@
## ClickHouse Dockerfiles
This directory contain Dockerfiles for `clickhouse-client` and `clickhouse-server`. They are updated in each release.
This directory contain Dockerfiles for `clickhouse-server`. They are updated in each release.
Also there is bunch of images for testing and CI. They are listed in `images.json` file and updated on each commit to master. If you need to add another image, place information about it into `images.json`.
Also, there is a bunch of images for testing and CI. They are listed in `images.json` file and updated on each commit to master. If you need to add another image, place information about it into `images.json`.

View File

@ -1,34 +0,0 @@
FROM ubuntu:18.04
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
ARG version=22.1.1.*
RUN apt-get update \
&& apt-get install --yes --no-install-recommends \
apt-transport-https \
ca-certificates \
dirmngr \
gnupg \
&& mkdir -p /etc/apt/sources.list.d \
&& apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 \
&& echo $repository > /etc/apt/sources.list.d/clickhouse.list \
&& apt-get update \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get install --allow-unauthenticated --yes --no-install-recommends \
clickhouse-client=$version \
clickhouse-common-static=$version \
locales \
tzdata \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf \
&& apt-get clean
RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
ENTRYPOINT ["/usr/bin/clickhouse-client"]

View File

@ -1,7 +0,0 @@
# ClickHouse Client Docker Image
For more information see [ClickHouse Server Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/).
## License
View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.

View File

@ -125,6 +125,7 @@
"docker/test/keeper-jepsen",
"docker/test/server-jepsen",
"docker/test/sqllogic",
"docker/test/sqltest",
"docker/test/stateless"
]
},
@ -155,13 +156,16 @@
},
"docker/docs/builder": {
"name": "clickhouse/docs-builder",
"dependent": [
]
"dependent": []
},
"docker/test/sqllogic": {
"name": "clickhouse/sqllogic-test",
"dependent": []
},
"docker/test/sqltest": {
"name": "clickhouse/sqltest",
"dependent": []
},
"docker/test/integration/nginx_dav": {
"name": "clickhouse/nginx-dav",
"dependent": []

View File

@ -58,33 +58,6 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
rustup target add aarch64-apple-darwin && \
rustup target add powerpc64le-unknown-linux-gnu
# Create vendor cache for cargo.
#
# Note, that the config.toml for the root is used, you will not be able to
# install any other crates, except those which had been vendored (since if
# there is "replace-with" for some source, then cargo will not look to other
# remotes except this).
#
# Notes for the command itself:
# - --chown is required to preserve the rights
# - unstable-options for -C
# - chmod is required to fix the permissions, since builds are running from a different user
# - copy of the Cargo.lock is required for proper dependencies versions
# - cargo vendor --sync is requried to overcome [1] bug.
#
# [1]: https://github.com/rust-lang/wg-cargo-std-aware/issues/23
COPY --chown=root:root /rust /rust/packages
RUN cargo -Z unstable-options -C /rust/packages vendor > $CARGO_HOME/config.toml && \
cp "$(rustc --print=sysroot)"/lib/rustlib/src/rust/Cargo.lock "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/ && \
cargo -Z unstable-options -C /rust/packages vendor --sync "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/Cargo.toml && \
rm "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/Cargo.lock && \
sed -i "s#\"vendor\"#\"/rust/vendor\"#" $CARGO_HOME/config.toml && \
cat $CARGO_HOME/config.toml && \
mv /rust/packages/vendor /rust/vendor && \
chmod -R o=r+X /rust/vendor && \
ls -R -l /rust/packages && \
rm -r /rust/packages
# NOTE: Seems like gcc-11 is too new for ubuntu20 repository
# A cross-linker for RISC-V 64 (we need it, because LLVM's LLD does not work):
RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
@ -107,6 +80,14 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
# Download toolchain and SDK for Darwin
RUN curl -sL -O https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacOSX11.0.sdk.tar.xz
# Download and install mold 2.0 for s390x build
RUN curl -Lo /tmp/mold.tar.gz "https://github.com/rui314/mold/releases/download/v2.0.0/mold-2.0.0-x86_64-linux.tar.gz" \
&& mkdir /tmp/mold \
&& tar -xzf /tmp/mold.tar.gz -C /tmp/mold \
&& cp -r /tmp/mold/mold*/* /usr \
&& rm -rf /tmp/mold \
&& rm /tmp/mold.tar.gz
# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
ARG NFPM_VERSION=2.20.0

View File

@ -1 +0,0 @@
../../../rust

View File

@ -80,9 +80,11 @@ def run_docker_image_with_env(
output_dir: Path,
env_variables: List[str],
ch_root: Path,
cargo_cache_dir: Path,
ccache_dir: Optional[Path],
) -> None:
output_dir.mkdir(parents=True, exist_ok=True)
cargo_cache_dir.mkdir(parents=True, exist_ok=True)
env_part = " -e ".join(env_variables)
if env_part:
@ -105,7 +107,7 @@ def run_docker_image_with_env(
cmd = (
f"docker run --network=host --user={user} --rm {ccache_mount}"
f"--volume={output_dir}:/output --volume={ch_root}:/build {env_part} "
f"{interactive} {image_name}"
f"--volume={cargo_cache_dir}:/rust/cargo/registry {interactive} {image_name}"
)
logging.info("Will build ClickHouse pkg with cmd: '%s'", cmd)
@ -141,6 +143,7 @@ def parse_env_variables(
FREEBSD_SUFFIX = "-freebsd"
PPC_SUFFIX = "-ppc64le"
RISCV_SUFFIX = "-riscv64"
S390X_SUFFIX = "-s390x"
AMD64_COMPAT_SUFFIX = "-amd64-compat"
result = []
@ -154,6 +157,7 @@ def parse_env_variables(
is_cross_arm_v80compat = compiler.endswith(ARM_V80COMPAT_SUFFIX)
is_cross_ppc = compiler.endswith(PPC_SUFFIX)
is_cross_riscv = compiler.endswith(RISCV_SUFFIX)
is_cross_s390x = compiler.endswith(S390X_SUFFIX)
is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
is_amd64_compat = compiler.endswith(AMD64_COMPAT_SUFFIX)
@ -215,6 +219,11 @@ def parse_env_variables(
cmake_flags.append(
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-riscv64.cmake"
)
elif is_cross_s390x:
cc = compiler[: -len(S390X_SUFFIX)]
cmake_flags.append(
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-s390x.cmake"
)
elif is_amd64_compat:
cc = compiler[: -len(AMD64_COMPAT_SUFFIX)]
result.append("DEB_ARCH=amd64")
@ -378,6 +387,7 @@ def parse_args() -> argparse.Namespace:
"clang-16-aarch64-v80compat",
"clang-16-ppc64le",
"clang-16-riscv64",
"clang-16-s390x",
"clang-16-amd64-compat",
"clang-16-freebsd",
),
@ -417,6 +427,13 @@ def parse_args() -> argparse.Namespace:
action="store_true",
help="if set, the build fails on errors writing cache to S3",
)
parser.add_argument(
"--cargo-cache-dir",
default=Path(os.getenv("CARGO_HOME", "") or Path.home() / ".cargo")
/ "registry",
type=dir_name,
help="a directory to preserve the rust cargo crates",
)
parser.add_argument("--force-build-image", action="store_true")
parser.add_argument("--version")
parser.add_argument("--official", action="store_true")
@ -497,6 +514,7 @@ def main() -> None:
args.output_dir,
env_prepared,
ch_root,
args.cargo_cache_dir,
args.ccache_dir,
)
logging.info("Output placed into %s", args.output_dir)

View File

@ -35,4 +35,7 @@ ENV LC_ALL en_US.UTF-8
ENV TZ=Europe/Amsterdam
RUN ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
# This script is used to setup realtime export of server logs from the CI into external ClickHouse cluster:
COPY setup_export_logs.sh /
CMD sleep 1

View File

@ -0,0 +1,61 @@
#!/bin/bash
# This script sets up export of system log tables to a remote server.
# Remote tables are created if not exist, and augmented with extra columns,
# and their names will contain a hash of the table structure,
# which allows exporting tables from servers of different versions.
# Pre-configured destination cluster, where to export the data
CLUSTER=${CLUSTER:=system_logs_export}
EXTRA_COLUMNS=${EXTRA_COLUMNS:="pull_request_number UInt32, commit_sha String, check_start_time DateTime, check_name LowCardinality(String), instance_type LowCardinality(String), "}
EXTRA_COLUMNS_EXPRESSION=${EXTRA_COLUMNS_EXPRESSION:="0 AS pull_request_number, '' AS commit_sha, now() AS check_start_time, '' AS check_name, '' AS instance_type"}
EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:="check_name, "}
CONNECTION_PARAMETERS=${CONNECTION_PARAMETERS:=""}
# Create all configured system logs:
clickhouse-client --query "SYSTEM FLUSH LOGS"
# For each system log table:
clickhouse-client --query "SHOW TABLES FROM system LIKE '%\\_log'" | while read -r table
do
# Calculate hash of its structure:
hash=$(clickhouse-client --query "
SELECT sipHash64(groupArray((name, type)))
FROM (SELECT name, type FROM system.columns
WHERE database = 'system' AND table = '$table'
ORDER BY position)
")
# Create the destination table with adapted name and structure:
statement=$(clickhouse-client --format TSVRaw --query "SHOW CREATE TABLE system.${table}" | sed -r -e '
s/^\($/('"$EXTRA_COLUMNS"'/;
s/ORDER BY \(/ORDER BY ('"$EXTRA_ORDER_BY_COLUMNS"'/;
s/^CREATE TABLE system\.\w+_log$/CREATE TABLE IF NOT EXISTS '"$table"'_'"$hash"'/;
/^TTL /d
')
echo "Creating destination table ${table}_${hash}" >&2
echo "$statement" | clickhouse-client $CONNECTION_PARAMETERS
echo "Creating table system.${table}_sender" >&2
# Create Distributed table and materialized view to watch on the original table:
clickhouse-client --query "
CREATE TABLE system.${table}_sender
ENGINE = Distributed(${CLUSTER}, default, ${table}_${hash})
EMPTY AS
SELECT ${EXTRA_COLUMNS_EXPRESSION}, *
FROM system.${table}
"
echo "Creating materialized view system.${table}_watcher" >&2
clickhouse-client --query "
CREATE MATERIALIZED VIEW system.${table}_watcher TO system.${table}_sender AS
SELECT ${EXTRA_COLUMNS_EXPRESSION}, *
FROM system.${table}
"
done
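To make the generated DDL concrete: a minimal sketch of the three objects the loop above would create for `system.query_log`, assuming a hypothetical structure hash and the default `CLUSTER`/`EXTRA_COLUMNS` values (the real statements are whatever `SHOW CREATE TABLE` returns after the `sed` rewrites):

```sql
-- Destination table on the export cluster: the extra CI columns are prepended
-- and the structure hash is baked into the name (hash value is hypothetical).
CREATE TABLE IF NOT EXISTS query_log_15123702980000000000
(
    pull_request_number UInt32,
    commit_sha String,
    check_start_time DateTime,
    check_name LowCardinality(String),
    instance_type LowCardinality(String),
    -- ...the original system.query_log columns follow...
    event_date Date,
    event_time DateTime
)
ENGINE = MergeTree
ORDER BY (check_name, event_date, event_time);  -- EXTRA_ORDER_BY_COLUMNS prefix

-- Local Distributed table that forwards rows to the remote cluster.
CREATE TABLE system.query_log_sender
ENGINE = Distributed(system_logs_export, default, query_log_15123702980000000000)
EMPTY AS
SELECT 0 AS pull_request_number, '' AS commit_sha, now() AS check_start_time,
       '' AS check_name, '' AS instance_type, *
FROM system.query_log;

-- Materialized view that watches the local log table and feeds the sender.
CREATE MATERIALIZED VIEW system.query_log_watcher TO system.query_log_sender AS
SELECT 0 AS pull_request_number, '' AS commit_sha, now() AS check_start_time,
       '' AS check_name, '' AS instance_type, *
FROM system.query_log;
```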

View File

@ -148,6 +148,7 @@ function clone_submodules
contrib/liburing
contrib/libfiu
contrib/incbin
contrib/yaml-cpp
)
git submodule sync
@ -170,6 +171,7 @@ function run_cmake
"-DENABLE_SIMDJSON=1"
"-DENABLE_JEMALLOC=1"
"-DENABLE_LIBURING=1"
"-DENABLE_YAML_CPP=1"
)
export CCACHE_DIR="$FASTTEST_WORKSPACE/ccache"

View File

@ -122,6 +122,23 @@ EOL
<core_path>$PWD</core_path>
</clickhouse>
EOL
# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
echo "
remote_servers:
system_logs_export:
shard:
replica:
secure: 1
user: ci
host: '${CLICKHOUSE_CI_LOGS_HOST}'
port: 9440
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > db/config.d/system_logs_export.yaml
fi
}
function filter_exists_and_template
@ -223,7 +240,22 @@ quit
done
clickhouse-client --query "select 1" # This checks that the server is responding
kill -0 $server_pid # This checks that it is our server that is started and not some other one
echo Server started and responded
echo 'Server started and responded'
# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
export EXTRA_COLUMNS_EXPRESSION="$PR_TO_TEST AS pull_request_number, '$SHA_TO_TEST' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
# TODO: Check if the password will appear in the logs.
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"
/setup_export_logs.sh
# Unset variables after use
export CONNECTION_PARAMETERS=''
export CLICKHOUSE_CI_LOGS_HOST=''
export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi
# SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric.
# SC2046: Quote this to prevent word splitting. Actually I need word splitting.

View File

@ -12,6 +12,7 @@ ENV \
# install systemd packages
RUN apt-get update && \
apt-get install -y --no-install-recommends \
sudo \
systemd \
&& \
apt-get clean && \

View File

@ -2,7 +2,7 @@ version: "2.3"
services:
coredns:
image: coredns/coredns:latest
image: coredns/coredns:1.9.3 # :latest broke this test
restart: always
volumes:
- ${COREDNS_CONFIG_DIR}/example.com:/example.com

View File

@ -665,9 +665,8 @@ create view partial_query_times as select * from
-- Report for backward-incompatible ('partial') queries that we could only run on the new server (e.g.
-- queries with new functions added in the tested PR).
create table partial_queries_report engine File(TSV, 'report/partial-queries-report.tsv')
settings output_format_decimal_trailing_zeros = 1
as select toDecimal64(time_median, 3) time,
toDecimal64(time_stddev / time_median, 3) relative_time_stddev,
as select round(time_median, 3) time,
round(time_stddev / time_median, 3) relative_time_stddev,
test, query_index, query_display_name
from partial_query_times
join query_display_names using (test, query_index)
@ -739,28 +738,26 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')
;
create table changed_perf_report engine File(TSV, 'report/changed-perf.tsv')
settings output_format_decimal_trailing_zeros = 1
as with
-- server_time is sometimes reported as zero (if it's less than 1 ms),
-- so we have to work around this to not get an error about conversion
-- of NaN to decimal.
(left > right ? left / right : right / left) as times_change_float,
isFinite(times_change_float) as times_change_finite,
toDecimal64(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
round(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
times_change_finite
? (left > right ? '-' : '+') || toString(times_change_decimal) || 'x'
: '--' as times_change_str
select
toDecimal64(left, 3), toDecimal64(right, 3), times_change_str,
toDecimal64(diff, 3), toDecimal64(stat_threshold, 3),
round(left, 3), round(right, 3), times_change_str,
round(diff, 3), round(stat_threshold, 3),
changed_fail, test, query_index, query_display_name
from queries where changed_show order by abs(diff) desc;
create table unstable_queries_report engine File(TSV, 'report/unstable-queries.tsv')
settings output_format_decimal_trailing_zeros = 1
as select
toDecimal64(left, 3), toDecimal64(right, 3), toDecimal64(diff, 3),
toDecimal64(stat_threshold, 3), unstable_fail, test, query_index, query_display_name
round(left, 3), round(right, 3), round(diff, 3),
round(stat_threshold, 3), unstable_fail, test, query_index, query_display_name
from queries where unstable_show order by stat_threshold desc;
@ -789,11 +786,10 @@ create view total_speedup as
;
create table test_perf_changes_report engine File(TSV, 'report/test-perf-changes.tsv')
settings output_format_decimal_trailing_zeros = 1
as with
(times_speedup >= 1
? '-' || toString(toDecimal64(times_speedup, 3)) || 'x'
: '+' || toString(toDecimal64(1 / times_speedup, 3)) || 'x')
? '-' || toString(round(times_speedup, 3)) || 'x'
: '+' || toString(round(1 / times_speedup, 3)) || 'x')
as times_speedup_str
select test, times_speedup_str, queries, bad, changed, unstable
-- Not sure what's the precedence of UNION ALL vs WHERE & ORDER BY, hence all
@ -817,11 +813,10 @@ create view total_client_time_per_query as select *
'test text, query_index int, client float, server float');
create table slow_on_client_report engine File(TSV, 'report/slow-on-client.tsv')
settings output_format_decimal_trailing_zeros = 1
as select client, server, toDecimal64(client/server, 3) p,
as select client, server, round(client/server, 3) p,
test, query_display_name
from total_client_time_per_query left join query_display_names using (test, query_index)
where p > toDecimal64(1.02, 3) order by p desc;
where p > round(1.02, 3) order by p desc;
create table wall_clock_time_per_test engine Memory as select *
from file('wall-clock-times.tsv', TSV, 'test text, real float, user float, system float');
@ -899,15 +894,14 @@ create view test_times_view_total as
;
create table test_times_report engine File(TSV, 'report/test-times.tsv')
settings output_format_decimal_trailing_zeros = 1
as select
test,
toDecimal64(real, 3),
toDecimal64(total_client_time, 3),
round(real, 3),
round(total_client_time, 3),
queries,
toDecimal64(query_max, 3),
toDecimal64(avg_real_per_query, 3),
toDecimal64(query_min, 3),
round(query_max, 3),
round(avg_real_per_query, 3),
round(query_min, 3),
runs
from (
select * from test_times_view
@ -919,21 +913,20 @@ create table test_times_report engine File(TSV, 'report/test-times.tsv')
-- report for all queries page, only main metric
create table all_tests_report engine File(TSV, 'report/all-queries.tsv')
settings output_format_decimal_trailing_zeros = 1
as with
-- server_time is sometimes reported as zero (if it's less than 1 ms),
-- so we have to work around this to not get an error about conversion
-- of NaN to decimal.
(left > right ? left / right : right / left) as times_change_float,
isFinite(times_change_float) as times_change_finite,
toDecimal64(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
round(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
times_change_finite
? (left > right ? '-' : '+') || toString(times_change_decimal) || 'x'
: '--' as times_change_str
select changed_fail, unstable_fail,
toDecimal64(left, 3), toDecimal64(right, 3), times_change_str,
toDecimal64(isFinite(diff) ? diff : 0, 3),
toDecimal64(isFinite(stat_threshold) ? stat_threshold : 0, 3),
round(left, 3), round(right, 3), times_change_str,
round(isFinite(diff) ? diff : 0, 3),
round(isFinite(stat_threshold) ? stat_threshold : 0, 3),
test, query_index, query_display_name
from queries order by test, query_index;
@ -1044,27 +1037,6 @@ create table unstable_run_traces engine File(TSVWithNamesAndTypes,
order by count() desc
;
create table metric_devation engine File(TSVWithNamesAndTypes,
'report/metric-deviation.$version.tsv')
settings output_format_decimal_trailing_zeros = 1
-- first goes the key used to split the file with grep
as select test, query_index, query_display_name,
toDecimal64(d, 3) d, q, metric
from (
select
test, query_index,
(q[3] - q[1])/q[2] d,
quantilesExact(0, 0.5, 1)(value) q, metric
from (select * from unstable_run_metrics
union all select * from unstable_run_traces
union all select * from unstable_run_metrics_2) mm
group by test, query_index, metric
having isFinite(d) and d > 0.5 and q[3] > 5
) metrics
left join query_display_names using (test, query_index)
order by test, query_index, d desc
;
create table stacks engine File(TSV, 'report/stacks.$version.tsv') as
select
-- first goes the key used to split the file with grep
@ -1173,9 +1145,8 @@ create table metrics engine File(TSV, 'metrics/metrics.tsv') as
-- Show metrics that have changed
create table changes engine File(TSV, 'metrics/changes.tsv')
settings output_format_decimal_trailing_zeros = 1
as select metric, left, right,
toDecimal64(diff, 3), toDecimal64(times_diff, 3)
round(diff, 3), round(times_diff, 3)
from (
select metric, median(left) as left, median(right) as right,
(right - left) / left diff,
@ -1226,7 +1197,6 @@ create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv')
'$SHA_TO_TEST' :: LowCardinality(String) AS commit_sha,
'${CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME:-Performance}' :: LowCardinality(String) AS check_name,
'$(sed -n 's/.*<!--status: \(.*\)-->/\1/p' report.html)' :: LowCardinality(String) AS check_status,
-- TODO toDateTime() can't parse output of 'date', so no time for now.
(($(date +%s) - $CHPC_CHECK_START_TIMESTAMP) * 1000) :: UInt64 AS check_duration_ms,
fromUnixTimestamp($CHPC_CHECK_START_TIMESTAMP) check_start_time,
test_name :: LowCardinality(String) AS test_name ,
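The `toDecimal64(x, 3)` to `round(x, 3)` switch above (together with dropping `settings output_format_decimal_trailing_zeros = 1`) changes only how the report numbers are rendered, not what is measured; `round` also sidesteps the NaN-to-Decimal conversion error the old comments warn about. A minimal sketch of the visible difference, runnable in `clickhouse-client`:

```sql
SELECT
    toDecimal64(1.5, 3) AS as_decimal,      -- Decimal(18, 3); prints 1.500
                                            -- when output_format_decimal_trailing_zeros = 1
    round(1.5, 3)       AS as_float,        -- Float64; prints 1.5
    toTypeName(toDecimal64(1.5, 3)) AS t1,  -- Decimal(18, 3)
    toTypeName(round(1.5, 3))       AS t2;  -- Float64
```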

View File

@ -1,4 +1,5 @@
#!/bin/bash
set -exu
trap "exit" INT TERM

View File

@ -0,0 +1,30 @@
# docker build -t clickhouse/sqltest .
ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG
RUN apt-get update --yes \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get install --yes --no-install-recommends \
wget \
git \
python3 \
python3-dev \
python3-pip \
sudo \
&& apt-get clean
RUN pip3 install \
pyyaml \
clickhouse-driver
ARG sqltest_repo="https://github.com/elliotchance/sqltest/"
RUN git clone ${sqltest_repo}
ENV TZ=UTC
ENV MAX_RUN_TIME=900
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
COPY run.sh /
COPY test.py /
CMD ["/bin/bash", "/run.sh"]

docker/test/sqltest/run.sh Executable file
View File

@ -0,0 +1,51 @@
#!/bin/bash
# shellcheck disable=SC2015
set -x
set -e
set -u
set -o pipefail
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-16_debug_none_unsplitted_disable_False_binary"}
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}
function wget_with_retry
{
for _ in 1 2 3 4; do
if wget -nv -nd -c "$1";then
return 0
else
sleep 0.5
fi
done
return 1
}
wget_with_retry "$BINARY_URL_TO_DOWNLOAD"
chmod +x clickhouse
./clickhouse install --noninteractive
echo "
users:
default:
access_management: 1" > /etc/clickhouse-server/users.d/access_management.yaml
clickhouse start
# Wait for start
for _ in {1..100}
do
clickhouse-client --query "SELECT 1" && break ||:
sleep 1
done
# Run the test
pushd sqltest/standards/2016/
/test.py
mv report.html test.log /workspace
popd
zstd --threads=0 /var/log/clickhouse-server/clickhouse-server.log
zstd --threads=0 /var/log/clickhouse-server/clickhouse-server.err.log
mv /var/log/clickhouse-server/clickhouse-server.log.zst /var/log/clickhouse-server/clickhouse-server.err.log.zst /workspace

docker/test/sqltest/test.py Executable file
View File

@ -0,0 +1,148 @@
#!/usr/bin/env python3
import os
import yaml
import html
import random
import string
from clickhouse_driver import Client
client = Client(host="localhost", port=9000)
settings = {
"default_table_engine": "Memory",
"union_default_mode": "DISTINCT",
"calculate_text_stack_trace": 0,
}
database_name = "sqltest_" + "".join(
random.choice(string.ascii_lowercase) for _ in range(10)
)
client.execute(f"DROP DATABASE IF EXISTS {database_name}", settings=settings)
client.execute(f"CREATE DATABASE {database_name}", settings=settings)
client = Client(host="localhost", port=9000, database=database_name)
summary = {"success": 0, "total": 0, "results": {}}
log_file = open("test.log", "w")
report_html_file = open("report.html", "w")
with open("features.yml", "r") as file:
yaml_content = yaml.safe_load(file)
for category in yaml_content:
log_file.write(category.capitalize() + " features:\n")
summary["results"][category] = {"success": 0, "total": 0, "results": {}}
for test in yaml_content[category]:
log_file.write(test + ": " + yaml_content[category][test] + "\n")
summary["results"][category]["results"][test] = {
"success": 0,
"total": 0,
"description": yaml_content[category][test],
}
test_path = test[0] + "/" + test + ".tests.yml"
if os.path.exists(test_path):
with open(test_path, "r") as test_file:
test_yaml_content = yaml.load_all(test_file, Loader=yaml.FullLoader)
for test_case in test_yaml_content:
queries = test_case["sql"]
if not isinstance(queries, list):
queries = [queries]
for query in queries:
# Example: E011-01
test_group = ""
if "-" in test:
test_group = test.split("-", 1)[0]
summary["results"][category]["results"][test_group][
"total"
] += 1
summary["results"][category]["results"][test]["total"] += 1
summary["results"][category]["total"] += 1
summary["total"] += 1
log_file.write(query + "\n")
try:
result = client.execute(query, settings=settings)
log_file.write(str(result) + "\n")
if test_group:
summary["results"][category]["results"][test_group][
"success"
] += 1
summary["results"][category]["results"][test][
"success"
] += 1
summary["results"][category]["success"] += 1
summary["success"] += 1
except Exception as e:
log_file.write(f"Error occurred: {str(e)}\n")
client.execute(f"DROP DATABASE {database_name}", settings=settings)
def enable_color(ratio):
if ratio == 0:
return "<b style='color: red;'>"
elif ratio < 0.5:
return "<b style='color: orange;'>"
elif ratio < 1:
return "<b style='color: gray;'>"
else:
return "<b style='color: green;'>"
reset_color = "</b>"
def print_ratio(indent, name, success, total, description):
report_html_file.write(
"{}{}: {}{} / {} ({:.1%}){}{}\n".format(
" " * indent,
name.capitalize(),
enable_color(success / total),
success,
total,
success / total,
reset_color,
f" - " + html.escape(description) if description else "",
)
)
report_html_file.write(
"<html><body><pre style='font-size: 16pt; padding: 1em; line-height: 1.25;'>\n"
)
print_ratio(0, "Total", summary["success"], summary["total"], "")
for category in summary["results"]:
cat_summary = summary["results"][category]
if cat_summary["total"] == 0:
continue
print_ratio(2, category, cat_summary["success"], cat_summary["total"], "")
for test in summary["results"][category]["results"]:
test_summary = summary["results"][category]["results"][test]
if test_summary["total"] == 0:
continue
print_ratio(
6 if "-" in test else 4,
test,
test_summary["success"],
test_summary["total"],
test_summary["description"],
)
report_html_file.write("</pre></body></html>\n")

View File

@ -20,6 +20,22 @@ ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
./setup_minio.sh stateful
# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
echo "
remote_servers:
system_logs_export:
shard:
replica:
secure: 1
user: ci
host: '${CLICKHOUSE_CI_LOGS_HOST}'
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
fi
function start()
{
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
@ -65,6 +81,22 @@ function start()
}
start
# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
# TODO: Check if the password will appear in the logs.
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"
./setup_export_logs.sh
# Unset variables after use
export CONNECTION_PARAMETERS=''
export CLICKHOUSE_CI_LOGS_HOST=''
export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi
# shellcheck disable=SC2086 # No quotes because I want to split it into words.
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
chmod 777 -R /var/lib/clickhouse

View File

@ -87,4 +87,5 @@ RUN npm install -g azurite \
COPY run.sh /
COPY setup_minio.sh /
COPY setup_hdfs_minicluster.sh /
CMD ["/bin/bash", "/run.sh"]

View File

@ -36,6 +36,22 @@ fi
./setup_minio.sh stateless
./setup_hdfs_minicluster.sh
# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
echo "
remote_servers:
system_logs_export:
shard:
replica:
secure: 1
user: ci
host: '${CLICKHOUSE_CI_LOGS_HOST}'
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
fi
# For flaky check we also enable thread fuzzer
if [ "$NUM_TRIES" -gt "1" ]; then
export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
@ -92,7 +108,28 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
fi
sleep 5
# Wait for the server to start, but not for too long.
for _ in {1..100}
do
clickhouse-client --query "SELECT 1" && break
sleep 1
done
# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
# TODO: Check if the password will appear in the logs.
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"
./setup_export_logs.sh
# Unset variables after use
export CONNECTION_PARAMETERS=''
export CLICKHOUSE_CI_LOGS_HOST=''
export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi
attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01

View File

@ -51,8 +51,39 @@ configure
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
./setup_minio.sh stateless # to have a proper environment
# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
echo "
remote_servers:
system_logs_export:
shard:
replica:
secure: 1
user: ci
host: '${CLICKHOUSE_CI_LOGS_HOST}'
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
fi
start
# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
# TODO: Check if the password will appear in the logs.
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"
./setup_export_logs.sh
# Unset variables after use
export CONNECTION_PARAMETERS=''
export CLICKHOUSE_CI_LOGS_HOST=''
export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi
# shellcheck disable=SC2086 # No quotes because I want to split it into words.
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
chmod 777 -R /var/lib/clickhouse

View File

@ -36,6 +36,9 @@ then
elif [ "${ARCH}" = "riscv64" ]
then
DIR="riscv64"
elif [ "${ARCH}" = "s390x" ]
then
DIR="s390x"
fi
elif [ "${OS}" = "FreeBSD" ]
then

View File

@ -190,7 +190,7 @@ These are the schema conversion manipulations you can do with table overrides fo
* Modify [column TTL](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-column-ttl).
* Modify [column compression codec](/docs/en/sql-reference/statements/create/table.md/#codecs).
* Add [ALIAS columns](/docs/en/sql-reference/statements/create/table.md/#alias).
* Add [skipping indexes](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-data_skipping-indexes)
* Add [skipping indexes](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-data_skipping-indexes). Note that you need to enable `use_skip_indexes_if_final` setting to make them work (MaterializedMySQL is using `SELECT ... FINAL` by default)
* Add [projections](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#projections). Note that projection optimizations are
disabled when using `SELECT ... FINAL` (which MaterializedMySQL does by default), so their utility is limited here.
`INDEX ... TYPE hypothesis` as [described in the v21.12 blog post](https://clickhouse.com/blog/en/2021/clickhouse-v21.12-released/)
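To illustrate the skipping-index point, a hedged sketch (database, table, and column names are hypothetical) of adding such an index via a table override and enabling the setting that lets it apply under the implicit `FINAL`:

```sql
CREATE DATABASE mysql_db
ENGINE = MaterializedMySQL('mysql-host:3306', 'db', 'user', 'password')
TABLE OVERRIDE orders
(
    COLUMNS (
        -- skipping index on a hypothetical column
        INDEX customer_idx customer_id TYPE set(1000) GRANULARITY 4
    )
);

-- MaterializedMySQL reads with an implicit FINAL, so the index only
-- participates when this setting is enabled:
SET use_skip_indexes_if_final = 1;
```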

View File

@ -1,4 +1,4 @@
# Approximate Nearest Neighbor Search Indexes [experimental] {#table_engines-ANNIndex}
# Approximate Nearest Neighbor Search Indexes [experimental]
Nearest neighborhood search is the problem of finding the M closest points for a given point in an N-dimensional vector space. The most
straightforward approach to solve this problem is a brute force search where the distance between all points in the vector space and the
@ -17,7 +17,7 @@ In terms of SQL, the nearest neighborhood problem can be expressed as follows:
``` sql
SELECT *
FROM table
FROM table_with_ann_index
ORDER BY Distance(vectors, Point)
LIMIT N
```
@ -32,7 +32,7 @@ An alternative formulation of the nearest neighborhood search problem looks as f
``` sql
SELECT *
FROM table
FROM table_with_ann_index
WHERE Distance(vectors, Point) < MaxDistance
LIMIT N
```
@ -45,12 +45,12 @@ With brute force search, both queries are expensive (linear in the number of poi
`Point` must be computed. To speed this process up, Approximate Nearest Neighbor Search Indexes (ANN indexes) store a compact representation
of the search space (using clustering, search trees, etc.) which allows to compute an approximate answer much quicker (in sub-linear time).
# Creating and Using ANN Indexes
# Creating and Using ANN Indexes {#creating_using_ann_indexes}
Syntax to create an ANN index over an [Array](../../../sql-reference/data-types/array.md) column:
```sql
CREATE TABLE table
CREATE TABLE table_with_ann_index
(
`id` Int64,
`vectors` Array(Float32),
@ -63,7 +63,7 @@ ORDER BY id;
Syntax to create an ANN index over a [Tuple](../../../sql-reference/data-types/tuple.md) column:
```sql
CREATE TABLE table
CREATE TABLE table_with_ann_index
(
`id` Int64,
`vectors` Tuple(Float32[, Float32[, ...]]),
@ -83,7 +83,7 @@ ANN indexes support two types of queries:
``` sql
SELECT *
FROM table
FROM table_with_ann_index
[WHERE ...]
ORDER BY Distance(vectors, Point)
LIMIT N
@ -93,7 +93,7 @@ ANN indexes support two types of queries:
``` sql
SELECT *
FROM table
FROM table_with_ann_index
WHERE Distance(vectors, Point) < MaxDistance
LIMIT N
```
@ -103,7 +103,7 @@ To avoid writing out large vectors, you can use [query
parameters](/docs/en/interfaces/cli.md#queries-with-parameters-cli-queries-with-parameters), e.g.
```bash
clickhouse-client --param_vec='hello' --query="SELECT * FROM table WHERE L2Distance(vectors, {vec: Array(Float32)}) < 1.0"
clickhouse-client --param_vec='hello' --query="SELECT * FROM table_with_ann_index WHERE L2Distance(vectors, {vec: Array(Float32)}) < 1.0"
```
:::
@ -138,7 +138,7 @@ back to a smaller `GRANULARITY` values only in case of problems like excessive m
was specified for ANN indexes, the default value is 100 million.
# Available ANN Indexes
# Available ANN Indexes {#available_ann_indexes}
- [Annoy](/docs/en/engines/table-engines/mergetree-family/annindexes.md#annoy-annoy)
@ -165,7 +165,7 @@ space in random linear surfaces (lines in 2D, planes in 3D etc.).
Syntax to create an Annoy index over an [Array](../../../sql-reference/data-types/array.md) column:
```sql
CREATE TABLE table
CREATE TABLE table_with_annoy_index
(
id Int64,
vectors Array(Float32),
@ -178,7 +178,7 @@ ORDER BY id;
Syntax to create an ANN index over a [Tuple](../../../sql-reference/data-types/tuple.md) column:
```sql
CREATE TABLE table
CREATE TABLE table_with_annoy_index
(
id Int64,
vectors Tuple(Float32[, Float32[, ...]]),
@ -188,23 +188,17 @@ ENGINE = MergeTree
ORDER BY id;
```
Annoy currently supports `L2Distance` and `cosineDistance` as distance function `Distance`. If no distance function was specified during
index creation, `L2Distance` is used as default. Parameter `NumTrees` is the number of trees which the algorithm creates (default if not
specified: 100). Higher values of `NumTree` mean more accurate search results but slower index creation / query times (approximately
linearly) as well as larger index sizes.
Annoy currently supports two distance functions:
- `L2Distance`, also called Euclidean distance, is the length of a line segment between two points in Euclidean space
([Wikipedia](https://en.wikipedia.org/wiki/Euclidean_distance)).
- `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors
([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)).
`L2Distance` is also called Euclidean distance, the Euclidean distance between two points in Euclidean space is the length of a line segment between the two points.
For example: If we have point P(p1,p2), Q(q1,q2), their distance will be d(p,q)
![L2Distance](https://en.wikipedia.org/wiki/Euclidean_distance#/media/File:Euclidean_distance_2d.svg)
For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no
distance function was specified during index creation, `L2Distance` is used as default.
`cosineDistance` also called cosine similarity is a measure of similarity between two non-zero vectors defined in an inner product space. Cosine similarity is the cosine of the angle between the vectors; that is, it is the dot product of the vectors divided by the product of their lengths.
![cosineDistance](https://www.tyrrell4innovation.ca/wp-content/uploads/2021/06/rsz_jenny_du_miword.png)
The Euclidean distance corresponds to the L2-norm of a difference between vectors. The cosine similarity is proportional to the dot product of two vectors and inversely proportional to the product of their magnitudes.
![compare](https://www.researchgate.net/publication/320914786/figure/fig2/AS:558221849841664@1510101868614/The-difference-between-Euclidean-distance-and-cosine-similarity.png)
In one sentence: cosine similarity care only about the angle between them, but do not care about the "distance" we normally think.
![L2 distance](https://www.baeldung.com/wp-content/uploads/sites/4/2020/06/4-1.png)
![cosineDistance](https://www.baeldung.com/wp-content/uploads/sites/4/2020/06/5.png)
Parameter `NumTrees` is the number of trees which the algorithm creates (default if not specified: 100). Higher values of `NumTree` mean
more accurate search results but slower index creation / query times (approximately linearly) as well as larger index sizes.
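Putting the two knobs together, here is a sketch (mirroring the schema of the examples above) of creating an Annoy index with an explicit distance function and `NumTrees`, plus a query shaped so the index can be used:

```sql
CREATE TABLE table_with_annoy_index
(
    id Int64,
    vectors Array(Float32),
    -- 'cosineDistance' and 200 trees are illustrative choices
    INDEX ann_idx vectors TYPE annoy('cosineDistance', 200)
)
ENGINE = MergeTree
ORDER BY id;

-- The ORDER BY Distance(...) LIMIT N shape is what allows the index to kick in.
SELECT id
FROM table_with_annoy_index
ORDER BY cosineDistance(vectors, [0.1, 0.2, 0.3])
LIMIT 10;
```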
:::note
Indexes over columns of type `Array` will generally work faster than indexes on `Tuple` columns. All arrays **must** have same length. Use

View File

@ -323,9 +323,9 @@ clickhouse-client clickhouse://192.168.1.15,192.168.1.25
`clickhouse-client` uses the first existing file of the following:
- Defined in the `--config-file` parameter.
- `./clickhouse-client.xml`
- `~/.clickhouse-client/config.xml`
- `/etc/clickhouse-client/config.xml`
- `./clickhouse-client.xml`, `.yaml`, `.yml`
- `~/.clickhouse-client/config.xml`, `.yaml`, `.yml`
- `/etc/clickhouse-client/config.xml`, `.yaml`, `.yml`
Example of a config file:
@ -342,6 +342,17 @@ Example of a config file:
</config>
```
Or the same config in a YAML format:
```yaml
user: username
password: 'password'
secure: true
openSSL:
client:
caConfig: '/etc/ssl/cert.pem'
```
### Query ID Format {#query-id-format}
In interactive mode `clickhouse-client` shows query ID for every query. By default, the ID is formatted like this:

View File

@ -11,82 +11,83 @@ results of a `SELECT`, and to perform `INSERT`s into a file-backed table.
The supported formats are:
| Format | Input | Output |
|-------------------------------------------------------------------------------------------|------|--------|
| [TabSeparated](#tabseparated) | ✔ | ✔ |
| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ |
| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ |
| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ |
| [TabSeparatedRawWithNames](#tabseparatedrawwithnames) | ✔ | ✔ |
| [TabSeparatedRawWithNamesAndTypes](#tabseparatedrawwithnamesandtypes) | ✔ | ✔ |
| [Template](#format-template) | ✔ | ✔ |
| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ |
| [CSV](#csv) | ✔ | ✔ |
| [CSVWithNames](#csvwithnames) | ✔ | ✔ |
| [CSVWithNamesAndTypes](#csvwithnamesandtypes) | ✔ | ✔ |
| [CustomSeparated](#format-customseparated) | ✔ | ✔ |
| [CustomSeparatedWithNames](#customseparatedwithnames) | ✔ | ✔ |
| [CustomSeparatedWithNamesAndTypes](#customseparatedwithnamesandtypes) | ✔ | ✔ |
| [SQLInsert](#sqlinsert) | ✗ | ✔ |
| [Values](#data-format-values) | ✔ | ✔ |
| [Vertical](#vertical) | ✗ | ✔ |
| [JSON](#json) | ✔ | ✔ |
| [JSONAsString](#jsonasstring) | ✔ | ✗ |
| [JSONStrings](#jsonstrings) | ✔ | ✔ |
| [JSONColumns](#jsoncolumns) | ✔ | ✔ |
| [JSONColumnsWithMetadata](#jsoncolumnsmonoblock)) | ✔ | ✔ |
| [JSONCompact](#jsoncompact) | ✔ | ✔ |
| [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ |
| [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ |
| [JSONEachRow](#jsoneachrow) | ✔ | ✔ |
| [PrettyJSONEachRow](#prettyjsoneachrow) | ✗ | ✔ |
| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ |
| [JSONStringsEachRow](#jsonstringseachrow) | ✔ | ✔ |
| [JSONStringsEachRowWithProgress](#jsonstringseachrowwithprogress) | ✗ | ✔ |
| [JSONCompactEachRow](#jsoncompacteachrow) | ✔ | ✔ |
| [JSONCompactEachRowWithNames](#jsoncompacteachrowwithnames) | ✔ | ✔ |
| [JSONCompactEachRowWithNamesAndTypes](#jsoncompacteachrowwithnamesandtypes) | ✔ | ✔ |
| [JSONCompactStringsEachRow](#jsoncompactstringseachrow) | ✔ | ✔ |
| [JSONCompactStringsEachRowWithNames](#jsoncompactstringseachrowwithnames) | ✔ | ✔ |
| [JSONCompactStringsEachRowWithNamesAndTypes](#jsoncompactstringseachrowwithnamesandtypes) | ✔ | ✔ |
| [JSONObjectEachRow](#jsonobjecteachrow) | ✔ | ✔ |
| [BSONEachRow](#bsoneachrow) | ✔ | ✔ |
| [TSKV](#tskv) | ✔ | ✔ |
| [Pretty](#pretty) | ✗ | ✔ |
| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ |
| [PrettyMonoBlock](#prettymonoblock) | ✗ | ✔ |
| [PrettyNoEscapesMonoBlock](#prettynoescapesmonoblock) | ✗ | ✔ |
| [PrettyCompact](#prettycompact) | ✗ | ✔ |
| [PrettyCompactNoEscapes](#prettycompactnoescapes) | ✗ | ✔ |
| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ |
| [PrettyCompactNoEscapesMonoBlock](#prettycompactnoescapesmonoblock) | ✗ | ✔ |
| [PrettySpace](#prettyspace) | ✗ | ✔ |
| [PrettySpaceNoEscapes](#prettyspacenoescapes) | ✗ | ✔ |
| [PrettySpaceMonoBlock](#prettyspacemonoblock) | ✗ | ✔ |
| [PrettySpaceNoEscapesMonoBlock](#prettyspacenoescapesmonoblock) | ✗ | ✔ |
| [Prometheus](#prometheus) | ✗ | ✔ |
| [Protobuf](#protobuf) | ✔ | ✔ |
| [ProtobufSingle](#protobufsingle) | ✔ | ✔ |
| [Avro](#data-format-avro) | ✔ | ✔ |
| [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ |
| [Parquet](#data-format-parquet) | ✔ | ✔ |
| [ParquetMetadata](#data-format-parquet-metadata) | ✔ | ✗ |
| [Arrow](#data-format-arrow) | ✔ | ✔ |
| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ |
| [ORC](#data-format-orc) | ✔ | ✔ |
| [RowBinary](#rowbinary) | ✔ | ✔ |
| [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✔ |
| [Native](#native) | ✔ | ✔ |
| [Null](#null) | ✗ | ✔ |
| [XML](#xml) | ✗ | ✔ |
| [CapnProto](#capnproto) | ✔ | ✔ |
| [LineAsString](#lineasstring) | ✔ | ✔ |
| [Regexp](#data-format-regexp) | ✔ | ✗ |
| [RawBLOB](#rawblob) | ✔ | ✔ |
| [MsgPack](#msgpack) | ✔ | ✔ |
| [MySQLDump](#mysqldump) | ✔ | ✗ |
| [Markdown](#markdown) | ✗ | ✔ |
|-------------------------------------------------------------------------------------------|------|-------|
| [TabSeparated](#tabseparated) | ✔ | ✔ |
| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ |
| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ |
| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ |
| [TabSeparatedRawWithNames](#tabseparatedrawwithnames) | ✔ | ✔ |
| [TabSeparatedRawWithNamesAndTypes](#tabseparatedrawwithnamesandtypes) | ✔ | ✔ |
| [Template](#format-template) | ✔ | ✔ |
| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ |
| [CSV](#csv) | ✔ | ✔ |
| [CSVWithNames](#csvwithnames) | ✔ | ✔ |
| [CSVWithNamesAndTypes](#csvwithnamesandtypes) | ✔ | ✔ |
| [CustomSeparated](#format-customseparated) | ✔ | ✔ |
| [CustomSeparatedWithNames](#customseparatedwithnames) | ✔ | ✔ |
| [CustomSeparatedWithNamesAndTypes](#customseparatedwithnamesandtypes) | ✔ | ✔ |
| [SQLInsert](#sqlinsert) | ✗ | ✔ |
| [Values](#data-format-values) | ✔ | ✔ |
| [Vertical](#vertical) | ✗ | ✔ |
| [JSON](#json) | ✔ | ✔ |
| [JSONAsString](#jsonasstring) | ✔ | ✗ |
| [JSONStrings](#jsonstrings) | ✔ | ✔ |
| [JSONColumns](#jsoncolumns) | ✔ | ✔ |
| [JSONColumnsWithMetadata](#jsoncolumnsmonoblock)) | ✔ | ✔ |
| [JSONCompact](#jsoncompact) | ✔ | ✔ |
| [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ |
| [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ |
| [JSONEachRow](#jsoneachrow) | ✔ | ✔ |
| [PrettyJSONEachRow](#prettyjsoneachrow) | ✗ | ✔ |
| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ |
| [JSONStringsEachRow](#jsonstringseachrow) | ✔ | ✔ |
| [JSONStringsEachRowWithProgress](#jsonstringseachrowwithprogress) | ✗ | ✔ |
| [JSONCompactEachRow](#jsoncompacteachrow) | ✔ | ✔ |
| [JSONCompactEachRowWithNames](#jsoncompacteachrowwithnames) | ✔ | ✔ |
| [JSONCompactEachRowWithNamesAndTypes](#jsoncompacteachrowwithnamesandtypes) | ✔ | ✔ |
| [JSONCompactStringsEachRow](#jsoncompactstringseachrow) | ✔ | ✔ |
| [JSONCompactStringsEachRowWithNames](#jsoncompactstringseachrowwithnames) | ✔ | ✔ |
| [JSONCompactStringsEachRowWithNamesAndTypes](#jsoncompactstringseachrowwithnamesandtypes) | ✔ | ✔ |
| [JSONObjectEachRow](#jsonobjecteachrow) | ✔ | ✔ |
| [BSONEachRow](#bsoneachrow) | ✔ | ✔ |
| [TSKV](#tskv) | ✔ | ✔ |
| [Pretty](#pretty) | ✗ | ✔ |
| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ |
| [PrettyMonoBlock](#prettymonoblock) | ✗ | ✔ |
| [PrettyNoEscapesMonoBlock](#prettynoescapesmonoblock) | ✗ | ✔ |
| [PrettyCompact](#prettycompact) | ✗ | ✔ |
| [PrettyCompactNoEscapes](#prettycompactnoescapes) | ✗ | ✔ |
| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ |
| [PrettyCompactNoEscapesMonoBlock](#prettycompactnoescapesmonoblock) | ✗ | ✔ |
| [PrettySpace](#prettyspace) | ✗ | ✔ |
| [PrettySpaceNoEscapes](#prettyspacenoescapes) | ✗ | ✔ |
| [PrettySpaceMonoBlock](#prettyspacemonoblock) | ✗ | ✔ |
| [PrettySpaceNoEscapesMonoBlock](#prettyspacenoescapesmonoblock) | ✗ | ✔ |
| [Prometheus](#prometheus) | ✗ | ✔ |
| [Protobuf](#protobuf) | ✔ | ✔ |
| [ProtobufSingle](#protobufsingle) | ✔ | ✔ |
| [Avro](#data-format-avro) | ✔ | ✔ |
| [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ |
| [Parquet](#data-format-parquet) | ✔ | ✔ |
| [ParquetMetadata](#data-format-parquet-metadata) | ✔ | ✗ |
| [Arrow](#data-format-arrow) | ✔ | ✔ |
| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ |
| [ORC](#data-format-orc) | ✔ | ✔ |
| [One](#data-format-one) | ✔ | ✗ |
| [RowBinary](#rowbinary) | ✔ | ✔ |
| [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✔ |
| [Native](#native) | ✔ | ✔ |
| [Null](#null) | ✗ | ✔ |
| [XML](#xml) | ✗ | ✔ |
| [CapnProto](#capnproto) | ✔ | ✔ |
| [LineAsString](#lineasstring) | ✔ | ✔ |
| [Regexp](#data-format-regexp) | ✔ | ✗ |
| [RawBLOB](#rawblob) | ✔ | ✔ |
| [MsgPack](#msgpack) | ✔ | ✔ |
| [MySQLDump](#mysqldump) | ✔ | ✗ |
| [Markdown](#markdown) | ✗ | ✔ |
You can control some format processing parameters with ClickHouse settings. For more information, read the [Settings](/docs/en/operations/settings/settings-formats.md) section.
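For instance, a format processing setting can be overridden for a single query through the `SETTINGS` clause. A minimal sketch (the table name is illustrative):

```sql
-- Use ';' instead of ',' as the delimiter when writing CSV
-- (the table name `hits` is illustrative).
SELECT *
FROM hits
SETTINGS format_csv_delimiter = ';'
FORMAT CSV;
```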
@ -2131,6 +2132,7 @@ To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/t
- [output_format_parquet_row_group_size](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_row_group_size) - row group size in rows while data output. Default value - `1000000`.
- [output_format_parquet_string_as_string](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_string_as_string) - use Parquet String type instead of Binary for String columns. Default value - `false`.
- [input_format_parquet_import_nested](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_import_nested) - allow inserting array of structs into [Nested](/docs/en/sql-reference/data-types/nested-data-structures/index.md) table in Parquet input format. Default value - `false`.
- [input_format_parquet_case_insensitive_column_matching](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_case_insensitive_column_matching) - ignore case when matching Parquet columns with ClickHouse columns. Default value - `false`.
- [input_format_parquet_allow_missing_columns](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_allow_missing_columns) - allow missing columns while reading Parquet data. Default value - `false`.
- [input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for Parquet format. Default value - `false`.
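As a sketch, these settings can also be applied per query; the file path here is illustrative:

```sql
-- Read a Parquet file while tolerating columns that are missing from it
-- (the file path 'data.parquet' is illustrative).
SELECT *
FROM file('data.parquet', Parquet)
SETTINGS input_format_parquet_allow_missing_columns = 1;
```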
@ -2407,6 +2409,34 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT ORC" > {filename.
To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/table-engines/integrations/hdfs.md).
## One {#data-format-one}
A special input format that doesn't read any data from the file and returns only one row with a column of type `UInt8`, named `dummy`, with value `0` (like the `system.one` table).
It can be used with the virtual columns `_file`/`_path` to list all files without reading the actual data.
Example:
Query:
```sql
SELECT _file FROM file('path/to/files/data*', One);
```
Result:
```text
┌─_file────┐
│ data.csv │
└──────────┘
┌─_file──────┐
│ data.jsonl │
└────────────┘
┌─_file────┐
│ data.tsv │
└──────────┘
┌─_file────────┐
│ data.parquet │
└──────────────┘
```
## LineAsString {#lineasstring}
In this format, every line of input data is interpreted as a single string value. This format can only be parsed for a table with a single field of type [String](/docs/en/sql-reference/data-types/string.md). The remaining columns must be set to [DEFAULT](/docs/en/sql-reference/statements/create/table.md/#default) or [MATERIALIZED](/docs/en/sql-reference/statements/create/table.md/#materialized), or omitted.
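A minimal sketch of the round trip (the table name is illustrative):

```sql
CREATE TABLE line_as_string (field String) ENGINE = Memory;
-- Each input line after the format name becomes one String value,
-- quotes and commas included:
INSERT INTO line_as_string FORMAT LineAsString "I love apple", "I love banana", "I love orange";
SELECT * FROM line_as_string;
```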

View File

@ -83,8 +83,8 @@ ClickHouse, Inc. does **not** maintain the tools and libraries listed below and
- Python
- [SQLAlchemy](https://www.sqlalchemy.org)
- [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (uses [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm))
- [pandas](https://pandas.pydata.org)
- [pandahouse](https://github.com/kszucs/pandahouse)
- [PyArrow/Pandas](https://pandas.pydata.org)
- [Ibis](https://github.com/ibis-project/ibis)
- PHP
- [Doctrine](https://www.doctrine-project.org/)
- [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse)

View File

@ -169,7 +169,6 @@ host = '127.0.0.1',
port = 3306,
database = 'test',
connection_pool_size = 8,
on_duplicate_clause = 1,
replace_query = 1
```
@ -185,7 +184,6 @@ replace_query = 1
<port>3306</port>
<database>test</database>
<connection_pool_size>8</connection_pool_size>
<on_duplicate_clause>1</on_duplicate_clause>
<replace_query>1</replace_query>
</mymysql>
</named_collections>

View File

@ -1640,7 +1640,7 @@ Keys for server/client settings:
- verificationMode (default: relaxed) The method for checking the nodes certificates. Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) class. Possible values: `none`, `relaxed`, `strict`, `once`.
- verificationDepth (default: 9) The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value.
- loadDefaultCAFile (default: true) Whether built-in CA certificates for OpenSSL will be used. ClickHouse assumes that built-in CA certificates are in the file `/etc/ssl/cert.pem` (resp. the directory `/etc/ssl/certs`) or in the file (resp. directory) specified by the environment variable `SSL_CERT_FILE` (resp. `SSL_CERT_DIR`).
- cipherList (default: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`) - Supported OpenSSL encryptions.
- cipherList (default: `ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH`) - Supported OpenSSL encryptions.
- cacheSessions (default: false) Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. Acceptable values: `true`, `false`.
- sessionIdContext (default: `${application.name}`) A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. This parameter is always recommended since it helps avoid problems both if the server caches the session and if the client requested caching. Default value: `${application.name}`.
- sessionCacheSize (default: [1024\*20](https://github.com/ClickHouse/boringssl/blob/master/include/openssl/ssl.h#L1978)) The maximum number of sessions that the server caches. A value of 0 means unlimited sessions.

View File

@ -45,9 +45,14 @@ keeper foo bar
- `ls [path]` -- Lists the nodes for the given path (default: cwd)
- `cd [path]` -- Change the working path (default `.`)
- `set <path> <value> [version]` -- Updates the node's value. Only update if version matches (default: -1)
- `create <path> <value>` -- Creates new node
- `create <path> <value> [mode]` -- Creates new node with the set value
- `touch <path>` -- Creates new node with an empty string as value. Doesn't throw an exception if the node already exists
- `get <path>` -- Returns the node's value
- `remove <path>` -- Remove the node
- `rmr <path>` -- Recursively deletes path. Confirmation required
- `flwc <command>` -- Executes four-letter-word command
- `help` -- Prints this message
- `get_stat [path]` -- Returns the node's stat (default `.`)
- `find_super_nodes <threshold> [path]` -- Finds nodes with number of children larger than some threshold for the given path (default `.`)
- `delete_stale_backups` -- Deletes ClickHouse nodes used for backups that are now inactive
- `find_big_family [path] [n]` -- Returns the top n nodes with the biggest family in the subtree (default path = `.` and n = 10)

View File

@ -4,7 +4,7 @@ sidebar_position: 54
sidebar_label: Tuple(T1, T2, ...)
---
# Tuple(t1, T2, …)
# Tuple(T1, T2, …)
A tuple of elements, each having an individual [type](../../sql-reference/data-types/index.md#data_types). A tuple must contain at least one element.
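A quick illustration with the `tuple()` function (the alias is illustrative):

```sql
-- Literal 1 is inferred as UInt8 and 'a' as String,
-- so x has the type Tuple(UInt8, String).
SELECT tuple(1, 'a') AS x, toTypeName(x);
```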

View File

@ -2476,52 +2476,3 @@ Dictionary updates (other than loading at first use) do not block queries. Durin
We recommend periodically updating the dictionaries with the geobase. During an update, generate new files and write them to a separate location. When everything is ready, rename them to the files used by the server.
There are also functions for working with OS identifiers and search engines, but they shouldn't be used.
## Embedded Dictionaries
<SelfManaged />
ClickHouse contains a built-in feature for working with a geobase.
This allows you to:
- Use a region's ID to get its name in the desired language.
- Use a region's ID to get the ID of a city, area, federal district, country, or continent.
- Check whether a region is part of another region.
- Get a chain of parent regions.
All the functions support “translocality,” the ability to simultaneously use different perspectives on region ownership. For more information, see the section “Functions for working with web analytics dictionaries”.
The internal dictionaries are disabled in the default package.
To enable them, uncomment the parameters `path_to_regions_hierarchy_file` and `path_to_regions_names_files` in the server configuration file.
The geobase is loaded from text files.
Place the `regions_hierarchy*.txt` files into the `path_to_regions_hierarchy_file` directory. This configuration parameter must contain the path to the `regions_hierarchy.txt` file (the default regional hierarchy), and the other files (`regions_hierarchy_ua.txt`) must be located in the same directory.
Put the `regions_names_*.txt` files in the `path_to_regions_names_files` directory.
You can also create these files yourself. The file format is as follows:
`regions_hierarchy*.txt`: TabSeparated (no header), columns:
- region ID (`UInt32`)
- parent region ID (`UInt32`)
- region type (`UInt8`): 1 - continent, 3 - country, 4 - federal district, 5 - region, 6 - city; other types do not have values
- population (`UInt32`) — optional column
`regions_names_*.txt`: TabSeparated (no header), columns:
- region ID (`UInt32`)
- region name (`String`) — Can't contain tabs or line feeds, even escaped ones.
A flat array is used for storage in RAM. For this reason, IDs shouldn't exceed a million.
Dictionaries can be updated without restarting the server. However, the set of available dictionaries is not updated.
For updates, the file modification times are checked. If a file has changed, the dictionary is updated.
The interval to check for changes is configured in the `builtin_dictionaries_reload_interval` parameter.
Dictionary updates (other than loading at first use) do not block queries. During updates, queries use the old versions of dictionaries. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries.
We recommend periodically updating the dictionaries with the geobase. During an update, generate new files and write them to a separate location. When everything is ready, rename them to the files used by the server.
There are also functions for working with OS identifiers and search engines, but they shouldn't be used.
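For instance, with a configured geobase the region functions can be chained; the region ID below is illustrative:

```sql
-- Resolve the country of a region and print its name in English
-- (the region ID 213 is illustrative and depends on the loaded geobase).
SELECT regionToName(regionToCountry(toUInt32(213)), 'en');
```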

View File

@ -183,9 +183,8 @@ arrayConcat(arrays)
**Arguments**
- `arrays` Arbitrary number of arguments of [Array](../../sql-reference/data-types/array.md) type.
**Example**
``` sql
SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res

View File

@ -559,6 +559,29 @@ Result:
└────────────────────────────┘
```
## tupleConcat
Combines tuples passed as arguments.
``` sql
tupleConcat(tuples)
```
**Arguments**
- `tuples` Arbitrary number of arguments of [Tuple](../../sql-reference/data-types/tuple.md) type.
**Example**
``` sql
SELECT tupleConcat((1, 2), (3, 4), (true, false)) AS res
```
``` text
┌─res──────────────────┐
│ (1,2,3,4,true,false) │
└──────────────────────┘
```
## Distance functions

View File

@ -11,6 +11,7 @@ Syntax:
``` sql
CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
[IN access_storage_type]
[KEYED BY {user_name | ip_address | client_key | client_key,user_name | client_key,ip_address} | NOT KEYED]
[FOR [RANDOMIZED] INTERVAL number {second | minute | hour | day | week | month | quarter | year}
{MAX { {queries | query_selects | query_inserts | errors | result_rows | result_bytes | read_rows | read_bytes | execution_time} = number } [,...] |

View File

@ -11,6 +11,7 @@ Syntax:
``` sql
CREATE ROLE [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1] [, name2 [ON CLUSTER cluster_name2] ...]
[IN access_storage_type]
[SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [CONST|READONLY|WRITABLE|CHANGEABLE_IN_READONLY] | PROFILE 'profile_name'] [,...]
```
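A sketch of the new clause in use (the role name and storage type are illustrative):

```sql
-- Store the new role in the local_directory access storage
-- (the role name `accountant` is illustrative).
CREATE ROLE accountant IN local_directory;
```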

View File

@ -16,6 +16,7 @@ Syntax:
``` sql
CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluster_name1] ON [db1.]table1|db1.*
[, policy_name2 [ON CLUSTER cluster_name2] ON [db2.]table2|db2.* ...]
[IN access_storage_type]
[FOR SELECT] USING condition
[AS {PERMISSIVE | RESTRICTIVE}]
[TO {role1 [, role2 ...] | ALL | ALL EXCEPT role1 [, role2 ...]}]

View File

@ -12,6 +12,7 @@ Syntax:
``` sql
CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
[, name2 [ON CLUSTER cluster_name2] ...]
[IN access_storage_type]
[SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [CONST|READONLY|WRITABLE|CHANGEABLE_IN_READONLY] | INHERIT 'profile_name'] [,...]
```

View File

@ -14,6 +14,7 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
[, name2 [ON CLUSTER cluster_name2] ...]
[NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name'}]
[HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
[IN access_storage_type]
[DEFAULT ROLE role [,...]]
[DEFAULT DATABASE database | NONE]
[GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]]

View File

@ -49,7 +49,7 @@ Deletes a user.
Syntax:
``` sql
DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
```
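A sketch of the new clause (the user name is illustrative):

```sql
-- Delete the user only from the local_directory access storage
-- (the user name `test_user` is illustrative).
DROP USER IF EXISTS test_user FROM local_directory;
```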
## DROP ROLE
@ -59,7 +59,7 @@ Deletes a role. The deleted role is revoked from all the entities where it was a
Syntax:
``` sql
DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
```
## DROP ROW POLICY
@ -69,7 +69,7 @@ Deletes a row policy. Deleted row policy is revoked from all the entities where
Syntax:
``` sql
DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name]
DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
```
## DROP QUOTA
@ -79,7 +79,7 @@ Deletes a quota. The deleted quota is revoked from all the entities where it was
Syntax:
``` sql
DROP QUOTA [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
DROP QUOTA [IF EXISTS] name [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
```
## DROP SETTINGS PROFILE
@ -89,7 +89,7 @@ Deletes a settings profile. The deleted settings profile is revoked from all the
Syntax:
``` sql
DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
```
## DROP VIEW

View File

@ -11,7 +11,7 @@ Inserts data into a table.
**Syntax**
``` sql
INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
```
You can specify a list of columns to insert using the `(c1, c2, c3)` syntax. You can also use an expression with a column [matcher](../../sql-reference/statements/select/index.md#asterisk) such as `*` and/or [modifiers](../../sql-reference/statements/select/index.md#select-modifiers) such as [APPLY](../../sql-reference/statements/select/index.md#apply-modifier), [EXCEPT](../../sql-reference/statements/select/index.md#except-modifier), [REPLACE](../../sql-reference/statements/select/index.md#replace-modifier).
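For example, a column matcher with a modifier can shorten the insert list; the table and column names below are illustrative:

```sql
-- Insert into every column except `b`
-- (table and column names are illustrative).
INSERT INTO insert_select_testtable (* EXCEPT(b)) VALUES (2, 2);
```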
@ -107,7 +107,7 @@ If table has [constraints](../../sql-reference/statements/create/table.md#constr
**Syntax**
``` sql
INSERT INTO [db.]table [(c1, c2, c3)] SELECT ...
INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] SELECT ...
```
Columns are mapped according to their position in the SELECT clause. However, their names in the SELECT expression and the table for INSERT may differ. If necessary, type casting is performed.
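A minimal sketch of the positional mapping (table and column names are illustrative):

```sql
-- x receives a and y receives b by position, with casting if needed
-- (table and column names are illustrative).
INSERT INTO t2 (x, y) SELECT a, b FROM t1;
```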
@ -126,7 +126,7 @@ To insert a default value instead of `NULL` into a column with not nullable data
**Syntax**
``` sql
INSERT INTO [db.]table [(c1, c2, c3)] FROM INFILE file_name [COMPRESSION type] FORMAT format_name
INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] FROM INFILE file_name [COMPRESSION type] FORMAT format_name
```
Use the syntax above to insert data from a file, or files, stored on the **client** side. `file_name` and `type` are string literals. The input file [format](../../interfaces/formats.md) must be set in the `FORMAT` clause.
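A sketch with illustrative file and table names:

```sql
-- Load a CSV file stored on the client side
-- (the file and table names are illustrative).
INSERT INTO infile_test FROM INFILE 'input.csv' FORMAT CSV;
```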

View File

@ -0,0 +1,32 @@
---
slug: /en/sql-reference/statements/move
sidebar_position: 54
sidebar_label: MOVE
---
# MOVE access entity statement
This statement allows moving an access entity from one access storage to another.
Syntax:
```sql
MOVE {USER, ROLE, QUOTA, SETTINGS PROFILE, ROW POLICY} name1 [, name2, ...] TO access_storage_type
```
Currently, there are five access storages in ClickHouse:
- `local_directory`
- `memory`
- `replicated`
- `users_xml` (read-only)
- `ldap` (read-only)
Examples:
```sql
MOVE USER test TO local_directory
```
```sql
MOVE ROLE test TO memory
```

View File

@ -1,32 +0,0 @@
---
slug: /ru/getting-started/example-datasets/wikistat
sidebar_position: 17
sidebar_label: WikiStat
---
# WikiStat {#wikistat}
See: http://dumps.wikimedia.org/other/pagecounts-raw/
Creating the table:
``` sql
CREATE TABLE wikistat
(
date Date,
time DateTime,
project String,
subproject String,
path String,
hits UInt64,
size UInt64
) ENGINE = MergeTree(date, (path, time), 8192);
```
Loading the data:
``` bash
$ for i in {2007..2016}; do for j in {01..12}; do echo $i-$j >&2; curl -sSL "http://dumps.wikimedia.org/other/pagecounts-raw/$i/$i-$j/" | grep -oE 'pagecounts-[0-9]+-[0-9]+\.gz'; done; done | sort | uniq | tee links.txt
$ cat links.txt | while read link; do wget http://dumps.wikimedia.org/other/pagecounts-raw/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1/')/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1-\2/')/$link; done
$ ls -1 /opt/wikistat/ | grep gz | while read i; do echo $i; gzip -cd /opt/wikistat/$i | ./wikistat-loader --time="$(echo -n $i | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})([0-9]{2})-([0-9]{2})([0-9]{2})([0-9]{2})\.gz/\1-\2-\3 \4-00-00/')" | clickhouse-client --query="INSERT INTO wikistat FORMAT TabSeparated"; done
```

View File

@ -0,0 +1 @@
../../../en/getting-started/example-datasets/wikistat.md

View File

@ -88,7 +88,6 @@ SELECT * FROM s3_engine_table LIMIT 3;
<port>3306</port>
<database>test</database>
<connection_pool_size>8</connection_pool_size>
<on_duplicate_clause>1</on_duplicate_clause>
<replace_query>1</replace_query>
</mymysql>
</named_collections>

View File

@ -1106,7 +1106,7 @@ ClickHouse использует потоки из глобального пул
- verificationMode - The method for checking the node's certificates. Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) class. Acceptable values: `none`, `relaxed`, `strict`, `once`.
- verificationDepth - The maximum length of the verification chain. Verification fails if the certificate chain length exceeds the set value.
- loadDefaultCAFile - Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`.
- cipherList - Supported OpenSSL ciphers. For example, `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`.
- cipherList - Supported OpenSSL ciphers. For example, `ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH`.
- cacheSessions - Enables or disables session caching. Must be used together with `sessionIdContext`. Acceptable values: `true`, `false`.
- sessionIdContext - A unique set of arbitrary characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. Always recommended, since it helps avoid problems both when the server caches the session and when the client requests caching. Default: `${application.name}`.
- sessionCacheSize - The maximum number of sessions that the server caches. Default: 1024\*20. 0 means an unlimited number of sessions.

View File

@ -11,7 +11,7 @@ sidebar_label: INSERT INTO
**Syntax**
``` sql
INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
```
You can specify a list of columns to insert using the `(c1, c2, c3)` syntax. You can also use an expression with an [asterisk](../../sql-reference/statements/select/index.md#asterisk) and/or modifiers such as [APPLY](../../sql-reference/statements/select/index.md#apply-modifier), [EXCEPT](../../sql-reference/statements/select/index.md#except-modifier), [REPLACE](../../sql-reference/statements/select/index.md#replace-modifier).
@ -100,7 +100,7 @@ INSERT INTO t FORMAT TabSeparated
**Syntax**
``` sql
INSERT INTO [db.]table [(c1, c2, c3)] SELECT ...
INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] SELECT ...
```
Columns are mapped according to their position in the SELECT clause. However, their names in the SELECT expression and in the table for INSERT may differ. If necessary, type casting equivalent to the corresponding CAST operator is performed.
@ -120,7 +120,7 @@ INSERT INTO [db.]table [(c1, c2, c3)] SELECT ...
**Syntax**
``` sql
INSERT INTO [db.]table [(c1, c2, c3)] FROM INFILE file_name [COMPRESSION type] FORMAT format_name
INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] FROM INFILE file_name [COMPRESSION type] FORMAT format_name
```
Use this syntax to insert data from a file stored on the **client** side. `file_name` and `type` are specified as string literals. The input file [format](../../interfaces/formats.md) must be set in the `FORMAT` clause.

View File

@ -1,32 +0,0 @@
---
slug: /zh/getting-started/example-datasets/wikistat
sidebar_position: 17
sidebar_label: WikiStat
---
# WikiStat {#wikistat}
See: http://dumps.wikimedia.org/other/pagecounts-raw/
Creating the table:
``` sql
CREATE TABLE wikistat
(
date Date,
time DateTime,
project String,
subproject String,
path String,
hits UInt64,
size UInt64
) ENGINE = MergeTree(date, (path, time), 8192);
```
Loading the data:
``` bash
$ for i in {2007..2016}; do for j in {01..12}; do echo $i-$j >&2; curl -sSL "http://dumps.wikimedia.org/other/pagecounts-raw/$i/$i-$j/" | grep -oE 'pagecounts-[0-9]+-[0-9]+\.gz'; done; done | sort | uniq | tee links.txt
$ cat links.txt | while read link; do wget http://dumps.wikimedia.org/other/pagecounts-raw/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1/')/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1-\2/')/$link; done
$ ls -1 /opt/wikistat/ | grep gz | while read i; do echo $i; gzip -cd /opt/wikistat/$i | ./wikistat-loader --time="$(echo -n $i | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})([0-9]{2})-([0-9]{2})([0-9]{2})([0-9]{2})\.gz/\1-\2-\3 \4-00-00/')" | clickhouse-client --query="INSERT INTO wikistat FORMAT TabSeparated"; done
```

View File

@ -0,0 +1 @@
../../../en/getting-started/example-datasets/wikistat.md

View File

@ -346,9 +346,7 @@ UserID.binURL.bin和EventTime.bin是<font face = "monospace">UserID</font>
- We mark some column values from the primary key columns (<font face = "monospace">UserID</font>, <font face = "monospace">URL</font>) in orange.
These orange-marked column values are the minimum values of each primary key column within each granule. The exception is the last granule (granule 1082 in the diagram above), for which we mark the maximum values.
As we will see below, these orange-marked column values will be the entries of the table's primary index.
These orange-marked column values are the primary key column values of the first row of each granule. As we will see below, these orange-marked column values will be the entries of the table's primary index.
- We number rows starting from 0, to align with ClickHouse's internal row numbering scheme, which is also used for log messages.
:::
@ -1071,13 +1069,6 @@ ClickHouse服务器日志文件中相应的跟踪日志确认了ClickHouse正在
## Using the compound primary key index through projections
<a href="https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#projections" target="_blank">Projections</a> are currently an experimental feature, so we need to tell ClickHouse:
```sql
SET optimize_use_projections = 1;
```
Create the projection on the original table:
```sql
ALTER TABLE hits_UserID_URL
@ -1096,10 +1087,12 @@ ALTER TABLE hits_UserID_URL
:::note
- The projection creates a hidden table whose row order and primary index are based on the projection's given ORDER BY clause
- We use the MATERIALIZE keyword so that the hidden table is immediately populated with all 8.87 million rows of the source table hits_UserID_URL
- The hidden table is not listed by the `SHOW TABLES` statement
- We use the `MATERIALIZE` keyword so that the hidden table is immediately populated with all 8.87 million rows of the source table <a href="https://clickhouse.com/docs/zh/guides/best-practices#%E5%8C%85%E5%90%AB%E4%B8%BB%E9%94%AE%E7%9A%84%E8%A1%A8" target="_blank">hits_UserID_URL</a>
- If new rows are inserted into the source table hits_UserID_URL, those rows are automatically inserted into the hidden table as well
- A query always (syntactically) targets the source table hits_UserID_URL, but if the row order and primary index of the hidden table allow more efficient query execution, that hidden table is used instead
- In effect, the implicitly created hidden table has the same row order and primary index as the secondary table we created explicitly:
- Note that projections do not make queries that use `ORDER BY` more efficient, even if the `ORDER BY` matches the projection's `ORDER BY` clause (see https://github.com/ClickHouse/ClickHouse/issues/47333)
- In effect, the implicitly created hidden table has the same row order and primary index as the <a href="https://clickhouse.com/docs/zh/guides/best-practices#%E9%80%9A%E8%BF%87%E8%BE%85%E5%8A%A9%E8%A1%A8%E4%BD%BF%E7%94%A8%E8%81%94%E5%90%88%E4%B8%BB%E9%94%AE%E7%B4%A2%E5%BC%95" target="_blank">secondary table</a> we created explicitly:
<img src={require('../../../en/guides/best-practices/images/sparse-primary-indexes-12c-1.png').default} class="image"/>
@ -1163,7 +1156,7 @@ ClickHouse服务器日志文件中跟踪日志确认了ClickHouse正在对索引
```
## Removing ineffective primary key columns
## Summary
The primary index of a table with the compound primary key (UserID, URL) is very useful for speeding up queries that filter on UserID. But although the URL column is part of the compound primary key, this index does not provide significant help in speeding up queries that filter on URL.
@ -1176,4 +1169,12 @@ ClickHouse服务器日志文件中跟踪日志确认了ClickHouse正在对索引
However, if the key columns of a compound primary key differ greatly in cardinality, it is beneficial for queries to order the primary key columns by cardinality in ascending order.
The greater the cardinality difference between the key columns, the more important the order of those columns in the primary key. We will demonstrate this in a future article. Stay tuned.
The greater the cardinality difference between the key columns, the more important the order of the columns in the primary key. We will demonstrate this in the next section.
# Ordering key columns efficiently
TODO
# Identifying single rows efficiently
TODO

View File

@ -455,7 +455,7 @@ SSL客户端/服务器配置。
- verificationMode The method for checking the node's certificates. Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) class. Possible values: `none`, `relaxed`, `strict`, `once`.
- verificationDepth The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value.
- loadDefaultCAFile Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`.
- cipherList Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`.
- cipherList Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH`.
- cacheSessions Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. Acceptable values: `true`, `false`.
- sessionIdContext A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. This parameter is always recommended, since it helps avoid problems both if the server caches the session and if the client requested caching. Default value: `${application.name}`.
- sessionCacheSize The maximum number of sessions that the server caches. Default value: 1024\*20. 0 means unlimited sessions.

View File

@ -8,7 +8,7 @@ INSERT INTO 语句主要用于向系统中添加数据.
The basic format of the query:
``` sql
INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
```
You can specify the list of columns to insert in the query, e.g. `[(c1, c2, c3)]`. You can also use an expression with a column [matcher](../../sql-reference/statements/select/index.md#asterisk) such as `*` and/or [modifiers](../../sql-reference/statements/select/index.md#select-modifiers) such as [APPLY](../../sql-reference/statements/select/index.md#apply-modifier), [EXCEPT](../../sql-reference/statements/select/index.md#except-modifier), [REPLACE](../../sql-reference/statements/select/index.md#replace-modifier).
@ -71,7 +71,7 @@ INSERT INTO [db.]table [(c1, c2, c3)] FORMAT format_name data_set
For example, the following query uses the same input format as `INSERT … VALUES` above:
``` sql
INSERT INTO [db.]table [(c1, c2, c3)] FORMAT Values (v11, v12, v13), (v21, v22, v23), ...
INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] FORMAT Values (v11, v12, v13), (v21, v22, v23), ...
```
ClickHouse strips all whitespace characters and one line feed (if present) before the data. So when writing a query, we recommend putting the data on a new line after the input/output format name (this matters if the data begins with whitespace).
@ -93,7 +93,7 @@ INSERT INTO t FORMAT TabSeparated
### Inserting the results of `SELECT` {#inserting-the-results-of-select}
``` sql
INSERT INTO [db.]table [(c1, c2, c3)] SELECT ...
INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] SELECT ...
```
Columns are mapped to the SELECT columns by position, although their names in the SELECT expression and in the INSERT may differ. If necessary, the corresponding type conversion is performed.

View File

@ -1,8 +1,6 @@
#include <unistd.h>
#include <cstdlib>
#include <fcntl.h>
#include <csignal>
#include <ctime>
#include <iostream>
#include <fstream>
#include <iomanip>
@ -18,9 +16,7 @@
#include <Common/Exception.h>
#include <Common/randomSeed.h>
#include <Common/clearPasswordFromCommandLine.h>
#include <Core/Types.h>
#include <IO/ReadBufferFromFileDescriptor.h>
#include <IO/WriteBufferFromFileDescriptor.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
@ -38,8 +34,6 @@
#include <filesystem>
namespace fs = std::filesystem;
/** A tool for evaluating ClickHouse performance.
* The tool emulates a case with a fixed number of simultaneously executing queries.
*/
@ -79,7 +73,6 @@ public:
bool randomize_,
size_t max_iterations_,
double max_time_,
const String & json_path_,
size_t confidence_,
const String & query_id_,
const String & query_to_execute_,
@ -98,7 +91,6 @@ public:
cumulative(cumulative_),
max_iterations(max_iterations_),
max_time(max_time_),
json_path(json_path_),
confidence(confidence_),
query_id(query_id_),
query_to_execute(query_to_execute_),
@ -165,9 +157,6 @@ public:
int main(const std::vector<std::string> &) override
{
if (!json_path.empty() && fs::exists(json_path)) /// Clear file with previous results
fs::remove(json_path);
readQueries();
runBenchmark();
return 0;
@ -197,7 +186,6 @@ private:
bool cumulative;
size_t max_iterations;
double max_time;
String json_path;
size_t confidence;
String query_id;
String query_to_execute;
@ -226,26 +214,23 @@ private:
size_t read_bytes = 0;
size_t result_rows = 0;
size_t result_bytes = 0;
double work_time = 0;
using Sampler = ReservoirSampler<double>;
Sampler sampler {1 << 16};
void add(double seconds, size_t read_rows_inc, size_t read_bytes_inc, size_t result_rows_inc, size_t result_bytes_inc)
void add(double duration, size_t read_rows_inc, size_t read_bytes_inc, size_t result_rows_inc, size_t result_bytes_inc)
{
++queries;
work_time += seconds;
read_rows += read_rows_inc;
read_bytes += read_bytes_inc;
result_rows += result_rows_inc;
result_bytes += result_bytes_inc;
sampler.insert(seconds);
sampler.insert(duration);
}
void clear()
{
queries = 0;
work_time = 0;
read_rows = 0;
read_bytes = 0;
result_rows = 0;
@ -331,10 +316,13 @@ private:
return false;
}
if (delay > 0 && delay_watch.elapsedSeconds() > delay)
double seconds = delay_watch.elapsedSeconds();
if (delay > 0 && seconds > delay)
{
printNumberOfQueriesExecuted(queries_executed);
cumulative ? report(comparison_info_total) : report(comparison_info_per_interval);
cumulative
? report(comparison_info_total, total_watch.elapsedSeconds())
: report(comparison_info_per_interval, seconds);
delay_watch.restart();
}
}
@ -350,16 +338,7 @@ private:
try
{
for (size_t i = 0; i < concurrency; ++i)
{
EntryPtrs connection_entries;
connection_entries.reserve(connections.size());
for (const auto & connection : connections)
connection_entries.emplace_back(std::make_shared<Entry>(
connection->get(ConnectionTimeouts::getTCPTimeoutsWithoutFailover(settings))));
pool.scheduleOrThrowOnError([this, connection_entries]() mutable { thread(connection_entries); });
}
pool.scheduleOrThrowOnError([this]() mutable { thread(); });
}
catch (...)
{
@ -389,21 +368,18 @@ private:
pool.wait();
total_watch.stop();
if (!json_path.empty())
reportJSON(comparison_info_total, json_path);
printNumberOfQueriesExecuted(queries_executed);
report(comparison_info_total);
report(comparison_info_total, total_watch.elapsedSeconds());
}
void thread(EntryPtrs & connection_entries)
void thread()
{
Query query;
/// Randomly choosing connection index
pcg64 generator(randomSeed());
std::uniform_int_distribution<size_t> distribution(0, connection_entries.size() - 1);
std::uniform_int_distribution<size_t> distribution(0, connections.size() - 1);
/// In these threads we do not accept INT signal.
sigset_t sig_set;
@ -423,15 +399,13 @@ private:
extracted = queue.tryPop(query, 100);
if (shutdown || (max_iterations && queries_executed == max_iterations))
{
return;
}
}
const auto connection_index = distribution(generator);
try
{
execute(connection_entries, query, connection_index);
execute(query, connection_index);
consecutive_errors = 0;
}
catch (...)
@ -460,17 +434,18 @@ private:
}
}
void execute(EntryPtrs & connection_entries, Query & query, size_t connection_index)
void execute(Query & query, size_t connection_index)
{
Stopwatch watch;
Connection & connection = **connection_entries[connection_index];
ConnectionPool::Entry entry = connections[connection_index]->get(
ConnectionTimeouts::getTCPTimeoutsWithoutFailover(settings));
if (reconnect)
connection.disconnect();
entry->disconnect();
RemoteQueryExecutor executor(
connection, query, {}, global_context, nullptr, Scalars(), Tables(), query_processing_stage);
*entry, query, {}, global_context, nullptr, Scalars(), Tables(), query_processing_stage);
if (!query_id.empty())
executor.setQueryId(query_id);
@ -485,19 +460,19 @@ private:
executor.finish();
double seconds = (display_client_side_time || progress.elapsed_ns == 0)
double duration = (display_client_side_time || progress.elapsed_ns == 0)
? watch.elapsedSeconds()
: progress.elapsed_ns / 1e9;
std::lock_guard lock(mutex);
size_t info_index = round_robin ? 0 : connection_index;
comparison_info_per_interval[info_index]->add(seconds, progress.read_rows, progress.read_bytes, info.rows, info.bytes);
comparison_info_total[info_index]->add(seconds, progress.read_rows, progress.read_bytes, info.rows, info.bytes);
t_test.add(info_index, seconds);
comparison_info_per_interval[info_index]->add(duration, progress.read_rows, progress.read_bytes, info.rows, info.bytes);
comparison_info_total[info_index]->add(duration, progress.read_rows, progress.read_bytes, info.rows, info.bytes);
t_test.add(info_index, duration);
}
void report(MultiStats & infos)
void report(MultiStats & infos, double seconds)
{
std::lock_guard lock(mutex);
@ -510,8 +485,6 @@ private:
if (0 == info->queries)
return;
double seconds = info->work_time / concurrency;
std::string connection_description = connections[i]->getDescription();
if (round_robin)
{
@ -525,10 +498,10 @@ private:
}
std::cerr
<< connection_description << ", "
<< "queries " << info->queries << ", ";
<< "queries: " << info->queries << ", ";
if (info->errors)
{
std::cerr << "errors " << info->errors << ", ";
std::cerr << "errors: " << info->errors << ", ";
}
std::cerr
<< "QPS: " << (info->queries / seconds) << ", "
@ -567,62 +540,6 @@ private:
}
}
void reportJSON(MultiStats & infos, const std::string & filename)
{
WriteBufferFromFile json_out(filename);
std::lock_guard lock(mutex);
auto print_key_value = [&](auto key, auto value, bool with_comma = true)
{
json_out << double_quote << key << ": " << value << (with_comma ? ",\n" : "\n");
};
auto print_percentile = [&json_out](Stats & info, auto percent, bool with_comma = true)
{
json_out << "\"" << percent << "\": " << info.sampler.quantileNearest(percent / 100.0) << (with_comma ? ",\n" : "\n");
};
json_out << "{\n";
for (size_t i = 0; i < infos.size(); ++i)
{
const auto & info = infos[i];
json_out << double_quote << connections[i]->getDescription() << ": {\n";
json_out << double_quote << "statistics" << ": {\n";
double seconds = info->work_time / concurrency;
print_key_value("QPS", info->queries.load() / seconds);
print_key_value("RPS", info->read_rows / seconds);
print_key_value("MiBPS", info->read_bytes / seconds / 1048576);
print_key_value("RPS_result", info->result_rows / seconds);
print_key_value("MiBPS_result", info->result_bytes / seconds / 1048576);
print_key_value("num_queries", info->queries.load());
print_key_value("num_errors", info->errors, false);
json_out << "},\n";
json_out << double_quote << "query_time_percentiles" << ": {\n";
if (info->queries != 0)
{
for (int percent = 0; percent <= 90; percent += 10)
print_percentile(*info, percent);
print_percentile(*info, 95);
print_percentile(*info, 99);
print_percentile(*info, 99.9);
print_percentile(*info, 99.99, false);
}
json_out << "}\n";
json_out << (i == infos.size() - 1 ? "}\n" : "},\n");
}
json_out << "}\n";
}
public:
~Benchmark() override
@ -675,7 +592,6 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
("iterations,i", value<size_t>()->default_value(0), "amount of queries to be executed")
("timelimit,t", value<double>()->default_value(0.), "stop launch of queries after specified time limit")
("randomize,r", "randomize order of execution")
("json", value<std::string>()->default_value(""), "write final report to specified file in JSON format")
("host,h", value<Strings>()->multitoken(), "list of hosts")
("port", value<Ports>()->multitoken(), "list of ports")
("roundrobin", "Instead of comparing queries for different --host/--port just pick one random --host/--port for every query and send query to it.")
@ -739,7 +655,6 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
options.count("randomize"),
options["iterations"].as<size_t>(),
options["timelimit"].as<double>(),
options["json"].as<std::string>(),
options["confidence"].as<size_t>(),
options["query_id"].as<std::string>(),
options["query"].as<std::string>(),

View File

@ -608,6 +608,8 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
ss << "ALTER TABLE " << getQuotedTable(original_table) << ((partition_name == "'all'") ? " DROP PARTITION ID " : " DROP PARTITION ") << partition_name;
UInt64 num_shards_drop_partition = executeQueryOnCluster(task_table.cluster_push, ss.str(), task_cluster->settings_push, ClusterExecutionMode::ON_EACH_SHARD);
if (num_shards_drop_partition != task_table.cluster_push->getShardCount())
return TaskStatus::Error;
LOG_INFO(log, "Drop partition {} in original table {} has been executed successfully on {} shards of {}",
partition_name, getQuotedTable(original_table), num_shards_drop_partition, task_table.cluster_push->getShardCount());

View File

@ -1,4 +1,6 @@
#include "ICommand.h"
#include <iostream>
namespace DB
{

View File

@ -163,13 +163,15 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
{
ASTPtr res = parseQueryAndMovePosition(
parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth);
/// For insert query with data(INSERT INTO ... VALUES ...), will lead to format fail,
/// should throw exception early and make exception message more readable.
/// For insert query with data(INSERT INTO ... VALUES ...), that will lead to the formatting failure,
/// we should throw an exception early, and make exception message more readable.
if (const auto * insert_query = res->as<ASTInsertQuery>(); insert_query && insert_query->data)
{
throw Exception(DB::ErrorCodes::INVALID_FORMAT_INSERT_QUERY_WITH_DATA,
"Can't format ASTInsertQuery with data, since data will be lost");
}
if (!quiet)
{
if (!backslash)

View File

@ -997,7 +997,9 @@ namespace
{
/// sudo respects limits in /etc/security/limits.conf e.g. open files,
/// that's why we are using it instead of the 'clickhouse su' tool.
command = fmt::format("sudo -u '{}' {}", user, command);
/// By default, sudo resets all the ENV variables, but we should preserve
/// the values from /etc/default/clickhouse sourced in the /etc/init.d/clickhouse file
command = fmt::format("sudo --preserve-env -u '{}' {}", user, command);
}
fmt::print("Will run {}\n", command);

View File

@ -1,5 +1,6 @@
#include "Commands.h"
#include <queue>
#include "KeeperClient.h"
@ -24,8 +25,18 @@ void LSCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) con
else
path = client->cwd;
for (const auto & child : client->zookeeper->getChildren(path))
std::cout << child << " ";
auto children = client->zookeeper->getChildren(path);
std::sort(children.begin(), children.end());
bool need_space = false;
for (const auto & child : children)
{
if (std::exchange(need_space, true))
std::cout << " ";
std::cout << child;
}
std::cout << "\n";
}
@ -115,6 +126,21 @@ void CreateCommand::execute(const ASTKeeperQuery * query, KeeperClient * client)
static_cast<int>(query->args[2].safeGet<Int64>()));
}
bool TouchCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
String arg;
if (!parseKeeperPath(pos, expected, arg))
return false;
node->args.push_back(std::move(arg));
return true;
}
void TouchCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
client->zookeeper->createIfNotExists(client->getAbsolutePath(query->args[0].safeGet<String>()), "");
}
bool GetCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
String arg;
@ -130,6 +156,173 @@ void GetCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) co
std::cout << client->zookeeper->get(client->getAbsolutePath(query->args[0].safeGet<String>())) << "\n";
}
bool GetStatCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
String arg;
if (!parseKeeperPath(pos, expected, arg))
return true;
node->args.push_back(std::move(arg));
return true;
}
void GetStatCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
Coordination::Stat stat;
String path;
if (!query->args.empty())
path = client->getAbsolutePath(query->args[0].safeGet<String>());
else
path = client->cwd;
client->zookeeper->get(path, &stat);
std::cout << "cZxid = " << stat.czxid << "\n";
std::cout << "mZxid = " << stat.mzxid << "\n";
std::cout << "pZxid = " << stat.pzxid << "\n";
std::cout << "ctime = " << stat.ctime << "\n";
std::cout << "mtime = " << stat.mtime << "\n";
std::cout << "version = " << stat.version << "\n";
std::cout << "cversion = " << stat.cversion << "\n";
std::cout << "aversion = " << stat.aversion << "\n";
std::cout << "ephemeralOwner = " << stat.ephemeralOwner << "\n";
std::cout << "dataLength = " << stat.dataLength << "\n";
std::cout << "numChildren = " << stat.numChildren << "\n";
}
bool FindSuperNodes::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
ASTPtr threshold;
if (!ParserUnsignedInteger{}.parse(pos, threshold, expected))
return false;
node->args.push_back(threshold->as<ASTLiteral &>().value);
String path;
if (!parseKeeperPath(pos, expected, path))
path = ".";
node->args.push_back(std::move(path));
return true;
}
void FindSuperNodes::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
auto threshold = query->args[0].safeGet<UInt64>();
auto path = client->getAbsolutePath(query->args[1].safeGet<String>());
Coordination::Stat stat;
client->zookeeper->get(path, &stat);
if (stat.numChildren >= static_cast<Int32>(threshold))
{
std::cout << static_cast<String>(path) << "\t" << stat.numChildren << "\n";
return;
}
auto children = client->zookeeper->getChildren(path);
std::sort(children.begin(), children.end());
for (const auto & child : children)
{
auto next_query = *query;
next_query.args[1] = DB::Field(path / child);
execute(&next_query, client);
}
}
bool DeleteStaleBackups::parse(IParser::Pos & /* pos */, std::shared_ptr<ASTKeeperQuery> & /* node */, Expected & /* expected */) const
{
return true;
}
void DeleteStaleBackups::execute(const ASTKeeperQuery * /* query */, KeeperClient * client) const
{
client->askConfirmation(
"You are going to delete all inactive backups in /clickhouse/backups.",
[client]
{
fs::path backup_root = "/clickhouse/backups";
auto backups = client->zookeeper->getChildren(backup_root);
std::sort(backups.begin(), backups.end());
for (const auto & child : backups)
{
auto backup_path = backup_root / child;
std::cout << "Found backup " << backup_path << ", checking if it's active\n";
String stage_path = backup_path / "stage";
auto stages = client->zookeeper->getChildren(stage_path);
bool is_active = false;
for (const auto & stage : stages)
{
if (startsWith(stage, "alive"))
{
is_active = true;
break;
}
}
if (is_active)
{
std::cout << "Backup " << backup_path << " is active, not going to delete\n";
continue;
}
std::cout << "Backup " << backup_path << " is not active, deleting it\n";
client->zookeeper->removeRecursive(backup_path);
}
});
}
bool FindBigFamily::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
String path;
if (!parseKeeperPath(pos, expected, path))
path = ".";
node->args.push_back(std::move(path));
ASTPtr count;
if (ParserUnsignedInteger{}.parse(pos, count, expected))
node->args.push_back(count->as<ASTLiteral &>().value);
else
node->args.push_back(UInt64(10));
return true;
}
void FindBigFamily::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
auto path = client->getAbsolutePath(query->args[0].safeGet<String>());
auto n = query->args[1].safeGet<UInt64>();
std::vector<std::tuple<Int32, String>> result;
std::queue<fs::path> queue;
queue.push(path);
while (!queue.empty())
{
auto next_path = queue.front();
queue.pop();
auto children = client->zookeeper->getChildren(next_path);
std::transform(children.cbegin(), children.cend(), children.begin(), [&](const String & child) { return next_path / child; });
auto response = client->zookeeper->get(children);
for (size_t i = 0; i < response.size(); ++i)
{
result.emplace_back(response[i].stat.numChildren, children[i]);
queue.push(children[i]);
}
}
std::sort(result.begin(), result.end(), std::greater());
for (UInt64 i = 0; i < std::min(result.size(), static_cast<size_t>(n)); ++i)
std::cout << std::get<1>(result[i]) << "\t" << std::get<0>(result[i]) << "\n";
}
bool RMCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
{
String arg;
@ -170,7 +363,7 @@ bool HelpCommand::parse(IParser::Pos & /* pos */, std::shared_ptr<ASTKeeperQuery
void HelpCommand::execute(const ASTKeeperQuery * /* query */, KeeperClient * /* client */) const
{
for (const auto & pair : KeeperClient::commands)
std::cout << pair.second->getHelpMessage() << "\n";
std::cout << pair.second->generateHelpString() << "\n";
}
bool FourLetterWordCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const

View File

@ -21,6 +21,12 @@ public:
virtual String getName() const = 0;
virtual ~IKeeperClientCommand() = default;
String generateHelpString() const
{
return fmt::vformat(getHelpMessage(), fmt::make_format_args(getName()));
}
};
using Command = std::shared_ptr<IKeeperClientCommand>;
@ -34,7 +40,7 @@ class LSCommand : public IKeeperClientCommand
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override { return "ls [path] -- Lists the nodes for the given path (default: cwd)"; }
String getHelpMessage() const override { return "{} [path] -- Lists the nodes for the given path (default: cwd)"; }
};
class CDCommand : public IKeeperClientCommand
@ -45,7 +51,7 @@ class CDCommand : public IKeeperClientCommand
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override { return "cd [path] -- Change the working path (default `.`)"; }
String getHelpMessage() const override { return "{} [path] -- Change the working path (default `.`)"; }
};
class SetCommand : public IKeeperClientCommand
@ -58,7 +64,7 @@ class SetCommand : public IKeeperClientCommand
String getHelpMessage() const override
{
return "set <path> <value> [version] -- Updates the node's value. Only update if version matches (default: -1)";
return "{} <path> <value> [version] -- Updates the node's value. Only update if version matches (default: -1)";
}
};
@ -70,7 +76,18 @@ class CreateCommand : public IKeeperClientCommand
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override { return "create <path> <value> -- Creates new node"; }
String getHelpMessage() const override { return "{} <path> <value> [mode] -- Creates new node with the set value"; }
};
class TouchCommand : public IKeeperClientCommand
{
String getName() const override { return "touch"; }
bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override { return "{} <path> -- Creates new node with an empty string as value. Doesn't throw an exception if the node already exists"; }
};
class GetCommand : public IKeeperClientCommand
@ -81,9 +98,63 @@ class GetCommand : public IKeeperClientCommand
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override { return "get <path> -- Returns the node's value"; }
String getHelpMessage() const override { return "{} <path> -- Returns the node's value"; }
};
class GetStatCommand : public IKeeperClientCommand
{
String getName() const override { return "get_stat"; }
bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override { return "{} [path] -- Returns the node's stat (default `.`)"; }
};
class FindSuperNodes : public IKeeperClientCommand
{
String getName() const override { return "find_super_nodes"; }
bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override
{
return "{} <threshold> [path] -- Finds nodes with number of children larger than some threshold for the given path (default `.`)";
}
};
class DeleteStaleBackups : public IKeeperClientCommand
{
String getName() const override { return "delete_stale_backups"; }
bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override
{
return "{} -- Deletes ClickHouse nodes used for backups that are now inactive";
}
};
class FindBigFamily : public IKeeperClientCommand
{
String getName() const override { return "find_big_family"; }
bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override
{
return "{} [path] [n] -- Returns the top n nodes with the biggest family in the subtree (default path = `.` and n = 10)";
}
};
class RMCommand : public IKeeperClientCommand
{
String getName() const override { return "rm"; }
@ -92,7 +163,7 @@ class RMCommand : public IKeeperClientCommand
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override { return "remove <path> -- Remove the node"; }
String getHelpMessage() const override { return "{} <path> -- Remove the node"; }
};
class RMRCommand : public IKeeperClientCommand
@ -103,7 +174,7 @@ class RMRCommand : public IKeeperClientCommand
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override { return "rmr <path> -- Recursively deletes path. Confirmation required"; }
String getHelpMessage() const override { return "{} <path> -- Recursively deletes path. Confirmation required"; }
};
class HelpCommand : public IKeeperClientCommand
@ -114,7 +185,7 @@ class HelpCommand : public IKeeperClientCommand
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override { return "help -- Prints this message"; }
String getHelpMessage() const override { return "{} -- Prints this message"; }
};
class FourLetterWordCommand : public IKeeperClientCommand
@ -125,7 +196,7 @@ class FourLetterWordCommand : public IKeeperClientCommand
void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
String getHelpMessage() const override { return "flwc <command> -- Executes four-letter-word command"; }
String getHelpMessage() const override { return "{} <command> -- Executes four-letter-word command"; }
};
}

View File

@ -176,7 +176,12 @@ void KeeperClient::initialize(Poco::Util::Application & /* self */)
std::make_shared<CDCommand>(),
std::make_shared<SetCommand>(),
std::make_shared<CreateCommand>(),
std::make_shared<TouchCommand>(),
std::make_shared<GetCommand>(),
std::make_shared<GetStatCommand>(),
std::make_shared<FindSuperNodes>(),
std::make_shared<DeleteStaleBackups>(),
std::make_shared<FindBigFamily>(),
std::make_shared<RMCommand>(),
std::make_shared<RMRCommand>(),
std::make_shared<HelpCommand>(),

View File

@ -57,6 +57,7 @@ if (BUILD_STANDALONE_KEEPER)
${CMAKE_CURRENT_SOURCE_DIR}/../../src/IO/ReadBuffer.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTPPathHints.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/KeeperTCPHandler.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/TCPServer.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/NotFoundHandler.cpp

View File

@ -150,7 +150,7 @@ int Keeper::run()
}
if (config().hasOption("version"))
{
std::cout << DBMS_NAME << " keeper version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
std::cout << VERSION_NAME << " keeper version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
return 0;
}

View File

@ -389,7 +389,7 @@ int Server::run()
}
if (config().hasOption("version"))
{
std::cout << DBMS_NAME << " server version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
std::cout << VERSION_NAME << " server version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
return 0;
}
return Application::run(); // NOLINT
@ -1650,6 +1650,9 @@ try
database_catalog.initializeAndLoadTemporaryDatabase();
loadMetadataSystem(global_context);
maybeConvertSystemDatabase(global_context);
/// This has to be done before the initialization of system logs,
/// otherwise there is a race condition between the system database initialization
/// and creation of new tables in the database.
startupSystemTables();
/// After attaching system databases we can initialize system log.
global_context->initializeSystemLogs();

View File

@ -317,7 +317,7 @@
<concurrent_threads_soft_limit_ratio_to_cores>0</concurrent_threads_soft_limit_ratio_to_cores>
<!-- Maximum number of concurrent queries. -->
<max_concurrent_queries>100</max_concurrent_queries>
<max_concurrent_queries>1000</max_concurrent_queries>
<!-- Maximum memory usage (resident set size) for server process.
Zero value or unset means default. Default is "max_server_memory_usage_to_ram_ratio" of available physical RAM.

View File

@ -5,6 +5,7 @@ CXXFLAGS = "@RUST_CXXFLAGS@"
[build]
rustflags = @RUSTFLAGS@
rustdocflags = @RUSTFLAGS@
@RUSTCWRAPPER@
[unstable]
@RUST_CARGO_BUILD_STD@

View File

@ -1,4 +0,0 @@
# Just in case ignore any cargo stuff (and just in case someone will run this
# docker build locally with build context using folder root):
target
vendor

rust/.gitignore vendored
View File

@ -1,4 +0,0 @@
# This is for tar --exclude-vcs-ignores (and just in case someone will run
# docker build locally with build context created via tar):
target
vendor

View File

@ -14,6 +14,13 @@ macro(configure_rustc)
set(RUST_CFLAGS "${RUST_CFLAGS} --sysroot ${CMAKE_SYSROOT}")
endif()
if(CCACHE_EXECUTABLE MATCHES "/sccache$")
message(STATUS "Using RUSTC_WRAPPER: ${CCACHE_EXECUTABLE}")
set(RUSTCWRAPPER "rustc-wrapper = \"${CCACHE_EXECUTABLE}\"")
else()
set(RUSTCWRAPPER "")
endif()
set(RUSTFLAGS "[]")
set(RUST_CARGO_BUILD_STD "")
# For more info: https://doc.rust-lang.org/beta/unstable-book/compiler-flags/sanitizer.html#memorysanitizer

View File

@ -418,7 +418,7 @@ void AccessControl::addStoragesFromUserDirectoriesConfig(
String type = key_in_user_directories;
if (size_t bracket_pos = type.find('['); bracket_pos != String::npos)
type.resize(bracket_pos);
if ((type == "users_xml") || (type == "users_config"))
if ((type == "users.xml") || (type == "users_config"))
type = UsersConfigAccessStorage::STORAGE_TYPE;
else if ((type == "local") || (type == "local_directory"))
type = DiskAccessStorage::STORAGE_TYPE;
@ -528,12 +528,14 @@ scope_guard AccessControl::subscribeForChanges(const std::vector<UUID> & ids, co
return changes_notifier->subscribeForChanges(ids, handler);
}
std::optional<UUID> AccessControl::insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists)
bool AccessControl::insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists)
{
auto id = MultipleAccessStorage::insertImpl(entity, replace_if_exists, throw_if_exists);
if (id)
if (MultipleAccessStorage::insertImpl(id, entity, replace_if_exists, throw_if_exists))
{
changes_notifier->sendNotifications();
return id;
return true;
}
return false;
}
bool AccessControl::removeImpl(const UUID & id, bool throw_if_not_exists)

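This hunk is the first instance of a refactoring that runs through all the storage classes below: insertImpl() no longer generates a UUID and returns std::optional<UUID>; the caller now supplies the ID and the method only reports success. A condensed sketch of the before/after shape, with simplified signatures and toy types:

#include <optional>

using UUID = unsigned;
struct Entity {};

static UUID generateRandomID() { static UUID next = 1; return next++; }

struct Storage
{
    // after: identity is chosen by the caller, so backup restore and
    // entity moves can re-insert under the original UUID
    bool insertImpl(const UUID & /*id*/, const Entity & /*entity*/,
                    bool /*replace_if_exists*/, bool /*throw_if_exists*/)
    {
        return true;
    }

    // the old generate-an-ID-here behaviour survives as a thin wrapper;
    // this is essentially what IAccessStorage::insert() does below
    std::optional<UUID> insert(const Entity & entity, bool replace, bool throw_)
    {
        UUID id = generateRandomID();
        if (insertImpl(id, entity, replace, throw_))
            return id;
        return std::nullopt;
    }
};

int main()
{
    Storage s;
    auto id = s.insert(Entity{}, false, true);
    return id ? 0 : 1;
}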
View File

@ -232,7 +232,7 @@ private:
class CustomSettingsPrefixes;
class PasswordComplexityRules;
std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
bool insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
bool removeImpl(const UUID & id, bool throw_if_not_exists) override;
bool updateImpl(const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists) override;

View File

@ -550,12 +550,12 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
return access_denied(ErrorCodes::ACCESS_DENIED,
"{}: Not enough privileges. "
"The required privileges have been granted, but without grant option. "
"To execute this query it's necessary to have grant {} WITH GRANT OPTION",
"To execute this query, it's necessary to have the grant {} WITH GRANT OPTION",
AccessRightsElement{flags, args...}.toStringWithoutOptions());
}
return access_denied(ErrorCodes::ACCESS_DENIED,
"{}: Not enough privileges. To execute this query it's necessary to have grant {}",
"{}: Not enough privileges. To execute this query, it's necessary to have the grant {}",
AccessRightsElement{flags, args...}.toStringWithoutOptions() + (grant_option ? " WITH GRANT OPTION" : ""));
}
@ -756,11 +756,11 @@ bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const
show_error(ErrorCodes::ACCESS_DENIED,
"Not enough privileges. "
"Role {} is granted, but without ADMIN option. "
"To execute this query it's necessary to have the role {} granted with ADMIN option.",
"To execute this query, it's necessary to have the role {} granted with ADMIN option.",
backQuote(*role_name), backQuoteIfNeed(*role_name));
else
show_error(ErrorCodes::ACCESS_DENIED, "Not enough privileges. "
"To execute this query it's necessary to have the role {} granted with ADMIN option.",
"To execute this query, it's necessary to have the role {} granted with ADMIN option.",
backQuoteIfNeed(*role_name));
}

View File

@ -498,20 +498,10 @@ std::optional<std::pair<String, AccessEntityType>> DiskAccessStorage::readNameWi
}
std::optional<UUID> DiskAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
{
UUID id = generateRandomID();
if (insertWithID(id, new_entity, replace_if_exists, throw_if_exists, /* write_on_disk= */ true))
return id;
return std::nullopt;
}
bool DiskAccessStorage::insertWithID(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists, bool write_on_disk)
bool DiskAccessStorage::insertImpl(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
{
std::lock_guard lock{mutex};
return insertNoLock(id, new_entity, replace_if_exists, throw_if_exists, write_on_disk);
return insertNoLock(id, new_entity, replace_if_exists, throw_if_exists, /* write_on_disk = */ true);
}
@ -745,7 +735,7 @@ void DiskAccessStorage::restoreFromBackup(RestorerFromBackup & restorer)
restorer.addDataRestoreTask([this, my_entities = std::move(entities), replace_if_exists, throw_if_exists]
{
for (const auto & [id, entity] : my_entities)
insertWithID(id, entity, replace_if_exists, throw_if_exists, /* write_on_disk= */ true);
insert(id, entity, replace_if_exists, throw_if_exists);
});
}

View File

@ -13,7 +13,7 @@ class AccessChangesNotifier;
class DiskAccessStorage : public IAccessStorage
{
public:
static constexpr char STORAGE_TYPE[] = "local directory";
static constexpr char STORAGE_TYPE[] = "local_directory";
DiskAccessStorage(const String & storage_name_, const String & directory_path_, AccessChangesNotifier & changes_notifier_, bool readonly_, bool allow_backup_);
~DiskAccessStorage() override;
@ -39,7 +39,7 @@ private:
std::vector<UUID> findAllImpl(AccessEntityType type) const override;
AccessEntityPtr readImpl(const UUID & id, bool throw_if_not_exists) const override;
std::optional<std::pair<String, AccessEntityType>> readNameWithTypeImpl(const UUID & id, bool throw_if_not_exists) const override;
std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
bool insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
bool removeImpl(const UUID & id, bool throw_if_not_exists) override;
bool updateImpl(const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists) override;
@ -53,7 +53,6 @@ private:
void listsWritingThreadFunc() TSA_NO_THREAD_SAFETY_ANALYSIS;
void stopListsWritingThread();
bool insertWithID(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists, bool write_on_disk);
bool insertNoLock(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists, bool write_on_disk) TSA_REQUIRES(mutex);
bool updateNoLock(const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists, bool write_on_disk) TSA_REQUIRES(mutex);
bool removeNoLock(const UUID & id, bool throw_if_not_exists, bool write_on_disk) TSA_REQUIRES(mutex);

View File

@ -93,6 +93,17 @@ String IAccessStorage::readName(const UUID & id) const
}
bool IAccessStorage::exists(const std::vector<UUID> & ids) const
{
for (const auto & id : ids)
{
if (!exists(id))
return false;
}
return true;
}
std::optional<String> IAccessStorage::readName(const UUID & id, bool throw_if_not_exists) const
{
if (auto name_and_type = readNameWithType(id, throw_if_not_exists))
@ -167,38 +178,69 @@ UUID IAccessStorage::insert(const AccessEntityPtr & entity)
return *insert(entity, /* replace_if_exists = */ false, /* throw_if_exists = */ true);
}
std::optional<UUID> IAccessStorage::insert(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists)
{
return insertImpl(entity, replace_if_exists, throw_if_exists);
auto id = generateRandomID();
if (insert(id, entity, replace_if_exists, throw_if_exists))
return id;
return std::nullopt;
}
bool IAccessStorage::insert(const DB::UUID & id, const DB::AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists)
{
return insertImpl(id, entity, replace_if_exists, throw_if_exists);
}
std::vector<UUID> IAccessStorage::insert(const std::vector<AccessEntityPtr> & multiple_entities, bool replace_if_exists, bool throw_if_exists)
{
return insert(multiple_entities, /* ids = */ {}, replace_if_exists, throw_if_exists);
}
std::vector<UUID> IAccessStorage::insert(const std::vector<AccessEntityPtr> & multiple_entities, const std::vector<UUID> & ids, bool replace_if_exists, bool throw_if_exists)
{
assert(ids.empty() || (multiple_entities.size() == ids.size()));
if (multiple_entities.empty())
return {};
if (multiple_entities.size() == 1)
{
if (auto id = insert(multiple_entities[0], replace_if_exists, throw_if_exists))
return {*id};
UUID id;
if (!ids.empty())
id = ids[0];
else
id = generateRandomID();
if (insert(id, multiple_entities[0], replace_if_exists, throw_if_exists))
return {id};
return {};
}
std::vector<AccessEntityPtr> successfully_inserted;
try
{
std::vector<UUID> ids;
for (const auto & entity : multiple_entities)
std::vector<UUID> new_ids;
for (size_t i = 0; i < multiple_entities.size(); ++i)
{
if (auto id = insertImpl(entity, replace_if_exists, throw_if_exists))
const auto & entity = multiple_entities[i];
UUID id;
if (!ids.empty())
id = ids[i];
else
id = generateRandomID();
if (insert(id, entity, replace_if_exists, throw_if_exists))
{
successfully_inserted.push_back(entity);
ids.push_back(*id);
new_ids.push_back(id);
}
}
return ids;
return new_ids;
}
catch (Exception & e)
{
@ -244,7 +286,7 @@ std::vector<UUID> IAccessStorage::insertOrReplace(const std::vector<AccessEntity
}
std::optional<UUID> IAccessStorage::insertImpl(const AccessEntityPtr & entity, bool, bool)
bool IAccessStorage::insertImpl(const UUID &, const AccessEntityPtr & entity, bool, bool)
{
if (isReadOnly())
throwReadonlyCannotInsert(entity->getType(), entity->getName());

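The batch overload above threads the same idea through multi-entity inserts: an optional `ids` vector lets the caller pin the UUIDs, while an empty vector keeps the old generate-fresh-IDs behaviour. A self-contained toy mirroring that branch (stand-in types, not the real storage):

#include <cassert>
#include <vector>

using UUID = unsigned;
struct Entity { int payload = 0; };

static UUID generateRandomID() { static UUID next = 100; return next++; }

struct Storage
{
    bool insertOne(UUID, const Entity &) { return true; }

    std::vector<UUID> insert(const std::vector<Entity> & entities,
                             const std::vector<UUID> & ids)
    {
        assert(ids.empty() || ids.size() == entities.size());
        std::vector<UUID> new_ids;
        for (size_t i = 0; i < entities.size(); ++i)
        {
            // same branch as in the diff: take the caller's ID if given
            UUID id = ids.empty() ? generateRandomID() : ids[i];
            if (insertOne(id, entities[i]))
                new_ids.push_back(id);
        }
        return new_ids;
    }
};

int main()
{
    Storage s;
    std::vector<Entity> entities{{1}, {2}};
    auto fresh = s.insert(entities, {});      // fresh UUIDs generated
    auto kept  = s.insert(entities, {7, 8});  // caller-supplied UUIDs preserved
    assert(fresh.size() == 2);
    assert((kept == std::vector<UUID>{7, 8}));
}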
View File

@ -3,6 +3,8 @@
#include <Access/IAccessEntity.h>
#include <Core/Types.h>
#include <Core/UUID.h>
#include <Parsers/IParser.h>
#include <Parsers/parseIdentifierOrStringLiteral.h>
#include <functional>
#include <optional>
#include <vector>
@ -92,6 +94,7 @@ public:
/// Returns whether there is an entity with such identifier in the storage.
virtual bool exists(const UUID & id) const = 0;
bool exists(const std::vector<UUID> & ids) const;
/// Reads an entity. Throws an exception if not found.
template <typename EntityClassT = IAccessEntity>
@ -100,6 +103,9 @@ public:
template <typename EntityClassT = IAccessEntity>
std::shared_ptr<const EntityClassT> read(const String & name, bool throw_if_not_exists = true) const;
template <typename EntityClassT = IAccessEntity>
std::vector<AccessEntityPtr> read(const std::vector<UUID> & ids, bool throw_if_not_exists = true) const;
/// Reads an entity. Returns nullptr if not found.
template <typename EntityClassT = IAccessEntity>
std::shared_ptr<const EntityClassT> tryRead(const UUID & id) const;
@ -128,7 +134,9 @@ public:
/// Throws an exception if the specified name already exists.
UUID insert(const AccessEntityPtr & entity);
std::optional<UUID> insert(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists);
bool insert(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists);
std::vector<UUID> insert(const std::vector<AccessEntityPtr> & multiple_entities, bool replace_if_exists = false, bool throw_if_exists = true);
std::vector<UUID> insert(const std::vector<AccessEntityPtr> & multiple_entities, const std::vector<UUID> & ids, bool replace_if_exists = false, bool throw_if_exists = true);
/// Inserts an entity to the storage. Returns ID of a new entry in the storage.
std::optional<UUID> tryInsert(const AccessEntityPtr & entity);
@ -179,7 +187,7 @@ protected:
virtual std::vector<UUID> findAllImpl(AccessEntityType type) const = 0;
virtual AccessEntityPtr readImpl(const UUID & id, bool throw_if_not_exists) const = 0;
virtual std::optional<std::pair<String, AccessEntityType>> readNameWithTypeImpl(const UUID & id, bool throw_if_not_exists) const;
virtual std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists);
virtual bool insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists);
virtual bool removeImpl(const UUID & id, bool throw_if_not_exists);
virtual bool updateImpl(const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists);
virtual std::optional<UUID> authenticateImpl(const Credentials & credentials, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators, bool throw_if_user_not_exists, bool allow_no_password, bool allow_plaintext_password) const;
@ -240,6 +248,19 @@ std::shared_ptr<const EntityClassT> IAccessStorage::read(const String & name, bo
}
template <typename EntityClassT>
std::vector<AccessEntityPtr> IAccessStorage::read(const std::vector<UUID> & ids, bool throw_if_not_exists) const
{
std::vector<AccessEntityPtr> result;
result.reserve(ids.size());
for (const auto & id : ids)
result.push_back(read<EntityClassT>(id, throw_if_not_exists));
return result;
}
template <typename EntityClassT>
std::shared_ptr<const EntityClassT> IAccessStorage::tryRead(const UUID & id) const
{
@ -265,4 +286,9 @@ std::vector<std::pair<UUID, std::shared_ptr<const EntityClassT>>> IAccessStorage
return entities;
}
inline bool parseAccessStorageName(IParser::Pos & pos, Expected & expected, String & storage_name)
{
return parseIdentifierOrStringLiteral(pos, expected, storage_name);
}
}

View File

@ -63,17 +63,7 @@ AccessEntityPtr MemoryAccessStorage::readImpl(const UUID & id, bool throw_if_not
}
std::optional<UUID> MemoryAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
{
UUID id = generateRandomID();
if (insertWithID(id, new_entity, replace_if_exists, throw_if_exists))
return id;
return std::nullopt;
}
bool MemoryAccessStorage::insertWithID(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
bool MemoryAccessStorage::insertImpl(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
{
std::lock_guard lock{mutex};
return insertNoLock(id, new_entity, replace_if_exists, throw_if_exists);
@ -300,7 +290,7 @@ void MemoryAccessStorage::restoreFromBackup(RestorerFromBackup & restorer)
restorer.addDataRestoreTask([this, my_entities = std::move(entities), replace_if_exists, throw_if_exists]
{
for (const auto & [id, entity] : my_entities)
insertWithID(id, entity, replace_if_exists, throw_if_exists);
insert(id, entity, replace_if_exists, throw_if_exists);
});
}

View File

@ -6,6 +6,7 @@
#include <memory>
#include <mutex>
#include <unordered_map>
#include <boost/container/flat_set.hpp>
namespace DB
@ -22,11 +23,6 @@ public:
const char * getStorageType() const override { return STORAGE_TYPE; }
/// Inserts an entity with a specified ID.
/// If `replace_if_exists == true` it can replace an existing entry with such ID and also remove an existing entry
/// with such name & type.
bool insertWithID(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists);
/// Removes all entities except the specified list `ids_to_keep`.
/// The function skips IDs not contained in the storage.
void removeAllExcept(const std::vector<UUID> & ids_to_keep);
@ -44,7 +40,7 @@ private:
std::optional<UUID> findImpl(AccessEntityType type, const String & name) const override;
std::vector<UUID> findAllImpl(AccessEntityType type) const override;
AccessEntityPtr readImpl(const UUID & id, bool throw_if_not_exists) const override;
std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
bool insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
bool removeImpl(const UUID & id, bool throw_if_not_exists) override;
bool updateImpl(const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists) override;

View File

@ -16,6 +16,7 @@ namespace ErrorCodes
{
extern const int ACCESS_ENTITY_ALREADY_EXISTS;
extern const int ACCESS_STORAGE_FOR_INSERTION_NOT_FOUND;
extern const int ACCESS_ENTITY_NOT_FOUND;
}
using Storage = IAccessStorage;
@ -178,6 +179,91 @@ ConstStoragePtr MultipleAccessStorage::getStorage(const UUID & id) const
return const_cast<MultipleAccessStorage *>(this)->getStorage(id);
}
StoragePtr MultipleAccessStorage::findStorageByName(const DB::String & storage_name)
{
auto storages = getStoragesInternal();
for (const auto & storage : *storages)
{
if (storage->getStorageName() == storage_name)
return storage;
}
return nullptr;
}
ConstStoragePtr MultipleAccessStorage::findStorageByName(const DB::String & storage_name) const
{
return const_cast<MultipleAccessStorage *>(this)->findStorageByName(storage_name);
}
StoragePtr MultipleAccessStorage::getStorageByName(const DB::String & storage_name)
{
auto storage = findStorageByName(storage_name);
if (storage)
return storage;
throw Exception(ErrorCodes::ACCESS_ENTITY_NOT_FOUND, "Access storage with name {} is not found", storage_name);
}
ConstStoragePtr MultipleAccessStorage::getStorageByName(const DB::String & storage_name) const
{
return const_cast<MultipleAccessStorage *>(this)->getStorageByName(storage_name);
}
StoragePtr MultipleAccessStorage::findExcludingStorage(AccessEntityType type, const DB::String & name, DB::MultipleAccessStorage::StoragePtr exclude) const
{
auto storages = getStoragesInternal();
for (const auto & storage : *storages)
{
if (storage == exclude)
continue;
if (storage->find(type, name))
return storage;
}
return nullptr;
}
void MultipleAccessStorage::moveAccessEntities(const std::vector<UUID> & ids, const String & source_storage_name, const String & destination_storage_name)
{
auto source_storage = getStorageByName(source_storage_name);
auto destination_storage = getStorageByName(destination_storage_name);
auto to_move = source_storage->read(ids);
bool need_rollback = false;
try
{
source_storage->remove(ids);
need_rollback = true;
destination_storage->insert(to_move, ids);
}
catch (Exception & e)
{
String message;
bool need_comma = false;
for (const auto & entity : to_move)
{
if (std::exchange(need_comma, true))
message += ", ";
message += entity->formatTypeWithName();
}
e.addMessage("while moving {} from {} to {}", message, source_storage_name, destination_storage_name);
if (need_rollback)
source_storage->insert(to_move, ids);
throw;
}
}
AccessEntityPtr MultipleAccessStorage::readImpl(const UUID & id, bool throw_if_not_exists) const
{
if (auto storage = findStorage(id))
@ -245,7 +331,7 @@ void MultipleAccessStorage::reload(ReloadMode reload_mode)
}
std::optional<UUID> MultipleAccessStorage::insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists)
bool MultipleAccessStorage::insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists)
{
std::shared_ptr<IAccessStorage> storage_for_insertion;
@ -268,13 +354,14 @@ std::optional<UUID> MultipleAccessStorage::insertImpl(const AccessEntityPtr & en
getStorageName());
}
auto id = storage_for_insertion->insert(entity, replace_if_exists, throw_if_exists);
if (id)
if (storage_for_insertion->insert(id, entity, replace_if_exists, throw_if_exists))
{
std::lock_guard lock{mutex};
ids_cache.set(*id, storage_for_insertion);
ids_cache.set(id, storage_for_insertion);
return true;
}
return id;
return false;
}

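moveAccessEntities() above implements a move as read, remove-from-source, insert-into-destination under the original IDs, restoring the entities into the source if the second step throws. A minimal sketch of that rollback pattern with toy map-based storages (not the real classes):

#include <map>
#include <utility>
#include <vector>

using UUID = unsigned;
using Entity = int;
using Storage = std::map<UUID, Entity>;

void moveEntities(Storage & source, Storage & destination, const std::vector<UUID> & ids)
{
    // read everything up front so a rollback can restore it
    std::vector<std::pair<UUID, Entity>> to_move;
    for (UUID id : ids)
        to_move.emplace_back(id, source.at(id));

    bool need_rollback = false;
    try
    {
        for (UUID id : ids)
            source.erase(id);
        need_rollback = true;                 // source is now missing the entities
        for (const auto & [id, entity] : to_move)
            destination.emplace(id, entity);  // same IDs on the destination side
    }
    catch (...)
    {
        if (need_rollback)
            for (const auto & [id, entity] : to_move)
                source.emplace(id, entity);   // put them back under the original IDs
        throw;
    }
}

int main()
{
    Storage a{{1, 10}, {2, 20}}, b;
    moveEntities(a, b, {1, 2});
    return b.size() == 2 ? 0 : 1;
}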
View File

@ -41,6 +41,16 @@ public:
ConstStoragePtr getStorage(const UUID & id) const;
StoragePtr getStorage(const UUID & id);
ConstStoragePtr findStorageByName(const String & storage_name) const;
StoragePtr findStorageByName(const String & storage_name);
ConstStoragePtr getStorageByName(const String & storage_name) const;
StoragePtr getStorageByName(const String & storage_name);
/// Search for an access entity storage, excluding one. Returns nullptr if not found.
StoragePtr findExcludingStorage(AccessEntityType type, const String & name, StoragePtr exclude) const;
void moveAccessEntities(const std::vector<UUID> & ids, const String & source_storage_name, const String & destination_storage_name);
bool exists(const UUID & id) const override;
bool isBackupAllowed() const override;
@ -53,7 +63,7 @@ protected:
std::vector<UUID> findAllImpl(AccessEntityType type) const override;
AccessEntityPtr readImpl(const UUID & id, bool throw_if_not_exists) const override;
std::optional<std::pair<String, AccessEntityType>> readNameWithTypeImpl(const UUID & id, bool throw_if_not_exists) const override;
std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
bool insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
bool removeImpl(const UUID & id, bool throw_if_not_exists) override;
bool updateImpl(const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists) override;
std::optional<UUID> authenticateImpl(const Credentials & credentials, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators, bool throw_if_user_not_exists, bool allow_no_password, bool allow_plaintext_password) const override;
@ -65,6 +75,8 @@ private:
std::shared_ptr<const Storages> nested_storages TSA_GUARDED_BY(mutex);
mutable CacheBase<UUID, Storage> ids_cache TSA_GUARDED_BY(mutex);
mutable std::mutex mutex;
mutable std::mutex move_mutex;
};
}

View File

@ -108,17 +108,7 @@ static void retryOnZooKeeperUserError(size_t attempts, Func && function)
}
}
std::optional<UUID> ReplicatedAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
{
const UUID id = generateRandomID();
if (insertWithID(id, new_entity, replace_if_exists, throw_if_exists))
return id;
return std::nullopt;
}
bool ReplicatedAccessStorage::insertWithID(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
bool ReplicatedAccessStorage::insertImpl(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists)
{
const AccessEntityTypeInfo type_info = AccessEntityTypeInfo::get(new_entity->getType());
const String & name = new_entity->getName();
@ -619,7 +609,7 @@ AccessEntityPtr ReplicatedAccessStorage::tryReadEntityFromZooKeeper(const zkutil
void ReplicatedAccessStorage::setEntityNoLock(const UUID & id, const AccessEntityPtr & entity)
{
LOG_DEBUG(getLogger(), "Setting id {} to entity named {}", toString(id), entity->getName());
memory_storage.insertWithID(id, entity, /* replace_if_exists= */ true, /* throw_if_exists= */ false);
memory_storage.insert(id, entity, /* replace_if_exists= */ true, /* throw_if_exists= */ false);
}
@ -711,7 +701,7 @@ void ReplicatedAccessStorage::restoreFromBackup(RestorerFromBackup & restorer)
restorer.addDataRestoreTask([this, my_entities = std::move(entities), replace_if_exists, throw_if_exists]
{
for (const auto & [id, entity] : my_entities)
insertWithID(id, entity, replace_if_exists, throw_if_exists);
insert(id, entity, replace_if_exists, throw_if_exists);
});
}

View File

@ -46,11 +46,10 @@ private:
std::unique_ptr<ThreadFromGlobalPool> watching_thread;
std::shared_ptr<ConcurrentBoundedQueue<UUID>> watched_queue;
std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
bool insertImpl(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists) override;
bool removeImpl(const UUID & id, bool throw_if_not_exists) override;
bool updateImpl(const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists) override;
bool insertWithID(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, bool throw_if_exists);
bool insertZooKeeper(const zkutil::ZooKeeperPtr & zookeeper, const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists);
bool removeZooKeeper(const zkutil::ZooKeeperPtr & zookeeper, const UUID & id, bool throw_if_not_exists);
bool updateZooKeeper(const zkutil::ZooKeeperPtr & zookeeper, const UUID & id, const UpdateFunc & update_func, bool throw_if_not_exists);

View File

@ -20,7 +20,7 @@ class UsersConfigAccessStorage : public IAccessStorage
{
public:
static constexpr char STORAGE_TYPE[] = "users.xml";
static constexpr char STORAGE_TYPE[] = "users_xml";
UsersConfigAccessStorage(const String & storage_name_, AccessControl & access_control_, bool allow_backup_);
~UsersConfigAccessStorage() override;

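Together with the DiskAccessStorage change above ("local directory" to "local_directory"), this rename turns the storage type constants into identifier-safe tokens, while the AccessControl hunk earlier keeps accepting the legacy config spellings. A sketch of the implied mapping (a hypothetical helper, grounded in the comparisons in AccessControl::addStoragesFromUserDirectoriesConfig):

#include <string>

// legacy config keys on the left, canonical STORAGE_TYPE identifiers on the right
std::string normalizeStorageType(const std::string & type)
{
    if (type == "users.xml" || type == "users_config")
        return "users_xml";        // UsersConfigAccessStorage::STORAGE_TYPE
    if (type == "local" || type == "local_directory")
        return "local_directory";  // DiskAccessStorage::STORAGE_TYPE
    return type;
}

int main()
{
    return normalizeStorageType("users.xml") == "users_xml" ? 0 : 1;
}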
View File

@ -240,7 +240,7 @@ public:
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
const AggregateFunctionForEachData & state = data(place);
writeBinary(state.dynamic_array_size, buf);
writeBinaryLittleEndian(state.dynamic_array_size, buf);
const char * nested_state = state.array_of_aggregate_datas;
for (size_t i = 0; i < state.dynamic_array_size; ++i)
@ -255,7 +255,7 @@ public:
AggregateFunctionForEachData & state = data(place);
size_t new_size = 0;
readBinary(new_size, buf);
readBinaryLittleEndian(new_size, buf);
ensureAggregateData(place, new_size, *arena);

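Switching the ForEach combinator's state (de)serialization to the explicit little-endian helpers keeps aggregate state portable across architectures: on a big-endian host such as s390x, whose builds are added elsewhere in this merge, a raw write of the size field would otherwise produce the opposite byte order from x86. A sketch of what such a helper has to do (assumed shape, not the real implementation):

#include <bit>
#include <cstdint>

// Pin the on-wire representation to little-endian regardless of the host.
uint64_t toLittleEndian(uint64_t x)
{
    if constexpr (std::endian::native == std::endian::big)
        return __builtin_bswap64(x);  // swap on big-endian hosts (e.g. s390x)
    return x;                         // no-op on little-endian hosts (e.g. x86-64)
}

int main()
{
    return toLittleEndian(1) != 0 ? 0 : 1;
}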
Some files were not shown because too many files have changed in this diff.