Merge branch 'master' into mvcc_prototype

Alexander Tokmakov 2022-04-04 14:24:23 +02:00
commit a2167f12b8
107 changed files with 1884 additions and 700 deletions


@ -16,7 +16,6 @@ Checks: '-*,
modernize-make-unique,
modernize-raw-string-literal,
modernize-redundant-void-arg,
modernize-replace-auto-ptr,
modernize-replace-random-shuffle,
modernize-use-bool-literals,
modernize-use-nullptr,


@ -947,6 +947,34 @@ jobs:
docker rm -f "$(docker ps -a -q)" ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
DockerServerImages:
needs:
- BuilderDebRelease
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
with:
fetch-depth: 0 # otherwise we will have no version info
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head
python3 docker_server.py --release-type head --no-ubuntu \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
sudo rm -fr "$TEMP_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
BuilderReport:


@ -998,6 +998,34 @@ jobs:
docker rm -f "$(docker ps -a -q)" ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
DockerServerImages:
needs:
- BuilderDebRelease
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
with:
fetch-depth: 0 # otherwise we will have no version info
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push
python3 docker_server.py --release-type head --no-push --no-ubuntu \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
sudo rm -fr "$TEMP_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
BuilderReport:
@ -3138,6 +3166,7 @@ jobs:
needs:
- StyleCheck
- DockerHubPush
- DockerServerImages
- CheckLabels
- BuilderReport
- FastTest


@ -36,3 +36,28 @@ jobs:
overwrite: true
tag: ${{ github.ref }}
file_glob: true
############################################################################################
##################################### Docker images #######################################
############################################################################################
DockerServerImages:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
with:
fetch-depth: 0 # otherwise we will have no version info
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type auto
python3 docker_server.py --release-type auto --no-ubuntu \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
sudo rm -fr "$TEMP_PATH"


@ -16,6 +16,7 @@ if(NOT AWK_PROGRAM)
endif()
set(KRB5_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/krb5/src")
set(KRB5_ET_BIN_DIR "${CMAKE_CURRENT_BINARY_DIR}/include_private")
set(ALL_SRCS
"${KRB5_SOURCE_DIR}/util/et/et_name.c"
@ -90,7 +91,6 @@ set(ALL_SRCS
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/get_tkt_flags.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_allowable_enctypes.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealiov.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/canon_name.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_cred.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_sec_context.c"
@ -143,11 +143,12 @@ set(ALL_SRCS
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer_set.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_set.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_token.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_err_generic.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_major_status.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_seqstate.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_errmap.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_buffer.c"
"${KRB5_ET_BIN_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c"
"${KRB5_ET_BIN_DIR}/lib/gssapi/generic/gssapi_err_generic.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/spnego/spnego_mech.c"
"${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_util.c"
@ -256,8 +257,8 @@ set(ALL_SRCS
"${KRB5_SOURCE_DIR}/util/profile/prof_parse.c"
"${KRB5_SOURCE_DIR}/util/profile/prof_get.c"
"${KRB5_SOURCE_DIR}/util/profile/prof_set.c"
"${KRB5_SOURCE_DIR}/util/profile/prof_err.c"
"${KRB5_SOURCE_DIR}/util/profile/prof_init.c"
"${KRB5_ET_BIN_DIR}/util/profile/prof_err.c"
"${KRB5_SOURCE_DIR}/lib/krb5/krb/fwd_tgt.c"
"${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_creds.c"
"${KRB5_SOURCE_DIR}/lib/krb5/krb/fast.c"
@ -450,13 +451,12 @@ set(ALL_SRCS
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.c"
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.c"
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.c"
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.c"
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.c"
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.c"
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/k5e1_err.c"
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kdb5_err.c"
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/asn1_err.c"
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb5_err.c"
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb524_err.c"
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kv5m_err.c"
"${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_base.c"
@ -473,7 +473,7 @@ set(ALL_SRCS
)
add_custom_command(
OUTPUT "${KRB5_SOURCE_DIR}/util/et/compile_et"
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/compile_et"
COMMAND /bin/sh
./config_script
./compile_et.sh
@ -481,50 +481,17 @@ add_custom_command(
${AWK_PROGRAM}
sed
>
compile_et
${CMAKE_CURRENT_BINARY_DIR}/compile_et
DEPENDS "${KRB5_SOURCE_DIR}/util/et/compile_et.sh" "${KRB5_SOURCE_DIR}/util/et/config_script"
WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/util/et"
)
file(GLOB_RECURSE ET_FILES
"${KRB5_SOURCE_DIR}/*.et"
)
function(preprocess_et out_var)
set(result)
foreach(in_f ${ARGN})
string(REPLACE
.et
.c
F_C
${in_f}
)
string(REPLACE
.et
.h
F_H
${in_f}
)
get_filename_component(ET_PATH ${in_f} DIRECTORY)
add_custom_command(OUTPUT ${F_C} ${F_H}
COMMAND perl "${KRB5_SOURCE_DIR}/util/et/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${in_f}
DEPENDS ${in_f} "${KRB5_SOURCE_DIR}/util/et/compile_et"
WORKING_DIRECTORY ${ET_PATH}
VERBATIM
)
list(APPEND result ${F_C})
endforeach()
set(${out_var} "${result}" PARENT_SCOPE)
endfunction()
add_custom_command(
OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h"
OUTPUT "${KRB5_ET_BIN_DIR}/error_map.h"
COMMAND perl
-I../../../util
../../../util/gen-map.pl
-oerror_map.h
-o${KRB5_ET_BIN_DIR}/error_map.h
NAME=gsserrmap
KEY=OM_uint32
VALUE=char*
@ -536,22 +503,21 @@ add_custom_command(
add_custom_target(
ERROR_MAP_H
DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h"
DEPENDS "${KRB5_ET_BIN_DIR}/error_map.h"
VERBATIM
)
add_custom_command(
OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h"
COMMAND perl -w -I../../../util ../../../util/gen.pl bimap errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp
OUTPUT "${KRB5_ET_BIN_DIR}/errmap.h"
COMMAND perl -w -I../../../util ../../../util/gen.pl bimap ${KRB5_ET_BIN_DIR}/errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp
WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/lib/gssapi/generic"
)
add_custom_target(
ERRMAP_H
DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h"
DEPENDS "${KRB5_ET_BIN_DIR}/errmap.h"
VERBATIM
)
add_custom_target(
KRB_5_H
DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h"
@ -567,7 +533,40 @@ add_dependencies(
KRB_5_H
)
preprocess_et(processed_et_files ${ET_FILES})
#
# Generate error tables
#
function(preprocess_et et_path)
string(REPLACE .et .c F_C ${et_path})
string(REPLACE .et .h F_H ${et_path})
get_filename_component(et_dir ${et_path} DIRECTORY)
get_filename_component(et_name ${et_path} NAME_WLE)
add_custom_command(OUTPUT ${F_C} ${F_H} ${KRB5_ET_BIN_DIR}/${et_name}.h
COMMAND perl "${CMAKE_CURRENT_BINARY_DIR}/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${et_path}
# for #include w/o path (via -iquote)
COMMAND ${CMAKE_COMMAND} -E create_symlink ${F_H} ${KRB5_ET_BIN_DIR}/${et_name}.h
DEPENDS ${et_path} "${CMAKE_CURRENT_BINARY_DIR}/compile_et"
WORKING_DIRECTORY ${et_dir}
VERBATIM
)
endfunction()
function(generate_error_tables)
file(GLOB_RECURSE ET_FILES "${KRB5_SOURCE_DIR}/*.et")
foreach(et_path ${ET_FILES})
string(REPLACE ${KRB5_SOURCE_DIR} ${KRB5_ET_BIN_DIR} et_bin_path ${et_path})
string(REPLACE / _ et_target_name ${et_path})
get_filename_component(et_bin_dir ${et_bin_path} DIRECTORY)
add_custom_command(OUTPUT ${et_bin_path}
COMMAND ${CMAKE_COMMAND} -E make_directory ${et_bin_dir}
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${et_path} ${et_bin_path}
VERBATIM
)
preprocess_et(${et_bin_path})
endforeach()
endfunction()
generate_error_tables()
if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
add_custom_command(
@ -634,12 +633,12 @@ file(MAKE_DIRECTORY
SET(KRBHDEP
"${KRB5_SOURCE_DIR}/include/krb5/krb5.hin"
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.h"
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.h"
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.h"
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.h"
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.h"
"${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.h"
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb5_err.h"
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/k5e1_err.h"
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kdb5_err.h"
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kv5m_err.h"
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb524_err.h"
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/asn1_err.h"
)
# cmake < 3.18 does not have 'cat' command
@ -656,6 +655,11 @@ target_include_directories(_krb5 SYSTEM BEFORE PUBLIC
"${CMAKE_CURRENT_BINARY_DIR}/include"
)
target_compile_options(_krb5 PRIVATE
# For '#include "file.h"'
-iquote "${CMAKE_CURRENT_BINARY_DIR}/include_private"
)
target_include_directories(_krb5 PRIVATE
"${CMAKE_CURRENT_BINARY_DIR}/include_private" # For autoconf.h and other generated headers.
${KRB5_SOURCE_DIR}


@ -20,7 +20,7 @@ ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml
EnvironmentFile=-/etc/default/clickhouse
LimitCORE=infinity
LimitNOFILE=500000
CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE
CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE
[Install]
# ClickHouse should not start from the rescue shell (rescue.target).

docker/keeper/Dockerfile (new file, 72 lines)

@ -0,0 +1,72 @@
FROM ubuntu:20.04 AS glibc-donor
ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
&& case $arch in \
amd64) rarch=x86_64 ;; \
arm64) rarch=aarch64 ;; \
esac \
&& ln -s "${rarch}-linux-gnu" /lib/linux-gnu
FROM alpine
ENV LANG=en_US.UTF-8 \
LANGUAGE=en_US:en \
LC_ALL=en_US.UTF-8 \
TZ=UTC \
CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml
COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/
COPY --from=glibc-donor /etc/nsswitch.conf /etc/
COPY entrypoint.sh /entrypoint.sh
RUN arch=${TARGETARCH:-amd64} \
&& case $arch in \
amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \
arm64) ln -sf /lib/ld-2.31.so /lib/ld-linux-aarch64.so.1 ;; \
esac
ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
ARG VERSION="22.4.1.917"
ARG PACKAGES="clickhouse-keeper"
# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
# can't do chown and owners of mounted volumes should be configured externally.
# We do that in advance at the beginning of the Dockerfile, before any packages are
# installed, to prevent those uid / gid being picked up by some unrelated software.
# The same uid / gid (101) is used both for alpine and ubuntu.
ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
&& for package in ${PACKAGES}; do \
{ \
{ echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" -O "/tmp/${package}-${VERSION}-${arch}.tgz" \
&& tar xvzf "/tmp/${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / ; \
} || \
{ echo "Fallback to ${REPOSITORY}/${package}-${VERSION}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}.tgz" -O "/tmp/${package}-${VERSION}.tgz" \
&& tar xvzf "/tmp/${package}-${VERSION}.tgz" --strip-components=2 -C / ; \
} ; \
} || exit 1 \
; done \
&& rm /tmp/*.tgz /install -r \
&& addgroup -S -g 101 clickhouse \
&& adduser -S -h /var/lib/clickhouse -s /bin/bash -G clickhouse -g "ClickHouse keeper" -u 101 clickhouse \
&& mkdir -p /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper \
&& chown clickhouse:clickhouse /var/lib/clickhouse \
&& chown root:clickhouse /var/log/clickhouse-keeper \
&& chmod +x /entrypoint.sh \
&& apk add --no-cache su-exec bash tzdata \
&& cp /usr/share/zoneinfo/UTC /etc/localtime \
&& echo "UTC" > /etc/timezone \
&& chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper
EXPOSE 2181 10181 44444
VOLUME /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper
ENTRYPOINT ["/entrypoint.sh"]


@ -0,0 +1 @@
Dockerfile


@ -0,0 +1,93 @@
#!/bin/bash
set +x
set -eo pipefail
shopt -s nullglob
DO_CHOWN=1
if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
DO_CHOWN=0
fi
CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
# support --user
if [ "$(id -u)" = "0" ]; then
USER=$CLICKHOUSE_UID
GROUP=$CLICKHOUSE_GID
if command -v gosu &> /dev/null; then
gosu="gosu $USER:$GROUP"
elif command -v su-exec &> /dev/null; then
gosu="su-exec $USER:$GROUP"
else
echo "No gosu/su-exec detected!"
exit 1
fi
else
USER="$(id -u)"
GROUP="$(id -g)"
gosu=""
DO_CHOWN=0
fi
KEEPER_CONFIG="${KEEPER_CONFIG:-/etc/clickhouse-keeper/config.yaml}"
if [ -f "$KEEPER_CONFIG" ] && ! $gosu test -f "$KEEPER_CONFIG" -a -r "$KEEPER_CONFIG"; then
echo "Configuration file '$KEEPER_CONFIG' isn't readable by user with id '$USER'"
exit 1
fi
DATA_DIR="${CLICKHOUSE_DATA_DIR:-/var/lib/clickhouse}"
LOG_DIR="${LOG_DIR:-/var/log/clickhouse-keeper}"
LOG_PATH="${LOG_DIR}/clickhouse-keeper.log"
ERROR_LOG_PATH="${LOG_DIR}/clickhouse-keeper.err.log"
COORDINATION_LOG_DIR="${DATA_DIR}/coordination/log"
COORDINATION_SNAPSHOT_DIR="${DATA_DIR}/coordination/snapshots"
CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0}
for dir in "$DATA_DIR" \
"$LOG_DIR" \
"$TMP_DIR" \
"$COORDINATION_LOG_DIR" \
"$COORDINATION_SNAPSHOT_DIR"
do
# check if variable not empty
[ -z "$dir" ] && continue
# ensure directories exist
if ! mkdir -p "$dir"; then
echo "Couldn't create necessary directory: $dir"
exit 1
fi
if [ "$DO_CHOWN" = "1" ]; then
# ensure proper directory permissions,
# but skip it if the directory already has proper permissions, since recursive chown may be slow
if [ "$(stat -c %u "$dir")" != "$USER" ] || [ "$(stat -c %g "$dir")" != "$GROUP" ]; then
chown -R "$USER:$GROUP" "$dir"
fi
elif ! $gosu test -d "$dir" -a -w "$dir" -a -r "$dir"; then
echo "Necessary directory '$dir' isn't accessible by user with id '$USER'"
exit 1
fi
done
# if no args are passed to `docker run` or the first argument starts with `--`, then the user is passing clickhouse-keeper arguments
if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
# The watchdog is launched by default, but it does not send SIGINT to the main process,
# so the container can't be stopped with Ctrl+C
export CLICKHOUSE_WATCHDOG_ENABLE
cd /var/lib/clickhouse
# There is a config file. It has already been tested with gosu (whether it is readable by the keeper user)
if [ -f "$KEEPER_CONFIG" ]; then
exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
fi
# There is no config file. Will use embedded one
exec $gosu /usr/bin/clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
fi
# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image
exec "$@"


@ -1,2 +0,0 @@
alpine-root/*
tgz-packages/*


@ -1,122 +0,0 @@
FROM ubuntu:20.04
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ARG repository="deb https://packages.clickhouse.com/deb stable main"
ARG version=22.1.1.*
# set non-empty deb_location_url url to create a docker image
# from debs created by CI build, for example:
# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://clickhouse-builds.s3.yandex.net/21852/069cfbff388b3d478d1a16dc7060b48073f5d522/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/" -t filimonovq/clickhouse-server:pr21852
ARG deb_location_url=""
# set non-empty single_binary_location_url to create docker image
# from a single binary url (useful for non-standard builds - with sanitizers, for arm64).
# for example (run on aarch64 server):
# docker build . --network host --build-arg single_binary_location_url="https://builds.clickhouse.com/master/aarch64/clickhouse" -t altinity/clickhouse-server:master-testing-arm
# note: clickhouse-odbc-bridge is not supported there.
ARG single_binary_location_url=""
# see https://github.com/moby/moby/issues/4032#issuecomment-192327844
ARG DEBIAN_FRONTEND=noninteractive
# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
# can't do chown and owners of mounted volumes should be configured externally.
# We do that in advance at the beginning of the Dockerfile, before any packages are
# installed, to prevent those uid / gid being picked up by some unrelated software.
# The same uid / gid (101) is used both for alpine and ubuntu.
# To drop privileges, we need 'su' command, that simply changes uid and gid.
# In fact, the 'su' command from Linux is not so simple, due to inherent vulnerability in Linux:
# https://ruderich.org/simon/notes/su-sudo-from-root-tty-hijacking
# To mitigate this drawback of Linux, the 'su' command creates its own pseudo-terminal
# and forwards commands. Due to some ridiculous circumstances, it does not work in Docker (or it does)
# and for these reasons people use alternatives to the 'su' command in Docker
# that don't mess with the terminal, don't care about closing the opened files, etc...
# but are only safe for dropping privileges inside Docker.
# The question is which implementation of the 'su' command to use.
# It should be a simple program doing just a couple of syscalls.
# Some people tend to use 'gosu' tool that is written in Go.
# It is not used for several reasons:
# 1. Dependency on some foreign code in yet another programming language - does not sound alright.
# 2. Anselmo D. Adams suggested not to use it due to false positive alarms in some undisclosed security scanners.
COPY su-exec.c /su-exec.c
RUN groupadd -r clickhouse --gid=101 \
&& useradd -r -g clickhouse --uid=101 --home-dir=/var/lib/clickhouse --shell=/bin/bash clickhouse \
&& apt-get update \
&& apt-get install --yes --no-install-recommends \
apt-transport-https \
ca-certificates \
dirmngr \
gnupg \
locales \
wget \
tzdata \
&& mkdir -p /etc/apt/sources.list.d \
&& apt-key adv --keyserver keyserver.ubuntu.com --recv 8919F6BD2B48D754 \
&& echo $repository > /etc/apt/sources.list.d/clickhouse.list \
&& if [ -n "$deb_location_url" ]; then \
echo "installing from custom url with deb packages: $deb_location_url" \
rm -rf /tmp/clickhouse_debs \
&& mkdir -p /tmp/clickhouse_debs \
&& wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-common-static_${version}_amd64.deb" -P /tmp/clickhouse_debs \
&& wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-client_${version}_all.deb" -P /tmp/clickhouse_debs \
&& wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-server_${version}_all.deb" -P /tmp/clickhouse_debs \
&& dpkg -i /tmp/clickhouse_debs/*.deb ; \
elif [ -n "$single_binary_location_url" ]; then \
echo "installing from single binary url: $single_binary_location_url" \
&& rm -rf /tmp/clickhouse_binary \
&& mkdir -p /tmp/clickhouse_binary \
&& wget --progress=bar:force:noscroll "$single_binary_location_url" -O /tmp/clickhouse_binary/clickhouse \
&& chmod +x /tmp/clickhouse_binary/clickhouse \
&& /tmp/clickhouse_binary/clickhouse install --user "clickhouse" --group "clickhouse" ; \
else \
echo "installing from repository: $repository" \
&& apt-get update \
&& apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \
&& apt-get install --allow-unauthenticated --yes --no-install-recommends \
clickhouse-common-static=$version \
clickhouse-client=$version \
clickhouse-server=$version ; \
fi \
&& apt-get install -y --no-install-recommends tcc libc-dev && \
tcc /su-exec.c -o /bin/su-exec && \
chown root:root /bin/su-exec && \
chmod 0755 /bin/su-exec && \
rm /su-exec.c && \
apt-get purge -y --auto-remove tcc libc-dev libc-dev-bin libc6-dev linux-libc-dev \
&& clickhouse-local -q 'SELECT * FROM system.build_options' \
&& rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
/tmp/* \
&& apt-get clean \
&& mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
&& chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client
# we need to allow "others" access to the clickhouse folder, because the docker container
# can be started with an arbitrary uid (openshift use case)
RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
ENV TZ UTC
RUN mkdir /docker-entrypoint-initdb.d
COPY docker_related_config.xml /etc/clickhouse-server/config.d/
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
EXPOSE 9000 8123 9009
VOLUME /var/lib/clickhouse
ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml
ENTRYPOINT ["/entrypoint.sh"]

docker/server/Dockerfile (symbolic link)

@ -0,0 +1 @@
Dockerfile.ubuntu


@ -1,3 +1,14 @@
FROM ubuntu:20.04 AS glibc-donor
ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
&& case $arch in \
amd64) rarch=x86_64 ;; \
arm64) rarch=aarch64 ;; \
esac \
&& ln -s "${rarch}-linux-gnu" /lib/linux-gnu
FROM alpine
ENV LANG=en_US.UTF-8 \
@ -6,7 +17,24 @@ ENV LANG=en_US.UTF-8 \
TZ=UTC \
CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml
COPY alpine-root/ /
COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/
COPY --from=glibc-donor /etc/nsswitch.conf /etc/
COPY docker_related_config.xml /etc/clickhouse-server/config.d/
COPY entrypoint.sh /entrypoint.sh
ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
&& case $arch in \
amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \
arm64) ln -sf /lib/ld-2.31.so /lib/ld-linux-aarch64.so.1 ;; \
esac
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="20.9.3.45"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
@ -15,9 +43,23 @@ COPY alpine-root/ /
# installed to prevent picking those uid / gid by some unrelated software.
# The same uid / gid (101) is used both for alpine and ubuntu.
RUN addgroup -S -g 101 clickhouse \
RUN arch=${TARGETARCH:-amd64} \
&& for package in ${PACKAGES}; do \
{ \
{ echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" -O "/tmp/${package}-${VERSION}-${arch}.tgz" \
&& tar xvzf "/tmp/${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / ; \
} || \
{ echo "Fallback to ${REPOSITORY}/${package}-${VERSION}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}.tgz" -O "/tmp/${package}-${VERSION}.tgz" \
&& tar xvzf "/tmp/${package}-${VERSION}.tgz" --strip-components=2 -C / ; \
} ; \
} || exit 1 \
; done \
&& rm /tmp/*.tgz /install -r \
&& addgroup -S -g 101 clickhouse \
&& adduser -S -h /var/lib/clickhouse -s /bin/bash -G clickhouse -g "ClickHouse server" -u 101 clickhouse \
&& mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
&& mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server/config.d /etc/clickhouse-server/users.d /etc/clickhouse-client /docker-entrypoint-initdb.d \
&& chown clickhouse:clickhouse /var/lib/clickhouse \
&& chown root:clickhouse /var/log/clickhouse-server \
&& chmod +x /entrypoint.sh \


@ -0,0 +1,128 @@
FROM ubuntu:20.04
# see https://github.com/moby/moby/issues/4032#issuecomment-192327844
ARG DEBIAN_FRONTEND=noninteractive
COPY su-exec.c /su-exec.c
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list \
&& groupadd -r clickhouse --gid=101 \
&& useradd -r -g clickhouse --uid=101 --home-dir=/var/lib/clickhouse --shell=/bin/bash clickhouse \
&& apt-get update \
&& apt-get install --yes --no-install-recommends \
apt-transport-https \
ca-certificates \
dirmngr \
gnupg \
locales \
wget \
tzdata \
&& apt-get install -y --no-install-recommends tcc libc-dev && \
tcc /su-exec.c -o /bin/su-exec && \
chown root:root /bin/su-exec && \
chmod 0755 /bin/su-exec && \
rm /su-exec.c && \
apt-get purge -y --auto-remove tcc libc-dev libc-dev-bin libc6-dev linux-libc-dev \
&& apt-get clean
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION=22.1.1.*
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# set non-empty deb_location_url url to create a docker image
# from debs created by CI build, for example:
# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://clickhouse-builds.s3.yandex.net/21852/069cfbff388b3d478d1a16dc7060b48073f5d522/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/" -t filimonovq/clickhouse-server:pr21852
ARG deb_location_url=""
# set non-empty single_binary_location_url to create docker image
# from a single binary url (useful for non-standard builds - with sanitizers, for arm64).
# for example (run on aarch64 server):
# docker build . --network host --build-arg single_binary_location_url="https://builds.clickhouse.com/master/aarch64/clickhouse" -t altinity/clickhouse-server:master-testing-arm
# note: clickhouse-odbc-bridge is not supported there.
ARG single_binary_location_url=""
# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
# can't do chown and owners of mounted volumes should be configured externally.
# We do that in advance at the beginning of the Dockerfile, before any packages are
# installed, to prevent those uid / gid being picked up by some unrelated software.
# The same uid / gid (101) is used both for alpine and ubuntu.
# To drop privileges, we need 'su' command, that simply changes uid and gid.
# In fact, the 'su' command from Linux is not so simple, due to inherent vulnerability in Linux:
# https://ruderich.org/simon/notes/su-sudo-from-root-tty-hijacking
# To mitigate this drawback of Linux, the 'su' command creates its own pseudo-terminal
# and forwards commands. Due to some ridiculous circumstances, it does not work in Docker (or it does)
# and for these reasons people use alternatives to the 'su' command in Docker
# that don't mess with the terminal, don't care about closing the opened files, etc...
# but are only safe for dropping privileges inside Docker.
# The question is which implementation of the 'su' command to use.
# It should be a simple program doing just a couple of syscalls.
# Some people tend to use 'gosu' tool that is written in Go.
# It is not used for several reasons:
# 1. Dependency on some foreign code in yet another programming language - does not sound alright.
# 2. Anselmo D. Adams suggested not to use it due to false positive alarms in some undisclosed security scanners.
ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
&& if [ -n "${deb_location_url}" ]; then \
echo "installing from custom url with deb packages: ${deb_location_url}" \
rm -rf /tmp/clickhouse_debs \
&& mkdir -p /tmp/clickhouse_debs \
&& for package in ${PACKAGES}; do \
{ wget --progress=bar:force:noscroll "${deb_location_url}/${package}_${VERSION}_${arch}.deb" -P /tmp/clickhouse_debs || \
wget --progress=bar:force:noscroll "${deb_location_url}/${package}_${VERSION}_all.deb" -P /tmp/clickhouse_debs ; } \
|| exit 1 \
; done \
&& dpkg -i /tmp/clickhouse_debs/*.deb ; \
elif [ -n "${single_binary_location_url}" ]; then \
echo "installing from single binary url: ${single_binary_location_url}" \
&& rm -rf /tmp/clickhouse_binary \
&& mkdir -p /tmp/clickhouse_binary \
&& wget --progress=bar:force:noscroll "${single_binary_location_url}" -O /tmp/clickhouse_binary/clickhouse \
&& chmod +x /tmp/clickhouse_binary/clickhouse \
&& /tmp/clickhouse_binary/clickhouse install --user "clickhouse" --group "clickhouse" ; \
else \
mkdir -p /etc/apt/sources.list.d \
&& apt-key adv --keyserver keyserver.ubuntu.com --recv 8919F6BD2B48D754 \
&& echo ${REPOSITORY} > /etc/apt/sources.list.d/clickhouse.list \
&& echo "installing from repository: ${REPOSITORY}" \
&& apt-get update \
&& apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \
&& for package in ${PACKAGES}; do \
apt-get install --allow-unauthenticated --yes --no-install-recommends "${package}=${VERSION}" || exit 1 \
; done \
; fi \
&& clickhouse-local -q 'SELECT * FROM system.build_options' \
&& rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
/tmp/* \
&& mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
&& chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client
# we need to allow "others" access to the clickhouse folder, because the docker container
# can be started with an arbitrary uid (openshift use case)
RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
ENV TZ UTC
RUN mkdir /docker-entrypoint-initdb.d
COPY docker_related_config.xml /etc/clickhouse-server/config.d/
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
EXPOSE 9000 8123 9009
VOLUME /var/lib/clickhouse
ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml
ENTRYPOINT ["/entrypoint.sh"]


@ -1,63 +0,0 @@
#!/bin/bash
set -x
REPO_CHANNEL="${REPO_CHANNEL:-stable}" # lts / testing / prestable / etc
REPO_URL="${REPO_URL:-"https://repo.yandex.ru/clickhouse/tgz/${REPO_CHANNEL}"}"
VERSION="${VERSION:-20.9.3.45}"
DOCKER_IMAGE="${DOCKER_IMAGE:-clickhouse/clickhouse-server}"
# where original files live
DOCKER_BUILD_FOLDER="${BASH_SOURCE%/*}"
# we will create root for our image here
CONTAINER_ROOT_FOLDER="${DOCKER_BUILD_FOLDER}/alpine-root"
# clean up the root from old runs, it's reconstructed each time
rm -rf "$CONTAINER_ROOT_FOLDER"
mkdir -p "$CONTAINER_ROOT_FOLDER"
# where to put downloaded tgz
TGZ_PACKAGES_FOLDER="${DOCKER_BUILD_FOLDER}/tgz-packages"
mkdir -p "$TGZ_PACKAGES_FOLDER"
PACKAGES=( "clickhouse-client" "clickhouse-server" "clickhouse-common-static" )
# download tars from the repo
for package in "${PACKAGES[@]}"
do
wget -c -q --show-progress "${REPO_URL}/${package}-${VERSION}.tgz" -O "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz"
done
# unpack tars
for package in "${PACKAGES[@]}"
do
tar xvzf "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz" --strip-components=2 -C "$CONTAINER_ROOT_FOLDER"
done
# prepare few more folders
mkdir -p "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/users.d" \
"${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d" \
"${CONTAINER_ROOT_FOLDER}/var/log/clickhouse-server" \
"${CONTAINER_ROOT_FOLDER}/var/lib/clickhouse" \
"${CONTAINER_ROOT_FOLDER}/docker-entrypoint-initdb.d" \
"${CONTAINER_ROOT_FOLDER}/lib64"
cp "${DOCKER_BUILD_FOLDER}/docker_related_config.xml" "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d/"
cp "${DOCKER_BUILD_FOLDER}/entrypoint.sh" "${CONTAINER_ROOT_FOLDER}/entrypoint.sh"
## get glibc components from ubuntu 20.04 and put them to expected place
docker pull ubuntu:20.04
ubuntu20image=$(docker create --rm ubuntu:20.04)
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libc.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libdl.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libm.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libpthread.so.0 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/librt.so.1 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libnss_dns.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libnss_files.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libresolv.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib64/ld-linux-x86-64.so.2 "${CONTAINER_ROOT_FOLDER}/lib64"
docker cp -L "${ubuntu20image}":/etc/nsswitch.conf "${CONTAINER_ROOT_FOLDER}/etc"
docker build "$DOCKER_BUILD_FOLDER" -f Dockerfile.alpine -t "${DOCKER_IMAGE}:${VERSION}-alpine" --pull
rm -rf "$CONTAINER_ROOT_FOLDER"


@ -1,47 +0,0 @@
# Since right now we can't set volumes for docker during build, we split building the container into stages:
# 1. build base container
# 2. run base container with mounted volumes
# 3. commit container as image
# 4. build final container atop that image
# Middle steps are performed by the bash script.
FROM ubuntu:18.04 as clickhouse-server-base
ARG gosu_ver=1.14
VOLUME /packages/
# update to allow installing dependencies of clickhouse automatically
RUN apt update; \
DEBIAN_FRONTEND=noninteractive \
apt install -y locales;
ADD https://github.com/tianon/gosu/releases/download/${gosu_ver}/gosu-amd64 /bin/gosu
RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
# installing via apt to simulate the real-world scenario, where the user installs the deb package and all its dependencies automatically.
CMD DEBIAN_FRONTEND=noninteractive \
apt install -y \
/packages/clickhouse-common-static_*.deb \
/packages/clickhouse-server_*.deb ;
FROM clickhouse-server-base:postinstall as clickhouse-server
RUN mkdir /docker-entrypoint-initdb.d
COPY docker_related_config.xml /etc/clickhouse-server/config.d/
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x \
/entrypoint.sh \
/bin/gosu
EXPOSE 9000 8123 9009
VOLUME /var/lib/clickhouse
ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml
ENTRYPOINT ["/entrypoint.sh"]


@ -1,86 +0,0 @@
#!/bin/sh
set -e -x
# Not sure why shellcheck complains that rc is not assigned before it is referenced.
# shellcheck disable=SC2154
trap 'rc=$?; echo EXITED WITH: $rc; exit $rc' EXIT
# CLI option to prevent rebuilding images, just re-run tests with images left over from the previous run
readonly NO_REBUILD_FLAG="--no-rebuild"
readonly CLICKHOUSE_DOCKER_DIR="$(realpath "${1}")"
readonly CLICKHOUSE_PACKAGES_ARG="${2}"
CLICKHOUSE_SERVER_IMAGE="${3}"
if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then
readonly CLICKHOUSE_PACKAGES_DIR="$(realpath "${2}")" # or --no-rebuild
fi
# In order to allow the packages directory to be anywhere, and to reduce the amount of context sent to the docker daemon,
# all images are built in multiple stages:
# 1. build base image, install dependencies
# 2. run image with volume mounted, install what needed from those volumes
# 3. tag container as image
# 4. [optional] build another image atop of tagged.
# TODO: optionally mount most recent clickhouse-test and queries directory from local machine
if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then
docker build --network=host \
-f "${CLICKHOUSE_DOCKER_DIR}/test/stateless/clickhouse-statelest-test-runner.Dockerfile" \
--target clickhouse-test-runner-base \
-t clickhouse-test-runner-base:preinstall \
"${CLICKHOUSE_DOCKER_DIR}/test/stateless"
docker rm -f clickhouse-test-runner-installing-packages || true
docker run --network=host \
-v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
--name clickhouse-test-runner-installing-packages \
clickhouse-test-runner-base:preinstall
docker commit clickhouse-test-runner-installing-packages clickhouse-statelest-test-runner:local
docker rm -f clickhouse-test-runner-installing-packages || true
fi
# # Create a bind-volume to the clickhouse-test script file
# docker volume create --driver local --opt type=none --opt device=/home/enmk/proj/ClickHouse_master/tests/clickhouse-test --opt o=bind clickhouse-test-script-volume
# docker volume create --driver local --opt type=none --opt device=/home/enmk/proj/ClickHouse_master/tests/queries --opt o=bind clickhouse-test-queries-dir-volume
# Build server image (optional) from local packages
if [ -z "${CLICKHOUSE_SERVER_IMAGE}" ]; then
CLICKHOUSE_SERVER_IMAGE="clickhouse/server:local"
if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then
docker build --network=host \
-f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \
--target clickhouse-server-base \
-t clickhouse-server-base:preinstall \
"${CLICKHOUSE_DOCKER_DIR}/server"
docker rm -f clickhouse_server_base_installing_server || true
docker run --network=host -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
--name clickhouse_server_base_installing_server \
clickhouse-server-base:preinstall
docker commit clickhouse_server_base_installing_server clickhouse-server-base:postinstall
docker build --network=host \
-f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \
--target clickhouse-server \
-t "${CLICKHOUSE_SERVER_IMAGE}" \
"${CLICKHOUSE_DOCKER_DIR}/server"
fi
fi
docker rm -f test-runner || true
docker-compose down
CLICKHOUSE_SERVER_IMAGE="${CLICKHOUSE_SERVER_IMAGE}" \
docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \
create \
--build --force-recreate
CLICKHOUSE_SERVER_IMAGE="${CLICKHOUSE_SERVER_IMAGE}" \
docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \
run \
--name test-runner \
test-runner


@ -1,34 +0,0 @@
version: "2"
services:
clickhouse-server:
image: ${CLICKHOUSE_SERVER_IMAGE}
expose:
- "8123" # HTTP
- "9000" # TCP
- "9009" # HTTP-interserver
restart: "no"
test-runner:
image: clickhouse-statelest-test-runner:local
restart: "no"
depends_on:
- clickhouse-server
environment:
# these are used by clickhouse-test to point clickhouse-client to the right server
- CLICKHOUSE_HOST=clickhouse-server
- CLICKHOUSE_PORT=9009
- CLICKHOUSE_TEST_HOST_EXPOSED_PORT=51234
expose:
# port for any test to serve data to clickhouse-server on rare occasion (like URL-engine tables in 00646),
# should match value of CLICKHOUSE_TEST_HOST_EXPOSED_PORT above
- "51234"
# NOTE: Dev-mode: mount newest versions of the queries and clickhouse-test script into container.
# volumes:
# - /home/enmk/proj/ClickHouse_master/tests/queries:/usr/share/clickhouse-test/queries:ro
# - /home/enmk/proj/ClickHouse_master/tests/clickhouse-test:/usr/bin/clickhouse-test:ro
# String-form instead of list-form to allow multiple arguments in "${CLICKHOUSE_TEST_ARGS}"
entrypoint: "clickhouse-test ${CLICKHOUSE_TEST_ARGS}"


@ -393,6 +393,13 @@ This is a generalization of other functions named `toStartOf*`. For example,
`toStartOfInterval(t, INTERVAL 1 day)` returns the same as `toStartOfDay(t)`,
`toStartOfInterval(t, INTERVAL 15 minute)` returns the same as `toStartOfFifteenMinutes(t)` etc.
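For illustration, a minimal SQL sketch of the documented equivalence (the timestamp is chosen arbitrarily):

``` sql
SELECT
    toStartOfInterval(toDateTime('2022-04-04 14:24:23'), INTERVAL 15 minute) AS by_interval,
    toStartOfFifteenMinutes(toDateTime('2022-04-04 14:24:23')) AS by_shortcut;
-- both columns evaluate to 2022-04-04 14:15:00
```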
## toLastDayOfMonth {#toLastDayOfMonth}
Rounds up a date or date with time to the last day of the month.
Returns the date.
Alias: `LAST_DAY`.
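A minimal usage sketch (the date is arbitrary; the result follows from the documented rounding):

``` sql
SELECT
    toLastDayOfMonth(toDate('2022-04-04')) AS last_day,
    LAST_DAY(toDate('2022-04-04')) AS via_alias;
-- both return 2022-04-30
```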
## toTime {#totime}
Converts a date with time to a certain fixed date, while preserving the time.


@ -2499,3 +2499,41 @@ Result:
│ 286 │
└──────────────────────────┘
```
## getTypeSerializationStreams {#getTypeSerializationStreams}
Returns the serialization streams of a data type.
**Syntax**
``` sql
getTypeSerializationStreams(type_name)
getTypeSerializationStreams(column)
```
**Arguments**
- `type_name` - Name of the data type whose serialization streams to return. [String](../../sql-reference/data-types/string.md#string).
- `column` - Any column that has a data type.
**Returned value**
- List of serialization streams.
Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).
**Example**
Query:
``` sql
SELECT getTypeSerializationStreams('Array(Array(Int8))')
```
Result:
``` text
┌─getTypeSerializationStreams('Array(Array(Int8))')─────────────────────────────────────────┐
│ ['{ArraySizes}','{ArrayElements, ArraySizes}','{ArrayElements, ArrayElements, Regular}']   │
└────────────────────────────────────────────────────────────────────────────────────────────┘
```


@ -114,9 +114,9 @@ In addition, this column is not substituted when using an asterisk in a SELECT q
### EPHEMERAL {#ephemeral}
`EPHEMERAL expr`
`EPHEMERAL [expr]`
Ephemeral column. Such a column isn't stored in the table and cannot be SELECTed, but can be referenced in the defaults of CREATE statement.
Ephemeral column. Such a column isn't stored in the table and cannot be SELECTed, but can be referenced in the defaults of a CREATE statement. If `expr` is omitted, the column type must be specified explicitly.
An INSERT without a list of columns will skip such a column, so the SELECT/INSERT invariant is preserved: the dump obtained using `SELECT *` can be inserted back into the table using INSERT without specifying the list of columns.
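As a sketch of the typical pattern (table and column names are illustrative; `unhexed` carries an explicit type because no default expression is given):

``` sql
CREATE TABLE user_events
(
    id UInt64,
    unhexed String EPHEMERAL,
    hexed FixedString(4) DEFAULT unhex(unhexed)
)
ENGINE = MergeTree
ORDER BY id;

INSERT INTO user_events (id, unhexed) VALUES (1, '5a90b714');
SELECT id, hexed FROM user_events;  -- `unhexed` itself cannot be selected
```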
### ALIAS {#alias}


@ -110,9 +110,9 @@ SELECT x, toTypeName(x) FROM t1;
### EPHEMERAL {#ephemeral}
`EPHEMERAL expr`
`EPHEMERAL [expr]`
Ephemeral expression. Such a column is not stored in the table and cannot be retrieved by a SELECT query, but it can be referenced in the default expressions of a CREATE query.
Ephemeral expression. Such a column is not stored in the table and cannot be retrieved by a SELECT query, but it can be referenced in the default expressions of a CREATE query. If the default value `expr` is not specified, then the column type must be specified.
An INSERT without a list of columns ignores this column, so the invariant is preserved: i.e., a dump obtained via `SELECT *` can be inserted back into the table with an INSERT without specifying the list of columns.
### ALIAS {#alias}


@ -126,7 +126,7 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32)
**See also**
- [PostgreSQL table engine](../../sql-reference/table-functions/postgresql.md)
- [PostgreSQL table engine](../../engines/table-engines/integrations/postgresql.md)
- [Using PostgreSQL as a source for an external dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
[Original article](https://clickhouse.com/docs/ru/sql-reference/table-functions/postgresql/) <!--hide-->


@ -16,7 +16,7 @@ jsmin==3.0.0
livereload==2.6.3
Markdown==3.3.2
MarkupSafe==2.1.0
mkdocs==1.1.2
mkdocs==1.3.0
mkdocs-htmlproofer-plugin==0.0.3
mkdocs-macros-plugin==0.4.20
nltk==3.7


@ -20,7 +20,7 @@ ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml
EnvironmentFile=-/etc/default/clickhouse
LimitCORE=infinity
LimitNOFILE=500000
CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE
CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE
[Install]
# ClickHouse should not start from the rescue shell (rescue.target).


@ -148,13 +148,13 @@
<!-- <interserver_https_port>9010</interserver_https_port> -->
<!-- Hostname that is used by other replicas to request this server.
If not specified, than it is determined analogous to 'hostname -f' command.
If not specified, then it is determined analogous to 'hostname -f' command.
This setting could be used to switch replication to another network interface
(the server may be connected to multiple networks via multiple addresses)
-->
<!--
<interserver_http_host>example.yandex.ru</interserver_http_host>
<interserver_http_host>example.clickhouse.com</interserver_http_host>
-->
<!-- You can specify credentials for authentication between replicas.
@ -765,14 +765,14 @@
-->
<!--<remote_url_allow_hosts>-->
<!-- Host should be specified exactly as in URL. The name is checked before DNS resolution.
Example: "yandex.ru", "yandex.ru." and "www.yandex.ru" are different hosts.
Example: "clickhouse.com", "clickhouse.com." and "www.clickhouse.com" are different hosts.
If port is explicitly specified in URL, the host:port is checked as a whole.
If host specified here without port, any port with this host allowed.
"yandex.ru" -> "yandex.ru:443", "yandex.ru:80" etc. is allowed, but "yandex.ru:80" -> only "yandex.ru:80" is allowed.
"clickhouse.com" -> "clickhouse.com:443", "clickhouse.com:80" etc. is allowed, but "clickhouse.com:80" -> only "clickhouse.com:80" is allowed.
If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
Host should be specified using the host xml tag:
<host>yandex.ru</host>
<host>clickhouse.com</host>
-->
<!-- Regular expression can be specified. RE2 engine is used for regexps.
@ -1030,25 +1030,17 @@
<flush_interval_milliseconds>1000</flush_interval_milliseconds>
</crash_log>
<!-- Session log. Stores user log in (successful or not) and log out events. -->
<session_log>
<!-- Session log. Stores user log in (successful or not) and log out events.
Note: session log has known security issues and should not be used in production.
-->
<!-- <session_log>
<database>system</database>
<table>session_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</session_log>
<!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
See https://clickhouse.com/docs/en/dicts/internal_dicts/
-->
<!-- Path to file with region hierarchy. -->
<!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->
<!-- Path to directory with files containing names of regions -->
<!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->
</session_log> -->
<!-- <top_level_domains_path>/var/lib/clickhouse/top_level_domains/</top_level_domains_path> -->
<!-- Custom TLD lists.


@ -103,7 +103,7 @@ interserver_http_port: 9009
# If not specified, then it is determined analogous to 'hostname -f' command.
# This setting could be used to switch replication to another network interface
# (the server may be connected to multiple networks via multiple addresses)
# interserver_http_host: example.yandex.ru
# interserver_http_host: example.clickhouse.com
# You can specify credentials for authentication between replicas.
# This is required when interserver_https_port is accessible from untrusted networks,
@ -592,10 +592,10 @@ remote_servers:
# remote_url_allow_hosts:
# Host should be specified exactly as in URL. The name is checked before DNS resolution.
# Example: "yandex.ru", "yandex.ru." and "www.yandex.ru" are different hosts.
# Example: "clickhouse.com", "clickhouse.com." and "www.clickhouse.com" are different hosts.
# If port is explicitly specified in URL, the host:port is checked as a whole.
# If host specified here without port, any port with this host allowed.
# "yandex.ru" -> "yandex.ru:443", "yandex.ru:80" etc. is allowed, but "yandex.ru:80" -> only "yandex.ru:80" is allowed.
# "clickhouse.com" -> "clickhouse.com:443", "clickhouse.com:80" etc. is allowed, but "clickhouse.com:80" -> only "clickhouse.com:80" is allowed.
# If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
# If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
@ -803,16 +803,6 @@ crash_log:
partition_by: ''
flush_interval_milliseconds: 1000
# Parameters for embedded dictionaries, used in Yandex.Metrica.
# See https://clickhouse.com/docs/en/dicts/internal_dicts/
# Path to file with region hierarchy.
# path_to_regions_hierarchy_file: /opt/geo/regions_hierarchy.txt
# Path to directory with files containing names of regions
# path_to_regions_names_files: /opt/geo/
# top_level_domains_path: /var/lib/clickhouse/top_level_domains/
# Custom TLD lists.
# Format: name: /path/to/file


@ -266,12 +266,25 @@
color: var(--null-color);
}
@keyframes hourglass-animation {
0% {
transform: rotate(-180deg);
}
50% {
transform: rotate(-180deg);
}
100% {
transform: none;
}
}
#hourglass
{
display: none;
padding-left: 1rem;
margin-left: 1rem;
font-size: 110%;
color: #888;
animation: hourglass-animation 1s linear infinite;
}
#check-mark
@ -457,7 +470,7 @@
}
document.getElementById('check-mark').style.display = 'none';
document.getElementById('hourglass').style.display = 'inline';
document.getElementById('hourglass').style.display = 'inline-block';
xhr.send(query);
}


@ -79,9 +79,9 @@
Each element of list has one of the following forms:
<ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
<host> Hostname. Example: server01.yandex.ru.
<host> Hostname. Example: server01.clickhouse.com.
To check access, DNS query is performed, and all received addresses compared to peer address.
<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.yandex\.ru$
<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
To check access, DNS PTR query is performed for peer address and then regexp is applied.
Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
Strongly recommended that regexp ends with $


@ -70,9 +70,9 @@ users:
# Each element of list has one of the following forms:
# ip: IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
# 2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
# host: Hostname. Example: server01.yandex.ru.
# host: Hostname. Example: server01.clickhouse.com.
# To check access, DNS query is performed, and all received addresses compared to peer address.
# host_regexp: Regular expression for host names. Example, ^server\d\d-\d\d-\d\.yandex\.ru$
# host_regexp: Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
# To check access, DNS PTR query is performed for peer address and then regexp is applied.
# Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
# Strongly recommended that regexp ends with $ and that the whole expression is taken in ''


@ -107,6 +107,11 @@ const QuotaTypeInfo & QuotaTypeInfo::get(QuotaType type)
static const auto info = make_info("EXECUTION_TIME", 1000000000 /* execution_time is stored in nanoseconds */);
return info;
}
case QuotaType::WRITTEN_BYTES:
{
static const auto info = make_info("WRITTEN_BYTES", 1);
return info;
}
case QuotaType::MAX: break;
}
throw Exception("Unexpected quota type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);


@ -20,6 +20,7 @@ enum class QuotaType
READ_ROWS, /// Number of rows read from tables.
READ_BYTES, /// Number of bytes read from tables.
EXECUTION_TIME, /// Total amount of query execution time in nanoseconds.
WRITTEN_BYTES, /// Number of bytes written to tables.
MAX
};
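The new enum value presumably surfaces as the `written_bytes` limit in the quota DDL; a hedged sketch, assuming that mapping (quota and user names are made up):

``` sql
CREATE QUOTA insert_traffic_quota
    FOR INTERVAL 1 hour MAX written_bytes = 10000000
    TO some_user;
```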


@ -13,7 +13,7 @@ namespace DB
{
namespace ErrorCodes
{
extern const int QUOTA_EXPIRED;
extern const int QUOTA_EXCEEDED;
}
@ -33,7 +33,7 @@ struct EnabledQuota::Impl
"Quota for user " + backQuote(user_name) + " for " + to_string(duration) + " has been exceeded: "
+ type_info.valueToStringWithName(used) + "/" + type_info.valueToString(max) + ". "
+ "Interval will end at " + to_string(end_of_interval) + ". " + "Name of quota template: " + backQuote(quota_name),
ErrorCodes::QUOTA_EXPIRED);
ErrorCodes::QUOTA_EXCEEDED);
}


@ -360,6 +360,27 @@ public:
return toDayNum(LUTIndex(i - (lut[i].day_of_month - 1)));
}
/// Round up to last day of month.
template <typename DateOrTime>
inline Time toLastDayOfMonth(DateOrTime v) const
{
const LUTIndex i = toLUTIndex(v);
if constexpr (std::is_unsigned_v<DateOrTime> || std::is_same_v<DateOrTime, DayNum>)
return lut_saturated[i - lut[i].day_of_month + lut[i].days_in_month].date;
else
return lut[i - lut[i].day_of_month + lut[i].days_in_month].date;
}
template <typename DateOrTime>
inline auto toLastDayNumOfMonth(DateOrTime v) const
{
const LUTIndex i = toLUTIndex(v);
if constexpr (std::is_unsigned_v<DateOrTime> || std::is_same_v<DateOrTime, DayNum>)
return toDayNum(LUTIndexWithSaturation(i - lut[i].day_of_month + lut[i].days_in_month));
else
return toDayNum(LUTIndex(i - lut[i].day_of_month + lut[i].days_in_month));
}
/// Round down to start of quarter.
template <typename DateOrTime>
inline auto toFirstDayNumOfQuarter(DateOrTime v) const


@ -208,7 +208,7 @@
M(198, DNS_ERROR) \
M(199, UNKNOWN_QUOTA) \
M(200, QUOTA_DOESNT_ALLOW_KEYS) \
M(201, QUOTA_EXPIRED) \
M(201, QUOTA_EXCEEDED) \
M(202, TOO_MANY_SIMULTANEOUS_QUERIES) \
M(203, NO_FREE_CONNECTION) \
M(204, CANNOT_FSYNC) \


@ -532,11 +532,8 @@ FileSegmentsHolder::~FileSegmentsHolder()
}
catch (...)
{
#ifdef NDEBUG
tryLogCurrentException(__PRETTY_FUNCTION__);
#else
throw;
#endif
assert(false);
}
}
}


@ -146,6 +146,8 @@ TEST(DateLUTTest, TimeValuesInMiddleOfRange)
EXPECT_EQ(lut.addYears(time, 10), 1884270011 /*time_t*/);
EXPECT_EQ(lut.timeToString(time), "2019-09-16 19:20:11" /*std::string*/);
EXPECT_EQ(lut.dateToString(time), "2019-09-16" /*std::string*/);
EXPECT_EQ(lut.toLastDayOfMonth(time), 1569790800 /*time_t*/);
EXPECT_EQ(lut.toLastDayNumOfMonth(time), DayNum(18169) /*DayNum*/);
}
@ -206,6 +208,8 @@ TEST(DateLUTTest, TimeValuesAtLeftBoderOfRange)
EXPECT_EQ(lut.addYears(time, 10), 315532800 /*time_t*/);
EXPECT_EQ(lut.timeToString(time), "1970-01-01 00:00:00" /*std::string*/);
EXPECT_EQ(lut.dateToString(time), "1970-01-01" /*std::string*/);
EXPECT_EQ(lut.toLastDayOfMonth(time), 2592000 /*time_t*/);
EXPECT_EQ(lut.toLastDayNumOfMonth(time), DayNum(30) /*DayNum*/);
}
TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOldLUT)
@ -225,7 +229,7 @@ TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOldLUT)
EXPECT_EQ(lut.toFirstDayOfWeek(time), 4293820800 /*time_t*/);
EXPECT_EQ(lut.toFirstDayNumOfWeek(time), DayNum(49697));
EXPECT_EQ(lut.toFirstDayOfMonth(time), 4291747200 /*time_t*/); // 2016-01-01
EXPECT_EQ(lut.toFirstDayOfMonth(time), 4291747200 /*time_t*/); // 2106-01-01
EXPECT_EQ(lut.toFirstDayNumOfMonth(time), DayNum(49673));
EXPECT_EQ(lut.toFirstDayNumOfQuarter(time), DayNum(49673) /*DayNum*/);
EXPECT_EQ(lut.toFirstDayOfQuarter(time), 4291747200 /*time_t*/);
@ -268,6 +272,8 @@ TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOldLUT)
EXPECT_EQ(lut.timeToString(time), "2106-01-31 01:17:53" /*std::string*/);
EXPECT_EQ(lut.dateToString(time), "2106-01-31" /*std::string*/);
EXPECT_EQ(lut.toLastDayOfMonth(time), 4294339200 /*time_t*/); // 2106-01-31
EXPECT_EQ(lut.toLastDayNumOfMonth(time), DayNum(49703));
}

View File

@ -642,6 +642,8 @@ class IColumn;
M(DateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic', 'best_effort' and 'best_effort_us'.", 0) \
M(DateTimeOutputFormat, date_time_output_format, FormatSettings::DateTimeOutputFormat::Simple, "Method to write DateTime to text output. Possible values: 'simple', 'iso', 'unix_timestamp'.", 0) \
\
M(Bool, input_format_ipv4_default_on_conversion_error, false, "Deserialization of IPv4 will use default values instead of throwing exception on conversion error.", 0) \
M(Bool, input_format_ipv6_default_on_conversion_error, false, "Deserialization of IPv6 will use default values instead of throwing exception on conversion error.", 0) \
M(String, bool_true_representation, "true", "Text to represent bool value in TSV/CSV formats.", 0) \
M(String, bool_false_representation, "false", "Text to represent bool value in TSV/CSV formats.", 0) \
\

View File

@ -6,6 +6,8 @@
#include <Common/formatIPv6.h>
#include <IO/WriteBuffer.h>
#include <IO/ReadBuffer.h>
#include <Formats/FormatSettings.h>
namespace DB
{
@ -47,9 +49,11 @@ void SerializationIPv4::deserializeText(IColumn & column, ReadBuffer & istr, con
char buffer[IPV4_MAX_TEXT_LENGTH + 1] = {'\0'};
istr.read(buffer, sizeof(buffer) - 1);
UInt32 ipv4_value = 0;
if (!parseIPv4(buffer, reinterpret_cast<unsigned char *>(&ipv4_value)))
bool parse_result = parseIPv4(buffer, reinterpret_cast<unsigned char *>(&ipv4_value));
if (!parse_result && !settings.input_format_ipv4_default_on_conversion_error)
{
throw Exception("Invalid IPv4 value.", ErrorCodes::CANNOT_PARSE_DOMAIN_VALUE_FROM_STRING);
throw Exception("Invalid IPv4 value", ErrorCodes::CANNOT_PARSE_DOMAIN_VALUE_FROM_STRING);
}
col->insert(ipv4_value);
@ -89,9 +93,11 @@ void SerializationIPv6::deserializeText(IColumn & column, ReadBuffer & istr, con
istr.read(buffer, sizeof(buffer) - 1);
std::string ipv6_value(IPV6_BINARY_LENGTH, '\0');
if (!parseIPv6(buffer, reinterpret_cast<unsigned char *>(ipv6_value.data())))
bool parse_result = parseIPv6(buffer, reinterpret_cast<unsigned char *>(ipv6_value.data()));
if (!parse_result && !settings.input_format_ipv6_default_on_conversion_error)
{
throw Exception("Invalid IPv6 value.", ErrorCodes::CANNOT_PARSE_DOMAIN_VALUE_FROM_STRING);
throw Exception("Invalid IPv6 value", ErrorCodes::CANNOT_PARSE_DOMAIN_VALUE_FROM_STRING);
}
col->insertString(ipv6_value);
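A minimal Python sketch of the new fallback semantics (illustrative only): on a parse failure, either raise or fall back to the type's default value, depending on an input_format_ipv4_default_on_conversion_error-style flag:

import socket

def deserialize_ipv4(text: str, default_on_conversion_error: bool = False) -> int:
    # Mirrors the logic above: try to parse; on failure either throw or
    # fall back to the default value (0 for IPv4) when the setting is on.
    try:
        return int.from_bytes(socket.inet_aton(text), "big")
    except OSError:
        if default_on_conversion_error:
            return 0
        raise ValueError("Invalid IPv4 value")

assert deserialize_ipv4("1.2.3.4") == 0x01020304
assert deserialize_ipv4("not-an-ip", default_on_conversion_error=True) == 0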

View File

@ -77,6 +77,8 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
format_settings.custom.row_between_delimiter = settings.format_custom_row_between_delimiter;
format_settings.date_time_input_format = settings.date_time_input_format;
format_settings.date_time_output_format = settings.date_time_output_format;
format_settings.input_format_ipv4_default_on_conversion_error = settings.input_format_ipv4_default_on_conversion_error;
format_settings.input_format_ipv6_default_on_conversion_error = settings.input_format_ipv6_default_on_conversion_error;
format_settings.bool_true_representation = settings.bool_true_representation;
format_settings.bool_false_representation = settings.bool_false_representation;
format_settings.enable_streaming = settings.output_format_enable_streaming;

View File

@ -65,6 +65,9 @@ struct FormatSettings
DateTimeOutputFormat date_time_output_format = DateTimeOutputFormat::Simple;
bool input_format_ipv4_default_on_conversion_error = false;
bool input_format_ipv6_default_on_conversion_error = false;
UInt64 input_allow_errors_num = 0;
Float32 input_allow_errors_ratio = 0;

View File

@ -270,13 +270,13 @@ struct JSONEachRowFieldsExtractor
std::vector<String> column_names;
};
std::unordered_map<String, DataTypePtr> readRowAndGetNamesAndDataTypesForJSONEachRow(ReadBuffer & in, bool json_strings)
NamesAndTypesList readRowAndGetNamesAndDataTypesForJSONEachRow(ReadBuffer & in, bool json_strings)
{
JSONEachRowFieldsExtractor extractor;
auto data_types = determineColumnDataTypesFromJSONEachRowDataImpl<JSONEachRowFieldsExtractor, '{', '}'>(in, json_strings, extractor);
std::unordered_map<String, DataTypePtr> result;
NamesAndTypesList result;
for (size_t i = 0; i != extractor.column_names.size(); ++i)
result[extractor.column_names[i]] = data_types[i];
result.emplace_back(extractor.column_names[i], data_types[i]);
return result;
}

View File

@ -20,9 +20,9 @@ std::pair<bool, size_t> fileSegmentationEngineJSONCompactEachRow(ReadBuffer & in
DataTypePtr getDataTypeFromJSONField(const String & field);
/// Read row in JSONEachRow format and try to determine type for each field.
/// Return map {column_name : type}.
/// Return list of names and types.
/// If the type of some field cannot be determined, return nullptr for it.
std::unordered_map<String, DataTypePtr> readRowAndGetNamesAndDataTypesForJSONEachRow(ReadBuffer & in, bool json_strings);
NamesAndTypesList readRowAndGetNamesAndDataTypesForJSONEachRow(ReadBuffer & in, bool json_strings);
/// Read row in JSONCompactEachRow format and try to determine type for each field.
/// If the type of some field cannot be determined, return nullptr for it.

View File

@ -179,6 +179,30 @@ struct ToStartOfMonthImpl
using FactorTransform = ZeroTransform;
};
struct ToLastDayOfMonthImpl
{
static constexpr auto name = "toLastDayOfMonth";
static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone)
{
return time_zone.toLastDayNumOfMonth(time_zone.toDayNum(t));
}
static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone)
{
return time_zone.toLastDayNumOfMonth(time_zone.toDayNum(t));
}
static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone)
{
return time_zone.toLastDayNumOfMonth(ExtendedDayNum(d));
}
static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone)
{
return time_zone.toLastDayNumOfMonth(DayNum(d));
}
using FactorTransform = ZeroTransform;
};
struct ToStartOfQuarterImpl
{
static constexpr auto name = "toStartOfQuarter";

View File

@ -0,0 +1,87 @@
#include <Functions/IFunction.h>
#include <Functions/FunctionFactory.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/Serializations/ISerialization.h>
#include <Core/Field.h>
#include <Columns/ColumnString.h>
#include <Functions/FunctionHelpers.h>
namespace DB
{
namespace
{
/// Enumerate stream paths of data type.
class FunctionGetTypeSerializationStreams : public IFunction
{
public:
static constexpr auto name = "getTypeSerializationStreams";
static FunctionPtr create(ContextPtr)
{
return std::make_shared<FunctionGetTypeSerializationStreams>();
}
String getName() const override
{
return name;
}
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
size_t getNumberOfArguments() const override
{
return 1;
}
DataTypePtr getReturnTypeImpl(const DataTypes &) const override
{
return std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>());
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
auto type = getType(arguments[0]);
SerializationPtr serialization = type->getDefaultSerialization();
auto col_res = ColumnArray::create(ColumnString::create());
ColumnString & col_res_strings = typeid_cast<ColumnString &>(col_res->getData());
ColumnArray::Offsets & col_res_offsets = col_res->getOffsets();
serialization->enumerateStreams([&](const ISerialization::SubstreamPath & substream_path)
{
col_res_strings.insert(substream_path.toString());
});
col_res_offsets.push_back(col_res_strings.size());
return ColumnConst::create(std::move(col_res), input_rows_count);
}
private:
static DataTypePtr getType(const ColumnWithTypeAndName & argument)
{
const IColumn * arg_column = argument.column.get();
const ColumnString * arg_string = checkAndGetColumnConstData<ColumnString>(arg_column);
if (!arg_string)
return argument.type;
try
{
DataTypePtr type = DataTypeFactory::instance().get(arg_string->getDataAt(0).toString());
return type;
}
catch (const DB::Exception &)
{
return argument.type;
}
}
};
}
void registerFunctionGetTypeSerializationStreams(FunctionFactory & factory)
{
factory.registerFunction<FunctionGetTypeSerializationStreams>();
}
}

View File

@ -23,6 +23,7 @@ void registerFunctionToISOYear(FunctionFactory &);
void registerFunctionToCustomWeek(FunctionFactory &);
void registerFunctionToModifiedJulianDay(FunctionFactory &);
void registerFunctionToStartOfMonth(FunctionFactory &);
void registerFunctionToLastDayOfMonth(FunctionFactory &);
void registerFunctionToStartOfQuarter(FunctionFactory &);
void registerFunctionToStartOfYear(FunctionFactory &);
void registerFunctionToStartOfMinute(FunctionFactory &);
@ -100,6 +101,7 @@ void registerFunctionsDateTime(FunctionFactory & factory)
registerFunctionToCustomWeek(factory);
registerFunctionToModifiedJulianDay(factory);
registerFunctionToStartOfMonth(factory);
registerFunctionToLastDayOfMonth(factory);
registerFunctionToStartOfQuarter(factory);
registerFunctionToStartOfYear(factory);
registerFunctionToStartOfNanosecond(factory);

View File

@ -71,16 +71,17 @@ void registerFunctionErrorCodeToName(FunctionFactory &);
void registerFunctionTcpPort(FunctionFactory &);
void registerFunctionGetServerPort(FunctionFactory &);
void registerFunctionByteSize(FunctionFactory &);
void registerFunctionFile(FunctionFactory & factory);
void registerFunctionConnectionId(FunctionFactory & factory);
void registerFunctionPartitionId(FunctionFactory & factory);
void registerFunctionFile(FunctionFactory &);
void registerFunctionConnectionId(FunctionFactory &);
void registerFunctionPartitionId(FunctionFactory &);
void registerFunctionIsIPAddressContainedIn(FunctionFactory &);
void registerFunctionsTransactionCounters(FunctionFactory & factory);
void registerFunctionQueryID(FunctionFactory & factory);
void registerFunctionInitialQueryID(FunctionFactory & factory);
void registerFunctionQueryID(FunctionFactory &);
void registerFunctionInitialQueryID(FunctionFactory &);
void registerFunctionServerUUID(FunctionFactory &);
void registerFunctionZooKeeperSessionUptime(FunctionFactory &);
void registerFunctionGetOSKernelVersion(FunctionFactory &);
void registerFunctionGetTypeSerializationStreams(FunctionFactory &);
void registerFunctionFlattenTuple(FunctionFactory &);
#if USE_ICU
@ -169,6 +170,7 @@ void registerFunctionsMiscellaneous(FunctionFactory & factory)
registerFunctionServerUUID(factory);
registerFunctionZooKeeperSessionUptime(factory);
registerFunctionGetOSKernelVersion(factory);
registerFunctionGetTypeSerializationStreams(factory);
registerFunctionFlattenTuple(factory);
#if USE_ICU

View File

@ -0,0 +1,21 @@
#include <Functions/FunctionFactory.h>
#include <Functions/DateTimeTransforms.h>
#include <Functions/FunctionDateOrDateTimeToSomething.h>
namespace DB
{
using FunctionToLastDayOfMonth = FunctionDateOrDateTimeToSomething<DataTypeDate, ToLastDayOfMonthImpl>;
void registerFunctionToLastDayOfMonth(FunctionFactory & factory)
{
factory.registerFunction<FunctionToLastDayOfMonth>();
/// MySQL compatibility alias.
factory.registerFunction<FunctionToLastDayOfMonth>("LAST_DAY", FunctionFactory::CaseInsensitive);
}
}

View File

@ -20,6 +20,7 @@
#include <Common/SipHash.h>
#include <Common/FieldVisitorHash.h>
#include <Access/Common/AccessFlags.h>
#include <Access/EnabledQuota.h>
#include <Formats/FormatFactory.h>
#include <base/logger_useful.h>
@ -197,6 +198,9 @@ void AsynchronousInsertQueue::push(ASTPtr query, ContextPtr query_context)
copyData(*read_buf, write_buf);
}
if (auto quota = query_context->getQuota())
quota->used(QuotaType::WRITTEN_BYTES, bytes.size());
auto entry = std::make_shared<InsertData::Entry>(std::move(bytes), query_context->getCurrentQueryId());
InsertQuery key{query, settings};

View File

@ -1100,6 +1100,17 @@ StoragePtr Context::executeTableFunction(const ASTPtr & table_expression)
if (!res)
{
TableFunctionPtr table_function_ptr = TableFunctionFactory::instance().get(table_expression, shared_from_this());
if (table_function_ptr->needStructureHint())
{
const auto & insertion_table = getInsertionTable();
if (!insertion_table.empty())
{
const auto & structure_hint
= DatabaseCatalog::instance().getTable(insertion_table, shared_from_this())->getInMemoryMetadataPtr()->columns;
table_function_ptr->setStructureHint(structure_hint);
}
}
res = table_function_ptr->execute(table_expression, shared_from_this(), table_function_ptr->getName());
/// Since ITableFunction::parseArguments() may change table_expression, i.e.:

View File

@ -16,6 +16,7 @@
#include <Common/isLocalAddress.h>
#include <base/types.h>
#include <Storages/MergeTree/ParallelReplicasReadingCoordinator.h>
#include <Storages/ColumnsDescription.h>
#include "config_core.h"

View File

@ -508,6 +508,8 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription(
default_expr_list->children.emplace_back(
setAlias(
col_decl.default_specifier == "EPHEMERAL" ? /// can be ASTLiteral::value NULL
std::make_shared<ASTLiteral>(data_type_ptr->getDefault()) :
col_decl.default_expression->clone(),
tmp_column_name));
}
@ -536,7 +538,11 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription(
if (col_decl.default_expression)
{
ASTPtr default_expr = col_decl.default_expression->clone();
ASTPtr default_expr =
col_decl.default_specifier == "EPHEMERAL" && col_decl.default_expression->as<ASTLiteral>()->value.isNull() ?
std::make_shared<ASTLiteral>(DataTypeFactory::instance().get(col_decl.type)->getDefault()) :
col_decl.default_expression->clone();
if (col_decl.type)
column.type = name_type_it->type;
else

View File

@ -1,6 +1,7 @@
#include <Interpreters/InterpreterInsertQuery.h>
#include <Access/Common/AccessFlags.h>
#include <Access/EnabledQuota.h>
#include <Columns/ColumnNullable.h>
#include <Processors/Transforms/buildPushingToViewsChain.h>
#include <DataTypes/DataTypeNullable.h>
@ -51,6 +52,8 @@ InterpreterInsertQuery::InterpreterInsertQuery(
, async_insert(async_insert_)
{
checkStackSize();
if (auto quota = getContext()->getQuota())
quota->checkExceeded(QuotaType::WRITTEN_BYTES);
}
@ -269,7 +272,7 @@ Chain InterpreterInsertQuery::buildChainImpl(
table_prefers_large_blocks ? settings.min_insert_block_size_bytes : 0));
}
auto counting = std::make_shared<CountingTransform>(out.getInputHeader(), thread_status);
auto counting = std::make_shared<CountingTransform>(out.getInputHeader(), thread_status, getContext()->getQuota());
counting->setProcessListElement(context_ptr->getProcessListElement());
out.addSource(std::move(counting));

View File

@ -1,6 +1,7 @@
#include <Parsers/ASTColumnDeclaration.h>
#include <Common/quoteString.h>
#include <IO/Operators.h>
#include <Parsers/ASTLiteral.h>
namespace DB
@ -71,9 +72,13 @@ void ASTColumnDeclaration::formatImpl(const FormatSettings & settings, FormatSta
if (default_expression)
{
settings.ostr << ' ' << (settings.hilite ? hilite_keyword : "") << default_specifier << (settings.hilite ? hilite_none : "") << ' ';
settings.ostr << ' ' << (settings.hilite ? hilite_keyword : "") << default_specifier << (settings.hilite ? hilite_none : "");
if (default_specifier != "EPHEMERAL" || !default_expression->as<ASTLiteral>()->value.isNull())
{
settings.ostr << ' ';
default_expression->formatImpl(settings, state, frame);
}
}
if (comment)
{

View File

@ -9,6 +9,7 @@
#include <Parsers/CommonParsers.h>
#include <Parsers/ParserDataType.h>
#include <Poco/String.h>
#include <Parsers/ASTLiteral.h>
namespace DB
@ -185,8 +186,7 @@ bool IParserColumnDeclaration<NameParser>::parseImpl(Pos & pos, ASTPtr & node, E
}
Pos pos_before_specifier = pos;
if (s_default.ignore(pos, expected) || s_materialized.ignore(pos, expected) ||
s_ephemeral.ignore(pos, expected) || s_alias.ignore(pos, expected))
if (s_default.ignore(pos, expected) || s_materialized.ignore(pos, expected) || s_alias.ignore(pos, expected))
{
default_specifier = Poco::toUpper(std::string{pos_before_specifier->begin, pos_before_specifier->end});
@ -194,6 +194,12 @@ bool IParserColumnDeclaration<NameParser>::parseImpl(Pos & pos, ASTPtr & node, E
if (!expr_parser.parse(pos, default_expression, expected))
return false;
}
else if (s_ephemeral.ignore(pos, expected))
{
default_specifier = "EPHEMERAL";
if (!expr_parser.parse(pos, default_expression, expected) && type)
default_expression = std::make_shared<ASTLiteral>(Field());
}
if (require_type && !type && !default_expression)
return false; /// reject column name without type

View File

@ -7,7 +7,7 @@ namespace DB
namespace ErrorCodes
{
extern const int TOO_MANY_ROWS_OR_BYTES;
extern const int QUOTA_EXPIRED;
extern const int QUOTA_EXCEEDED;
extern const int QUERY_WAS_CANCELLED;
}
@ -34,7 +34,7 @@ static bool checkCanAddAdditionalInfoToException(const DB::Exception & exception
{
/// Don't add additional info to limits and quota exceptions, and in case of kill query (to pass tests).
return exception.code() != ErrorCodes::TOO_MANY_ROWS_OR_BYTES
&& exception.code() != ErrorCodes::QUOTA_EXPIRED
&& exception.code() != ErrorCodes::QUOTA_EXCEEDED
&& exception.code() != ErrorCodes::QUERY_WAS_CANCELLED;
}

View File

@ -96,21 +96,33 @@ IRowWithNamesSchemaReader::IRowWithNamesSchemaReader(ReadBuffer & in_, size_t ma
NamesAndTypesList IRowWithNamesSchemaReader::readSchema()
{
auto names_and_types = readRowAndGetNamesAndDataTypes();
bool eof = false;
auto names_and_types = readRowAndGetNamesAndDataTypes(eof);
std::unordered_map<String, DataTypePtr> names_to_types;
std::vector<String> names_order;
names_to_types.reserve(names_and_types.size());
names_order.reserve(names_and_types.size());
for (const auto & [name, type] : names_and_types)
{
names_to_types[name] = type;
names_order.push_back(name);
}
for (size_t row = 1; row < max_rows_to_read; ++row)
{
auto new_names_and_types = readRowAndGetNamesAndDataTypes();
if (new_names_and_types.empty())
auto new_names_and_types = readRowAndGetNamesAndDataTypes(eof);
if (eof)
/// We reached eof.
break;
for (const auto & [name, new_type] : new_names_and_types)
{
auto it = names_and_types.find(name);
auto it = names_to_types.find(name);
/// If we didn't see this column before, just add it.
if (it == names_and_types.end())
if (it == names_to_types.end())
{
names_and_types[name] = new_type;
names_to_types[name] = new_type;
names_order.push_back(name);
continue;
}
@ -133,12 +145,13 @@ NamesAndTypesList IRowWithNamesSchemaReader::readSchema()
}
/// Check that we read at least one column.
if (names_and_types.empty())
if (names_to_types.empty())
throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "Cannot read rows from the data");
NamesAndTypesList result;
for (auto & [name, type] : names_and_types)
for (auto & name : names_order)
{
auto & type = names_to_types[name];
/// Check that we could determine the type of this column.
if (!type)
{

View File

@ -68,10 +68,10 @@ public:
protected:
/// Read one row and determine types of columns in it.
/// Return map {column_name : type}.
/// Return list with names and types.
/// If it's impossible to determine the type for some column, return nullptr for it.
/// Return an empty map if we can't read more data.
virtual std::unordered_map<String, DataTypePtr> readRowAndGetNamesAndDataTypes() = 0;
/// Set eof = true if we can't read more data.
virtual NamesAndTypesList readRowAndGetNamesAndDataTypes(bool & eof) = 0;
private:
size_t max_rows_to_read;
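The switch from unordered_map to NamesAndTypesList, together with the names_to_types/names_order pair in readSchema() above, amounts to an order-preserving merge of per-row schemas. A minimal Python sketch (the type-unification branch elided in the hunk is stubbed out):

def read_schema(rows):
    """Merge per-row [(name, type)] lists, preserving first-seen column order."""
    names_to_types = {}
    names_order = []
    for row in rows:
        for name, new_type in row:
            if name not in names_to_types:
                names_to_types[name] = new_type
                names_order.append(name)
            elif names_to_types[name] != new_type:
                names_to_types[name] = None   # stand-in for type unification
    if not names_to_types:
        raise ValueError("Cannot read rows from the data")
    return [(name, names_to_types[name]) for name in names_order]

rows = [[("a", "Int64"), ("b", "String")], [("b", "String"), ("c", "Float64")]]
assert read_schema(rows) == [("a", "Int64"), ("b", "String"), ("c", "Float64")]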

View File

@ -312,7 +312,7 @@ JSONEachRowSchemaReader::JSONEachRowSchemaReader(ReadBuffer & in_, bool json_str
}
std::unordered_map<String, DataTypePtr> JSONEachRowSchemaReader::readRowAndGetNamesAndDataTypes()
NamesAndTypesList JSONEachRowSchemaReader::readRowAndGetNamesAndDataTypes(bool & eof)
{
if (first_row)
{
@ -339,7 +339,10 @@ std::unordered_map<String, DataTypePtr> JSONEachRowSchemaReader::readRowAndGetNa
skipWhitespaceIfAny(in);
if (in.eof())
{
eof = true;
return {};
}
return readRowAndGetNamesAndDataTypesForJSONEachRow(in, json_strings);
}

View File

@ -91,7 +91,7 @@ public:
JSONEachRowSchemaReader(ReadBuffer & in_, bool json_strings, const FormatSettings & format_settings);
private:
std::unordered_map<String, DataTypePtr> readRowAndGetNamesAndDataTypes() override;
NamesAndTypesList readRowAndGetNamesAndDataTypes(bool & eof) override;
bool json_strings;
bool first_row = true;

View File

@ -222,7 +222,7 @@ TSKVSchemaReader::TSKVSchemaReader(ReadBuffer & in_, const FormatSettings & form
{
}
std::unordered_map<String, DataTypePtr> TSKVSchemaReader::readRowAndGetNamesAndDataTypes()
NamesAndTypesList TSKVSchemaReader::readRowAndGetNamesAndDataTypes(bool & eof)
{
if (first_row)
{
@ -231,7 +231,10 @@ std::unordered_map<String, DataTypePtr> TSKVSchemaReader::readRowAndGetNamesAndD
}
if (in.eof())
{
eof = true;
return {};
}
if (*in.position() == '\n')
{
@ -239,7 +242,7 @@ std::unordered_map<String, DataTypePtr> TSKVSchemaReader::readRowAndGetNamesAndD
return {};
}
std::unordered_map<String, DataTypePtr> names_and_types;
NamesAndTypesList names_and_types;
StringRef name_ref;
String name_buf;
String value;
@ -250,7 +253,7 @@ std::unordered_map<String, DataTypePtr> TSKVSchemaReader::readRowAndGetNamesAndD
if (has_value)
{
readEscapedString(value, in);
names_and_types[std::move(name)] = determineDataTypeByEscapingRule(value, format_settings, FormatSettings::EscapingRule::Escaped);
names_and_types.emplace_back(std::move(name), determineDataTypeByEscapingRule(value, format_settings, FormatSettings::EscapingRule::Escaped));
}
else
{

View File

@ -59,7 +59,7 @@ public:
TSKVSchemaReader(ReadBuffer & in_, const FormatSettings & format_settings_);
private:
std::unordered_map<String, DataTypePtr> readRowAndGetNamesAndDataTypes() override;
NamesAndTypesList readRowAndGetNamesAndDataTypes(bool & eof) override;
const FormatSettings format_settings;
bool first_row = true;

View File

@ -18,11 +18,12 @@ namespace DB
void CountingTransform::onConsume(Chunk chunk)
{
if (quota)
quota->used(QuotaType::WRITTEN_BYTES, chunk.bytes());
Progress local_progress{WriteProgress(chunk.getNumRows(), chunk.bytes())};
progress.incrementPiecewiseAtomically(local_progress);
//std::cerr << "============ counting adding progress for " << static_cast<const void *>(thread_status) << ' ' << chunk.getNumRows() << " rows\n";
if (thread_status)
{
thread_status->performance_counters.increment(ProfileEvents::InsertedRows, local_progress.written_rows);

View File

@ -2,6 +2,7 @@
#include <IO/Progress.h>
#include <Processors/Transforms/ExceptionKeepingTransform.h>
#include <Access/EnabledQuota.h>
namespace DB
@ -14,8 +15,12 @@ class ThreadStatus;
class CountingTransform final : public ExceptionKeepingTransform
{
public:
explicit CountingTransform(const Block & header, ThreadStatus * thread_status_ = nullptr)
: ExceptionKeepingTransform(header, header), thread_status(thread_status_) {}
explicit CountingTransform(
const Block & header,
ThreadStatus * thread_status_ = nullptr,
std::shared_ptr<const EnabledQuota> quota_ = nullptr)
: ExceptionKeepingTransform(header, header)
, thread_status(thread_status_), quota(std::move(quota_)) {}
String getName() const override { return "CountingTransform"; }
@ -47,6 +52,9 @@ protected:
ProgressCallback progress_callback;
QueryStatus * process_elem = nullptr;
ThreadStatus * thread_status = nullptr;
/// Quota is used to limit amount of written bytes.
std::shared_ptr<const EnabledQuota> quota;
Chunk cur_chunk;
};

View File

@ -25,6 +25,8 @@ namespace ErrorCodes
const String HDFSBuilderWrapper::CONFIG_PREFIX = "hdfs";
const String HDFS_URL_REGEXP = "^hdfs://[^/]*/.*";
std::once_flag init_libhdfs3_conf_flag;
void HDFSBuilderWrapper::loadFromConfig(const Poco::Util::AbstractConfiguration & config,
const String & prefix, bool isUser)
{
@ -123,6 +125,8 @@ HDFSBuilderWrapper createHDFSBuilder(const String & uri_str, const Poco::Util::A
throw Exception("Illegal HDFS URI: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS);
// The LIBHDFS3_CONF env variable must be set *before* HDFSBuilderWrapper construction.
std::call_once(init_libhdfs3_conf_flag, [&config]()
{
String libhdfs3_conf = config.getString(HDFSBuilderWrapper::CONFIG_PREFIX + ".libhdfs3_conf", "");
if (!libhdfs3_conf.empty())
{
@ -133,9 +137,10 @@ HDFSBuilderWrapper createHDFSBuilder(const String & uri_str, const Poco::Util::A
if (std::filesystem::exists(config_dir / libhdfs3_conf))
libhdfs3_conf = std::filesystem::absolute(config_dir / libhdfs3_conf);
}
setenv("LIBHDFS3_CONF", libhdfs3_conf.c_str(), 1);
}
});
HDFSBuilderWrapper builder;
if (builder.get() == nullptr)
throw Exception("Unable to create builder to connect to HDFS: " +

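The std::call_once above guarantees that LIBHDFS3_CONF is exported exactly once, before the first builder is constructed; roughly the following pattern, sketched in Python with hypothetical names:

import os
import threading

_conf_lock = threading.Lock()
_conf_done = False

def init_libhdfs3_conf(conf_path: str) -> None:
    # Python analogue of std::call_once: the env variable is set exactly
    # once, regardless of how many builders are created concurrently.
    global _conf_done
    with _conf_lock:
        if not _conf_done:
            if conf_path:
                os.environ["LIBHDFS3_CONF"] = conf_path
            _conf_done = True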
View File

@ -22,8 +22,6 @@ ReadBufferFromHDFS::~ReadBufferFromHDFS() = default;
struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory<SeekableReadBuffer>
{
/// HDFS create/open functions are not thread safe
static std::mutex hdfs_init_mutex;
String hdfs_uri;
String hdfs_file_path;
@ -46,8 +44,6 @@ struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory<S
, builder(createHDFSBuilder(hdfs_uri_, config_))
, read_until_position(read_until_position_)
{
std::lock_guard lock(hdfs_init_mutex);
fs = createHDFSFS(builder.get());
fin = hdfsOpenFile(fs.get(), hdfs_file_path.c_str(), O_RDONLY, 0, 0, 0);
@ -59,7 +55,6 @@ struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory<S
~ReadBufferFromHDFSImpl() override
{
std::lock_guard lock(hdfs_init_mutex);
hdfsCloseFile(fs.get(), fin);
}
@ -124,9 +119,6 @@ struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory<S
}
};
std::mutex ReadBufferFromHDFS::ReadBufferFromHDFSImpl::hdfs_init_mutex;
ReadBufferFromHDFS::ReadBufferFromHDFS(
const String & hdfs_uri_,
const String & hdfs_file_path_,

View File

@ -643,6 +643,8 @@ void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checks
/// Motivation: memory for the index is shared between queries - it does not belong to the query itself.
MemoryTrackerBlockerInThread temporarily_disable_memory_tracker(VariableContext::Global);
try
{
loadUUID();
loadColumns(require_columns_checksums);
loadChecksums(require_columns_checksums);
@ -661,6 +663,15 @@ void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checks
checkConsistency(require_columns_checksums);
loadDefaultCompressionCodec();
}
catch (...)
{
// The data part being loaded may be broken, and some of its meta info may already have been
// written into metadata before the exception; we need to clean it all up.
metadata_manager->deleteAll(/*include_projection*/ true);
metadata_manager->assertAllDeleted(/*include_projection*/ true);
throw;
}
}
void IMergeTreeDataPart::appendFilesOfColumnsChecksumsIndexes(Strings & files, bool include_projection) const

View File

@ -467,7 +467,7 @@ std::pair<BlocksPtr, Block> StorageWindowView::getNewBlocks(UInt32 watermark)
InterpreterSelectQuery fetch(
getFetchColumnQuery(w_start, watermark),
window_view_context,
getContext(),
getInnerStorage(),
nullptr,
SelectQueryOptions(QueryProcessingStage::FetchColumns));
@ -509,11 +509,11 @@ std::pair<BlocksPtr, Block> StorageWindowView::getNewBlocks(UInt32 watermark)
return StorageBlocks::createStorage(blocks_id_global, required_columns, std::move(pipes), QueryProcessingStage::WithMergeableState);
};
TemporaryTableHolder blocks_storage(window_view_context, creator);
TemporaryTableHolder blocks_storage(getContext(), creator);
InterpreterSelectQuery select(
getFinalQuery(),
window_view_context,
getContext(),
blocks_storage.getTable(),
blocks_storage.getTable()->getInMemoryMetadataPtr(),
SelectQueryOptions(QueryProcessingStage::Complete));
@ -617,8 +617,8 @@ std::shared_ptr<ASTCreateQuery> StorageWindowView::getInnerTableCreateQuery(
auto t_sample_block
= InterpreterSelectQuery(
inner_select_query, window_view_context, getParentStorage(), nullptr,
SelectQueryOptions(QueryProcessingStage::WithMergeableState)) .getSampleBlock();
inner_select_query, getContext(), getParentStorage(), nullptr, SelectQueryOptions(QueryProcessingStage::WithMergeableState))
.getSampleBlock();
auto columns_list = std::make_shared<ASTExpressionList>();
@ -891,7 +891,7 @@ void StorageWindowView::updateMaxWatermark(UInt32 watermark)
inline void StorageWindowView::cleanup()
{
InterpreterAlterQuery alter_query(getCleanupQuery(), window_view_context);
InterpreterAlterQuery alter_query(getCleanupQuery(), getContext());
alter_query.execute();
std::lock_guard lock(fire_signal_mutex);
@ -999,9 +999,6 @@ StorageWindowView::StorageWindowView(
, WithContext(context_->getGlobalContext())
, log(&Poco::Logger::get(fmt::format("StorageWindowView({}.{})", table_id_.database_name, table_id_.table_name)))
{
window_view_context = Context::createCopy(getContext());
window_view_context->makeQueryContext();
StorageInMemoryMetadata storage_metadata;
storage_metadata.setColumns(columns_);
setInMemoryMetadata(storage_metadata);
@ -1089,11 +1086,11 @@ StorageWindowView::StorageWindowView(
clean_interval_ms = getContext()->getSettingsRef().window_view_clean_interval.totalMilliseconds();
next_fire_signal = getWindowUpperBound(std::time(nullptr));
clean_cache_task = window_view_context->getSchedulePool().createTask(getStorageID().getFullTableName(), [this] { threadFuncCleanup(); });
clean_cache_task = getContext()->getSchedulePool().createTask(getStorageID().getFullTableName(), [this] { threadFuncCleanup(); });
if (is_proctime)
fire_task = window_view_context->getSchedulePool().createTask(getStorageID().getFullTableName(), [this] { threadFuncFireProc(); });
fire_task = getContext()->getSchedulePool().createTask(getStorageID().getFullTableName(), [this] { threadFuncFireProc(); });
else
fire_task = window_view_context->getSchedulePool().createTask(getStorageID().getFullTableName(), [this] { threadFuncFireEvent(); });
fire_task = getContext()->getSchedulePool().createTask(getStorageID().getFullTableName(), [this] { threadFuncFireEvent(); });
clean_cache_task->deactivate();
fire_task->deactivate();
}
@ -1424,9 +1421,10 @@ Block & StorageWindowView::getHeader() const
std::lock_guard lock(sample_block_lock);
if (!sample_block)
{
sample_block = InterpreterSelectQuery(
select_query->clone(), window_view_context, getParentStorage(), nullptr,
SelectQueryOptions(QueryProcessingStage::Complete)).getSampleBlock();
sample_block
= InterpreterSelectQuery(
select_query->clone(), getContext(), getParentStorage(), nullptr, SelectQueryOptions(QueryProcessingStage::Complete))
.getSampleBlock();
/// convert all columns to full columns
/// in case some of them are constant
for (size_t i = 0; i < sample_block.columns(); ++i)

View File

@ -157,7 +157,6 @@ private:
/// Used to fetch the mergeable state and generate the final result. e.g. SELECT * FROM * GROUP BY tumble(____timestamp, *)
ASTPtr final_query;
ContextMutablePtr window_view_context;
bool is_proctime{true};
bool is_time_column_func_now;
bool is_tumble; // false if is hop

View File

@ -18,6 +18,7 @@ namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int CANNOT_EXTRACT_TABLE_STRUCTURE;
}
void TableFunctionInput::parseArguments(const ASTPtr & ast_function, ContextPtr context)
@ -29,6 +30,12 @@ void TableFunctionInput::parseArguments(const ASTPtr & ast_function, ContextPtr
auto args = function->arguments->children;
if (args.empty())
{
structure = "auto";
return;
}
if (args.size() != 1)
throw Exception("Table function '" + getName() + "' requires exactly 1 argument: structure",
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
@ -38,6 +45,16 @@ void TableFunctionInput::parseArguments(const ASTPtr & ast_function, ContextPtr
ColumnsDescription TableFunctionInput::getActualTableStructure(ContextPtr context) const
{
if (structure == "auto")
{
if (structure_hint.empty())
throw Exception(
ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE,
"Table function '{}' was used without structure argument but structure could not be determined automatically. Please, "
"provide structure manually",
getName());
return structure_hint;
}
return parseColumnsListFromString(structure, context);
}
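Taken together with the Context::executeTableFunction() hunk above, structure resolution for input() now follows this precedence: explicit structure argument, else the insertion table's columns as a hint, else an error. A minimal Python sketch of that precedence (names and the toy parser are illustrative):

from typing import List, Optional, Tuple

def parse_columns(s: str) -> List[Tuple[str, str]]:
    # Toy stand-in for parseColumnsListFromString.
    return [tuple(col.strip().split(" ", 1)) for col in s.split(",")]

def resolve_structure(explicit: Optional[str],
                      hint: Optional[List[Tuple[str, str]]]) -> List[Tuple[str, str]]:
    if explicit and explicit != "auto":       # input('a Int64, b String')
        return parse_columns(explicit)
    if hint:                                  # columns of the insertion table
        return hint
    raise ValueError("Table function 'input' was used without structure "
                     "argument but structure could not be determined automatically")

assert resolve_structure("a Int64, b String", None) == [("a", "Int64"), ("b", "String")]
assert resolve_structure(None, [("x", "UInt8")]) == [("x", "UInt8")]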

View File

@ -16,6 +16,8 @@ public:
static constexpr auto name = "input";
std::string getName() const override { return name; }
bool hasStaticStructure() const override { return true; }
bool needStructureHint() const override { return true; }
void setStructureHint(const ColumnsDescription & structure_hint_) override { structure_hint = structure_hint_; }
private:
StoragePtr executeImpl(const ASTPtr & ast_function, ContextPtr context, const std::string & table_name, ColumnsDescription cached_columns) const override;
@ -25,6 +27,7 @@ private:
void parseArguments(const ASTPtr & ast_function, ContextPtr context) override;
String structure;
ColumnsDescription structure_hint;
};
}

View File

@ -264,7 +264,7 @@ def main():
version_type = "stable"
official_flag = True
update_version_local(REPO_COPY, version, version_type)
update_version_local(version, version_type)
logging.info("Updated local files with version")

View File

@ -397,17 +397,19 @@ def main():
images_dict = get_images_dict(GITHUB_WORKSPACE, "docker/images.json")
if args.all:
pr_info = PRInfo()
if args.all:
pr_info.changed_files = set(images_dict.keys())
elif args.image_path:
pr_info = PRInfo()
pr_info.changed_files = set(i for i in args.image_path)
else:
pr_info = PRInfo(need_changed_files=True)
pr_info.fetch_changed_files()
changed_images = get_changed_docker_images(pr_info, images_dict)
logging.info("Has changed images %s", ", ".join([im.path for im in changed_images]))
if changed_images:
logging.info(
"Has changed images: %s", ", ".join([im.path for im in changed_images])
)
image_versions, result_version = gen_versions(pr_info, args.suffix)

tests/ci/docker_server.py (new file, 360 lines)
View File

@ -0,0 +1,360 @@
#!/usr/bin/env python
import argparse
import json
import logging
import subprocess
from os import path as p, makedirs
from typing import List, Tuple
from github import Github
from build_check import get_release_or_pr
from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
from commit_status_helper import post_commit_status
from docker_images_check import DockerImage
from env_helper import CI, GITHUB_RUN_URL, RUNNER_TEMP, S3_BUILDS_BUCKET
from get_robot_token import get_best_robot_token, get_parameter_from_ssm
from pr_info import PRInfo
from s3_helper import S3Helper
from stopwatch import Stopwatch
from upload_result_helper import upload_results
from version_helper import (
ClickHouseVersion,
get_tagged_versions,
get_version_from_repo,
get_version_from_string,
)
TEMP_PATH = p.join(RUNNER_TEMP, "docker_images_check")
BUCKETS = {"amd64": "package_release", "arm64": "package_aarch64"}
class DelOS(argparse.Action):
def __call__(self, _, namespace, __, option_string=None):
no_build = self.dest[3:] if self.dest.startswith("no_") else self.dest
if no_build in namespace.os:
namespace.os.remove(no_build)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="A program to build clickhouse-server image, both alpine and "
"ubuntu versions",
)
parser.add_argument(
"--version",
type=version_arg,
default=get_version_from_repo().string,
help="a version to build",
)
parser.add_argument(
"--release-type",
type=str,
choices=("auto", "latest", "major", "minor", "patch", "head"),
default="head",
help="version part that will be updated when '--version' is set; "
"'auto' is a special case, it will get versions from github and detect the "
"release type (latest, major, minor or patch) automatically",
)
parser.add_argument(
"--image-path",
type=str,
default="docker/server",
help="a path to docker context directory",
)
parser.add_argument(
"--image-repo",
type=str,
default="clickhouse/clickhouse-server",
help="image name on docker hub",
)
parser.add_argument(
"--bucket-prefix",
help="if set, then is used as source for deb and tgz files",
)
parser.add_argument("--reports", default=True, help=argparse.SUPPRESS)
parser.add_argument(
"--no-reports",
action="store_false",
dest="reports",
default=argparse.SUPPRESS,
help="don't push reports to S3 and github",
)
parser.add_argument("--push", default=True, help=argparse.SUPPRESS)
parser.add_argument(
"--no-push-images",
action="store_false",
dest="push",
default=argparse.SUPPRESS,
help="don't push images to docker hub",
)
parser.add_argument("--os", default=["ubuntu", "alpine"], help=argparse.SUPPRESS)
parser.add_argument(
"--no-ubuntu",
action=DelOS,
nargs=0,
default=argparse.SUPPRESS,
help="don't build ubuntu image",
)
parser.add_argument(
"--no-alpine",
action=DelOS,
nargs=0,
default=argparse.SUPPRESS,
help="don't build alpine image",
)
return parser.parse_args()
def version_arg(version: str) -> ClickHouseVersion:
try:
return get_version_from_string(version)
except ValueError as e:
raise argparse.ArgumentTypeError(e)
def auto_release_type(version: ClickHouseVersion, release_type: str) -> str:
if release_type != "auto":
return release_type
git_versions = get_tagged_versions()
reference_version = git_versions[0]
for i in reversed(range(len(git_versions))):
if git_versions[i] < version:
if i == len(git_versions) - 1:
return "latest"
reference_version = git_versions[i + 1]
break
if version.major < reference_version.major:
return "major"
if version.minor < reference_version.minor:
return "minor"
if version.patch < reference_version.patch:
return "patch"
raise ValueError(
"Release type 'tweak' is not supported for "
f"{version.string} < {reference_version.string}"
)
def gen_tags(version: ClickHouseVersion, release_type: str) -> List[str]:
"""
22.2.2.2 + latest:
- latest
- 22
- 22.2
- 22.2.2
- 22.2.2.2
22.2.2.2 + major:
- 22
- 22.2
- 22.2.2
- 22.2.2.2
22.2.2.2 + minor:
- 22.2
- 22.2.2
- 22.2.2.2
22.2.2.2 + patch:
- 22.2.2
- 22.2.2.2
22.2.2.2 + head:
- head
"""
parts = version.string.split(".")
tags = []
if release_type == "latest":
tags.append(release_type)
for i in range(len(parts)):
tags.append(".".join(parts[: i + 1]))
elif release_type == "major":
for i in range(len(parts)):
tags.append(".".join(parts[: i + 1]))
elif release_type == "minor":
for i in range(1, len(parts)):
tags.append(".".join(parts[: i + 1]))
elif release_type == "patch":
for i in range(2, len(parts)):
tags.append(".".join(parts[: i + 1]))
elif release_type == "head":
tags.append(release_type)
else:
raise ValueError(f"{release_type} is not valid release part")
return tags
def buildx_args(bucket_prefix: str, arch: str) -> List[str]:
args = [f"--platform=linux/{arch}", f"--label=build-url={GITHUB_RUN_URL}"]
if bucket_prefix:
url = p.join(bucket_prefix, BUCKETS[arch]) # to prevent a double //
args.append(f"--build-arg=REPOSITORY='{url}'")
args.append(f"--build-arg=deb_location_url='{url}'")
return args
def build_and_push_image(
image: DockerImage,
push: bool,
bucket_prefix: str,
os: str,
tag: str,
version: ClickHouseVersion,
) -> List[Tuple[str, str]]:
result = []
if os != "ubuntu":
tag += f"-{os}"
init_args = ["docker", "buildx", "build"]
if push:
init_args.append("--push")
init_args.append("--output=type=image,push-by-digest=true")
init_args.append(f"--tag={image.repo}")
else:
init_args.append("--output=type=docker")
# `docker buildx build --load` does not support multiple images currently
# images must be built separately and merged together with `docker buildx imagetools create`
digests = []
for arch in BUCKETS:
arch_tag = f"{tag}-{arch}"
metadata_path = p.join(TEMP_PATH, arch_tag)
dockerfile = p.join(image.full_path, f"Dockerfile.{os}")
cmd_args = list(init_args)
cmd_args.extend(buildx_args(bucket_prefix, arch))
if not push:
cmd_args.append(f"--tag={image.repo}:{arch_tag}")
cmd_args.extend(
[
f"--metadata-file={metadata_path}",
f"--build-arg=VERSION='{version.string}'",
"--progress=plain",
f"--file={dockerfile}",
image.full_path,
]
)
cmd = " ".join(cmd_args)
logging.info("Building image %s:%s for arch %s: %s", image.repo, tag, arch, cmd)
with subprocess.Popen(
cmd,
shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
universal_newlines=True,
) as process:
for line in process.stdout: # type: ignore
print(line, end="")
retcode = process.wait()
if retcode != 0:
result.append((f"{image.repo}:{tag}-{arch}", "FAIL"))
return result
result.append((f"{image.repo}:{tag}-{arch}", "OK"))
with open(metadata_path, "rb") as m:
metadata = json.load(m)
digests.append(metadata["containerimage.digest"])
if push:
cmd = (
"docker buildx imagetools create "
f"--tag {image.repo}:{tag} {' '.join(digests)}"
)
logging.info("Pushing merged %s:%s image: %s", image.repo, tag, cmd)
with subprocess.Popen(
cmd,
shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
universal_newlines=True,
) as process:
for line in process.stdout: # type: ignore
print(line, end="")
retcode = process.wait()
if retcode != 0:
result.append((f"{image.repo}:{tag}", "FAIL"))
else:
logging.info(
"Merging is available only on push, separate %s images are created",
f"{image.repo}:{tag}-$arch",
)
return result
def main():
logging.basicConfig(level=logging.INFO)
stopwatch = Stopwatch()
makedirs(TEMP_PATH, exist_ok=True)
args = parse_args()
image = DockerImage(args.image_path, args.image_repo, False)
args.release_type = auto_release_type(args.version, args.release_type)
tags = gen_tags(args.version, args.release_type)
NAME = f"Docker image {image.repo} building check (actions)"
pr_info = None
if CI:
pr_info = PRInfo()
release_or_pr = get_release_or_pr(pr_info, {"package_type": ""}, args.version)
args.bucket_prefix = (
f"https://s3.amazonaws.com/{S3_BUILDS_BUCKET}/"
f"{release_or_pr}/{pr_info.sha}"
)
if args.push:
subprocess.check_output( # pylint: disable=unexpected-keyword-arg
"docker login --username 'robotclickhouse' --password-stdin",
input=get_parameter_from_ssm("dockerhub_robot_password"),
encoding="utf-8",
shell=True,
)
NAME = f"Docker image {image.repo} build and push (actions)"
logging.info("Following tags will be created: %s", ", ".join(tags))
status = "success"
test_results = [] # type: List[Tuple[str, str]]
for os in args.os:
for tag in tags:
test_results.extend(
build_and_push_image(
image, args.push, args.bucket_prefix, os, tag, args.version
)
)
if test_results[-1][1] != "OK":
status = "failure"
pr_info = pr_info or PRInfo()
s3_helper = S3Helper("https://s3.amazonaws.com")
url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [], NAME)
print(f"::notice ::Report url: {url}")
print(f'::set-output name=url_output::"{url}"')
if not args.reports:
return
description = f"Processed tags: {', '.join(tags)}"
if len(description) >= 140:
description = description[:136] + "..."
gh = Github(get_best_robot_token())
post_commit_status(gh, pr_info.sha, NAME, description, status, url)
prepared_events = prepare_tests_results_for_clickhouse(
pr_info,
test_results,
status,
stopwatch.duration_seconds,
stopwatch.start_time_str,
url,
NAME,
)
ch_helper = ClickHouseHelper()
ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)
if __name__ == "__main__":
main()
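A hedged usage sketch of the tagging helpers defined in this new file, with the expected values taken from the gen_tags docstring and the unit tests below:

from version_helper import get_version_from_string
import docker_server as ds

version = get_version_from_string("22.2.2.2")
assert ds.gen_tags(version, "minor") == ["22.2", "22.2.2", "22.2.2.2"]
assert ds.gen_tags(version, "head") == ["head"]
# "auto" is not a valid input to gen_tags(); it is resolved first:
# release_type = ds.auto_release_type(version, "auto")  # inspects git tags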

View File

@ -2,12 +2,16 @@
import os
import unittest
from unittest.mock import patch
from unittest.mock import patch, MagicMock
from env_helper import GITHUB_RUN_URL
from pr_info import PRInfo
import docker_images_check as di
with patch("git_helper.Git"):
from version_helper import get_version_from_string, get_tagged_versions
import docker_server as ds
# di.logging.basicConfig(level=di.logging.INFO)
@ -221,5 +225,46 @@ class TestDockerImageCheck(unittest.TestCase):
self.assertEqual(results, expected)
class TestDockerServer(unittest.TestCase):
def test_gen_tags(self):
version = get_version_from_string("22.2.2.2")
cases = (
("latest", ["latest", "22", "22.2", "22.2.2", "22.2.2.2"]),
("major", ["22", "22.2", "22.2.2", "22.2.2.2"]),
("minor", ["22.2", "22.2.2", "22.2.2.2"]),
("patch", ["22.2.2", "22.2.2.2"]),
("head", ["head"]),
)
for case in cases:
release_type = case[0]
self.assertEqual(case[1], ds.gen_tags(version, release_type))
with self.assertRaises(ValueError):
ds.gen_tags(version, "auto")
@patch("docker_server.get_tagged_versions")
def test_auto_release_type(self, mock_tagged_versions: MagicMock):
mock_tagged_versions.return_value = [
get_version_from_string("1.1.1.1"),
get_version_from_string("1.2.1.1"),
get_version_from_string("2.1.1.1"),
get_version_from_string("2.2.1.1"),
get_version_from_string("2.2.2.1"),
]
cases = (
(get_version_from_string("1.0.1.1"), "minor"),
(get_version_from_string("1.1.2.1"), "minor"),
(get_version_from_string("1.3.1.1"), "major"),
(get_version_from_string("2.1.2.1"), "minor"),
(get_version_from_string("2.2.1.3"), "patch"),
(get_version_from_string("2.2.3.1"), "latest"),
(get_version_from_string("2.3.1.1"), "latest"),
)
_ = get_tagged_versions()
for case in cases:
release = ds.auto_release_type(case[0], "auto")
self.assertEqual(case[1], release)
if __name__ == "__main__":
unittest.main()

View File

@ -1,7 +1,11 @@
import os
from os import path as p
module_dir = p.abspath(p.dirname(__file__))
git_root = p.abspath(p.join(module_dir, "..", ".."))
CI = bool(os.getenv("CI"))
TEMP_PATH = os.getenv("TEMP_PATH", os.path.abspath("."))
TEMP_PATH = os.getenv("TEMP_PATH", module_dir)
CACHES_PATH = os.getenv("CACHES_PATH", TEMP_PATH)
CLOUDFLARE_TOKEN = os.getenv("CLOUDFLARE_TOKEN")
@ -9,11 +13,11 @@ GITHUB_EVENT_PATH = os.getenv("GITHUB_EVENT_PATH")
GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")
GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID", "0")
GITHUB_SERVER_URL = os.getenv("GITHUB_SERVER_URL", "https://github.com")
GITHUB_WORKSPACE = os.getenv("GITHUB_WORKSPACE", os.path.abspath("../../"))
GITHUB_WORKSPACE = os.getenv("GITHUB_WORKSPACE", git_root)
GITHUB_RUN_URL = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/actions/runs/{GITHUB_RUN_ID}"
IMAGES_PATH = os.getenv("IMAGES_PATH")
REPORTS_PATH = os.getenv("REPORTS_PATH", "./reports")
REPO_COPY = os.getenv("REPO_COPY", os.path.abspath("../../"))
RUNNER_TEMP = os.getenv("RUNNER_TEMP", os.path.abspath("./tmp"))
REPORTS_PATH = os.getenv("REPORTS_PATH", p.abspath(p.join(module_dir, "./reports")))
REPO_COPY = os.getenv("REPO_COPY", git_root)
RUNNER_TEMP = os.getenv("RUNNER_TEMP", p.abspath(p.join(module_dir, "./tmp")))
S3_BUILDS_BUCKET = os.getenv("S3_BUILDS_BUCKET", "clickhouse-builds")
S3_TEST_REPORTS_BUCKET = os.getenv("S3_TEST_REPORTS_BUCKET", "clickhouse-test-reports")

View File

@ -3,7 +3,7 @@ import argparse
import os.path as p
import re
import subprocess
from typing import Optional
from typing import List, Optional
# ^ and $ match subline in `multiple\nlines`
# \A and \Z match only start and end of the whole string
@ -89,7 +89,7 @@ class Git:
self.run(f"git rev-list {self.latest_tag}..HEAD --count")
)
def _check_tag(self, value: str):
def check_tag(self, value: str):
if value == "":
return
if not self._tag_pattern.match(value):
@ -101,7 +101,7 @@ class Git:
@latest_tag.setter
def latest_tag(self, value: str):
self._check_tag(value)
self.check_tag(value)
self._latest_tag = value
@property
@ -110,7 +110,7 @@ class Git:
@new_tag.setter
def new_tag(self, value: str):
self._check_tag(value)
self.check_tag(value)
self._new_tag = value
@property
@ -122,3 +122,6 @@ class Git:
version = self.latest_tag.split("-", maxsplit=1)[0]
return int(version.split(".")[-1]) + self.commits_since_tag
def get_tags(self) -> List[str]:
return self.run("git tag").split()

View File

@ -1,10 +1,8 @@
#!/usr/bin/env python3
import datetime
import logging
import os.path as p
import subprocess
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from typing import Dict, Tuple, Union
from typing import Dict, List, Tuple, Union
from git_helper import Git, removeprefix
@ -49,12 +47,16 @@ class ClickHouseVersion:
patch: Union[int, str],
revision: Union[int, str],
git: Git,
tweak: str = None,
):
self._major = int(major)
self._minor = int(minor)
self._patch = int(patch)
self._revision = int(revision)
self._git = git
self._tweak = None
if tweak is not None:
self._tweak = int(tweak)
self._describe = ""
def update(self, part: str) -> "ClickHouseVersion":
@ -89,7 +91,7 @@ class ClickHouseVersion:
@property
def tweak(self) -> int:
return self._git.tweak
return self._tweak or self._git.tweak
@property
def revision(self) -> int:
@ -129,6 +131,25 @@ class ClickHouseVersion:
raise ValueError(f"version type {version_type} not in {VersionType.VALID}")
self._describe = f"v{self.string}-{version_type}"
def __eq__(self, other) -> bool:
if not isinstance(self, type(other)):
return NotImplemented
return (
self.major == other.major
and self.minor == other.minor
and self.patch == other.patch
and self.tweak == other.tweak
)
def __lt__(self, other: "ClickHouseVersion") -> bool:
for part in ("major", "minor", "patch", "tweak"):
if getattr(self, part) < getattr(other, part):
return True
elif getattr(self, part) > getattr(other, part):
return False
return False
class VersionType:
LTS = "lts"
@ -138,6 +159,14 @@ class VersionType:
VALID = (TESTING, PRESTABLE, STABLE, LTS)
def validate_version(version: str):
parts = version.split(".")
if len(parts) != 4:
raise ValueError(f"{version} does not contain 4 parts")
for part in parts:
int(part)
def get_abs_path(path: str) -> str:
return p.abspath(p.join(git.root, path))
@ -176,6 +205,29 @@ def get_version_from_repo(
)
def get_version_from_string(version: str) -> ClickHouseVersion:
validate_version(version)
parts = version.split(".")
return ClickHouseVersion(parts[0], parts[1], parts[2], -1, git, parts[3])
def get_version_from_tag(tag: str) -> ClickHouseVersion:
git.check_tag(tag)
tag = tag[1:].split("-")[0]
return get_version_from_string(tag)
def get_tagged_versions() -> List[ClickHouseVersion]:
versions = []
for tag in git.get_tags():
try:
version = get_version_from_tag(tag)
versions.append(version)
except Exception:
continue
return sorted(versions)
def update_cmake_version(
version: ClickHouseVersion,
versions_path: str = FILE_WITH_VERSION_PATH,
@ -185,22 +237,6 @@ def update_cmake_version(
f.write(VERSIONS_TEMPLATE.format_map(version.as_dict()))
def _update_changelog(repo_path: str, version: ClickHouseVersion):
cmd = """sed \
-e "s/[@]VERSION_STRING[@]/{version_str}/g" \
-e "s/[@]DATE[@]/{date}/g" \
-e "s/[@]AUTHOR[@]/clickhouse-release/g" \
-e "s/[@]EMAIL[@]/clickhouse-release@yandex-team.ru/g" \
< {in_path} > {changelog_path}
""".format(
version_str=version.string,
date=datetime.datetime.now().strftime("%a, %d %b %Y %H:%M:%S") + " +0300",
in_path=p.join(repo_path, CHANGELOG_IN_PATH),
changelog_path=p.join(repo_path, CHANGELOG_PATH),
)
subprocess.check_call(cmd, shell=True)
def update_contributors(
relative_contributors_path: str = GENERATED_CONTRIBUTORS, force: bool = False
):
@ -225,22 +261,10 @@ def update_contributors(
cfd.write(content)
def _update_dockerfile(repo_path: str, version: ClickHouseVersion):
version_str_for_docker = ".".join(
[str(version.major), str(version.minor), str(version.patch), "*"]
)
cmd = "ls -1 {path}/docker/*/Dockerfile | xargs sed -i -r -e 's/ARG version=.+$/ARG version='{ver}'/'".format(
path=repo_path, ver=version_str_for_docker
)
subprocess.check_call(cmd, shell=True)
def update_version_local(repo_path, version, version_type="testing"):
def update_version_local(version, version_type="testing"):
update_contributors()
version.with_description(version_type)
update_cmake_version(version)
_update_changelog(repo_path, version)
_update_dockerfile(repo_path, version)
def main():

View File

@ -129,6 +129,7 @@ def test_quota_from_users_xml():
1000,
"\\N",
"\\N",
"\\N",
]
]
)
@ -349,6 +350,7 @@ def test_tracking_quota():
"\\N",
"\\N",
"\\N",
"\\N",
]
]
)
@ -454,7 +456,7 @@ def test_exceed_quota():
]
)
system_quota_limits(
[["myQuota", 31556952, 0, 1, 1, 1, 1, 1, "\\N", 1, "\\N", "\\N"]]
[["myQuota", 31556952, 0, 1, 1, 1, 1, 1, "\\N", 1, "\\N", "\\N", "\\N"]]
)
system_quota_usage(
[
@ -545,6 +547,7 @@ def test_exceed_quota():
1000,
"\\N",
"\\N",
"\\N",
]
]
)
@ -634,6 +637,7 @@ def test_add_remove_interval():
1000,
"\\N",
"\\N",
"\\N",
]
]
)
@ -695,6 +699,7 @@ def test_add_remove_interval():
1000,
"\\N",
"\\N",
"\\N",
],
[
"myQuota",
@ -709,6 +714,7 @@ def test_add_remove_interval():
"\\N",
20000,
120,
"\\N",
],
]
)
@ -842,6 +848,7 @@ def test_add_remove_interval():
1000,
"\\N",
"\\N",
"\\N",
]
]
)
@ -1003,6 +1010,7 @@ def test_add_remove_interval():
1000,
"\\N",
"\\N",
"\\N",
]
]
)
@ -1064,6 +1072,7 @@ def test_add_remove_quota():
1000,
"\\N",
"\\N",
"\\N",
]
]
)
@ -1136,6 +1145,7 @@ def test_add_remove_quota():
1000,
"\\N",
"\\N",
"\\N",
],
[
"myQuota2",
@ -1150,6 +1160,7 @@ def test_add_remove_quota():
4000,
400000,
60,
"\\N",
],
[
"myQuota2",
@ -1164,6 +1175,7 @@ def test_add_remove_quota():
"\\N",
"\\N",
1800,
"\\N",
],
]
)
@ -1226,6 +1238,7 @@ def test_add_remove_quota():
1000,
"\\N",
"\\N",
"\\N",
]
]
)
@ -1294,6 +1307,7 @@ def test_add_remove_quota():
1000,
"\\N",
"\\N",
"\\N",
]
]
)
@ -1356,6 +1370,7 @@ def test_reload_users_xml_by_timer():
1000,
"\\N",
"\\N",
"\\N",
]
]
)
@ -1382,7 +1397,7 @@ def test_reload_users_xml_by_timer():
assert_eq_with_retry(
instance,
"SELECT * FROM system.quota_limits",
[["myQuota", 31556952, 0, 1, 1, 1, 1, 1, "\\N", 1, "\\N", "\\N"]],
[["myQuota", 31556952, 0, 1, 1, 1, 1, 1, "\\N", 1, "\\N", "\\N", "\\N"]],
)
@ -1481,15 +1496,15 @@ def test_dcl_management():
== "CREATE QUOTA qA FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default\n"
)
assert re.match(
"qA\\t\\t.*\\t1800\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t.*\\t0.5\n"
"qA\\t\\t.*\\t39446190\\t1\\t321\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t10\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t\\\\N\n",
"qA\\t\\t.*\\t1800\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t.*\\t0.5\\t0\\t\\\\N\n"
"qA\\t\\t.*\\t39446190\\t1\\t321\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t10\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t\\\\N\\t0\\t\\\\N\n",
instance.query("SHOW QUOTA"),
)
instance.query("SELECT * from test_table")
assert re.match(
"qA\\t\\t.*\\t1800\\t1\\t\\\\N\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t0.5\n"
"qA\\t\\t.*\\t39446190\\t2\\t321\\t2\\t\\\\N\\t0\\t\\\\N\\t0\\t10\\t100\\t\\\\N\\t400\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t.*\\t\\\\N\n",
"qA\\t\\t.*\\t1800\\t1\\t\\\\N\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t0.5\\t0\\t\\\\N\n"
"qA\\t\\t.*\\t39446190\\t2\\t321\\t2\\t\\\\N\\t0\\t\\\\N\\t0\\t10\\t100\\t\\\\N\\t400\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t.*\\t\\\\N\\t0\\t\\\\N\n",
instance.query("SHOW QUOTA"),
)
@ -1503,7 +1518,7 @@ def test_dcl_management():
instance.query("SELECT * from test_table")
assert re.match(
"qA\\t\\t.*\\t42075936\\t1\\t\\\\N\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t\\\\N\n",
"qA\\t\\t.*\\t42075936\\t1\\t\\\\N\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t\\\\N\\t0\\t\\\\N\n",
instance.query("SHOW QUOTA"),
)
@ -1519,7 +1534,7 @@ def test_dcl_management():
instance.query("SELECT * from test_table")
assert re.match(
"qB\\t\\t.*\\t42075936\\t2\\t\\\\N\\t2\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t.*\\t\\\\N\n",
"qB\\t\\t.*\\t42075936\\t2\\t\\\\N\\t2\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t.*\\t\\\\N\\t0\\t\\\\N\n",
instance.query("SHOW QUOTA"),
)
@ -1563,6 +1578,7 @@ def test_query_inserts():
1000,
"\\N",
"\\N",
"\\N",
]
]
)

View File

@ -57,10 +57,10 @@ q2_01297 local directory [] [5259492] 0 ['r1_01297','u1_01297'] []
q3_01297 local directory ['client_key','user_name'] [5259492,15778476] 0 [] []
q4_01297 local directory [] [604800] 1 [] ['u1_01297']
-- system.quota_limits
q2_01297 5259492 0 100 \N \N 11 1000 10000 1001 10001 2.5
q3_01297 5259492 0 \N \N \N \N 1002 \N \N \N \N
q3_01297 15778476 0 100 \N \N 11 \N \N \N \N \N
q4_01297 604800 0 \N \N \N \N \N \N \N \N \N
q2_01297 5259492 0 100 \N \N 11 1000 10000 1001 10001 2.5 \N
q3_01297 5259492 0 \N \N \N \N 1002 \N \N \N \N \N
q3_01297 15778476 0 100 \N \N 11 \N \N \N \N \N \N
q4_01297 604800 0 \N \N \N \N \N \N \N \N \N \N
-- query_selects query_inserts
CREATE QUOTA q1_01297 KEYED BY user_name FOR INTERVAL 1 minute MAX query_selects = 1 TO r1_01297
CREATE QUOTA q2_01297 KEYED BY user_name FOR INTERVAL 1 minute MAX query_inserts = 1 TO r1_01297
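A hedged aside, not part of the reference above: the extra trailing \N in each system.quota_limits row is the new max_written_bytes limit, which is unset for these quotas. Using the syntax exercised later in this diff, a quota with that limit set could look like the following sketch (q5_01297 is a hypothetical name):

CREATE QUOTA q5_01297 KEYED BY user_name FOR INTERVAL 1 minute MAX WRITTEN BYTES = 1000 TO r1_01297;
-- system.quota_limits would then show 1000 instead of \N in the last column.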

View File

@ -5,4 +5,4 @@ Tuple(k1 Int8, k2 Tuple(k3 String, k4 Nested(k5 Int8, k6 Int8)), some Int8)
{"id":"2","obj":"bbb","s":"bar"}
{"map":{"k1":1,"k2":2},"obj":{"k1":1,"k2.k3":2},"map_type":"Map(String, Nullable(Float64))","obj_type":"Object('json')"}
{"obj":{"k1":1,"k2":2},"map":{"k1":"1","k2":"2"}}
Tuple(k1 Float64, k2 Float64)
Tuple(k1 Int8, k2 Int8)

View File

@ -39,10 +39,10 @@ CREATE TABLE system.privileges\n(\n `privilege` Enum16(\'SHOW DATABASES\' = 0
CREATE TABLE system.processes\n(\n `is_initial_query` UInt8,\n `user` String,\n `query_id` String,\n `address` IPv6,\n `port` UInt16,\n `initial_user` String,\n `initial_query_id` String,\n `initial_address` IPv6,\n `initial_port` UInt16,\n `interface` UInt8,\n `os_user` String,\n `client_hostname` String,\n `client_name` String,\n `client_revision` UInt64,\n `client_version_major` UInt64,\n `client_version_minor` UInt64,\n `client_version_patch` UInt64,\n `http_method` UInt8,\n `http_user_agent` String,\n `http_referer` String,\n `forwarded_for` String,\n `quota_key` String,\n `distributed_depth` UInt64,\n `elapsed` Float64,\n `is_cancelled` UInt8,\n `read_rows` UInt64,\n `read_bytes` UInt64,\n `total_rows_approx` UInt64,\n `written_rows` UInt64,\n `written_bytes` UInt64,\n `memory_usage` Int64,\n `peak_memory_usage` Int64,\n `query` String,\n `thread_ids` Array(UInt64),\n `ProfileEvents` Map(String, UInt64),\n `Settings` Map(String, String),\n `current_database` String,\n `ProfileEvents.Names` Array(String),\n `ProfileEvents.Values` Array(UInt64),\n `Settings.Names` Array(String),\n `Settings.Values` Array(String)\n)\nENGINE = SystemProcesses()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.projection_parts\n(\n `partition` String,\n `name` String,\n `part_type` String,\n `parent_name` String,\n `parent_uuid` UUID,\n `parent_part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `parent_marks` UInt64,\n `parent_rows` UInt64,\n `parent_bytes_on_disk` UInt64,\n `parent_data_compressed_bytes` UInt64,\n `parent_data_uncompressed_bytes` UInt64,\n `parent_marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `is_frozen` UInt8,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `hash_of_all_files` String,\n `hash_of_uncompressed_files` String,\n `uncompressed_hash_of_compressed_files` String,\n `delete_ttl_info_min` DateTime,\n `delete_ttl_info_max` DateTime,\n `move_ttl_info.expression` Array(String),\n `move_ttl_info.min` Array(DateTime),\n `move_ttl_info.max` Array(DateTime),\n `default_compression_codec` String,\n `recompression_ttl_info.expression` Array(String),\n `recompression_ttl_info.min` Array(DateTime),\n `recompression_ttl_info.max` Array(DateTime),\n `group_by_ttl_info.expression` Array(String),\n `group_by_ttl_info.min` Array(DateTime),\n `group_by_ttl_info.max` Array(DateTime),\n `rows_where_ttl_info.expression` Array(String),\n `rows_where_ttl_info.min` Array(DateTime),\n `rows_where_ttl_info.max` Array(DateTime),\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemProjectionParts()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.projection_parts_columns\n(\n `partition` String,\n `name` String,\n `part_type` String,\n `parent_name` String,\n `parent_uuid` UUID,\n `parent_part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `parent_marks` UInt64,\n `parent_rows` UInt64,\n `parent_bytes_on_disk` UInt64,\n `parent_data_compressed_bytes` UInt64,\n `parent_data_uncompressed_bytes` UInt64,\n `parent_marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `column` String,\n `type` String,\n `column_position` UInt64,\n `default_kind` String,\n `default_expression` String,\n `column_bytes_on_disk` UInt64,\n `column_data_compressed_bytes` UInt64,\n `column_data_uncompressed_bytes` UInt64,\n `column_marks_bytes` UInt64,\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemProjectionPartsColumns()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.quota_limits\n(\n `quota_name` String,\n `duration` UInt32,\n `is_randomized_interval` UInt8,\n `max_queries` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `max_read_rows` Nullable(UInt64),\n `max_read_bytes` Nullable(UInt64),\n `max_execution_time` Nullable(Float64)\n)\nENGINE = SystemQuotaLimits()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.quota_usage\n(\n `quota_name` String,\n `quota_key` String,\n `start_time` Nullable(DateTime),\n `end_time` Nullable(DateTime),\n `duration` Nullable(UInt32),\n `queries` Nullable(UInt64),\n `max_queries` Nullable(UInt64),\n `query_selects` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `query_inserts` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `errors` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `result_rows` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `result_bytes` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `read_rows` Nullable(UInt64),\n `max_read_rows` Nullable(UInt64),\n `read_bytes` Nullable(UInt64),\n `max_read_bytes` Nullable(UInt64),\n `execution_time` Nullable(Float64),\n `max_execution_time` Nullable(Float64)\n)\nENGINE = SystemQuotaUsage()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.quota_limits\n(\n `quota_name` String,\n `duration` UInt32,\n `is_randomized_interval` UInt8,\n `max_queries` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `max_read_rows` Nullable(UInt64),\n `max_read_bytes` Nullable(UInt64),\n `max_execution_time` Nullable(Float64),\n `max_written_bytes` Nullable(UInt64)\n)\nENGINE = SystemQuotaLimits()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.quota_usage\n(\n `quota_name` String,\n `quota_key` String,\n `start_time` Nullable(DateTime),\n `end_time` Nullable(DateTime),\n `duration` Nullable(UInt32),\n `queries` Nullable(UInt64),\n `max_queries` Nullable(UInt64),\n `query_selects` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `query_inserts` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `errors` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `result_rows` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `result_bytes` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `read_rows` Nullable(UInt64),\n `max_read_rows` Nullable(UInt64),\n `read_bytes` Nullable(UInt64),\n `max_read_bytes` Nullable(UInt64),\n `execution_time` Nullable(Float64),\n `max_execution_time` Nullable(Float64),\n `written_bytes` Nullable(UInt64),\n `max_written_bytes` Nullable(UInt64)\n)\nENGINE = SystemQuotaUsage()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.quotas\n(\n `name` String,\n `id` UUID,\n `storage` String,\n `keys` Array(Enum8(\'user_name\' = 1, \'ip_address\' = 2, \'forwarded_ip_address\' = 3, \'client_key\' = 4)),\n `durations` Array(UInt32),\n `apply_to_all` UInt8,\n `apply_to_list` Array(String),\n `apply_to_except` Array(String)\n)\nENGINE = SystemQuotas()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.quotas_usage\n(\n `quota_name` String,\n `quota_key` String,\n `is_current` UInt8,\n `start_time` Nullable(DateTime),\n `end_time` Nullable(DateTime),\n `duration` Nullable(UInt32),\n `queries` Nullable(UInt64),\n `max_queries` Nullable(UInt64),\n `query_selects` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `query_inserts` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `errors` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `result_rows` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `result_bytes` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `read_rows` Nullable(UInt64),\n `max_read_rows` Nullable(UInt64),\n `read_bytes` Nullable(UInt64),\n `max_read_bytes` Nullable(UInt64),\n `execution_time` Nullable(Float64),\n `max_execution_time` Nullable(Float64)\n)\nENGINE = SystemQuotasUsage()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.quotas_usage\n(\n `quota_name` String,\n `quota_key` String,\n `is_current` UInt8,\n `start_time` Nullable(DateTime),\n `end_time` Nullable(DateTime),\n `duration` Nullable(UInt32),\n `queries` Nullable(UInt64),\n `max_queries` Nullable(UInt64),\n `query_selects` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `query_inserts` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `errors` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `result_rows` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `result_bytes` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `read_rows` Nullable(UInt64),\n `max_read_rows` Nullable(UInt64),\n `read_bytes` Nullable(UInt64),\n `max_read_bytes` Nullable(UInt64),\n `execution_time` Nullable(Float64),\n `max_execution_time` Nullable(Float64),\n `written_bytes` Nullable(UInt64),\n `max_written_bytes` Nullable(UInt64)\n)\nENGINE = SystemQuotasUsage()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.replicas\n(\n `database` String,\n `table` String,\n `engine` String,\n `is_leader` UInt8,\n `can_become_leader` UInt8,\n `is_readonly` UInt8,\n `is_session_expired` UInt8,\n `future_parts` UInt32,\n `parts_to_check` UInt32,\n `zookeeper_path` String,\n `replica_name` String,\n `replica_path` String,\n `columns_version` Int32,\n `queue_size` UInt32,\n `inserts_in_queue` UInt32,\n `merges_in_queue` UInt32,\n `part_mutations_in_queue` UInt32,\n `queue_oldest_time` DateTime,\n `inserts_oldest_time` DateTime,\n `merges_oldest_time` DateTime,\n `part_mutations_oldest_time` DateTime,\n `oldest_part_to_get` String,\n `oldest_part_to_merge_to` String,\n `oldest_part_to_mutate_to` String,\n `log_max_index` UInt64,\n `log_pointer` UInt64,\n `last_queue_update` DateTime,\n `absolute_delay` UInt64,\n `total_replicas` UInt8,\n `active_replicas` UInt8,\n `last_queue_update_exception` String,\n `zookeeper_exception` String,\n `replica_is_active` Map(String, UInt8)\n)\nENGINE = SystemReplicas()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.replicated_fetches\n(\n `database` String,\n `table` String,\n `elapsed` Float64,\n `progress` Float64,\n `result_part_name` String,\n `result_part_path` String,\n `partition_id` String,\n `total_size_bytes_compressed` UInt64,\n `bytes_read_compressed` UInt64,\n `source_replica_path` String,\n `source_replica_hostname` String,\n `source_replica_port` UInt16,\n `interserver_scheme` String,\n `URI` String,\n `to_detached` UInt8,\n `thread_id` UInt64\n)\nENGINE = SystemReplicatedFetches()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.replicated_merge_tree_settings\n(\n `name` String,\n `value` String,\n `changed` UInt8,\n `description` String,\n `type` String\n)\nENGINE = SystemReplicatedMergeTreeSettings()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
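A hedged note on the schema changes above: system.quota_usage and system.quotas_usage gain trailing written_bytes and max_written_bytes columns, both Nullable(UInt64). Accounting against the new limit can be inspected with a query like:

SELECT quota_name, written_bytes, max_written_bytes FROM system.quotas_usage WHERE quota_name = 'q02247';
-- the 02247 test later in this diff uses the same table to assert written_bytes > 10 after two inserts.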

View File

@ -53,34 +53,34 @@ c Map(String, Nullable(Float64))
d Nullable(UInt8)
42.42 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} 1
JSONEachRow
d Nullable(UInt8)
a Nullable(Float64)
b Array(Tuple(Nullable(Float64), Nullable(String)))
c Map(String, Nullable(Float64))
a Nullable(Float64)
1 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} 42.42
d Nullable(UInt8)
42.42 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} 1
a Nullable(Float64)
b Array(Tuple(Nullable(Float64), Nullable(String)))
c Map(String, Nullable(Float64))
a Nullable(Float64)
d Nullable(UInt8)
\N [(1,'String'),(2,NULL)] {'key':NULL,'key2':24} \N
1 [(2,'String 2'),(3,'hello')] {'key3':4242,'key4':2424} 32
32 [(2,'String 2'),(3,'hello')] {'key3':4242,'key4':2424} 1
a Nullable(Float64)
b Nullable(String)
c Array(Nullable(Float64))
a Nullable(Float64)
s1 [] 1
\N [2] 2
\N [] \N
\N [] \N
\N [3] \N
1 s1 []
2 \N [2]
\N \N []
\N \N []
\N \N [3]
TSKV
a Nullable(String)
b Nullable(String)
c Nullable(String)
a Nullable(String)
s1 \N 1
} [2] 2
1 s1 \N
2 } [2]
\N \N \N
\N \N \N
\N [3] \N
\N \N [3]
Values
c1 Nullable(Float64)
c2 Nullable(String)

View File

@ -1,52 +1,52 @@
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
111 Hello
123 World
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
Hello 111
World 123
1 2 [1,2,3] [['abc'],[],['d','e']]
c1 Nullable(Float64)
c2 Nullable(Float64)
c3 Array(Nullable(Float64))
c4 Array(Array(Nullable(String)))
111 Hello
123 World
111 Hello
131 Hello
123 World
b Nullable(Float64)
Hello 111
World 123
Hello 111
Hello 131
World 123
a Nullable(String)
b Nullable(Float64)

View File

@ -6,3 +6,11 @@ z UInt32 DEFAULT 5
17 5
7 5
21 5
x UInt32 DEFAULT y
y UInt32 EPHEMERAL 0
z UInt32 DEFAULT 5
1 2
0 2
0 5
7 5
21 5

View File

@ -38,3 +38,43 @@ SELECT * FROM t_ephemeral_02205_1;
DROP TABLE IF EXISTS t_ephemeral_02205_1;
# Test without default
CREATE TABLE t_ephemeral_02205_1 (x UInt32 DEFAULT y, y UInt32 EPHEMERAL, z UInt32 DEFAULT 5) ENGINE = Memory;
DESCRIBE t_ephemeral_02205_1;
# Test INSERT without a column list - only the ordinary columns (x, z) should participate
INSERT INTO t_ephemeral_02205_1 VALUES (1, 2);
# SELECT * should return only the ordinary columns (x, z) - the ephemeral column is not stored in the table
SELECT * FROM t_ephemeral_02205_1;
TRUNCATE TABLE t_ephemeral_02205_1;
INSERT INTO t_ephemeral_02205_1 VALUES (DEFAULT, 2);
SELECT * FROM t_ephemeral_02205_1;
TRUNCATE TABLE t_ephemeral_02205_1;
# Test INSERT using the ephemeral's default
INSERT INTO t_ephemeral_02205_1 (x, y) VALUES (DEFAULT, DEFAULT);
SELECT * FROM t_ephemeral_02205_1;
TRUNCATE TABLE t_ephemeral_02205_1;
# Test INSERT using an explicit ephemeral value
INSERT INTO t_ephemeral_02205_1 (x, y) VALUES (DEFAULT, 7);
SELECT * FROM t_ephemeral_02205_1;
# Test ALTER TABLE DELETE
ALTER TABLE t_ephemeral_02205_1 DELETE WHERE x = 7;
SELECT * FROM t_ephemeral_02205_1;
TRUNCATE TABLE t_ephemeral_02205_1;
# Test INSERT into a column defaulted to the ephemeral but explicitly provided with a value
INSERT INTO t_ephemeral_02205_1 (x, y) VALUES (21, 7);
SELECT * FROM t_ephemeral_02205_1;
DROP TABLE IF EXISTS t_ephemeral_02205_1;
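A hedged reading of the expected output above, not part of the test itself: with y declared EPHEMERAL and no explicit default, DESCRIBE reports it as EPHEMERAL 0, and an INSERT without a column list touches only the ordinary columns, so the first INSERT above is equivalent to:

INSERT INTO t_ephemeral_02205_1 (x, z) VALUES (1, 2);

When x is left as DEFAULT it evaluates y, and since the ephemeral's implicit default is the type's zero value, the reference shows the rows 0 2 and 0 5 for those inserts.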

View File

@ -1,3 +1,3 @@
CREATE TABLE default.test\n(\n `y` Nullable(String),\n `x` Nullable(Float64)\n)\nENGINE = File(\'JSONEachRow\', \'data.jsonl\')
CREATE TABLE default.test\n(\n `x` Nullable(Float64),\n `y` Nullable(String)\n)\nENGINE = File(\'JSONEachRow\', \'data.jsonl\')
OK
OK

View File

@ -0,0 +1,8 @@
['{ArraySizes}','{ArrayElements, Regular}']
['{ArraySizes}','{ArrayElements, TupleElement(keys, escape_tuple_delimiter = true), Regular}','{ArrayElements, TupleElement(values, escape_tuple_delimiter = true), Regular}']
['{TupleElement(1, escape_tuple_delimiter = true), Regular}','{TupleElement(2, escape_tuple_delimiter = true), Regular}','{TupleElement(3, escape_tuple_delimiter = true), Regular}']
['{DictionaryKeys, Regular}','{DictionaryIndexes}']
['{NullMap}','{NullableElements, Regular}']
['{ArraySizes}','{ArrayElements, Regular}']
['{ArraySizes}','{ArrayElements, TupleElement(keys, escape_tuple_delimiter = true), Regular}','{ArrayElements, TupleElement(values, escape_tuple_delimiter = true), Regular}']
['{TupleElement(1, escape_tuple_delimiter = true), Regular}','{TupleElement(2, escape_tuple_delimiter = true), Regular}','{TupleElement(3, escape_tuple_delimiter = true), Regular}','{TupleElement(4, escape_tuple_delimiter = true), Regular}']

View File

@ -0,0 +1,8 @@
select getTypeSerializationStreams('Array(Int8)');
select getTypeSerializationStreams('Map(String, Int64)');
select getTypeSerializationStreams('Tuple(String, Int64, Float64)');
select getTypeSerializationStreams('LowCardinality(String)');
select getTypeSerializationStreams('Nullable(String)');
select getTypeSerializationStreams([1,2,3]);
select getTypeSerializationStreams(map('a', 1, 'b', 2));
select getTypeSerializationStreams(tuple('a', 1, 'b', 2));
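A hedged summary of the pairing between the queries above and their reference output: getTypeSerializationStreams accepts either a type name string or a value and returns the serialization substream paths for that type, for example:

SELECT getTypeSerializationStreams('Nullable(String)');
-- ['{NullMap}','{NullableElements, Regular}']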

View File

@ -1,8 +1,8 @@
a Nullable(String)
b Nullable(String)
c Nullable(String)
a Nullable(String)
s1 \N 1
} [2] 2
1 s1 \N
2 } [2]
\N \N \N
\N \N \N
\N [3] \N
\N \N [3]

View File

@ -0,0 +1,10 @@
1.1.1.1 1.1.1.1
0.0.0.0
1.1.1.1 1.1.1.1
0.0.0.0
fe80::9801:43ff:fe1f:7690 fe80::9801:43ff:fe1f:7690
1.1.1.1 ::
::
fe80::9801:43ff:fe1f:7690 fe80::9801:43ff:fe1f:7690
1.1.1.1 ::ffff:1.1.1.1
::

View File

@ -0,0 +1,81 @@
DROP TABLE IF EXISTS test_table_ipv4;
CREATE TABLE test_table_ipv4
(
ip String,
ipv4 IPv4
) ENGINE = TinyLog;
INSERT INTO test_table_ipv4 VALUES ('1.1.1.1', '1.1.1.1'), ('', ''); --{clientError 441}
SET input_format_ipv4_default_on_conversion_error = 1;
INSERT INTO test_table_ipv4 VALUES ('1.1.1.1', '1.1.1.1'), ('', '');
SELECT ip, ipv4 FROM test_table_ipv4;
SET input_format_ipv4_default_on_conversion_error = 0;
DROP TABLE test_table_ipv4;
DROP TABLE IF EXISTS test_table_ipv4_materialized;
CREATE TABLE test_table_ipv4_materialized
(
ip String,
ipv6 IPv4 MATERIALIZED toIPv4(ip)
) ENGINE = TinyLog;
INSERT INTO test_table_ipv4_materialized(ip) VALUES ('1.1.1.1'), (''); --{serverError 441}
SET input_format_ipv4_default_on_conversion_error = 1;
INSERT INTO test_table_ipv4_materialized(ip) VALUES ('1.1.1.1'), (''); --{serverError 441}
SET cast_ipv4_ipv6_default_on_conversion_error = 1;
INSERT INTO test_table_ipv4_materialized(ip) VALUES ('1.1.1.1'), ('');
SELECT ip, ipv6 FROM test_table_ipv4_materialized;
SET input_format_ipv4_default_on_conversion_error = 0;
SET cast_ipv4_ipv6_default_on_conversion_error = 0;
DROP TABLE test_table_ipv4_materialized;
DROP TABLE IF EXISTS test_table_ipv6;
CREATE TABLE test_table_ipv6
(
ip String,
ipv6 IPv6
) ENGINE = TinyLog;
INSERT INTO test_table_ipv6 VALUES ('fe80::9801:43ff:fe1f:7690', 'fe80::9801:43ff:fe1f:7690'), ('1.1.1.1', '1.1.1.1'), ('', ''); --{clientError 441}
SET input_format_ipv6_default_on_conversion_error = 1;
INSERT INTO test_table_ipv6 VALUES ('fe80::9801:43ff:fe1f:7690', 'fe80::9801:43ff:fe1f:7690'), ('1.1.1.1', '1.1.1.1'), ('', '');
SELECT ip, ipv6 FROM test_table_ipv6;
SET input_format_ipv6_default_on_conversion_error = 0;
DROP TABLE test_table_ipv6;
DROP TABLE IF EXISTS test_table_ipv6_materialized;
CREATE TABLE test_table_ipv6_materialized
(
ip String,
ipv6 IPv6 MATERIALIZED toIPv6(ip)
) ENGINE = TinyLog;
INSERT INTO test_table_ipv6_materialized(ip) VALUES ('fe80::9801:43ff:fe1f:7690'), ('1.1.1.1'), (''); --{serverError 441}
SET input_format_ipv6_default_on_conversion_error = 1;
INSERT INTO test_table_ipv6_materialized(ip) VALUES ('fe80::9801:43ff:fe1f:7690'), ('1.1.1.1'), (''); --{serverError 441}
SET cast_ipv4_ipv6_default_on_conversion_error = 1;
INSERT INTO test_table_ipv6_materialized(ip) VALUES ('fe80::9801:43ff:fe1f:7690'), ('1.1.1.1'), ('');
SELECT ip, ipv6 FROM test_table_ipv6_materialized;
SET input_format_ipv6_default_on_conversion_error = 0;
SET cast_ipv4_ipv6_default_on_conversion_error = 0;
DROP TABLE test_table_ipv6_materialized;
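A hedged reading of the test flow above: two independent settings are involved. input_format_ipv4_default_on_conversion_error and input_format_ipv6_default_on_conversion_error affect only the parsing of inserted values, which is why the MATERIALIZED variants still fail with serverError 441 until cast_ipv4_ipv6_default_on_conversion_error is also enabled for the toIPv4/toIPv6 casts:

SET input_format_ipv4_default_on_conversion_error = 1; -- '' in VALUES parses as 0.0.0.0
SET cast_ipv4_ipv6_default_on_conversion_error = 1;    -- toIPv4('') returns 0.0.0.0 instead of throwing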

View File

@ -1,2 +1,2 @@
QUOTA_EXPIRED
QUOTA_EXCEEDED
2

View File

@ -20,7 +20,7 @@ ${CLICKHOUSE_CLIENT} -q "CREATE QUOTA q02246 FOR INTERVAL 100 YEAR MAX QUERY INS
${CLICKHOUSE_CLIENT} --user u02246 --async_insert 1 -q "INSERT INTO async_inserts_02246 VALUES (1, 'a')"
${CLICKHOUSE_CLIENT} --user u02246 --async_insert 1 -q "INSERT INTO async_inserts_02246 VALUES (2, 'b')"
${CLICKHOUSE_CLIENT} --user u02246 --async_insert 1 -q "INSERT INTO async_inserts_02246 VALUES (3, 'c')" 2>&1 | grep -m1 -o QUOTA_EXPIRED
${CLICKHOUSE_CLIENT} --user u02246 --async_insert 1 -q "INSERT INTO async_inserts_02246 VALUES (3, 'c')" 2>&1 | grep -m1 -o QUOTA_EXCEEDED
sleep 1.0

View File

@ -0,0 +1,34 @@
a Nullable(String)
b Nullable(String)
c Nullable(String)
1 s1 \N
2 } [2]
\N \N \N
\N \N \N
\N \N [3]
b Nullable(String)
a Nullable(String)
c Nullable(String)
e Nullable(String)
1 \N \N \N
\N 2 3 \N
\N \N \N \N
\N \N \N 3
3 3 1 \N
a Nullable(Float64)
b Nullable(String)
c Array(Nullable(Float64))
1 s1 []
2 \N [2]
\N \N []
\N \N []
\N \N [3]
b Nullable(Float64)
a Nullable(Float64)
c Nullable(Float64)
e Nullable(Float64)
1 \N \N \N
\N 2 3 \N
\N \N \N \N
\N \N \N 3
3 3 1 \N

View File

@ -0,0 +1,49 @@
#!/usr/bin/env bash
# Tags: no-parallel, no-fasttest
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}')
FILE_NAME=test_02247.data
DATA_FILE=${USER_FILES_PATH:?}/$FILE_NAME
touch $DATA_FILE
echo -e 'a=1\tb=s1\tc=\N
c=[2]\ta=2\tb=\N}
a=\N
c=[3]\ta=\N' > $DATA_FILE
$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSKV')"
$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSKV')"
echo -e 'b=1
a=2\tc=3
e=3
c=1\tb=3\ta=3' > $DATA_FILE
$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSKV')"
$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSKV')"
echo -e '{"a" : 1, "b" : "s1", "c" : null}
{"c" : [2], "a" : 2, "b" : null}
{}
{"a" : null}
{"c" : [3], "a" : null}' > $DATA_FILE
$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONEachRow')"
$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONEachRow')"
echo -e '{"b" : 1}
{"a" : 2, "c" : 3}
{}
{"e" : 3}
{"c" : 1, "b" : 3, "a" : 3}' > $DATA_FILE
$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONEachRow')"
$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONEachRow')"
rm $DATA_FILE
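A hedged observation on the datasets above, matching the reference output: schema inference for TSKV and JSONEachRow collects the union of keys across the sampled rows, orders columns by first appearance, and makes every column Nullable so rows missing a key read back as \N. For instance, for a file containing {"b" : 1} followed by {"a" : 2, "b" : 2}:

DESC file('test_02247.data', 'JSONEachRow');
-- b Nullable(Float64)
-- a Nullable(Float64)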

View File

@ -0,0 +1,7 @@
QUOTA_EXCEEDED
QUOTA_EXCEEDED
1
2
QUOTA_EXCEEDED
1
50

View File

@ -0,0 +1,42 @@
#!/usr/bin/env bash
# Tags: no-parallel
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS written_bytes_02247"
${CLICKHOUSE_CLIENT} -q "DROP ROLE IF EXISTS r02247"
${CLICKHOUSE_CLIENT} -q "DROP USER IF EXISTS u02247"
${CLICKHOUSE_CLIENT} -q "DROP QUOTA IF EXISTS q02247"
${CLICKHOUSE_CLIENT} -q "CREATE TABLE written_bytes_02247(s String) ENGINE = Memory"
${CLICKHOUSE_CLIENT} -q "CREATE ROLE r02247"
${CLICKHOUSE_CLIENT} -q "CREATE USER u02247"
${CLICKHOUSE_CLIENT} -q "GRANT ALL ON *.* TO r02247"
${CLICKHOUSE_CLIENT} -q "GRANT r02247 to u02247"
${CLICKHOUSE_CLIENT} -q "CREATE QUOTA q02247 FOR INTERVAL 100 YEAR MAX WRITTEN BYTES = 25 TO r02247"
${CLICKHOUSE_CLIENT} --user u02247 --async_insert 1 -q "INSERT INTO written_bytes_02247 VALUES ('qwqw')"
${CLICKHOUSE_CLIENT} --user u02247 --async_insert 0 -q "INSERT INTO written_bytes_02247 VALUES ('qwqw')"
${CLICKHOUSE_CLIENT} --user u02247 --async_insert 1 -q "INSERT INTO written_bytes_02247 VALUES ('qwqw')" 2>&1 | grep -m1 -o QUOTA_EXCEEDED
${CLICKHOUSE_CLIENT} --user u02247 --async_insert 0 -q "INSERT INTO written_bytes_02247 VALUES ('qwqw')" 2>&1 | grep -m1 -o QUOTA_EXCEEDED
${CLICKHOUSE_CLIENT} -q "SELECT written_bytes > 10 FROM system.quotas_usage WHERE quota_name = 'q02247'"
${CLICKHOUSE_CLIENT} -q "SELECT count() FROM written_bytes_02247"
${CLICKHOUSE_CLIENT} -q "DROP QUOTA q02247"
${CLICKHOUSE_CLIENT} -q "CREATE QUOTA q02247 FOR INTERVAL 100 YEAR MAX WRITTEN BYTES = 1000 TO r02247"
${CLICKHOUSE_CLIENT} -q "TRUNCATE TABLE written_bytes_02247"
${CLICKHOUSE_CLIENT} --user u02247 -q "INSERT INTO written_bytes_02247 SELECT toString(number) FROM numbers(50)"
${CLICKHOUSE_CLIENT} --user u02247 -q "INSERT INTO written_bytes_02247 SELECT toString(number) FROM numbers(100)" 2>&1 | grep -m1 -o QUOTA_EXCEEDED
${CLICKHOUSE_CLIENT} -q "SELECT written_bytes > 100 FROM system.quotas_usage WHERE quota_name = 'q02247'"
${CLICKHOUSE_CLIENT} -q "SELECT count() FROM written_bytes_02247"
${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS written_bytes_02247"
${CLICKHOUSE_CLIENT} -q "DROP ROLE IF EXISTS r02247"
${CLICKHOUSE_CLIENT} -q "DROP USER IF EXISTS u02247"
${CLICKHOUSE_CLIENT} -q "DROP QUOTA IF EXISTS q02247"

View File

@ -0,0 +1,2 @@
1 string1
2 string2

View File

@ -0,0 +1,5 @@
drop table if exists test_02249;
create table test_02249 (x UInt32, y String) engine=Memory();
insert into test_02249 select * from input() format JSONEachRow {"x" : 1, "y" : "string1"}, {"y" : "string2", "x" : 2};
select * from test_02249;
drop table test_02249;
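A hedged variant of the statement above, using the form of input() with an explicit structure; the transform is illustrative:

insert into test_02249 select x + 10, upper(y) from input('x UInt32, y String') format JSONEachRow {"x" : 3, "y" : "string3"};

The SELECT can reshape rows supplied with the query before they reach the table, which is the point of reading from input() rather than inserting directly.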

Some files were not shown because too many files have changed in this diff.