Commit ec2213493f: Merge branch 'master' into allow-read-bools-as-numbers
@@ -16,7 +16,6 @@ Checks: '-*,
     modernize-make-unique,
     modernize-raw-string-literal,
     modernize-redundant-void-arg,
-    modernize-replace-auto-ptr,
     modernize-replace-random-shuffle,
     modernize-use-bool-literals,
     modernize-use-nullptr,
@@ -145,6 +144,7 @@ Checks: '-*,
     clang-analyzer-cplusplus.SelfAssignment,
     clang-analyzer-deadcode.DeadStores,
     clang-analyzer-cplusplus.Move,
+    clang-analyzer-optin.cplusplus.UninitializedObject,
     clang-analyzer-optin.cplusplus.VirtualCall,
     clang-analyzer-security.insecureAPI.UncheckedReturn,
     clang-analyzer-security.insecureAPI.bcmp,
@@ -164,6 +164,8 @@ Checks: '-*,
     clang-analyzer-unix.cstring.NullArg,

     boost-use-to-string,
+
+    alpha.security.cert.env.InvalidPtr,
 '
 WarningsAsErrors: '*'

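For a quick local spot-check against this check list, clang-tidy can be run by hand; a rough sketch, assuming a compile_commands.json in build/ and a placeholder source file (not part of the commit):

    clang-tidy -p build --warnings-as-errors='*' src/Common/Example.cpp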
.github/PULL_REQUEST_TEMPLATE.md (vendored): 4 changed lines

@@ -1,4 +1,4 @@
-Changelog category (leave one):
+### Changelog category (leave one):
 - New Feature
 - Improvement
 - Bug Fix (user-visible misbehaviour in official stable or prestable release)
@@ -9,7 +9,7 @@ Changelog category (leave one):
 - Not for changelog (changelog entry is not required)


-Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md):
+### Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md):
 ...

.github/workflows/master.yml (vendored): 28 changed lines

@@ -947,6 +947,34 @@ jobs:
           docker rm -f "$(docker ps -a -q)" ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 ############################################################################################
+##################################### Docker images #######################################
+############################################################################################
+  DockerServerImages:
+    needs:
+      - BuilderDebRelease
+      - BuilderDebAarch64
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0 # otherwise we will have no version info
+      - name: Check docker clickhouse/clickhouse-server building
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 docker_server.py --release-type head
+          python3 docker_server.py --release-type head --no-ubuntu \
+            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill "$(docker ps -q)" ||:
+          docker rm -f "$(docker ps -a -q)" ||:
+          sudo rm -fr "$TEMP_PATH"
+############################################################################################
 ##################################### BUILD REPORTER #######################################
 ############################################################################################
   BuilderReport:
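For reference, the new job is essentially two invocations of tests/ci/docker_server.py; a local dry run along the same lines would be (flags taken from the workflows in this commit, --no-push keeps it from publishing anything):

    cd tests/ci
    python3 docker_server.py --release-type head --no-push
    python3 docker_server.py --release-type head --no-push --no-ubuntu \
        --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper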
.github/workflows/pull_request.yml (vendored): 31 changed lines

@@ -4,7 +4,7 @@ env:
 # Force the stdout and stderr streams to be unbuffered
 PYTHONUNBUFFERED: 1

 on: # yamllint disable-line rule:truthy
   pull_request:
     types:
       - synchronize
@@ -998,6 +998,34 @@ jobs:
           docker rm -f "$(docker ps -a -q)" ||:
           sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 ############################################################################################
+##################################### Docker images #######################################
+############################################################################################
+  DockerServerImages:
+    needs:
+      - BuilderDebRelease
+      - BuilderDebAarch64
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0 # otherwise we will have no version info
+      - name: Check docker clickhouse/clickhouse-server building
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 docker_server.py --release-type head --no-push
+          python3 docker_server.py --release-type head --no-push --no-ubuntu \
+            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill "$(docker ps -q)" ||:
+          docker rm -f "$(docker ps -a -q)" ||:
+          sudo rm -fr "$TEMP_PATH"
+############################################################################################
 ##################################### BUILD REPORTER #######################################
 ############################################################################################
   BuilderReport:
@@ -3138,6 +3166,7 @@ jobs:
     needs:
       - StyleCheck
       - DockerHubPush
+      - DockerServerImages
       - CheckLabels
       - BuilderReport
      - FastTest
.github/workflows/release.yml (vendored): 25 changed lines

@@ -36,3 +36,28 @@ jobs:
           overwrite: true
           tag: ${{ github.ref }}
           file_glob: true
+############################################################################################
+##################################### Docker images #######################################
+############################################################################################
+  DockerServerImages:
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0 # otherwise we will have no version info
+      - name: Check docker clickhouse/clickhouse-server building
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 docker_server.py --release-type auto
+          python3 docker_server.py --release-type auto --no-ubuntu \
+            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill "$(docker ps -q)" ||:
+          docker rm -f "$(docker ps -a -q)" ||:
+          sudo rm -fr "$TEMP_PATH"
@@ -1,4 +1,4 @@
-set (ENABLE_KRB5_DEFAULT 1)
+set (ENABLE_KRB5_DEFAULT ${ENABLE_LIBRARIES})
 if (NOT CMAKE_SYSTEM_NAME MATCHES "Linux" AND NOT (CMAKE_SYSTEM_NAME MATCHES "Darwin" AND NOT CMAKE_CROSSCOMPILING))
     message (WARNING "krb5 disabled in non-Linux and non-native-Darwin environments")
     set (ENABLE_KRB5_DEFAULT 0)
@@ -16,6 +16,7 @@ if(NOT AWK_PROGRAM)
 endif()

 set(KRB5_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/krb5/src")
+set(KRB5_ET_BIN_DIR "${CMAKE_CURRENT_BINARY_DIR}/include_private")

 set(ALL_SRCS
     "${KRB5_SOURCE_DIR}/util/et/et_name.c"
@@ -90,7 +91,6 @@ set(ALL_SRCS
     "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/get_tkt_flags.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_allowable_enctypes.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealiov.c"
-    "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/canon_name.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_cred.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_sec_context.c"
@@ -143,11 +143,12 @@ set(ALL_SRCS
     "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer_set.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_set.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_token.c"
-    "${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_err_generic.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_major_status.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_seqstate.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_errmap.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_buffer.c"
+    "${KRB5_ET_BIN_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c"
+    "${KRB5_ET_BIN_DIR}/lib/gssapi/generic/gssapi_err_generic.c"

     "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/spnego_mech.c"
     "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_util.c"
@@ -256,8 +257,8 @@ set(ALL_SRCS
     "${KRB5_SOURCE_DIR}/util/profile/prof_parse.c"
     "${KRB5_SOURCE_DIR}/util/profile/prof_get.c"
     "${KRB5_SOURCE_DIR}/util/profile/prof_set.c"
-    "${KRB5_SOURCE_DIR}/util/profile/prof_err.c"
     "${KRB5_SOURCE_DIR}/util/profile/prof_init.c"
+    "${KRB5_ET_BIN_DIR}/util/profile/prof_err.c"
     "${KRB5_SOURCE_DIR}/lib/krb5/krb/fwd_tgt.c"
     "${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_creds.c"
     "${KRB5_SOURCE_DIR}/lib/krb5/krb/fast.c"
@@ -450,13 +451,12 @@ set(ALL_SRCS



-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.c"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.c"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.c"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.c"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.c"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.c"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/k5e1_err.c"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kdb5_err.c"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/asn1_err.c"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb5_err.c"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb524_err.c"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kv5m_err.c"


     "${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_base.c"
@@ -473,7 +473,7 @@ set(ALL_SRCS
 )

 add_custom_command(
-    OUTPUT "${KRB5_SOURCE_DIR}/util/et/compile_et"
+    OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/compile_et"
     COMMAND /bin/sh
     ./config_script
     ./compile_et.sh
@@ -481,50 +481,17 @@ add_custom_command(
     ${AWK_PROGRAM}
     sed
     >
-    compile_et
+    ${CMAKE_CURRENT_BINARY_DIR}/compile_et
     DEPENDS "${KRB5_SOURCE_DIR}/util/et/compile_et.sh" "${KRB5_SOURCE_DIR}/util/et/config_script"
     WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/util/et"
 )

-file(GLOB_RECURSE ET_FILES
-    "${KRB5_SOURCE_DIR}/*.et"
-)
-
-function(preprocess_et out_var)
-    set(result)
-    foreach(in_f ${ARGN})
-        string(REPLACE
-            .et
-            .c
-            F_C
-            ${in_f}
-        )
-        string(REPLACE
-            .et
-            .h
-            F_H
-            ${in_f}
-        )
-
-        get_filename_component(ET_PATH ${in_f} DIRECTORY)
-
-        add_custom_command(OUTPUT ${F_C} ${F_H}
-            COMMAND perl "${KRB5_SOURCE_DIR}/util/et/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${in_f}
-            DEPENDS ${in_f} "${KRB5_SOURCE_DIR}/util/et/compile_et"
-            WORKING_DIRECTORY ${ET_PATH}
-            VERBATIM
-        )
-        list(APPEND result ${F_C})
-    endforeach()
-    set(${out_var} "${result}" PARENT_SCOPE)
-endfunction()
-
 add_custom_command(
-    OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h"
+    OUTPUT "${KRB5_ET_BIN_DIR}/error_map.h"
     COMMAND perl
     -I../../../util
     ../../../util/gen-map.pl
-    -oerror_map.h
+    -o${KRB5_ET_BIN_DIR}/error_map.h
     NAME=gsserrmap
     KEY=OM_uint32
     VALUE=char*
@@ -536,22 +503,21 @@ add_custom_command(

 add_custom_target(
     ERROR_MAP_H
-    DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h"
+    DEPENDS "${KRB5_ET_BIN_DIR}/error_map.h"
     VERBATIM
 )

 add_custom_command(
-    OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h"
-    COMMAND perl -w -I../../../util ../../../util/gen.pl bimap errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp
+    OUTPUT "${KRB5_ET_BIN_DIR}/errmap.h"
+    COMMAND perl -w -I../../../util ../../../util/gen.pl bimap ${KRB5_ET_BIN_DIR}/errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp
     WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/lib/gssapi/generic"
 )

 add_custom_target(
     ERRMAP_H
-    DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h"
+    DEPENDS "${KRB5_ET_BIN_DIR}/errmap.h"
     VERBATIM
 )

 add_custom_target(
     KRB_5_H
     DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h"
@@ -567,7 +533,40 @@ add_dependencies(
     KRB_5_H
 )

-preprocess_et(processed_et_files ${ET_FILES})
+#
+# Generate error tables
+#
+function(preprocess_et et_path)
+    string(REPLACE .et .c F_C ${et_path})
+    string(REPLACE .et .h F_H ${et_path})
+    get_filename_component(et_dir ${et_path} DIRECTORY)
+    get_filename_component(et_name ${et_path} NAME_WLE)
+
+    add_custom_command(OUTPUT ${F_C} ${F_H} ${KRB5_ET_BIN_DIR}/${et_name}.h
+        COMMAND perl "${CMAKE_CURRENT_BINARY_DIR}/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${et_path}
+        # for #include w/o path (via -iquote)
+        COMMAND ${CMAKE_COMMAND} -E create_symlink ${F_H} ${KRB5_ET_BIN_DIR}/${et_name}.h
+        DEPENDS ${et_path} "${CMAKE_CURRENT_BINARY_DIR}/compile_et"
+        WORKING_DIRECTORY ${et_dir}
+        VERBATIM
+    )
+endfunction()
+
+function(generate_error_tables)
+    file(GLOB_RECURSE ET_FILES "${KRB5_SOURCE_DIR}/*.et")
+    foreach(et_path ${ET_FILES})
+        string(REPLACE ${KRB5_SOURCE_DIR} ${KRB5_ET_BIN_DIR} et_bin_path ${et_path})
+        string(REPLACE / _ et_target_name ${et_path})
+        get_filename_component(et_bin_dir ${et_bin_path} DIRECTORY)
+        add_custom_command(OUTPUT ${et_bin_path}
+            COMMAND ${CMAKE_COMMAND} -E make_directory ${et_bin_dir}
+            COMMAND ${CMAKE_COMMAND} -E copy_if_different ${et_path} ${et_bin_path}
+            VERBATIM
+        )
+        preprocess_et(${et_bin_path})
+    endforeach()
+endfunction()
+generate_error_tables()

 if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
     add_custom_command(
@@ -634,12 +633,12 @@ file(MAKE_DIRECTORY

 SET(KRBHDEP
     "${KRB5_SOURCE_DIR}/include/krb5/krb5.hin"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.h"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.h"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.h"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.h"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.h"
-    "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.h"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb5_err.h"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/k5e1_err.h"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kdb5_err.h"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kv5m_err.h"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb524_err.h"
+    "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/asn1_err.h"
 )

 # cmake < 3.18 does not have 'cat' command
@@ -656,6 +655,11 @@ target_include_directories(_krb5 SYSTEM BEFORE PUBLIC
     "${CMAKE_CURRENT_BINARY_DIR}/include"
 )

+target_compile_options(_krb5 PRIVATE
+    # For '#include "file.h"'
+    -iquote "${CMAKE_CURRENT_BINARY_DIR}/include_private"
+)
+
 target_include_directories(_krb5 PRIVATE
     "${CMAKE_CURRENT_BINARY_DIR}/include_private" # For autoconf.h and other generated headers.
     ${KRB5_SOURCE_DIR}
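The compile_et script generated above is the krb5 error-table compiler: given an .et source it emits a matching .c/.h pair, which is why the generated sources can now land in KRB5_ET_BIN_DIR instead of the source tree. A hand-run sketch mirroring the command used in preprocess_et (prof_err.et taken as an example input):

    perl "${CMAKE_CURRENT_BINARY_DIR}/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" prof_err.et   # writes prof_err.c and prof_err.h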
@@ -1,12 +1,9 @@
-# During cross-compilation in our CI we have to use llvm-tblgen and other building tools
-# tools to be build for host architecture and everything else for target architecture (e.g. AArch64)
-# Possible workaround is to use llvm-tblgen from some package...
-# But lets just enable LLVM for native builds
-if (CMAKE_CROSSCOMPILING OR SANITIZE STREQUAL "undefined")
+if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
     set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
 else()
     set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
 endif()

 option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})

 if (NOT ENABLE_EMBEDDED_COMPILER)
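Only the default changes here; the option can still be forced either way at configure time, for example (illustrative invocation):

    cmake -DENABLE_EMBEDDED_COMPILER=ON ..   # or OFF to skip building the bundled LLVM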
contrib/poco (vendored submodule): 2 changed lines

@@ -1 +1 @@
-Subproject commit 520a90e02e3e5cb90afeae1846d161dbc508a6f1
+Subproject commit 008b16469471d55b176db181756c94e3f14dd2dc
contrib/unixodbc (vendored submodule): 2 changed lines

@@ -1 +1 @@
-Subproject commit b0ad30f7f6289c12b76f04bfb9d466374bb32168
+Subproject commit a2cd5395e8c7f7390025ec93af5bfebef3fb5fcd
debian/clickhouse-server.service (vendored): 2 changed lines

@@ -20,7 +20,7 @@ ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml
 EnvironmentFile=-/etc/default/clickhouse
 LimitCORE=infinity
 LimitNOFILE=500000
-CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE
+CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE

 [Install]
 # ClickHouse should not start from the rescue shell (rescue.target).
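CAP_NET_BIND_SERVICE lets the non-root clickhouse user bind ports below 1024. After a systemd daemon-reload, the effective bounding set of the unit can be checked with, for instance:

    systemctl show clickhouse-server.service -p CapabilityBoundingSet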
docker/keeper/Dockerfile (new file): 74 lines

@@ -0,0 +1,74 @@
FROM ubuntu:20.04 AS glibc-donor

ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
    && case $arch in \
        amd64) rarch=x86_64 ;; \
        arm64) rarch=aarch64 ;; \
    esac \
    && ln -s "${rarch}-linux-gnu" /lib/linux-gnu


FROM alpine

ENV LANG=en_US.UTF-8 \
    LANGUAGE=en_US:en \
    LC_ALL=en_US.UTF-8 \
    TZ=UTC \
    CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml

COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/
COPY --from=glibc-donor /etc/nsswitch.conf /etc/
COPY entrypoint.sh /entrypoint.sh

ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
    && case $arch in \
        amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \
        arm64) ln -sf /lib/ld-2.31.so /lib/ld-linux-aarch64.so.1 ;; \
    esac

ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
ARG VERSION="22.4.1.917"
ARG PACKAGES="clickhouse-keeper"

# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
# can't do chown and owners of mounted volumes should be configured externally.
# We do that in advance at the begining of Dockerfile before any packages will be
# installed to prevent picking those uid / gid by some unrelated software.
# The same uid / gid (101) is used both for alpine and ubuntu.

ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
    && for package in ${PACKAGES}; do \
        { \
            { echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
                && wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" -O "/tmp/${package}-${VERSION}-${arch}.tgz" \
                && tar xvzf "/tmp/${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / ; \
            } || \
            { echo "Fallback to ${REPOSITORY}/${package}-${VERSION}.tgz" \
                && wget -c -q "${REPOSITORY}/${package}-${VERSION}.tgz" -O "/tmp/${package}-${VERSION}.tgz" \
                && tar xvzf "/tmp/${package}-${VERSION}.tgz" --strip-components=2 -C / ; \
            } ; \
        } || exit 1 \
    ; done \
    && rm /tmp/*.tgz /install -r \
    && addgroup -S -g 101 clickhouse \
    && adduser -S -h /var/lib/clickhouse -s /bin/bash -G clickhouse -g "ClickHouse keeper" -u 101 clickhouse \
    && mkdir -p /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper \
    && chown clickhouse:clickhouse /var/lib/clickhouse \
    && chown root:clickhouse /var/log/clickhouse-keeper \
    && chmod +x /entrypoint.sh \
    && apk add --no-cache su-exec bash tzdata \
    && cp /usr/share/zoneinfo/UTC /etc/localtime \
    && echo "UTC" > /etc/timezone \
    && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper


EXPOSE 2181 10181 44444

VOLUME /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper

ENTRYPOINT ["/entrypoint.sh"]
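The keeper image can also be built directly from this directory; a minimal sketch with a placeholder tag (VERSION/REPOSITORY defaults come from the ARGs above, and TARGETARCH falls back to amd64 when BuildKit does not supply it):

    docker build docker/keeper --build-arg VERSION="22.4.1.917" -t clickhouse-keeper:test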
docker/keeper/Dockerfile.alpine (new symbolic link): 1 line

@@ -0,0 +1 @@
+Dockerfile
docker/keeper/entrypoint.sh (new file): 93 lines

@@ -0,0 +1,93 @@
#!/bin/bash

set +x
set -eo pipefail
shopt -s nullglob

DO_CHOWN=1
if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
    DO_CHOWN=0
fi

CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"

# support --user
if [ "$(id -u)" = "0" ]; then
    USER=$CLICKHOUSE_UID
    GROUP=$CLICKHOUSE_GID
    if command -v gosu &> /dev/null; then
        gosu="gosu $USER:$GROUP"
    elif command -v su-exec &> /dev/null; then
        gosu="su-exec $USER:$GROUP"
    else
        echo "No gosu/su-exec detected!"
        exit 1
    fi
else
    USER="$(id -u)"
    GROUP="$(id -g)"
    gosu=""
    DO_CHOWN=0
fi

KEEPER_CONFIG="${KEEPER_CONFIG:-/etc/clickhouse-keeper/config.yaml}"

if [ -f "$KEEPER_CONFIG" ] && ! $gosu test -f "$KEEPER_CONFIG" -a -r "$KEEPER_CONFIG"; then
    echo "Configuration file '$KEEPER_CONFIG' isn't readable by user with id '$USER'"
    exit 1
fi

DATA_DIR="${CLICKHOUSE_DATA_DIR:-/var/lib/clickhouse}"
LOG_DIR="${LOG_DIR:-/var/log/clickhouse-keeper}"
LOG_PATH="${LOG_DIR}/clickhouse-keeper.log"
ERROR_LOG_PATH="${LOG_DIR}/clickhouse-keeper.err.log"
COORDINATION_LOG_DIR="${DATA_DIR}/coordination/log"
COORDINATION_SNAPSHOT_DIR="${DATA_DIR}/coordination/snapshots"
CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0}

for dir in "$DATA_DIR" \
  "$LOG_DIR" \
  "$TMP_DIR" \
  "$COORDINATION_LOG_DIR" \
  "$COORDINATION_SNAPSHOT_DIR"
do
    # check if variable not empty
    [ -z "$dir" ] && continue
    # ensure directories exist
    if ! mkdir -p "$dir"; then
        echo "Couldn't create necessary directory: $dir"
        exit 1
    fi

    if [ "$DO_CHOWN" = "1" ]; then
        # ensure proper directories permissions
        # but skip it for if directory already has proper premissions, cause recursive chown may be slow
        if [ "$(stat -c %u "$dir")" != "$USER" ] || [ "$(stat -c %g "$dir")" != "$GROUP" ]; then
            chown -R "$USER:$GROUP" "$dir"
        fi
    elif ! $gosu test -d "$dir" -a -w "$dir" -a -r "$dir"; then
        echo "Necessary directory '$dir' isn't accessible by user with id '$USER'"
        exit 1
    fi
done

# if no args passed to `docker run` or first argument start with `--`, then the user is passing clickhouse-server arguments
if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
    # Watchdog is launched by default, but does not send SIGINT to the main process,
    # so the container can't be finished by ctrl+c
    export CLICKHOUSE_WATCHDOG_ENABLE

    cd /var/lib/clickhouse

    # There is a config file. It is already tested with gosu (if it is readably by keeper user)
    if [ -f "$KEEPER_CONFIG" ]; then
        exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
    fi

    # There is no config file. Will use embedded one
    exec $gosu /usr/bin/clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
fi

# Otherwise, we assume the user want to run his own process, for example a `bash` shell to explore this image
exec "$@"
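A minimal way to try the resulting image; the tag, host paths and port mapping are placeholders, while the in-container paths match the defaults in the entrypoint above:

    docker run -d --name keeper -p 2181:2181 \
        -v /srv/keeper/keeper.yaml:/etc/clickhouse-keeper/config.yaml \
        -v /srv/keeper/data:/var/lib/clickhouse \
        clickhouse-keeper:test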
docker/server/.gitignore (vendored): 2 changed lines

@@ -1,2 +0,0 @@
-alpine-root/*
-tgz-packages/*
@@ -1,122 +0,0 @@ (whole file removed)
FROM ubuntu:20.04

# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list

ARG repository="deb https://packages.clickhouse.com/deb stable main"
ARG version=22.1.1.*

# set non-empty deb_location_url url to create a docker image
# from debs created by CI build, for example:
# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://clickhouse-builds.s3.yandex.net/21852/069cfbff388b3d478d1a16dc7060b48073f5d522/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/" -t filimonovq/clickhouse-server:pr21852
ARG deb_location_url=""

# set non-empty single_binary_location_url to create docker image
# from a single binary url (useful for non-standard builds - with sanitizers, for arm64).
# for example (run on aarch64 server):
# docker build . --network host --build-arg single_binary_location_url="https://builds.clickhouse.com/master/aarch64/clickhouse" -t altinity/clickhouse-server:master-testing-arm
# note: clickhouse-odbc-bridge is not supported there.
ARG single_binary_location_url=""

# see https://github.com/moby/moby/issues/4032#issuecomment-192327844
ARG DEBIAN_FRONTEND=noninteractive

# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
# can't do chown and owners of mounted volumes should be configured externally.
# We do that in advance at the begining of Dockerfile before any packages will be
# installed to prevent picking those uid / gid by some unrelated software.
# The same uid / gid (101) is used both for alpine and ubuntu.

# To drop privileges, we need 'su' command, that simply changes uid and gid.
# In fact, the 'su' command from Linux is not so simple, due to inherent vulnerability in Linux:
# https://ruderich.org/simon/notes/su-sudo-from-root-tty-hijacking
# It has to mitigate this drawback of Linux, and to do this, 'su' command is creating it's own pseudo-terminal
# and forwarding commands. Due to some ridiculous curcumstances, it does not work in Docker (or it does)
# and for these reasons people are using alternatives to the 'su' command in Docker,
# that don't mess with the terminal, don't care about closing the opened files, etc...
# but can only be safe to drop privileges inside Docker.
# The question - what implementation of 'su' command to use.
# It should be a simple script doing about just two syscalls.
# Some people tend to use 'gosu' tool that is written in Go.
# It is not used for several reasons:
# 1. Dependency on some foreign code in yet another programming language - does not sound alright.
# 2. Anselmo D. Adams suggested not to use it due to false positive alarms in some undisclosed security scanners.

COPY su-exec.c /su-exec.c

RUN groupadd -r clickhouse --gid=101 \
    && useradd -r -g clickhouse --uid=101 --home-dir=/var/lib/clickhouse --shell=/bin/bash clickhouse \
    && apt-get update \
    && apt-get install --yes --no-install-recommends \
        apt-transport-https \
        ca-certificates \
        dirmngr \
        gnupg \
        locales \
        wget \
        tzdata \
    && mkdir -p /etc/apt/sources.list.d \
    && apt-key adv --keyserver keyserver.ubuntu.com --recv 8919F6BD2B48D754 \
    && echo $repository > /etc/apt/sources.list.d/clickhouse.list \
    && if [ -n "$deb_location_url" ]; then \
        echo "installing from custom url with deb packages: $deb_location_url" \
        rm -rf /tmp/clickhouse_debs \
        && mkdir -p /tmp/clickhouse_debs \
        && wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-common-static_${version}_amd64.deb" -P /tmp/clickhouse_debs \
        && wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-client_${version}_all.deb" -P /tmp/clickhouse_debs \
        && wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-server_${version}_all.deb" -P /tmp/clickhouse_debs \
        && dpkg -i /tmp/clickhouse_debs/*.deb ; \
    elif [ -n "$single_binary_location_url" ]; then \
        echo "installing from single binary url: $single_binary_location_url" \
        && rm -rf /tmp/clickhouse_binary \
        && mkdir -p /tmp/clickhouse_binary \
        && wget --progress=bar:force:noscroll "$single_binary_location_url" -O /tmp/clickhouse_binary/clickhouse \
        && chmod +x /tmp/clickhouse_binary/clickhouse \
        && /tmp/clickhouse_binary/clickhouse install --user "clickhouse" --group "clickhouse" ; \
    else \
        echo "installing from repository: $repository" \
        && apt-get update \
        && apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \
        && apt-get install --allow-unauthenticated --yes --no-install-recommends \
            clickhouse-common-static=$version \
            clickhouse-client=$version \
            clickhouse-server=$version ; \
    fi \
    && apt-get install -y --no-install-recommends tcc libc-dev && \
        tcc /su-exec.c -o /bin/su-exec && \
        chown root:root /bin/su-exec && \
        chmod 0755 /bin/su-exec && \
        rm /su-exec.c && \
        apt-get purge -y --auto-remove tcc libc-dev libc-dev-bin libc6-dev linux-libc-dev \
    && clickhouse-local -q 'SELECT * FROM system.build_options' \
    && rm -rf \
        /var/lib/apt/lists/* \
        /var/cache/debconf \
        /tmp/* \
    && apt-get clean \
    && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
    && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client

# we need to allow "others" access to clickhouse folder, because docker container
# can be started with arbitrary uid (openshift usecase)

RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
ENV TZ UTC

RUN mkdir /docker-entrypoint-initdb.d

COPY docker_related_config.xml /etc/clickhouse-server/config.d/
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

EXPOSE 9000 8123 9009
VOLUME /var/lib/clickhouse

ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml

ENTRYPOINT ["/entrypoint.sh"]
docker/server/Dockerfile (symbolic link): 1 line

@@ -0,0 +1 @@
+Dockerfile.ubuntu
@@ -1,3 +1,14 @@
+FROM ubuntu:20.04 AS glibc-donor
+ARG TARGETARCH
+
+RUN arch=${TARGETARCH:-amd64} \
+    && case $arch in \
+        amd64) rarch=x86_64 ;; \
+        arm64) rarch=aarch64 ;; \
+    esac \
+    && ln -s "${rarch}-linux-gnu" /lib/linux-gnu
+
+
 FROM alpine

 ENV LANG=en_US.UTF-8 \
@@ -6,7 +17,24 @@ ENV LANG=en_US.UTF-8 \
     TZ=UTC \
     CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml

-COPY alpine-root/ /
+COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/
+COPY --from=glibc-donor /etc/nsswitch.conf /etc/
+COPY docker_related_config.xml /etc/clickhouse-server/config.d/
+COPY entrypoint.sh /entrypoint.sh
+
+ARG TARGETARCH
+
+RUN arch=${TARGETARCH:-amd64} \
+    && case $arch in \
+        amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \
+        arm64) ln -sf /lib/ld-2.31.so /lib/ld-linux-aarch64.so.1 ;; \
+    esac
+
+# lts / testing / prestable / etc
+ARG REPO_CHANNEL="stable"
+ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
+ARG VERSION="20.9.3.45"
+ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
+
 # user/group precreated explicitly with fixed uid/gid on purpose.
 # It is especially important for rootless containers: in that case entrypoint
@@ -15,9 +43,23 @@ COPY alpine-root/ /
 # installed to prevent picking those uid / gid by some unrelated software.
 # The same uid / gid (101) is used both for alpine and ubuntu.

-RUN addgroup -S -g 101 clickhouse \
+RUN arch=${TARGETARCH:-amd64} \
+    && for package in ${PACKAGES}; do \
+        { \
+            { echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
+                && wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" -O "/tmp/${package}-${VERSION}-${arch}.tgz" \
+                && tar xvzf "/tmp/${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / ; \
+            } || \
+            { echo "Fallback to ${REPOSITORY}/${package}-${VERSION}.tgz" \
+                && wget -c -q "${REPOSITORY}/${package}-${VERSION}.tgz" -O "/tmp/${package}-${VERSION}.tgz" \
+                && tar xvzf "/tmp/${package}-${VERSION}.tgz" --strip-components=2 -C / ; \
+            } ; \
+        } || exit 1 \
+    ; done \
+    && rm /tmp/*.tgz /install -r \
+    && addgroup -S -g 101 clickhouse \
     && adduser -S -h /var/lib/clickhouse -s /bin/bash -G clickhouse -g "ClickHouse server" -u 101 clickhouse \
-    && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
+    && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server/config.d /etc/clickhouse-server/users.d /etc/clickhouse-client /docker-entrypoint-initdb.d \
     && chown clickhouse:clickhouse /var/lib/clickhouse \
     && chown root:clickhouse /var/log/clickhouse-server \
     && chmod +x /entrypoint.sh \
docker/server/Dockerfile.ubuntu (new file): 129 lines

@@ -0,0 +1,129 @@
FROM ubuntu:20.04

# see https://github.com/moby/moby/issues/4032#issuecomment-192327844
ARG DEBIAN_FRONTEND=noninteractive

COPY su-exec.c /su-exec.c

# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list \
    && groupadd -r clickhouse --gid=101 \
    && useradd -r -g clickhouse --uid=101 --home-dir=/var/lib/clickhouse --shell=/bin/bash clickhouse \
    && apt-get update \
    && apt-get install --yes --no-install-recommends \
        apt-transport-https \
        ca-certificates \
        dirmngr \
        gnupg \
        locales \
        wget \
        tzdata \
    && apt-get install -y --no-install-recommends tcc libc-dev && \
        tcc /su-exec.c -o /bin/su-exec && \
        chown root:root /bin/su-exec && \
        chmod 0755 /bin/su-exec && \
        rm /su-exec.c && \
        apt-get purge -y --auto-remove tcc libc-dev libc-dev-bin libc6-dev linux-libc-dev \
    && apt-get clean

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION=22.1.1.*
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image
# from debs created by CI build, for example:
# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://clickhouse-builds.s3.yandex.net/21852/069cfbff388b3d478d1a16dc7060b48073f5d522/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/" -t filimonovq/clickhouse-server:pr21852
ARG deb_location_url=""

# set non-empty single_binary_location_url to create docker image
# from a single binary url (useful for non-standard builds - with sanitizers, for arm64).
# for example (run on aarch64 server):
# docker build . --network host --build-arg single_binary_location_url="https://builds.clickhouse.com/master/aarch64/clickhouse" -t altinity/clickhouse-server:master-testing-arm
# note: clickhouse-odbc-bridge is not supported there.
ARG single_binary_location_url=""

# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
# can't do chown and owners of mounted volumes should be configured externally.
# We do that in advance at the begining of Dockerfile before any packages will be
# installed to prevent picking those uid / gid by some unrelated software.
# The same uid / gid (101) is used both for alpine and ubuntu.

# To drop privileges, we need 'su' command, that simply changes uid and gid.
# In fact, the 'su' command from Linux is not so simple, due to inherent vulnerability in Linux:
# https://ruderich.org/simon/notes/su-sudo-from-root-tty-hijacking
# It has to mitigate this drawback of Linux, and to do this, 'su' command is creating it's own pseudo-terminal
# and forwarding commands. Due to some ridiculous curcumstances, it does not work in Docker (or it does)
# and for these reasons people are using alternatives to the 'su' command in Docker,
# that don't mess with the terminal, don't care about closing the opened files, etc...
# but can only be safe to drop privileges inside Docker.
# The question - what implementation of 'su' command to use.
# It should be a simple script doing about just two syscalls.
# Some people tend to use 'gosu' tool that is written in Go.
# It is not used for several reasons:
# 1. Dependency on some foreign code in yet another programming language - does not sound alright.
# 2. Anselmo D. Adams suggested not to use it due to false positive alarms in some undisclosed security scanners.

ARG TARGETARCH

RUN arch=${TARGETARCH:-amd64} \
    && if [ -n "${deb_location_url}" ]; then \
        echo "installing from custom url with deb packages: ${deb_location_url}" \
        rm -rf /tmp/clickhouse_debs \
        && mkdir -p /tmp/clickhouse_debs \
        && for package in ${PACKAGES}; do \
            { wget --progress=bar:force:noscroll "${deb_location_url}/${package}_${VERSION}_${arch}.deb" -P /tmp/clickhouse_debs || \
                wget --progress=bar:force:noscroll "${deb_location_url}/${package}_${VERSION}_all.deb" -P /tmp/clickhouse_debs ; } \
            || exit 1 \
        ; done \
        && dpkg -i /tmp/clickhouse_debs/*.deb ; \
    elif [ -n "${single_binary_location_url}" ]; then \
        echo "installing from single binary url: ${single_binary_location_url}" \
        && rm -rf /tmp/clickhouse_binary \
        && mkdir -p /tmp/clickhouse_binary \
        && wget --progress=bar:force:noscroll "${single_binary_location_url}" -O /tmp/clickhouse_binary/clickhouse \
        && chmod +x /tmp/clickhouse_binary/clickhouse \
        && /tmp/clickhouse_binary/clickhouse install --user "clickhouse" --group "clickhouse" ; \
    else \
        mkdir -p /etc/apt/sources.list.d \
        && apt-key adv --keyserver keyserver.ubuntu.com --recv 8919F6BD2B48D754 \
        && echo ${REPOSITORY} > /etc/apt/sources.list.d/clickhouse.list \
        && echo "installing from repository: ${REPOSITORY}" \
        && apt-get update \
        && apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \
        && for package in ${PACKAGES}; do \
            packages="${packages} ${package}=${VERSION}" \
        ; done \
        && apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \
    ; fi \
    && clickhouse-local -q 'SELECT * FROM system.build_options' \
    && rm -rf \
        /var/lib/apt/lists/* \
        /var/cache/debconf \
        /tmp/* \
    && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
    && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client

# we need to allow "others" access to clickhouse folder, because docker container
# can be started with arbitrary uid (openshift usecase)

RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
ENV TZ UTC

RUN mkdir /docker-entrypoint-initdb.d

COPY docker_related_config.xml /etc/clickhouse-server/config.d/
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

EXPOSE 9000 8123 9009
VOLUME /var/lib/clickhouse

ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml

ENTRYPOINT ["/entrypoint.sh"]
@@ -1,63 +0,0 @@ (whole file removed)
#!/bin/bash
set -x

REPO_CHANNEL="${REPO_CHANNEL:-stable}" # lts / testing / prestable / etc
REPO_URL="${REPO_URL:-"https://repo.yandex.ru/clickhouse/tgz/${REPO_CHANNEL}"}"
VERSION="${VERSION:-20.9.3.45}"
DOCKER_IMAGE="${DOCKER_IMAGE:-clickhouse/clickhouse-server}"

# where original files live
DOCKER_BUILD_FOLDER="${BASH_SOURCE%/*}"

# we will create root for our image here
CONTAINER_ROOT_FOLDER="${DOCKER_BUILD_FOLDER}/alpine-root"

# clean up the root from old runs, it's reconstructed each time
rm -rf "$CONTAINER_ROOT_FOLDER"
mkdir -p "$CONTAINER_ROOT_FOLDER"

# where to put downloaded tgz
TGZ_PACKAGES_FOLDER="${DOCKER_BUILD_FOLDER}/tgz-packages"
mkdir -p "$TGZ_PACKAGES_FOLDER"

PACKAGES=( "clickhouse-client" "clickhouse-server" "clickhouse-common-static" )

# download tars from the repo
for package in "${PACKAGES[@]}"
do
    wget -c -q --show-progress "${REPO_URL}/${package}-${VERSION}.tgz" -O "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz"
done

# unpack tars
for package in "${PACKAGES[@]}"
do
    tar xvzf "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz" --strip-components=2 -C "$CONTAINER_ROOT_FOLDER"
done

# prepare few more folders
mkdir -p "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/users.d" \
    "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d" \
    "${CONTAINER_ROOT_FOLDER}/var/log/clickhouse-server" \
    "${CONTAINER_ROOT_FOLDER}/var/lib/clickhouse" \
    "${CONTAINER_ROOT_FOLDER}/docker-entrypoint-initdb.d" \
    "${CONTAINER_ROOT_FOLDER}/lib64"

cp "${DOCKER_BUILD_FOLDER}/docker_related_config.xml" "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d/"
cp "${DOCKER_BUILD_FOLDER}/entrypoint.sh" "${CONTAINER_ROOT_FOLDER}/entrypoint.sh"

## get glibc components from ubuntu 20.04 and put them to expected place
docker pull ubuntu:20.04
ubuntu20image=$(docker create --rm ubuntu:20.04)
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libc.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libdl.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libm.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libpthread.so.0 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/librt.so.1 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libnss_dns.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libnss_files.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libresolv.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib64/ld-linux-x86-64.so.2 "${CONTAINER_ROOT_FOLDER}/lib64"
docker cp -L "${ubuntu20image}":/etc/nsswitch.conf "${CONTAINER_ROOT_FOLDER}/etc"

docker build "$DOCKER_BUILD_FOLDER" -f Dockerfile.alpine -t "${DOCKER_IMAGE}:${VERSION}-alpine" --pull
rm -rf "$CONTAINER_ROOT_FOLDER"
@@ -1,47 +0,0 @@ (whole file removed)
# Since right now we can't set volumes to the docker during build, we split building container in stages:
# 1. build base container
# 2. run base conatiner with mounted volumes
# 3. commit container as image
# 4. build final container atop that image
# Middle steps are performed by the bash script.

FROM ubuntu:18.04 as clickhouse-server-base
ARG gosu_ver=1.14

VOLUME /packages/

# update to allow installing dependencies of clickhouse automatically
RUN apt update; \
    DEBIAN_FRONTEND=noninteractive \
    apt install -y locales;

ADD https://github.com/tianon/gosu/releases/download/${gosu_ver}/gosu-amd64 /bin/gosu

RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8

# installing via apt to simulate real-world scenario, where user installs deb package and all it's dependecies automatically.
CMD DEBIAN_FRONTEND=noninteractive \
    apt install -y \
        /packages/clickhouse-common-static_*.deb \
        /packages/clickhouse-server_*.deb ;

FROM clickhouse-server-base:postinstall as clickhouse-server

RUN mkdir /docker-entrypoint-initdb.d

COPY docker_related_config.xml /etc/clickhouse-server/config.d/
COPY entrypoint.sh /entrypoint.sh

RUN chmod +x \
    /entrypoint.sh \
    /bin/gosu

EXPOSE 9000 8123 9009
VOLUME /var/lib/clickhouse

ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml

ENTRYPOINT ["/entrypoint.sh"]
@@ -226,7 +226,6 @@ quit
     --receive_data_timeout_ms=10000 \
     --stacktrace \
     --query-fuzzer-runs=1000 \
-    --testmode \
     --queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) \
     $NEW_TESTS_OPT \
     > >(tail -n 100000 > fuzzer.log) \
@ -1,8 +1,10 @@
# docker build -t clickhouse/mysql-js-client .
# MySQL JavaScript client docker container

- FROM node:8
+ FROM node:16.14.2

+ WORKDIR /usr/app

RUN npm install mysql

- COPY ./test.js test.js
+ COPY ./test.js ./test.js
@ -348,13 +348,13 @@ then
rm -f /test_output/tmp

# OOM
- zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
+ zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
    && echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'Backward compatibility check: No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

# Logical errors
echo "Check for Logical errors in server log:"
- zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log > /test_output/bc_check_logical_errors.txt \
+ zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log* > /test_output/bc_check_logical_errors.txt \
    && echo -e 'Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'Backward compatibility check: No logical errors\tOK' >> /test_output/test_results.tsv
@ -362,13 +362,13 @@ then
[ -s /test_output/bc_check_logical_errors.txt ] || rm /test_output/bc_check_logical_errors.txt

# Crash
- zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \
+ zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \
    && echo -e 'Backward compatibility check: Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'Backward compatibility check: Not crashed\tOK' >> /test_output/test_results.tsv

# It also checks for crash without stacktrace (printed by watchdog)
echo "Check for Fatal message in server log:"
- zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log > /test_output/bc_check_fatal_messages.txt \
+ zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log* > /test_output/bc_check_fatal_messages.txt \
    && echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'Backward compatibility check: No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
@ -1,86 +0,0 @@
- #!/bin/sh

- set -e -x

- # Not sure why shellcheck complains that rc is not assigned before it is referenced.
- # shellcheck disable=SC2154
- trap 'rc=$?; echo EXITED WITH: $rc; exit $rc' EXIT

- # CLI option to prevent rebuilding images, just re-run tests with images leftover from previuos time
- readonly NO_REBUILD_FLAG="--no-rebuild"

- readonly CLICKHOUSE_DOCKER_DIR="$(realpath "${1}")"
- readonly CLICKHOUSE_PACKAGES_ARG="${2}"
- CLICKHOUSE_SERVER_IMAGE="${3}"

- if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then
-     readonly CLICKHOUSE_PACKAGES_DIR="$(realpath "${2}")" # or --no-rebuild
- fi

- # In order to allow packages directory to be anywhere, and to reduce amount of context sent to the docker daemon,
- # all images are built in multiple stages:
- # 1. build base image, install dependencies
- # 2. run image with volume mounted, install what needed from those volumes
- # 3. tag container as image
- # 4. [optional] build another image atop of tagged.

- # TODO: optionally mount most recent clickhouse-test and queries directory from local machine

- if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then
-     docker build --network=host \
-         -f "${CLICKHOUSE_DOCKER_DIR}/test/stateless/clickhouse-statelest-test-runner.Dockerfile" \
-         --target clickhouse-test-runner-base \
-         -t clickhouse-test-runner-base:preinstall \
-         "${CLICKHOUSE_DOCKER_DIR}/test/stateless"

-     docker rm -f clickhouse-test-runner-installing-packages || true
-     docker run --network=host \
-         -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
-         --name clickhouse-test-runner-installing-packages \
-         clickhouse-test-runner-base:preinstall
-     docker commit clickhouse-test-runner-installing-packages clickhouse-statelest-test-runner:local
-     docker rm -f clickhouse-test-runner-installing-packages || true
- fi

- # # Create a bind-volume to the clickhouse-test script file
- # docker volume create --driver local --opt type=none --opt device=/home/enmk/proj/ClickHouse_master/tests/clickhouse-test --opt o=bind clickhouse-test-script-volume
- # docker volume create --driver local --opt type=none --opt device=/home/enmk/proj/ClickHouse_master/tests/queries --opt o=bind clickhouse-test-queries-dir-volume

- # Build server image (optional) from local packages
- if [ -z "${CLICKHOUSE_SERVER_IMAGE}" ]; then
-     CLICKHOUSE_SERVER_IMAGE="clickhouse/server:local"

-     if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then
-         docker build --network=host \
-             -f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \
-             --target clickhouse-server-base \
-             -t clickhouse-server-base:preinstall \
-             "${CLICKHOUSE_DOCKER_DIR}/server"

-         docker rm -f clickhouse_server_base_installing_server || true
-         docker run --network=host -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \
-             --name clickhouse_server_base_installing_server \
-             clickhouse-server-base:preinstall
-         docker commit clickhouse_server_base_installing_server clickhouse-server-base:postinstall

-         docker build --network=host \
-             -f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \
-             --target clickhouse-server \
-             -t "${CLICKHOUSE_SERVER_IMAGE}" \
-             "${CLICKHOUSE_DOCKER_DIR}/server"
-     fi
- fi

- docker rm -f test-runner || true
- docker-compose down
- CLICKHOUSE_SERVER_IMAGE="${CLICKHOUSE_SERVER_IMAGE}" \
-     docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \
-         create \
-         --build --force-recreate

- CLICKHOUSE_SERVER_IMAGE="${CLICKHOUSE_SERVER_IMAGE}" \
-     docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \
-         run \
-         --name test-runner \
-         test-runner
@ -1,34 +0,0 @@
- version: "2"

- services:
-   clickhouse-server:
-     image: ${CLICKHOUSE_SERVER_IMAGE}
-     expose:
-       - "8123" # HTTP
-       - "9000" # TCP
-       - "9009" # HTTP-interserver
-     restart: "no"

-   test-runner:
-     image: clickhouse-statelest-test-runner:local

-     restart: "no"
-     depends_on:
-       - clickhouse-server
-     environment:
-       # these are used by clickhouse-test to point clickhouse-client to the right server
-       - CLICKHOUSE_HOST=clickhouse-server
-       - CLICKHOUSE_PORT=9009
-       - CLICKHOUSE_TEST_HOST_EXPOSED_PORT=51234
-     expose:
-       # port for any test to serve data to clickhouse-server on rare occasion (like URL-engine tables in 00646),
-       # should match value of CLICKHOUSE_TEST_HOST_EXPOSED_PORT above
-       - "51234"

-     # NOTE: Dev-mode: mount newest versions of the queries and clickhouse-test script into container.
-     # volumes:
-     #  - /home/enmk/proj/ClickHouse_master/tests/queries:/usr/share/clickhouse-test/queries:ro
-     #  - /home/enmk/proj/ClickHouse_master/tests/clickhouse-test:/usr/bin/clickhouse-test:ro

-     # String-form instead of list-form to allow multiple arguments in "${CLICKHOUSE_TEST_ARGS}"
-     entrypoint: "clickhouse-test ${CLICKHOUSE_TEST_ARGS}"
@ -688,7 +688,7 @@ Tags:
- `volume_name_N` — Volume name. Volume names must be unique.
- `disk` — a disk within a volume.
- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume’s disks. If the a size of a merged part estimated to be bigger than `max_data_part_size_bytes` then this part will be written to a next volume. Basically this feature allows to keep new/small parts on a hot (SSD) volume and move them to a cold (HDD) volume when they reach large size. Do not use this setting if your policy has only one volume.
-- `move_factor` — when the amount of available space gets lower than this factor, data automatically start to move on the next volume if any (by default, 0.1).
+- `move_factor` — when the amount of available space gets lower than this factor, data automatically starts to move on the next volume if any (by default, 0.1). ClickHouse sorts existing parts by size from largest to smallest (in descending order) and selects parts with the total size that is sufficient to meet the `move_factor` condition. If the total size of all parts is insufficient, all parts will be moved.
- `prefer_not_to_merge` — Disables merging of data parts on this volume. When this setting is enabled, merging data on this volume is not allowed. This allows controlling how ClickHouse works with slow disks.

Cofiguration examples:
@ -43,7 +43,7 @@ toc_title: Adopters
| <a href="https://city-mobil.ru" class="favicon">Citymobil</a> | Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) |
| <a href="https://cloudflare.com" class="favicon">Cloudflare</a> | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) |
| <a href="https://corporate.comcast.com/" class="favicon">Comcast</a> | Media | CDN Traffic Analysis | — | — | [ApacheCon 2019 Talk](https://www.youtube.com/watch?v=e9TZ6gFDjNg) |
-| <a href="https://contentsquare.com" class="favicon">ContentSquare</a> | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) |
+| <a href="https://contentsquare.com" class="favicon">Contentsquare</a> | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) |
| <a href="https://coru.net/" class="favicon">Corunet</a> | Analytics | Main product | — | — | [Slides in English, April 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) |
| <a href="https://www.creditx.com" class="favicon">CraiditX 氪信</a> | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) |
| <a href="https://crazypanda.ru/en/" class="favicon">Crazypanda</a> | Games | | — | — | Live session on ClickHouse meetup |
|
|||||||
<access_key_id>AKIAIOSFODNN7EXAMPLE</access_key_id>
|
<access_key_id>AKIAIOSFODNN7EXAMPLE</access_key_id>
|
||||||
<secret_access_key> wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY</secret_access_key>
|
<secret_access_key> wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY</secret_access_key>
|
||||||
<format>CSV</format>
|
<format>CSV</format>
|
||||||
|
<url>https://s3.us-east-1.amazonaws.com/yourbucket/mydata/</url>
|
||||||
</s3_mydata>
|
</s3_mydata>
|
||||||
</named_collections>
|
</named_collections>
|
||||||
</clickhouse>
|
</clickhouse>
|
||||||
@ -44,12 +45,12 @@ Example of configuration:
### Example of using named connections with the s3 function

```sql
-INSERT INTO FUNCTION s3(s3_mydata, url = 'https://s3.us-east-1.amazonaws.com/yourbucket/mydata/test_file.tsv.gz',
+INSERT INTO FUNCTION s3(s3_mydata, filename = 'test_file.tsv.gz',
format = 'TSV', structure = 'number UInt64', compression_method = 'gzip')
SELECT * FROM numbers(10000);

SELECT count()
-FROM s3(s3_mydata, url = 'https://s3.us-east-1.amazonaws.com/yourbucket/mydata/test_file.tsv.gz')
+FROM s3(s3_mydata, filename = 'test_file.tsv.gz')

┌─count()─┐
│   10000 │
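The hunk above switches the documented example from a full `url` to the shorter `filename` key, resolved against the collection's base `url`. For illustration only (not part of the patch), reading back through the same named collection with per-query overrides would look roughly like this, assuming the `s3_mydata` collection and the `test_file.tsv.gz` object from the example:

```sql
-- Sketch: read via the named collection, overriding only the per-query details.
SELECT number, number * 2 AS doubled
FROM s3(s3_mydata, filename = 'test_file.tsv.gz',
        format = 'TSV', structure = 'number UInt64', compression_method = 'gzip')
LIMIT 5;
```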
@ -393,6 +393,13 @@ This is a generalization of other functions named `toStartOf*`. For example,
`toStartOfInterval(t, INTERVAL 1 day)` returns the same as `toStartOfDay(t)`,
`toStartOfInterval(t, INTERVAL 15 minute)` returns the same as `toStartOfFifteenMinutes(t)` etc.

+## toLastDayOfMonth {#toLastDayOfMonth}
+
+Rounds up a date or date with time to the last day of the month.
+Returns the date.
+
+Alias: `LAST_DAY`.
+
## toTime {#totime}

Converts a date with time to a certain fixed date, while preserving the time.
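A quick illustration of the newly documented `toLastDayOfMonth` (a sketch, not taken from the patch); rounding lands on the month's actual last day, including leap years, and the `LAST_DAY` alias behaves the same way:

```sql
SELECT
    toLastDayOfMonth(toDate('2024-02-15')) AS leap_year_case,       -- 2024-02-29
    LAST_DAY(toDateTime('2022-03-24 12:30:00')) AS using_the_alias; -- 2022-03-31
```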
@ -77,7 +77,7 @@ A function configuration contains the following settings:
- `argument` - argument description with the `type`, and optional `name` of an argument. Each argument is described in a separate setting. Specifying name is necessary if argument names are part of serialization for user defined function format like [Native](../../interfaces/formats.md#native) or [JSONEachRow](../../interfaces/formats.md#jsoneachrow). Default argument name value is `c` + argument_number.
- `format` - a [format](../../interfaces/formats.md) in which arguments are passed to the command.
- `return_type` - the type of a returned value.
-- `return_name` - name of retuned value. Specifying return name is necessary if return name is part of serialization for user defined function format like [Native](../../interfaces/formats.md#native) or [JSONEachRow](../../interfaces/formats.md#jsoneachrow). Optional. Default value is `result`.
+- `return_name` - name of returned value. Specifying return name is necessary if return name is part of serialization for user defined function format like [Native](../../interfaces/formats.md#native) or [JSONEachRow](../../interfaces/formats.md#jsoneachrow). Optional. Default value is `result`.
- `type` - an executable type. If `type` is set to `executable` then single command is started. If it is set to `executable_pool` then a pool of commands is created.
- `max_command_execution_time` - maximum execution time in seconds for processing block of data. This setting is valid for `executable_pool` commands only. Optional. Default value is `10`.
- `command_termination_timeout` - time in seconds during which a command should finish after its pipe is closed. After that time `SIGTERM` is sent to the process executing the command. Optional. Default value is `10`.
@ -2499,3 +2499,41 @@ Result:
│                      286 │
└──────────────────────────┘
```
+
+## getTypeSerializationStreams {#getTypeSerializationStreams}
+
+return the serialization streams of data type.
+
+**Syntax**
+``` sql
+getTypeSerializationStreams(type_name)
+
+getTypeSerializationStreams(column)
+```
+
+**Arguments**
+- `type_name` - Name of data type to get its serialization paths. [String](../../sql-reference/data-types/string.md#string).
+- `column` - any column which has a data type
+
+**Returned value**
+- List of serialization streams;
+
+Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT getTypeSerializationStreams('Array(Array(Int8))')
+```
+
+Result:
+
+``` text
+┌───────────────────────getTypeSerializationStreams('Array(Array(Int8))')──────────────────────────┐
+│ ['{ArraySizes}','{ArrayElements, ArraySizes}','{ArrayElements, ArrayElements, Regular}']          │
+└───────────────────────────────────────────────────────────────────────────────────────────────────┘
+```
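The column form of `getTypeSerializationStreams` is documented above but not demonstrated. A hypothetical sketch follows; the table `tab` is made up, and the expected output is inferred from the `Array(Array(Int8))` example rather than verified:

```sql
CREATE TABLE tab (arr Array(Int8)) ENGINE = Memory;
INSERT INTO tab VALUES ([1, 2, 3]);

-- Passing the column should list the same streams as the equivalent type name,
-- presumably ['{ArraySizes}','{ArrayElements, Regular}'] for Array(Int8).
SELECT getTypeSerializationStreams(arr) FROM tab;
```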
@ -114,9 +114,9 @@ In addition, this column is not substituted when using an asterisk in a SELECT query.

### EPHEMERAL {#ephemeral}

-`EPHEMERAL expr`
+`EPHEMERAL [expr]`

-Ephemeral column. Such a column isn't stored in the table and cannot be SELECTed, but can be referenced in the defaults of CREATE statement.
+Ephemeral column. Such a column isn't stored in the table and cannot be SELECTed, but can be referenced in the defaults of CREATE statement. If `expr` is omitted type for column is required.
INSERT without list of columns will skip such column, so SELECT/INSERT invariant is preserved - the dump obtained using `SELECT *` can be inserted back into the table using INSERT without specifying the list of columns.

### ALIAS {#alias}
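To make the relaxed `EPHEMERAL [expr]` form concrete, a minimal sketch (table and column names are illustrative, not from the patch); when no default expression is given, the column type must now be stated explicitly:

```sql
CREATE TABLE page_visits
(
    raw_url String EPHEMERAL,                  -- not stored; type is required because expr is omitted
    url_domain String DEFAULT domain(raw_url)  -- materialized from the ephemeral value at INSERT time
)
ENGINE = MergeTree
ORDER BY url_domain;

INSERT INTO page_visits (raw_url) VALUES ('https://clickhouse.com/docs/en/');
SELECT * FROM page_visits;  -- returns only url_domain, e.g. 'clickhouse.com'
```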
@ -273,7 +273,7 @@ GitHubのUIでforkリポジトリに移動します。 ブランチで開発し

プル要求は、作業がまだ完了していない場合でも作成できます。 この場合、単語を入れてください “WIP” (進行中の作業)タイトルの先頭に、それは後で変更することができます。 これは、変更の協調的なレビューと議論、および利用可能なすべてのテストの実行に役立ちます。 変更の簡単な説明を提供することが重要です。

-Yandexの従業員がタグであなたのPRにラベルを付けるとすぐにテストが開始されます “can be tested”. The results of some first checks (e.g. code style) will come in within several minutes. Build check results will arrive within half an hour. And the main set of tests will report itself within an hour.
+ClickHouseの従業員がタグであなたのPRにラベルを付けるとすぐにテストが開始されます “can be tested”. The results of some first checks (e.g. code style) will come in within several minutes. Build check results will arrive within half an hour. And the main set of tests will report itself within an hour.

システムは、プル要求用にClickHouseバイナリビルドを個別に準備します。 これらのビルドを取得するには “Details” 次のリンク “ClickHouse build check” 小切手のリストのエントリ。 そこには、ビルドへの直接リンクがあります。ClickHouseのdebパッケージは、本番サーバーにも展開できます(恐れがない場合)。
|
|||||||
| <a href="http://cisco.com/" class="favicon">Cisco</a> | ネットワーク | トラフィック分析 | — | — | [ライトニングトーク2019](https://youtu.be/-hI1vDR2oPY?t=5057) |
|
| <a href="http://cisco.com/" class="favicon">Cisco</a> | ネットワーク | トラフィック分析 | — | — | [ライトニングトーク2019](https://youtu.be/-hI1vDR2oPY?t=5057) |
|
||||||
| <a href="https://www.citadelsecurities.com/" class="favicon">Citadel Securities</a> | 金融 | — | — | — | [2019年の貢献](https://github.com/ClickHouse/ClickHouse/pull/4774) |
|
| <a href="https://www.citadelsecurities.com/" class="favicon">Citadel Securities</a> | 金融 | — | — | — | [2019年の貢献](https://github.com/ClickHouse/ClickHouse/pull/4774) |
|
||||||
| <a href="https://city-mobil.ru" class="favicon">シティモービル</a> | タクシー | 分析 | — | — | [ロシア語でのブログ投稿,月2020](https://habr.com/en/company/citymobil/blog/490660/) |
|
| <a href="https://city-mobil.ru" class="favicon">シティモービル</a> | タクシー | 分析 | — | — | [ロシア語でのブログ投稿,月2020](https://habr.com/en/company/citymobil/blog/490660/) |
|
||||||
| <a href="https://contentsquare.com" class="favicon">ContentSquare</a> | ウェブ分析 | 主な製品 | — | — | [フランス語でのブログ投稿,November2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) |
|
| <a href="https://contentsquare.com" class="favicon">Contentsquare</a> | ウェブ分析 | 主な製品 | — | — | [フランス語でのブログ投稿,November2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) |
|
||||||
| <a href="https://cloudflare.com" class="favicon">Cloudflare</a> | CDN | トラフィック分析 | 36台のサーバー | — | [ブログ投稿,月2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [ブログ投稿,月2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) |
|
| <a href="https://cloudflare.com" class="favicon">Cloudflare</a> | CDN | トラフィック分析 | 36台のサーバー | — | [ブログ投稿,月2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [ブログ投稿,月2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) |
|
||||||
| <a href="https://coru.net/" class="favicon">コルネット</a> | 分析 | 主な製品 | — | — | [2019年英語スライド](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) |
|
| <a href="https://coru.net/" class="favicon">コルネット</a> | 分析 | 主な製品 | — | — | [2019年英語スライド](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) |
|
||||||
| <a href="https://www.creditx.com" class="favicon">CraiditX 氪信</a> | ファイナンスAI | 分析 | — | — | [2019年のスライド](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) |
|
| <a href="https://www.creditx.com" class="favicon">CraiditX 氪信</a> | ファイナンスAI | 分析 | — | — | [2019年のスライド](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) |
|
||||||
|
@ -72,11 +72,11 @@ ClickHouse не работает и не собирается на 32-битны

Этот вариант не подходит для отправки изменений на сервер. Вы можете временно его использовать, а затем добавить ssh ключи и заменить адрес репозитория с помощью команды `git remote`.

-Вы можете также добавить для своего локального репозитория адрес оригинального репозитория Яндекса, чтобы притягивать оттуда обновления:
+Вы можете также добавить для своего локального репозитория адрес оригинального репозитория, чтобы притягивать оттуда обновления:

git remote add upstream git@github.com:ClickHouse/ClickHouse.git

-После этого, вы сможете добавлять в свой репозиторий обновления из репозитория Яндекса с помощью команды `git pull upstream master`.
+После этого, вы сможете добавлять в свой репозиторий обновления из репозитория ClickHouse с помощью команды `git pull upstream master`.

### Работа с сабмодулями Git {#rabota-s-sabmoduliami-git}
|||||||
|
|
||||||
Pull request можно создать, даже если работа над задачей ещё не завершена. В этом случае, добавьте в его название слово «WIP» (work in progress). Название можно будет изменить позже. Это полезно для совместного просмотра и обсуждения изменений, а также для запуска всех имеющихся тестов. Введите краткое описание изменений - впоследствии, оно будет использовано для релизных changelog.
|
Pull request можно создать, даже если работа над задачей ещё не завершена. В этом случае, добавьте в его название слово «WIP» (work in progress). Название можно будет изменить позже. Это полезно для совместного просмотра и обсуждения изменений, а также для запуска всех имеющихся тестов. Введите краткое описание изменений - впоследствии, оно будет использовано для релизных changelog.
|
||||||
|
|
||||||
Тесты будут запущены, как только сотрудники Яндекса поставят для pull request тег «Can be tested». Результаты первых проверок (стиль кода) появятся уже через несколько минут. Результаты сборки появятся примерно через пол часа. Результаты основного набора тестов будут доступны в пределах часа.
|
Тесты будут запущены, как только сотрудники ClickHouse поставят для pull request тег «Can be tested». Результаты первых проверок (стиль кода) появятся уже через несколько минут. Результаты сборки появятся примерно через пол часа. Результаты основного набора тестов будут доступны в пределах часа.
|
||||||
|
|
||||||
Система подготовит сборки ClickHouse специально для вашего pull request. Для их получения, нажмите на ссылку «Details» у проверки «Clickhouse build check». Там вы сможете найти прямые ссылки на собранные .deb пакеты ClickHouse, которые, при желании, вы даже сможете установить на свои продакшен серверы (если не страшно).
|
Система подготовит сборки ClickHouse специально для вашего pull request. Для их получения, нажмите на ссылку «Details» у проверки «Clickhouse build check». Там вы сможете найти прямые ссылки на собранные .deb пакеты ClickHouse, которые, при желании, вы даже сможете установить на свои продакшен серверы (если не страшно).
|
||||||
|
|
||||||
|
@ -678,7 +678,7 @@ TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y);
- `volume_name_N` — название тома. Названия томов должны быть уникальны.
- `disk` — диск, находящийся внутри тома.
- `max_data_part_size_bytes` — максимальный размер куска данных, который может находится на любом из дисков этого тома. Если в результате слияния размер куска ожидается больше, чем max_data_part_size_bytes, то этот кусок будет записан в следующий том. В основном эта функция позволяет хранить новые / мелкие куски на горячем (SSD) томе и перемещать их на холодный (HDD) том, когда они достигают большого размера. Не используйте этот параметр, если политика имеет только один том.
-- `move_factor` — доля доступного свободного места на томе, если места становится меньше, то данные начнут перемещение на следующий том, если он есть (по умолчанию 0.1).
+- `move_factor` — доля доступного свободного места на томе, если места становится меньше, то данные начнут перемещение на следующий том, если он есть (по умолчанию 0.1). Для перемещения куски сортируются по размеру от большего к меньшему (по убыванию) и выбираются куски, совокупный размер которых достаточен для соблюдения условия `move_factor`, если совокупный размер всех партов недостаточен, будут перемещены все парты.
- `prefer_not_to_merge` — Отключает слияние кусков данных, хранящихся на данном томе. Если данная настройка включена, то слияние данных, хранящихся на данном томе, не допускается. Это позволяет контролировать работу ClickHouse с медленными дисками.

Примеры конфигураций:
@ -110,9 +110,9 @@ SELECT x, toTypeName(x) FROM t1;

### EPHEMERAL {#ephemeral}

-`EPHEMERAL expr`
+`EPHEMERAL [expr]`

-Эфемерное выражение. Такой столбец не хранится в таблице и не может быть получен в запросе SELECT, но на него можно ссылаться в выражениях по умолчанию запроса CREATE.
+Эфемерное выражение. Такой столбец не хранится в таблице и не может быть получен в запросе SELECT, но на него можно ссылаться в выражениях по умолчанию запроса CREATE. Если значение по умолчанию `expr` не указано, то тип колонки должен быть специфицирован.
INSERT без списка столбцов игнорирует этот столбец, таким образом сохраняется инвариант - т.е. дамп, полученный путём `SELECT *`, можно вставить обратно в таблицу INSERT-ом без указания списка столбцов.

### ALIAS {#alias}
@ -126,7 +126,7 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32)

**См. также**

-- [Движок таблиц PostgreSQL](../../sql-reference/table-functions/postgresql.md)
+- [Движок таблиц PostgreSQL](../../engines/table-engines/integrations/postgresql.md)
- [Использование PostgreSQL как источника данных для внешнего словаря](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)

[Оригинальная статья](https://clickhouse.com/docs/ru/sql-reference/table-functions/postgresql/) <!--hide-->
@ -16,7 +16,7 @@ jsmin==3.0.0
livereload==2.6.3
Markdown==3.3.2
MarkupSafe==2.1.0
-mkdocs==1.1.2
+mkdocs==1.3.0
mkdocs-htmlproofer-plugin==0.0.3
mkdocs-macros-plugin==0.4.20
nltk==3.7
@ -259,7 +259,7 @@ ClickHouse的架构描述可以在此处查看:https://clickhouse.com/docs/en/

即使工作尚未完成,也可以创建拉取请求。在这种情况下,请在标题的开头加上«WIP»(正在进行中),以便后续更改。这对于协同审查和讨论更改以及运行所有可用测试用例很有用。提供有关变更的简短描述很重要,这将在后续用于生成重新发布变更日志。

-Yandex成员一旦在您的拉取请求上贴上«可以测试»标签,就会开始测试。一些初始检查项(例如,代码类型)的结果会在几分钟内反馈。构建的检查结果将在半小时内完成。而主要的测试用例集结果将在一小时内报告给您。
+ClickHouse成员一旦在您的拉取请求上贴上«可以测试»标签,就会开始测试。一些初始检查项(例如,代码类型)的结果会在几分钟内反馈。构建的检查结果将在半小时内完成。而主要的测试用例集结果将在一小时内报告给您。

系统将分别为您的拉取请求准备ClickHouse二进制版本。若要检索这些构建信息,请在检查列表中单击« ClickHouse构建检查»旁边的«详细信息»链接。在这里,您会找到指向ClickHouse的.deb软件包的直接链接,此外,甚至可以将其部署在生产服务器上(如果您不担心)。
@ -20,7 +20,7 @@ ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml
EnvironmentFile=-/etc/default/clickhouse
LimitCORE=infinity
LimitNOFILE=500000
-CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE
+CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE

[Install]
# ClickHouse should not start from the rescue shell (rescue.target).
@ -163,10 +163,24 @@ void Client::initialize(Poco::Util::Application & self)

configReadClient(config(), home_path);

+/** getenv is thread-safe in Linux glibc and in all sane libc implementations.
+  * But the standard does not guarantee that subsequent calls will not rewrite the value by returned pointer.
+  *
+  * man getenv:
+  *
+  * As typically implemented, getenv() returns a pointer to a string within the environment list.
+  * The caller must take care not to modify this string, since that would change the environment of
+  * the process.
+  *
+  * The implementation of getenv() is not required to be reentrant. The string pointed to by the return value of getenv()
+  * may be statically allocated, and can be modified by a subsequent call to getenv(), putenv(3), setenv(3), or unsetenv(3).
+  */

const char * env_user = getenv("CLICKHOUSE_USER");
-const char * env_password = getenv("CLICKHOUSE_PASSWORD");
if (env_user)
    config().setString("user", env_user);

+const char * env_password = getenv("CLICKHOUSE_PASSWORD");
if (env_password)
    config().setString("password", env_password);
|
|||||||
("quota_key", po::value<std::string>(), "A string to differentiate quotas when the user have keyed quotas configured on server")
|
("quota_key", po::value<std::string>(), "A string to differentiate quotas when the user have keyed quotas configured on server")
|
||||||
|
|
||||||
("max_client_network_bandwidth", po::value<int>(), "the maximum speed of data exchange over the network for the client in bytes per second.")
|
("max_client_network_bandwidth", po::value<int>(), "the maximum speed of data exchange over the network for the client in bytes per second.")
|
||||||
("compression", po::value<bool>(), "enable or disable compression")
|
("compression", po::value<bool>(), "enable or disable compression (enabled by default for remote communication and disabled for localhost communication).")
|
||||||
|
|
||||||
("query-fuzzer-runs", po::value<int>()->default_value(0), "After executing every SELECT query, do random mutations in it and run again specified number of times. This is used for testing to discover unexpected corner cases.")
|
("query-fuzzer-runs", po::value<int>()->default_value(0), "After executing every SELECT query, do random mutations in it and run again specified number of times. This is used for testing to discover unexpected corner cases.")
|
||||||
("interleave-queries-file", po::value<std::vector<std::string>>()->multitoken(),
|
("interleave-queries-file", po::value<std::vector<std::string>>()->multitoken(),
|
||||||
|
@ -148,13 +148,13 @@
<!-- <interserver_https_port>9010</interserver_https_port> -->

<!-- Hostname that is used by other replicas to request this server.
-     If not specified, than it is determined analogous to 'hostname -f' command.
+     If not specified, then it is determined analogous to 'hostname -f' command.
      This setting could be used to switch replication to another network interface
      (the server may be connected to multiple networks via multiple addresses)
  -->

<!--
-<interserver_http_host>example.yandex.ru</interserver_http_host>
+<interserver_http_host>example.clickhouse.com</interserver_http_host>
-->

<!-- You can specify credentials for authenthication between replicas.
|
|||||||
-->
|
-->
|
||||||
<!--<remote_url_allow_hosts>-->
|
<!--<remote_url_allow_hosts>-->
|
||||||
<!-- Host should be specified exactly as in URL. The name is checked before DNS resolution.
|
<!-- Host should be specified exactly as in URL. The name is checked before DNS resolution.
|
||||||
Example: "yandex.ru", "yandex.ru." and "www.yandex.ru" are different hosts.
|
Example: "clickhouse.com", "clickhouse.com." and "www.clickhouse.com" are different hosts.
|
||||||
If port is explicitly specified in URL, the host:port is checked as a whole.
|
If port is explicitly specified in URL, the host:port is checked as a whole.
|
||||||
If host specified here without port, any port with this host allowed.
|
If host specified here without port, any port with this host allowed.
|
||||||
"yandex.ru" -> "yandex.ru:443", "yandex.ru:80" etc. is allowed, but "yandex.ru:80" -> only "yandex.ru:80" is allowed.
|
"clickhouse.com" -> "clickhouse.com:443", "clickhouse.com:80" etc. is allowed, but "clickhouse.com:80" -> only "clickhouse.com:80" is allowed.
|
||||||
If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
|
If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
|
||||||
If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
|
If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
|
||||||
Host should be specified using the host xml tag:
|
Host should be specified using the host xml tag:
|
||||||
<host>yandex.ru</host>
|
<host>clickhouse.com</host>
|
||||||
-->
|
-->
|
||||||
|
|
||||||
<!-- Regular expression can be specified. RE2 engine is used for regexps.
|
<!-- Regular expression can be specified. RE2 engine is used for regexps.
|
||||||
@ -1030,25 +1030,17 @@
<flush_interval_milliseconds>1000</flush_interval_milliseconds>
</crash_log>

-<!-- Session log. Stores user log in (successful or not) and log out events. -->
-<session_log>
+<!-- Session log. Stores user log in (successful or not) and log out events.
+
+     Note: session log has known security issues and should not be used in production.
+-->
+<!-- <session_log>
    <database>system</database>
    <table>session_log</table>

    <partition_by>toYYYYMM(event_date)</partition_by>
    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-</session_log>
+</session_log> -->

-<!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
-     See https://clickhouse.com/docs/en/dicts/internal_dicts/
--->
-
-<!-- Path to file with region hierarchy. -->
-<!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->
-
-<!-- Path to directory with files containing names of regions -->
-<!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->
-
<!-- <top_level_domains_path>/var/lib/clickhouse/top_level_domains/</top_level_domains_path> -->
<!-- Custom TLD lists.
@ -103,7 +103,7 @@ interserver_http_port: 9009
# If not specified, than it is determined analogous to 'hostname -f' command.
# This setting could be used to switch replication to another network interface
# (the server may be connected to multiple networks via multiple addresses)
-# interserver_http_host: example.yandex.ru
+# interserver_http_host: example.clickhouse.com

# You can specify credentials for authenthication between replicas.
# This is required when interserver_https_port is accessible from untrusted networks,
@ -592,10 +592,10 @@ remote_servers:
# remote_url_allow_hosts:

# Host should be specified exactly as in URL. The name is checked before DNS resolution.
-# Example: "yandex.ru", "yandex.ru." and "www.yandex.ru" are different hosts.
+# Example: "clickhouse.com", "clickhouse.com." and "www.clickhouse.com" are different hosts.
# If port is explicitly specified in URL, the host:port is checked as a whole.
# If host specified here without port, any port with this host allowed.
-# "yandex.ru" -> "yandex.ru:443", "yandex.ru:80" etc. is allowed, but "yandex.ru:80" -> only "yandex.ru:80" is allowed.
+# "clickhouse.com" -> "clickhouse.com:443", "clickhouse.com:80" etc. is allowed, but "clickhouse.com:80" -> only "clickhouse.com:80" is allowed.
# If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
# If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
|
|||||||
partition_by: ''
|
partition_by: ''
|
||||||
flush_interval_milliseconds: 1000
|
flush_interval_milliseconds: 1000
|
||||||
|
|
||||||
# Parameters for embedded dictionaries, used in Yandex.Metrica.
|
|
||||||
# See https://clickhouse.com/docs/en/dicts/internal_dicts/
|
|
||||||
|
|
||||||
# Path to file with region hierarchy.
|
|
||||||
# path_to_regions_hierarchy_file: /opt/geo/regions_hierarchy.txt
|
|
||||||
|
|
||||||
# Path to directory with files containing names of regions
|
|
||||||
# path_to_regions_names_files: /opt/geo/
|
|
||||||
|
|
||||||
|
|
||||||
# top_level_domains_path: /var/lib/clickhouse/top_level_domains/
|
# top_level_domains_path: /var/lib/clickhouse/top_level_domains/
|
||||||
# Custom TLD lists.
|
# Custom TLD lists.
|
||||||
# Format: name: /path/to/file
|
# Format: name: /path/to/file
|
||||||
|
@ -266,12 +266,25 @@
    color: var(--null-color);
}

+@keyframes hourglass-animation {
+    0% {
+        transform: rotate(-180deg);
+    }
+    50% {
+        transform: rotate(-180deg);
+    }
+    100% {
+        transform: none;
+    }
+}
+
#hourglass
{
    display: none;
-    padding-left: 1rem;
+    margin-left: 1rem;
    font-size: 110%;
    color: #888;
+    animation: hourglass-animation 1s linear infinite;
}

#check-mark
@ -457,7 +470,7 @@
}

document.getElementById('check-mark').style.display = 'none';
-document.getElementById('hourglass').style.display = 'inline';
+document.getElementById('hourglass').style.display = 'inline-block';

xhr.send(query);
}
@ -79,9 +79,9 @@
Each element of list has one of the following forms:
<ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
    2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
-<host> Hostname. Example: server01.yandex.ru.
+<host> Hostname. Example: server01.clickhouse.com.
    To check access, DNS query is performed, and all received addresses compared to peer address.
-<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.yandex\.ru$
+<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
    To check access, DNS PTR query is performed for peer address and then regexp is applied.
    Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
    Strongly recommended that regexp is ends with $
@ -70,9 +70,9 @@ users:
# Each element of list has one of the following forms:
# ip: IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
#     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
-# host: Hostname. Example: server01.yandex.ru.
+# host: Hostname. Example: server01.clickhouse.com.
#     To check access, DNS query is performed, and all received addresses compared to peer address.
-# host_regexp: Regular expression for host names. Example, ^server\d\d-\d\d-\d\.yandex\.ru$
+# host_regexp: Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
#     To check access, DNS PTR query is performed for peer address and then regexp is applied.
#     Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
#     Strongly recommended that regexp is ends with $ and take all expression in ''
@ -182,6 +182,7 @@ enum class AccessType
    M(JDBC, "", GLOBAL, SOURCES) \
    M(HDFS, "", GLOBAL, SOURCES) \
    M(S3, "", GLOBAL, SOURCES) \
+   M(HIVE, "", GLOBAL, SOURCES) \
    M(SOURCES, "", GROUP, ALL) \
    \
    M(ALL, "ALL PRIVILEGES", GROUP, NONE) /* full access */ \
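The new `HIVE` entry joins the SOURCES group next to S3 and HDFS, so it is presumably granted the same way as the other source privileges; a hypothetical sketch by analogy (the GRANT form itself is not shown in this patch):

```sql
-- Hypothetical: allow a role to use the Hive integration as an external data source.
GRANT HIVE ON *.* TO etl_role;
```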
@ -107,6 +107,11 @@ const QuotaTypeInfo & QuotaTypeInfo::get(QuotaType type)
    static const auto info = make_info("EXECUTION_TIME", 1000000000 /* execution_time is stored in nanoseconds */);
    return info;
}
+case QuotaType::WRITTEN_BYTES:
+{
+    static const auto info = make_info("WRITTEN_BYTES", 1);
+    return info;
+}
case QuotaType::MAX: break;
}
throw Exception("Unexpected quota type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
@ -20,6 +20,7 @@ enum class QuotaType
    READ_ROWS, /// Number of rows read from tables.
    READ_BYTES, /// Number of bytes read from tables.
    EXECUTION_TIME, /// Total amount of query execution time in nanoseconds.
+   WRITTEN_BYTES, /// Number of bytes written to tables.

    MAX
};
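Assuming the new `WRITTEN_BYTES` counter is exposed through quota SQL the same way as `read_rows` and `read_bytes`, usage would look roughly like the sketch below; the `written_bytes` key name is inferred from the enum and is not shown in this hunk:

```sql
-- Hypothetical: cap how much data a user may write per hour.
CREATE QUOTA insert_cap
    FOR INTERVAL 1 hour MAX written_bytes = 10000000000
    TO default;
```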
@ -13,7 +13,7 @@ namespace DB
{
namespace ErrorCodes
{
-    extern const int QUOTA_EXPIRED;
+    extern const int QUOTA_EXCEEDED;
}
@ -33,7 +33,7 @@ struct EnabledQuota::Impl
    "Quota for user " + backQuote(user_name) + " for " + to_string(duration) + " has been exceeded: "
    + type_info.valueToStringWithName(used) + "/" + type_info.valueToString(max) + ". "
    + "Interval will end at " + to_string(end_of_interval) + ". " + "Name of quota template: " + backQuote(quota_name),
-   ErrorCodes::QUOTA_EXPIRED);
+   ErrorCodes::QUOTA_EXCEEDED);
}
@@ -49,6 +49,18 @@ if (COMPILER_GCC)
    add_definitions ("-fno-tree-loop-distribute-patterns")
endif ()

+# ClickHouse developers may use platform-dependent code under some macro (e.g. `#ifdef ENABLE_MULTITARGET`).
+# If turned ON, this option defines such macro.
+# See `src/Common/TargetSpecific.h`
+option(ENABLE_MULTITARGET_CODE "Enable platform-dependent code" ON)
+
+if (ENABLE_MULTITARGET_CODE)
+    add_definitions(-DENABLE_MULTITARGET_CODE=1)
+else()
+    add_definitions(-DENABLE_MULTITARGET_CODE=0)
+endif()
+
add_subdirectory (Access)
add_subdirectory (Backups)
add_subdirectory (Columns)
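For illustration, a minimal sketch of how a macro defined this way is typically consumed in source files; the function below is hypothetical, the real dispatch machinery lives in src/Common/TargetSpecific.h:

    #include <cstdio>

    #ifndef ENABLE_MULTITARGET_CODE
    #define ENABLE_MULTITARGET_CODE 0   /// normally injected by CMake, defaulted here so the sketch compiles standalone
    #endif

    #if ENABLE_MULTITARGET_CODE
    /// With the option ON, SIMD-specialized variants of a function could be compiled alongside the generic one.
    static const char * buildFlavour() { return "multitarget: specialized code paths are compiled in"; }
    #else
    /// With the option OFF, only the portable fallback is built.
    static const char * buildFlavour() { return "generic: portable code path only"; }
    #endif

    int main() { std::puts(buildFlavour()); }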
@@ -220,7 +220,7 @@ static void incrementProfileEventsBlock(Block & dst, const Block & src)
}


-std::atomic_flag exit_on_signal = ATOMIC_FLAG_INIT;
+std::atomic_flag exit_on_signal;

class QueryInterruptHandler : private boost::noncopyable
{
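Dropping ATOMIC_FLAG_INIT is safe on a C++20 toolchain, where a default-constructed std::atomic_flag is guaranteed to start in the clear state (the macro itself is deprecated since C++20). A tiny check, assuming C++20:

    #include <atomic>
    #include <cassert>

    std::atomic_flag exit_requested;   /// no ATOMIC_FLAG_INIT needed in C++20: starts clear

    int main()
    {
        /// test_and_set() returns the previous value: false on the first call for a clear flag.
        assert(exit_requested.test_and_set() == false);
        assert(exit_requested.test_and_set() == true);
    }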
@@ -1494,24 +1494,19 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText(

bool ClientBase::executeMultiQuery(const String & all_queries_text)
{
-   // It makes sense not to base any control flow on this, so that it is
-   // the same in tests and in normal usage. The only difference is that in
-   // normal mode we ignore the test hints.
-   const bool test_mode = config().has("testmode");
-   if (test_mode)
-   {
-       /// disable logs if expects errors
-       TestHint test_hint(test_mode, all_queries_text);
-       if (test_hint.clientError() || test_hint.serverError())
-           processTextAsSingleQuery("SET send_logs_level = 'fatal'");
-   }
-
    bool echo_query = echo_queries;

    /// Test tags are started with "--" so they are interpreted as comments anyway.
    /// But if the echo is enabled we have to remove the test tags from `all_queries_text`
    /// because we don't want test tags to be echoed.
-   size_t test_tags_length = test_mode ? getTestTagsLength(all_queries_text) : 0;
+   {
+       /// disable logs if expects errors
+       TestHint test_hint(all_queries_text);
+       if (test_hint.clientError() || test_hint.serverError())
+           processTextAsSingleQuery("SET send_logs_level = 'fatal'");
+   }
+
+   size_t test_tags_length = getTestTagsLength(all_queries_text);

    /// Several queries separated by ';'.
    /// INSERT data is ended by the end of line, not ';'.

@@ -1548,7 +1543,7 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text)
            // Try to find test hint for syntax error. We don't know where
            // the query ends because we failed to parse it, so we consume
            // the entire line.
-           TestHint hint(test_mode, String(this_query_begin, this_query_end - this_query_begin));
+           TestHint hint(String(this_query_begin, this_query_end - this_query_begin));
            if (hint.serverError())
            {
                // Syntax errors are considered as client errors

@@ -1586,7 +1581,7 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text)
        // Look for the hint in the text of query + insert data + trailing
        // comments, e.g. insert into t format CSV 'a' -- { serverError 123 }.
        // Use the updated query boundaries we just calculated.
-       TestHint test_hint(test_mode, full_query);
+       TestHint test_hint(full_query);

        // Echo all queries if asked; makes for a more readable reference file.
        echo_query = test_hint.echoQueries().value_or(echo_query);

@@ -2187,8 +2182,6 @@ void ClientBase::init(int argc, char ** argv)
        ("suggestion_limit", po::value<int>()->default_value(10000),
            "Suggestion limit for how many databases, tables and columns to fetch.")

-       ("testmode,T", "enable test hints in comments")
-
        ("format,f", po::value<std::string>(), "default output format")
        ("vertical,E", "vertical output format, same as --format=Vertical or FORMAT Vertical or \\G at end of command")
        ("highlight", po::value<bool>()->default_value(true), "enable or disable basic syntax highlight in interactive command line")

@@ -2294,8 +2287,6 @@ void ClientBase::init(int argc, char ** argv)
        config().setBool("interactive", true);
    if (options.count("pager"))
        config().setString("pager", options["pager"].as<std::string>());
-   if (options.count("testmode"))
-       config().setBool("testmode", true);

    if (options.count("log-level"))
        Poco::Logger::root().setLevel(options["log-level"].as<std::string>());
@@ -32,12 +32,9 @@ int parseErrorCode(DB::ReadBufferFromString & in)
namespace DB
{

-TestHint::TestHint(bool enabled_, const String & query_)
+TestHint::TestHint(const String & query_)
    : query(query_)
{
-   if (!enabled_)
-       return;
-
    // Don't parse error hints in leading comments, because it feels weird.
    // Leading 'echo' hint is OK.
    bool is_leading_hint = true;
@@ -7,7 +7,7 @@
namespace DB
{

-/// Checks expected server and client error codes in --testmode.
+/// Checks expected server and client error codes.
///
/// The following comment hints are supported:
///

@@ -25,12 +25,12 @@ namespace DB
///
/// Examples:
///
-/// - echo 'select / -- { clientError 62 }' | clickhouse-client --testmode -nm
+/// - echo 'select / -- { clientError 62 }' | clickhouse-client -nm
///
/// Here the client parses the query but it is incorrect, so it expects
/// SYNTAX_ERROR (62).
///
-/// - echo 'select foo -- { serverError 47 }' | clickhouse-client --testmode -nm
+/// - echo 'select foo -- { serverError 47 }' | clickhouse-client -nm
///
/// But here the query is correct, but there is no such column "foo", so it
/// is UNKNOWN_IDENTIFIER server error.

@@ -43,7 +43,7 @@ namespace DB
class TestHint
{
public:
-   TestHint(bool enabled_, const String & query_);
+   TestHint(const String & query_);

    int serverError() const { return server_error; }
    int clientError() const { return client_error; }
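A rough standalone sketch of what parsing such a hint amounts to; this is a simplified regex-based stand-in, not the real TestHint parser:

    #include <iostream>
    #include <optional>
    #include <regex>
    #include <string>

    /// Pull an expected server error code out of a trailing "-- { serverError NNN }" comment.
    std::optional<int> expectedServerError(const std::string & query)
    {
        static const std::regex hint(R"(--\s*\{\s*serverError\s+(\d+)\s*\})");
        std::smatch match;
        if (std::regex_search(query, match, hint))
            return std::stoi(match[1].str());
        return std::nullopt;
    }

    int main()
    {
        auto code = expectedServerError("select foo -- { serverError 47 }");
        std::cout << (code ? *code : 0) << '\n';   /// prints 47
    }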
@@ -125,7 +125,7 @@ class FindResultImpl : public FindResultImplBase, public FindResultImplOffsetBas

public:
    FindResultImpl()
-       : FindResultImplBase(false), FindResultImplOffsetBase<need_offset>(0)
+       : FindResultImplBase(false), FindResultImplOffsetBase<need_offset>(0) // NOLINT(clang-analyzer-optin.cplusplus.UninitializedObject) intentionally allow uninitialized value here
    {}

    FindResultImpl(Mapped * value_, bool found_, size_t off)
@@ -214,6 +214,9 @@ private:

        /// offset in bits to the next to the rightmost bit at that byte; or zero if the rightmost bit is the rightmost bit in that byte.
        offset_r = (l + content_width) % 8;
+
+       content_l = nullptr;
+       content_r = nullptr;
    }

    UInt8 ALWAYS_INLINE read(UInt8 value_l) const
@@ -81,6 +81,14 @@
    M(ActiveSyncDrainedConnections, "Number of active connections drained synchronously.") \
    M(AsynchronousReadWait, "Number of threads waiting for asynchronous read.") \
    M(PendingAsyncInsert, "Number of asynchronous inserts that are waiting for flush.") \
+   M(KafkaConsumers, "Number of active Kafka consumers") \
+   M(KafkaConsumersWithAssignment, "Number of active Kafka consumers which have some partitions assigned.") \
+   M(KafkaProducers, "Number of active Kafka producer created") \
+   M(KafkaLibrdkafkaThreads, "Number of active librdkafka threads") \
+   M(KafkaBackgroundReads, "Number of background reads currently working (populating materialized views from Kafka)") \
+   M(KafkaConsumersInUse, "Number of consumers which are currently used by direct or background reads") \
+   M(KafkaWrites, "Number of currently running inserts to Kafka") \
+   M(KafkaAssignedPartitions, "Number of partitions Kafka tables currently assigned to") \

namespace CurrentMetrics
{
@@ -360,6 +360,27 @@ public:
        return toDayNum(LUTIndex(i - (lut[i].day_of_month - 1)));
    }

+   /// Round up to last day of month.
+   template <typename DateOrTime>
+   inline Time toLastDayOfMonth(DateOrTime v) const
+   {
+       const LUTIndex i = toLUTIndex(v);
+       if constexpr (std::is_unsigned_v<DateOrTime> || std::is_same_v<DateOrTime, DayNum>)
+           return lut_saturated[i - lut[i].day_of_month + lut[i].days_in_month].date;
+       else
+           return lut[i - lut[i].day_of_month + lut[i].days_in_month].date;
+   }
+
+   template <typename DateOrTime>
+   inline auto toLastDayNumOfMonth(DateOrTime v) const
+   {
+       const LUTIndex i = toLUTIndex(v);
+       if constexpr (std::is_unsigned_v<DateOrTime> || std::is_same_v<DateOrTime, DayNum>)
+           return toDayNum(LUTIndexWithSaturation(i - lut[i].day_of_month + lut[i].days_in_month));
+       else
+           return toDayNum(LUTIndex(i - lut[i].day_of_month + lut[i].days_in_month));
+   }
+
    /// Round down to start of quarter.
    template <typename DateOrTime>
    inline auto toFirstDayNumOfQuarter(DateOrTime v) const
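The index arithmetic above steps from any day to the last day of its month: last = i - day_of_month + days_in_month. A toy model of that calculation with hypothetical structures, not the real DateLUTImpl:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    /// Each lookup-table entry knows its day_of_month and the month's days_in_month,
    /// so the last day is (days_in_month - day_of_month) entries ahead.
    struct DayEntry
    {
        uint8_t day_of_month;
        uint8_t days_in_month;
    };

    inline size_t lastDayIndex(const DayEntry * lut, size_t i)
    {
        return i + lut[i].days_in_month - lut[i].day_of_month;
    }

    int main()
    {
        /// Three consecutive days of a 30-day month: the 28th, 29th and 30th.
        DayEntry lut[] = {{28, 30}, {29, 30}, {30, 30}};
        assert(lastDayIndex(lut, 0) == 2);   /// from the 28th we land on the 30th
        assert(lastDayIndex(lut, 2) == 2);   /// the last day maps to itself
    }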
@@ -208,7 +208,7 @@
    M(198, DNS_ERROR) \
    M(199, UNKNOWN_QUOTA) \
    M(200, QUOTA_DOESNT_ALLOW_KEYS) \
-   M(201, QUOTA_EXPIRED) \
+   M(201, QUOTA_EXCEEDED) \
    M(202, TOO_MANY_SIMULTANEOUS_QUERIES) \
    M(203, NO_FREE_CONNECTION) \
    M(204, CANNOT_FSYNC) \
@@ -241,6 +241,10 @@ FileSegmentsHolder LRUFileCache::getOrSet(const Key & key, size_t offset, size_t

    std::lock_guard cache_lock(mutex);

+#ifndef NDEBUG
+   assertCacheCorrectness(key, cache_lock);
+#endif
+
    /// Get all segments which intersect with the given range.
    auto file_segments = getImpl(key, range, cache_lock);

@@ -315,7 +319,7 @@ FileSegmentsHolder LRUFileCache::getOrSet(const Key & key, size_t offset, size_t

LRUFileCache::FileSegmentCell * LRUFileCache::addCell(
    const Key & key, size_t offset, size_t size, FileSegment::State state,
-   std::lock_guard<std::mutex> & /* cache_lock */)
+   std::lock_guard<std::mutex> & cache_lock)
{
    /// Create a file segment cell and put it in `files` map by [key][offset].

@@ -323,8 +327,10 @@ LRUFileCache::FileSegmentCell * LRUFileCache::addCell(
        return nullptr; /// Empty files are not cached.

    if (files[key].contains(offset))
-       throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
-           "Cache already exists for key: `{}`, offset: {}, size: {}", keyToStr(key), offset, size);
+       throw Exception(
+           ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
+           "Cache already exists for key: `{}`, offset: {}, size: {}.\nCurrent cache structure: {}",
+           keyToStr(key), offset, size, dumpStructureImpl(key, cache_lock));

    auto file_segment = std::make_shared<FileSegment>(offset, size, key, this, state);
    FileSegmentCell cell(std::move(file_segment), queue);

@@ -340,8 +346,10 @@ LRUFileCache::FileSegmentCell * LRUFileCache::addCell(

    auto [it, inserted] = offsets.insert({offset, std::move(cell)});
    if (!inserted)
-       throw Exception(ErrorCodes::LOGICAL_ERROR,
-           "Failed to insert into cache key: `{}`, offset: {}, size: {}", keyToStr(key), offset, size);
+       throw Exception(
+           ErrorCodes::LOGICAL_ERROR,
+           "Failed to insert into cache key: `{}`, offset: {}, size: {}",
+           keyToStr(key), offset, size);

    return &(it->second);
}

@@ -523,8 +531,8 @@ void LRUFileCache::loadCacheInfoIntoMemory()
    std::lock_guard cache_lock(mutex);

    Key key;
-   UInt64 offset;
-   size_t size;
+   UInt64 offset = 0;
+   size_t size = 0;
    std::vector<FileSegmentCell *> cells;

    /// cache_base_path / key_prefix / key / offset
@@ -687,22 +695,32 @@ LRUFileCache::FileSegmentCell::FileSegmentCell(FileSegmentPtr file_segment_, LRU
    }
}

-String LRUFileCache::dumpStructure(const Key & key_)
+String LRUFileCache::dumpStructure(const Key & key)
{
    std::lock_guard cache_lock(mutex);
+   return dumpStructureImpl(key, cache_lock);
+}
+
+String LRUFileCache::dumpStructureImpl(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */)
+{
    WriteBufferFromOwnString result;
-   for (auto it = queue.begin(); it != queue.end(); ++it)
-   {
-       auto [key, offset] = *it;
-       if (key == key_)
-       {
-           auto * cell = getCell(key, offset, cache_lock);
-           result << (it != queue.begin() ? ", " : "") << cell->file_segment->range().toString();
-           result << "(state: " << cell->file_segment->download_state << ")";
-       }
-   }
+   const auto & cells_by_offset = files[key];
+
+   for (const auto & [offset, cell] : cells_by_offset)
+       result << cell.file_segment->getInfoForLog() << "\n";
+
    return result.str();
}

+void LRUFileCache::assertCacheCorrectness(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */)
+{
+   const auto & cells_by_offset = files[key];
+
+   for (const auto & [_, cell] : cells_by_offset)
+   {
+       const auto & file_segment = cell.file_segment;
+       file_segment->assertCorrectness();
+   }
+}
+
}
@@ -25,6 +25,7 @@ namespace DB
class IFileCache : private boost::noncopyable
{
friend class FileSegment;
+friend struct FileSegmentsHolder;

public:
    using Key = UInt128;

@@ -196,6 +197,8 @@ private:
    FileSegments splitRangeIntoEmptyCells(
        const Key & key, size_t offset, size_t size, std::lock_guard<std::mutex> & cache_lock);

+   String dumpStructureImpl(const Key & key_, std::lock_guard<std::mutex> & cache_lock);
+
public:
    struct Stat
    {

@@ -208,6 +211,7 @@ public:
    Stat getStat();

    String dumpStructure(const Key & key_) override;
+   void assertCacheCorrectness(const Key & key, std::lock_guard<std::mutex> & cache_lock);
};

}
@@ -159,7 +159,18 @@ void FileSegment::setRemoteFileReader(RemoteFileReaderPtr remote_file_reader_)
    remote_file_reader = remote_file_reader_;
}

-void FileSegment::write(const char * from, size_t size)
+void FileSegment::resetRemoteFileReader()
+{
+   if (!isDownloader())
+       throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Only downloader can use remote filesystem file reader");
+
+   if (!remote_file_reader)
+       throw Exception(ErrorCodes::LOGICAL_ERROR, "Remote file reader does not exist");
+
+   remote_file_reader.reset();
+}
+
+void FileSegment::write(const char * from, size_t size, size_t offset_)
{
    if (!size)
        throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Writing zero size is not allowed");
@@ -174,8 +185,24 @@ void FileSegment::write(const char * from, size_t size)
            "Only downloader can do the downloading. (CallerId: {}, DownloaderId: {})",
            getCallerId(), downloader_id);

+   if (downloaded_size == range().size())
+       throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
+           "Attempt to write {} bytes to offset: {}, but current file segment is already fully downloaded",
+           size, offset_);
+
+   auto download_offset = range().left + downloaded_size;
+   if (offset_ != download_offset)
+       throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
+           "Attempt to write {} bytes to offset: {}, but current download offset is {}",
+           size, offset_, download_offset);
+
    if (!cache_writer)
    {
+       if (downloaded_size > 0)
+           throw Exception(ErrorCodes::LOGICAL_ERROR,
+               "Cache writer was finalized (downloaded size: {}, state: {})",
+               downloaded_size, stateToString(download_state));
+
        auto download_path = cache->getPathInLocalCache(key(), offset());
        cache_writer = std::make_unique<WriteBufferFromFile>(download_path);
    }
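The new checks enforce that writes are strictly sequential: the only legal write offset is range().left + downloaded_size. A toy model of that invariant with a hypothetical class, not the real FileSegment:

    #include <cassert>
    #include <cstddef>
    #include <string>

    /// The only accepted write offset is segment start + bytes already downloaded,
    /// so data can only be appended in order.
    struct ToySegment
    {
        size_t range_left = 100;      /// first byte of the segment within the remote file
        size_t downloaded_size = 0;   /// bytes written so far
        std::string data;

        size_t downloadOffset() const { return range_left + downloaded_size; }

        void write(const char * from, size_t size, size_t offset)
        {
            assert(size > 0);                     /// "Writing zero size is not allowed"
            assert(offset == downloadOffset());   /// writes must be strictly sequential
            data.append(from, size);
            downloaded_size += size;
        }
    };

    int main()
    {
        ToySegment segment;
        segment.write("abc", 3, 100);    /// ok: offset equals the segment start
        segment.write("de", 2, 103);     /// ok: continues at 100 + 3
        /// segment.write("x", 1, 200);  /// would violate the invariant checked above
        assert(segment.data == "abcde");
    }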
@@ -190,19 +217,26 @@ void FileSegment::write(const char * from, size_t size)

        downloaded_size += size;
    }
-   catch (...)
+   catch (Exception & e)
    {
        std::lock_guard segment_lock(mutex);

-       LOG_ERROR(log, "Failed to write to cache. File segment info: {}", getInfoForLogImpl(segment_lock));
+       auto info = getInfoForLogImpl(segment_lock);
+       e.addMessage("while writing into cache, info: " + info);
+
+       LOG_ERROR(log, "Failed to write to cache. File segment info: {}", info);

        download_state = State::PARTIALLY_DOWNLOADED_NO_CONTINUATION;

        cache_writer->finalize();
        cache_writer.reset();

+       cv.notify_all();
+
        throw;
    }
+
+   assert(getDownloadOffset() == offset_ + size);
}

FileSegment::State FileSegment::wait()
|
|||||||
download_state = State::DOWNLOADED;
|
download_state = State::DOWNLOADED;
|
||||||
is_downloaded = true;
|
is_downloaded = true;
|
||||||
|
|
||||||
assert(cache_writer);
|
|
||||||
if (cache_writer)
|
if (cache_writer)
|
||||||
{
|
{
|
||||||
cache_writer->finalize();
|
cache_writer->finalize();
|
||||||
@ -299,107 +332,125 @@ void FileSegment::completeBatchAndResetDownloader()
|
|||||||
|
|
||||||
void FileSegment::complete(State state)
|
void FileSegment::complete(State state)
|
||||||
{
|
{
|
||||||
{
|
|
||||||
std::lock_guard segment_lock(mutex);
|
|
||||||
|
|
||||||
bool is_downloader = downloader_id == getCallerId();
|
|
||||||
if (!is_downloader)
|
|
||||||
{
|
|
||||||
cv.notify_all();
|
|
||||||
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
|
|
||||||
"File segment can be completed only by downloader or downloader's FileSegmentsHodler");
|
|
||||||
}
|
|
||||||
|
|
||||||
if (state != State::DOWNLOADED
|
|
||||||
&& state != State::PARTIALLY_DOWNLOADED
|
|
||||||
&& state != State::PARTIALLY_DOWNLOADED_NO_CONTINUATION)
|
|
||||||
{
|
|
||||||
cv.notify_all();
|
|
||||||
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
|
|
||||||
"Cannot complete file segment with state: {}", stateToString(state));
|
|
||||||
}
|
|
||||||
|
|
||||||
download_state = state;
|
|
||||||
}
|
|
||||||
|
|
||||||
completeImpl();
|
|
||||||
cv.notify_all();
|
|
||||||
}
|
|
||||||
|
|
||||||
void FileSegment::complete()
|
|
||||||
{
|
|
||||||
{
|
|
||||||
std::lock_guard segment_lock(mutex);
|
|
||||||
|
|
||||||
if (download_state == State::SKIP_CACHE || detached)
|
|
||||||
return;
|
|
||||||
|
|
||||||
if (download_state != State::DOWNLOADED && getDownloadedSize(segment_lock) == range().size())
|
|
||||||
setDownloaded(segment_lock);
|
|
||||||
|
|
||||||
if (download_state == State::DOWNLOADING || download_state == State::EMPTY)
|
|
||||||
download_state = State::PARTIALLY_DOWNLOADED;
|
|
||||||
}
|
|
||||||
|
|
||||||
completeImpl(true);
|
|
||||||
cv.notify_all();
|
|
||||||
}
|
|
||||||
|
|
||||||
void FileSegment::completeImpl(bool allow_non_strict_checking)
|
|
||||||
{
|
|
||||||
/// cache lock is always taken before segment lock.
|
|
||||||
std::lock_guard cache_lock(cache->mutex);
|
std::lock_guard cache_lock(cache->mutex);
|
||||||
std::lock_guard segment_lock(mutex);
|
std::lock_guard segment_lock(mutex);
|
||||||
|
|
||||||
bool download_can_continue = false;
|
bool is_downloader = downloader_id == getCallerId();
|
||||||
|
if (!is_downloader)
|
||||||
if (download_state == State::PARTIALLY_DOWNLOADED
|
|
||||||
|| download_state == State::PARTIALLY_DOWNLOADED_NO_CONTINUATION)
|
|
||||||
{
|
{
|
||||||
bool is_last_holder = cache->isLastFileSegmentHolder(key(), offset(), cache_lock, segment_lock);
|
cv.notify_all();
|
||||||
download_can_continue = !is_last_holder && download_state == State::PARTIALLY_DOWNLOADED;
|
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
|
||||||
|
"File segment can be completed only by downloader or downloader's FileSegmentsHodler");
|
||||||
|
}
|
||||||
|
|
||||||
if (!download_can_continue)
|
if (state != State::DOWNLOADED
|
||||||
|
&& state != State::PARTIALLY_DOWNLOADED
|
||||||
|
&& state != State::PARTIALLY_DOWNLOADED_NO_CONTINUATION)
|
||||||
|
{
|
||||||
|
cv.notify_all();
|
||||||
|
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
|
||||||
|
"Cannot complete file segment with state: {}", stateToString(state));
|
||||||
|
}
|
||||||
|
|
||||||
|
download_state = state;
|
||||||
|
|
||||||
|
try
|
||||||
|
{
|
||||||
|
completeImpl(cache_lock, segment_lock);
|
||||||
|
}
|
||||||
|
catch (...)
|
||||||
|
{
|
||||||
|
if (!downloader_id.empty() && downloader_id == getCallerIdImpl(true))
|
||||||
|
downloader_id.clear();
|
||||||
|
|
||||||
|
cv.notify_all();
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
|
||||||
|
cv.notify_all();
|
||||||
|
}
|
||||||
|
|
||||||
|
void FileSegment::complete(std::lock_guard<std::mutex> & cache_lock)
|
||||||
|
{
|
||||||
|
std::lock_guard segment_lock(mutex);
|
||||||
|
|
||||||
|
if (download_state == State::SKIP_CACHE || detached)
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (download_state != State::DOWNLOADED && getDownloadedSize(segment_lock) == range().size())
|
||||||
|
setDownloaded(segment_lock);
|
||||||
|
|
||||||
|
if (download_state == State::DOWNLOADING || download_state == State::EMPTY)
|
||||||
|
{
|
||||||
|
/// Segment state can be changed from DOWNLOADING or EMPTY only if the caller is the
|
||||||
|
/// downloader or the only owner of the segment.
|
||||||
|
|
||||||
|
bool can_update_segment_state = downloader_id == getCallerIdImpl(true)
|
||||||
|
|| cache->isLastFileSegmentHolder(key(), offset(), cache_lock, segment_lock);
|
||||||
|
|
||||||
|
if (can_update_segment_state)
|
||||||
|
download_state = State::PARTIALLY_DOWNLOADED;
|
||||||
|
}
|
||||||
|
|
||||||
|
try
|
||||||
|
{
|
||||||
|
completeImpl(cache_lock, segment_lock, /* allow_non_strict_checking */true);
|
||||||
|
}
|
||||||
|
catch (...)
|
||||||
|
{
|
||||||
|
if (!downloader_id.empty() && downloader_id == getCallerIdImpl(true))
|
||||||
|
downloader_id.clear();
|
||||||
|
|
||||||
|
cv.notify_all();
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
|
||||||
|
cv.notify_all();
|
||||||
|
}
|
||||||
|
|
||||||
|
void FileSegment::completeImpl(std::lock_guard<std::mutex> & cache_lock, std::lock_guard<std::mutex> & segment_lock, bool allow_non_strict_checking)
|
||||||
|
{
|
||||||
|
bool is_last_holder = cache->isLastFileSegmentHolder(key(), offset(), cache_lock, segment_lock);
|
||||||
|
|
||||||
|
if (is_last_holder
|
||||||
|
&& (download_state == State::PARTIALLY_DOWNLOADED || download_state == State::PARTIALLY_DOWNLOADED_NO_CONTINUATION))
|
||||||
|
{
|
||||||
|
size_t current_downloaded_size = getDownloadedSize(segment_lock);
|
||||||
|
if (current_downloaded_size == 0)
|
||||||
{
|
{
|
||||||
size_t current_downloaded_size = getDownloadedSize(segment_lock);
|
download_state = State::SKIP_CACHE;
|
||||||
if (current_downloaded_size == 0)
|
LOG_TEST(log, "Remove cell {} (nothing downloaded)", range().toString());
|
||||||
{
|
cache->remove(key(), offset(), cache_lock, segment_lock);
|
||||||
download_state = State::SKIP_CACHE;
|
}
|
||||||
LOG_TEST(log, "Remove cell {} (nothing downloaded)", range().toString());
|
else
|
||||||
cache->remove(key(), offset(), cache_lock, segment_lock);
|
{
|
||||||
|
/**
|
||||||
|
* Only last holder of current file segment can resize the cell,
|
||||||
|
* because there is an invariant that file segments returned to users
|
||||||
|
* in FileSegmentsHolder represent a contiguous range, so we can resize
|
||||||
|
* it only when nobody needs it.
|
||||||
|
*/
|
||||||
|
LOG_TEST(log, "Resize cell {} to downloaded: {}", range().toString(), current_downloaded_size);
|
||||||
|
cache->reduceSizeToDownloaded(key(), offset(), cache_lock, segment_lock);
|
||||||
|
}
|
||||||
|
|
||||||
detached = true;
|
detached = true;
|
||||||
}
|
|
||||||
else if (is_last_holder)
|
|
||||||
{
|
|
||||||
/**
|
|
||||||
* Only last holder of current file segment can resize the cell,
|
|
||||||
* because there is an invariant that file segments returned to users
|
|
||||||
* in FileSegmentsHolder represent a contiguous range, so we can resize
|
|
||||||
* it only when nobody needs it.
|
|
||||||
*/
|
|
||||||
LOG_TEST(log, "Resize cell {} to downloaded: {}", range().toString(), current_downloaded_size);
|
|
||||||
cache->reduceSizeToDownloaded(key(), offset(), cache_lock, segment_lock);
|
|
||||||
|
|
||||||
detached = true;
|
if (cache_writer)
|
||||||
}
|
{
|
||||||
|
cache_writer->finalize();
|
||||||
|
cache_writer.reset();
|
||||||
|
remote_file_reader.reset();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!downloader_id.empty() && downloader_id == getCallerIdImpl(allow_non_strict_checking))
|
if (!downloader_id.empty() && (downloader_id == getCallerIdImpl(allow_non_strict_checking) || is_last_holder))
|
||||||
{
|
{
|
||||||
LOG_TEST(log, "Clearing downloader id: {}, current state: {}", downloader_id, stateToString(download_state));
|
LOG_TEST(log, "Clearing downloader id: {}, current state: {}", downloader_id, stateToString(download_state));
|
||||||
downloader_id.clear();
|
downloader_id.clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!download_can_continue && cache_writer)
|
assertCorrectnessImpl(segment_lock);
|
||||||
{
|
|
||||||
cache_writer->finalize();
|
|
||||||
cache_writer.reset();
|
|
||||||
remote_file_reader.reset();
|
|
||||||
}
|
|
||||||
|
|
||||||
assert(download_state != FileSegment::State::DOWNLOADED || std::filesystem::file_size(cache->getPathInLocalCache(key(), offset())) > 0);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
String FileSegment::getInfoForLog() const
|
String FileSegment::getInfoForLog() const
|
||||||
@ -440,6 +491,53 @@ String FileSegment::stateToString(FileSegment::State state)
|
|||||||
__builtin_unreachable();
|
__builtin_unreachable();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void FileSegment::assertCorrectness() const
|
||||||
|
{
|
||||||
|
std::lock_guard segment_lock(mutex);
|
||||||
|
assertCorrectnessImpl(segment_lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
void FileSegment::assertCorrectnessImpl(std::lock_guard<std::mutex> & /* segment_lock */) const
|
||||||
|
{
|
||||||
|
assert(downloader_id.empty() == (download_state != FileSegment::State::DOWNLOADING));
|
||||||
|
assert(!downloader_id.empty() == (download_state == FileSegment::State::DOWNLOADING));
|
||||||
|
assert(download_state != FileSegment::State::DOWNLOADED || std::filesystem::file_size(cache->getPathInLocalCache(key(), offset())) > 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
FileSegmentsHolder::~FileSegmentsHolder()
|
||||||
|
{
|
||||||
|
/// In CacheableReadBufferFromRemoteFS file segment's downloader removes file segments from
|
||||||
|
/// FileSegmentsHolder right after calling file_segment->complete(), so on destruction here
|
||||||
|
/// remain only uncompleted file segments.
|
||||||
|
|
||||||
|
IFileCache * cache = nullptr;
|
||||||
|
|
||||||
|
for (auto file_segment_it = file_segments.begin(); file_segment_it != file_segments.end();)
|
||||||
|
{
|
||||||
|
auto current_file_segment_it = file_segment_it;
|
||||||
|
auto & file_segment = *current_file_segment_it;
|
||||||
|
|
||||||
|
if (!cache)
|
||||||
|
cache = file_segment->cache;
|
||||||
|
|
||||||
|
try
|
||||||
|
{
|
||||||
|
/// File segment pointer must be reset right after calling complete() and
|
||||||
|
/// under the same mutex, because complete() checks for segment pointers.
|
||||||
|
std::lock_guard cache_lock(cache->mutex);
|
||||||
|
|
||||||
|
file_segment->complete(cache_lock);
|
||||||
|
|
||||||
|
file_segment_it = file_segments.erase(current_file_segment_it);
|
||||||
|
}
|
||||||
|
catch (...)
|
||||||
|
{
|
||||||
|
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||||
|
assert(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
String FileSegmentsHolder::toString()
|
String FileSegmentsHolder::toString()
|
||||||
{
|
{
|
||||||
String ranges;
|
String ranges;
|
||||||
|
@@ -95,12 +95,14 @@ public:

    bool reserve(size_t size);

-   void write(const char * from, size_t size);
+   void write(const char * from, size_t size, size_t offset_);

    RemoteFileReaderPtr getRemoteFileReader();

    void setRemoteFileReader(RemoteFileReaderPtr remote_file_reader_);

+   void resetRemoteFileReader();
+
    String getOrSetDownloader();

    String getDownloader() const;

@@ -121,16 +123,32 @@ public:

    String getInfoForLog() const;

+   void assertCorrectness() const;
+
private:
    size_t availableSize() const { return reserved_size - downloaded_size; }
-   bool lastFileSegmentHolder() const;
-   void complete();
-   void completeImpl(bool allow_non_strict_checking = false);
-   void setDownloaded(std::lock_guard<std::mutex> & segment_lock);
-   static String getCallerIdImpl(bool allow_non_strict_checking = false);
-   void resetDownloaderImpl(std::lock_guard<std::mutex> & segment_lock);
    size_t getDownloadedSize(std::lock_guard<std::mutex> & segment_lock) const;
    String getInfoForLogImpl(std::lock_guard<std::mutex> & segment_lock) const;
+   void assertCorrectnessImpl(std::lock_guard<std::mutex> & segment_lock) const;
+
+   void setDownloaded(std::lock_guard<std::mutex> & segment_lock);
+
+   bool lastFileSegmentHolder() const;
+
+   /// complete() without any completion state is called from destructor of
+   /// FileSegmentsHolder. complete() might check if the caller of the method
+   /// is the last alive holder of the segment. Therefore, complete() and destruction
+   /// of the file segment pointer must be done under the same cache mutex.
+   void complete(std::lock_guard<std::mutex> & cache_lock);
+
+   void completeImpl(
+       std::lock_guard<std::mutex> & cache_lock,
+       std::lock_guard<std::mutex> & segment_lock, bool allow_non_strict_checking = false);
+
+   static String getCallerIdImpl(bool allow_non_strict_checking = false);
+
+   void resetDownloaderImpl(std::lock_guard<std::mutex> & segment_lock);

    const Range segment_range;
|
|||||||
explicit FileSegmentsHolder(FileSegments && file_segments_) : file_segments(std::move(file_segments_)) {}
|
explicit FileSegmentsHolder(FileSegments && file_segments_) : file_segments(std::move(file_segments_)) {}
|
||||||
FileSegmentsHolder(FileSegmentsHolder && other) : file_segments(std::move(other.file_segments)) {}
|
FileSegmentsHolder(FileSegmentsHolder && other) : file_segments(std::move(other.file_segments)) {}
|
||||||
|
|
||||||
~FileSegmentsHolder()
|
~FileSegmentsHolder();
|
||||||
{
|
|
||||||
/// In CacheableReadBufferFromRemoteFS file segment's downloader removes file segments from
|
|
||||||
/// FileSegmentsHolder right after calling file_segment->complete(), so on destruction here
|
|
||||||
/// remain only uncompleted file segments.
|
|
||||||
|
|
||||||
for (auto & segment : file_segments)
|
|
||||||
{
|
|
||||||
try
|
|
||||||
{
|
|
||||||
segment->complete();
|
|
||||||
}
|
|
||||||
catch (...)
|
|
||||||
{
|
|
||||||
#ifndef NDEBUG
|
|
||||||
throw;
|
|
||||||
#else
|
|
||||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
FileSegments file_segments{};
|
FileSegments file_segments{};
|
||||||
|
|
||||||
|
@@ -61,7 +61,7 @@ private:
class JSONBool : public IItem
{
public:
-   explicit JSONBool(bool value_) : value(std::move(value_)) {}
+   explicit JSONBool(bool value_) : value(value_) {}
    void format(const FormatSettings & settings, FormatContext & context) override;

private:

@@ -74,7 +74,7 @@ public:
    void add(ItemPtr value) { values.push_back(std::move(value)); }
    void add(std::string value) { add(std::make_unique<JSONString>(std::move(value))); }
    void add(const char * value) { add(std::make_unique<JSONString>(value)); }
-   void add(bool value) { add(std::make_unique<JSONBool>(std::move(value))); }
+   void add(bool value) { add(std::make_unique<JSONBool>(value)); }

    template <typename T>
    requires std::is_arithmetic_v<T>

@@ -99,7 +99,7 @@ public:
    void add(std::string key, std::string value) { add(std::move(key), std::make_unique<JSONString>(std::move(value))); }
    void add(std::string key, const char * value) { add(std::move(key), std::make_unique<JSONString>(value)); }
    void add(std::string key, std::string_view value) { add(std::move(key), std::make_unique<JSONString>(value)); }
-   void add(std::string key, bool value) { add(std::move(key), std::make_unique<JSONBool>(std::move(value))); }
+   void add(std::string key, bool value) { add(std::move(key), std::make_unique<JSONBool>(value)); }

    template <typename T>
    requires std::is_arithmetic_v<T>
src/Common/NamePrompter.cpp (new file, 15 lines)
@@ -0,0 +1,15 @@
+#include <IO/WriteHelpers.h>
+#include <Common/NamePrompter.h>
+
+namespace DB::detail
+{
+void appendHintsMessageImpl(String & message, const std::vector<String> & hints)
+{
+    if (hints.empty())
+    {
+        return;
+    }
+
+    message += ". Maybe you meant: " + toString(hints);
+}
+}
@@ -90,6 +90,10 @@ private:
    }
};

+namespace detail
+{
+void appendHintsMessageImpl(String & message, const std::vector<String> & hints);
+}
+
template <size_t MaxNumHints, typename Self>
class IHints

@@ -102,6 +106,12 @@ public:
        return prompter.getHints(name, getAllRegisteredNames());
    }

+   void appendHintsMessage(String & message, const String & name) const
+   {
+       auto hints = getHints(name);
+       detail::appendHintsMessageImpl(message, hints);
+   }
+
    IHints() = default;

    IHints(const IHints &) = default;

@@ -114,5 +124,4 @@ public:
private:
    NamePrompter<MaxNumHints> prompter;
};

}
|
|||||||
M(CompileExpressionsMicroseconds, "Total time spent for compilation of expressions to LLVM code.") \
|
M(CompileExpressionsMicroseconds, "Total time spent for compilation of expressions to LLVM code.") \
|
||||||
M(CompileExpressionsBytes, "Number of bytes used for expressions compilation.") \
|
M(CompileExpressionsBytes, "Number of bytes used for expressions compilation.") \
|
||||||
\
|
\
|
||||||
|
M(ExecuteShellCommand, "Number of shell command executions.") \
|
||||||
|
\
|
||||||
M(ExternalSortWritePart, "") \
|
M(ExternalSortWritePart, "") \
|
||||||
M(ExternalSortMerge, "") \
|
M(ExternalSortMerge, "") \
|
||||||
M(ExternalAggregationWritePart, "") \
|
M(ExternalAggregationWritePart, "") \
|
||||||
@ -295,6 +297,25 @@
|
|||||||
M(MergeTreeMetadataCacheHit, "Number of times the read of meta file was done from MergeTree metadata cache") \
|
M(MergeTreeMetadataCacheHit, "Number of times the read of meta file was done from MergeTree metadata cache") \
|
||||||
M(MergeTreeMetadataCacheMiss, "Number of times the read of meta file was not done from MergeTree metadata cache") \
|
M(MergeTreeMetadataCacheMiss, "Number of times the read of meta file was not done from MergeTree metadata cache") \
|
||||||
\
|
\
|
||||||
|
M(KafkaRebalanceRevocations, "Number of partition revocations (the first stage of consumer group rebalance)") \
|
||||||
|
M(KafkaRebalanceAssignments, "Number of partition assignments (the final stage of consumer group rebalance)") \
|
||||||
|
M(KafkaRebalanceErrors, "Number of failed consumer group rebalances") \
|
||||||
|
M(KafkaMessagesPolled, "Number of Kafka messages polled from librdkafka to ClickHouse") \
|
||||||
|
M(KafkaMessagesRead, "Number of Kafka messages already processed by ClickHouse") \
|
||||||
|
M(KafkaMessagesFailed, "Number of Kafka messages ClickHouse failed to parse") \
|
||||||
|
M(KafkaRowsRead, "Number of rows parsed from Kafka messages") \
|
||||||
|
M(KafkaRowsRejected, "Number of parsed rows which were later rejected (due to rebalances / errors or similar reasons). Those rows will be consumed again after the rebalance.") \
|
||||||
|
M(KafkaDirectReads, "Number of direct selects from Kafka tables since server start") \
|
||||||
|
M(KafkaBackgroundReads, "Number of background reads populating materialized views from Kafka since server start") \
|
||||||
|
M(KafkaCommits, "Number of successful commits of consumed offsets to Kafka (normally should be the same as KafkaBackgroundReads)") \
|
||||||
|
M(KafkaCommitFailures, "Number of failed commits of consumed offsets to Kafka (usually is a sign of some data duplication)") \
|
||||||
|
M(KafkaConsumerErrors, "Number of errors reported by librdkafka during polls") \
|
||||||
|
M(KafkaWrites, "Number of writes (inserts) to Kafka tables ") \
|
||||||
|
M(KafkaRowsWritten, "Number of rows inserted into Kafka tables") \
|
||||||
|
M(KafkaProducerFlushes, "Number of explicit flushes to Kafka producer") \
|
||||||
|
M(KafkaMessagesProduced, "Number of messages produced to Kafka") \
|
||||||
|
M(KafkaProducerErrors, "Number of errors during producing the messages to Kafka") \
|
||||||
|
\
|
||||||
M(ScalarSubqueriesGlobalCacheHit, "Number of times a read from a scalar subquery was done using the global cache") \
|
M(ScalarSubqueriesGlobalCacheHit, "Number of times a read from a scalar subquery was done using the global cache") \
|
||||||
M(ScalarSubqueriesLocalCacheHit, "Number of times a read from a scalar subquery was done using the local cache") \
|
M(ScalarSubqueriesLocalCacheHit, "Number of times a read from a scalar subquery was done using the local cache") \
|
||||||
M(ScalarSubqueriesCacheMiss, "Number of times a read from a scalar subquery was not cached and had to be calculated completely")
|
M(ScalarSubqueriesCacheMiss, "Number of times a read from a scalar subquery was not cached and had to be calculated completely")
|
||||||
|
@@ -29,6 +29,11 @@ namespace
    };
}

+namespace ProfileEvents
+{
+   extern const Event ExecuteShellCommand;
+}
+
namespace DB
{

@@ -158,6 +163,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(
    const Config & config)
{
    logCommand(filename, argv);
+   ProfileEvents::increment(ProfileEvents::ExecuteShellCommand);

#if !defined(USE_MUSL)
    /** Here it is written that with a normal call `vfork`, there is a chance of deadlock in multithreaded programs,
@@ -1,4 +1,4 @@
-#include <Functions/TargetSpecific.h>
+#include <Common/TargetSpecific.h>

#include <Common/CpuId.h>
@@ -38,21 +38,7 @@ unsigned getCGroupLimitedCPUCores(unsigned default_cpu_count)
        quota_count = ceil(static_cast<float>(cgroup_quota) / static_cast<float>(cgroup_period));
    }

-   // Share number (typically a number relative to 1024) (2048 typically expresses 2 CPUs worth of processing)
-   // -1 for no share setup
-   int cgroup_share = read_from("/sys/fs/cgroup/cpu/cpu.shares", -1);
-   // Convert 1024 to no shares setup
-   if (cgroup_share == 1024)
-       cgroup_share = -1;
-
-# define PER_CPU_SHARES 1024
-   unsigned share_count = default_cpu_count;
-   if (cgroup_share > -1)
-   {
-       share_count = ceil(static_cast<float>(cgroup_share) / static_cast<float>(PER_CPU_SHARES));
-   }
-
-   return std::min(default_cpu_count, std::min(share_count, quota_count));
+   return std::min(default_cpu_count, quota_count);
}
#endif // OS_LINUX

@@ -91,6 +77,7 @@ unsigned getNumberOfPhysicalCPUCores()
    cpu_count = std::thread::hardware_concurrency();

#if defined(OS_LINUX)
+   /// TODO: add a setting for disabling that, similar to UseContainerSupport in java
    cpu_count = getCGroupLimitedCPUCores(cpu_count);
#endif // OS_LINUX
    return cpu_count;
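After this change only the CFS quota caps the detected core count; the cpu.shares based cap is gone. A rough standalone sketch of the remaining logic, assuming cgroup v1 paths cpu.cfs_quota_us and cpu.cfs_period_us (not the exact helper used by the project):

    #include <algorithm>
    #include <cmath>
    #include <fstream>
    #include <iostream>

    /// Read a single integer from a cgroup file, falling back if the file is absent or unreadable.
    static long long readFrom(const char * path, long long fallback)
    {
        std::ifstream in(path);
        long long value = fallback;
        if (!(in >> value))
            return fallback;
        return value;
    }

    unsigned getQuotaLimitedCPUCores(unsigned default_cpu_count)
    {
        unsigned quota_count = default_cpu_count;

        long long quota = readFrom("/sys/fs/cgroup/cpu/cpu.cfs_quota_us", -1);
        long long period = readFrom("/sys/fs/cgroup/cpu/cpu.cfs_period_us", -1);

        /// quota / period gives the number of full CPUs the container may use.
        if (quota > 0 && period > 0)
            quota_count = static_cast<unsigned>(std::ceil(static_cast<double>(quota) / static_cast<double>(period)));

        return std::min(default_cpu_count, quota_count);
    }

    int main() { std::cout << getQuotaLimitedCPUCores(16) << '\n'; }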
@@ -146,6 +146,8 @@ TEST(DateLUTTest, TimeValuesInMiddleOfRange)
    EXPECT_EQ(lut.addYears(time, 10), 1884270011 /*time_t*/);
    EXPECT_EQ(lut.timeToString(time), "2019-09-16 19:20:11" /*std::string*/);
    EXPECT_EQ(lut.dateToString(time), "2019-09-16" /*std::string*/);
+   EXPECT_EQ(lut.toLastDayOfMonth(time), 1569790800 /*time_t*/);
+   EXPECT_EQ(lut.toLastDayNumOfMonth(time), DayNum(18169) /*DayNum*/);
}


@@ -206,6 +208,8 @@ TEST(DateLUTTest, TimeValuesAtLeftBoderOfRange)
    EXPECT_EQ(lut.addYears(time, 10), 315532800 /*time_t*/);
    EXPECT_EQ(lut.timeToString(time), "1970-01-01 00:00:00" /*std::string*/);
    EXPECT_EQ(lut.dateToString(time), "1970-01-01" /*std::string*/);
+   EXPECT_EQ(lut.toLastDayOfMonth(time), 2592000 /*time_t*/);
+   EXPECT_EQ(lut.toLastDayNumOfMonth(time), DayNum(30) /*DayNum*/);
}

TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOldLUT)

@@ -225,7 +229,7 @@ TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOldLUT)

    EXPECT_EQ(lut.toFirstDayOfWeek(time), 4293820800 /*time_t*/);
    EXPECT_EQ(lut.toFirstDayNumOfWeek(time), DayNum(49697));
-   EXPECT_EQ(lut.toFirstDayOfMonth(time), 4291747200 /*time_t*/); // 2016-01-01
+   EXPECT_EQ(lut.toFirstDayOfMonth(time), 4291747200 /*time_t*/); // 2106-01-01
    EXPECT_EQ(lut.toFirstDayNumOfMonth(time), DayNum(49673));
    EXPECT_EQ(lut.toFirstDayNumOfQuarter(time), DayNum(49673) /*DayNum*/);
    EXPECT_EQ(lut.toFirstDayOfQuarter(time), 4291747200 /*time_t*/);

@@ -268,6 +272,8 @@ TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOldLUT)

    EXPECT_EQ(lut.timeToString(time), "2106-01-31 01:17:53" /*std::string*/);
    EXPECT_EQ(lut.dateToString(time), "2106-01-31" /*std::string*/);
+   EXPECT_EQ(lut.toLastDayOfMonth(time), 4294339200 /*time_t*/); // 2106-01-01
+   EXPECT_EQ(lut.toLastDayNumOfMonth(time), DayNum(49703));
}
@@ -67,7 +67,7 @@ void download(DB::FileSegmentPtr file_segment)
    fs::create_directories(subdir);

    std::string data(size, '0');
-   file_segment->write(data.data(), size);
+   file_segment->write(data.data(), size, file_segment->getDownloadOffset());
}

void prepareAndDownload(DB::FileSegmentPtr file_segment)
@@ -46,7 +46,8 @@ static ReturnType checkColumnStructure(const ColumnWithTypeAndName & actual, con
        return onError<ReturnType>("Block structure mismatch in " + std::string(context_description) + " stream: different names of columns:\n"
            + actual.dumpStructure() + "\n" + expected.dumpStructure(), code);

-   if (!actual.type->equals(*expected.type))
+   if ((actual.type && !expected.type) || (!actual.type && expected.type)
+       || (actual.type && expected.type && !actual.type->equals(*expected.type)))
        return onError<ReturnType>("Block structure mismatch in " + std::string(context_description) + " stream: different types:\n"
            + actual.dumpStructure() + "\n" + expected.dumpStructure(), code);
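The strengthened condition treats a mismatch as: exactly one side has no type, or both have types that are not equal, so a null type pointer is no longer dereferenced. A minimal illustration with a hypothetical Type class:

    #include <memory>
    #include <string>

    /// Hypothetical stand-in for a data type: two types are equal when their names match.
    struct Type
    {
        std::string name;
        bool equals(const Type & other) const { return name == other.name; }
    };

    using TypePtr = std::shared_ptr<const Type>;

    /// Mismatch when exactly one side has no type, or both have types that differ.
    bool typesMismatch(const TypePtr & actual, const TypePtr & expected)
    {
        return (actual && !expected) || (!actual && expected)
            || (actual && expected && !actual->equals(*expected));
    }

    int main()
    {
        auto uint64 = std::make_shared<const Type>(Type{"UInt64"});
        auto string = std::make_shared<const Type>(Type{"String"});

        bool ok = !typesMismatch(uint64, uint64)   /// same type
            && typesMismatch(uint64, string)       /// different types
            && typesMismatch(uint64, nullptr)      /// one side missing
            && !typesMismatch(nullptr, nullptr);   /// both missing: not reported as a type mismatch
        return ok ? 0 : 1;
    }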
@@ -642,6 +642,8 @@ class IColumn;
    M(DateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic', 'best_effort' and 'best_effort_us'.", 0) \
    M(DateTimeOutputFormat, date_time_output_format, FormatSettings::DateTimeOutputFormat::Simple, "Method to write DateTime to text output. Possible values: 'simple', 'iso', 'unix_timestamp'.", 0) \
    \
+   M(Bool, input_format_ipv4_default_on_conversion_error, false, "Deserialization of IPv4 will use default values instead of throwing exception on conversion error.", 0) \
+   M(Bool, input_format_ipv6_default_on_conversion_error, false, "Deserialization of IPV6 will use default values instead of throwing exception on conversion error.", 0) \
    M(String, bool_true_representation, "true", "Text to represent bool value in TSV/CSV formats.", 0) \
    M(String, bool_false_representation, "false", "Text to represent bool value in TSV/CSV formats.", 0) \
    \
@@ -15,10 +15,6 @@

namespace DB
{
-namespace ErrorCodes
-{
-   extern const int LOGICAL_ERROR;
-}

/** Cursor allows to compare rows in different blocks (and parts).
  * Cursor moves inside single block.

@@ -61,25 +57,21 @@ struct SortCursorImpl
        reset(block, perm);
    }

-   SortCursorImpl(const Columns & columns, const SortDescription & desc_, size_t order_ = 0, IColumn::Permutation * perm = nullptr)
+   SortCursorImpl(
+       const Block & header,
+       const Columns & columns,
+       const SortDescription & desc_,
+       size_t order_ = 0,
+       IColumn::Permutation * perm = nullptr)
        : desc(desc_), sort_columns_size(desc.size()), order(order_), need_collation(desc.size())
    {
-       for (auto & column_desc : desc)
-       {
-           if (!column_desc.column_name.empty())
-               throw Exception("SortDescription should contain column position if SortCursor was used without header.",
-                   ErrorCodes::LOGICAL_ERROR);
-       }
-       reset(columns, {}, perm);
+       reset(columns, header, perm);
    }

    bool empty() const { return rows == 0; }

    /// Set the cursor to the beginning of the new block.
-   void reset(const Block & block, IColumn::Permutation * perm = nullptr)
-   {
-       reset(block.getColumns(), block, perm);
-   }
+   void reset(const Block & block, IColumn::Permutation * perm = nullptr) { reset(block.getColumns(), block, perm); }

    /// Set the cursor to the beginning of the new block.
    void reset(const Columns & columns, const Block & block, IColumn::Permutation * perm = nullptr)

@@ -95,9 +87,7 @@ struct SortCursorImpl
        for (size_t j = 0, size = desc.size(); j < size; ++j)
        {
            auto & column_desc = desc[j];
-           size_t column_number = !column_desc.column_name.empty()
-               ? block.getPositionByName(column_desc.column_name)
-               : column_desc.column_number;
+           size_t column_number = block.getPositionByName(column_desc.column_name);
            sort_columns.push_back(columns[column_number].get());

            need_collation[j] = desc[j].collator != nullptr && sort_columns.back()->isCollationSupported();

@@ -367,12 +357,12 @@ private:
};

template <typename TLeftColumns, typename TRightColumns>
-bool less(const TLeftColumns & lhs, const TRightColumns & rhs, size_t i, size_t j, const SortDescription & descr)
+bool less(const TLeftColumns & lhs, const TRightColumns & rhs, size_t i, size_t j, const SortDescriptionWithPositions & descr)
{
    for (const auto & elem : descr)
    {
        size_t ind = elem.column_number;
|
||||||
int res = elem.direction * lhs[ind]->compareAt(i, j, *rhs[ind], elem.nulls_direction);
|
int res = elem.base.direction * lhs[ind]->compareAt(i, j, *rhs[ind], elem.base.nulls_direction);
|
||||||
if (res < 0)
|
if (res < 0)
|
||||||
return true;
|
return true;
|
||||||
else if (res > 0)
|
else if (res > 0)
|
||||||
|
@ -1,12 +1,12 @@
|
|||||||
#include <Core/SortDescription.h>
|
|
||||||
#include <Core/Block.h>
|
#include <Core/Block.h>
|
||||||
|
#include <Core/SortDescription.h>
|
||||||
#include <IO/Operators.h>
|
#include <IO/Operators.h>
|
||||||
#include <Common/JSONBuilder.h>
|
#include <Common/JSONBuilder.h>
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
void dumpSortDescription(const SortDescription & description, const Block & header, WriteBuffer & out)
|
void dumpSortDescription(const SortDescription & description, WriteBuffer & out)
|
||||||
{
|
{
|
||||||
bool first = true;
|
bool first = true;
|
||||||
|
|
||||||
@ -16,17 +16,7 @@ void dumpSortDescription(const SortDescription & description, const Block & head
|
|||||||
out << ", ";
|
out << ", ";
|
||||||
first = false;
|
first = false;
|
||||||
|
|
||||||
if (!desc.column_name.empty())
|
out << desc.column_name;
|
||||||
out << desc.column_name;
|
|
||||||
else
|
|
||||||
{
|
|
||||||
if (desc.column_number < header.columns())
|
|
||||||
out << header.getByPosition(desc.column_number).name;
|
|
||||||
else
|
|
||||||
out << "?";
|
|
||||||
|
|
||||||
out << " (pos " << desc.column_number << ")";
|
|
||||||
}
|
|
||||||
|
|
||||||
if (desc.direction > 0)
|
if (desc.direction > 0)
|
||||||
out << " ASC";
|
out << " ASC";
|
||||||
@ -38,18 +28,9 @@ void dumpSortDescription(const SortDescription & description, const Block & head
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void SortColumnDescription::explain(JSONBuilder::JSONMap & map, const Block & header) const
|
void SortColumnDescription::explain(JSONBuilder::JSONMap & map) const
|
||||||
{
|
{
|
||||||
if (!column_name.empty())
|
map.add("Column", column_name);
|
||||||
map.add("Column", column_name);
|
|
||||||
else
|
|
||||||
{
|
|
||||||
if (column_number < header.columns())
|
|
||||||
map.add("Column", header.getByPosition(column_number).name);
|
|
||||||
|
|
||||||
map.add("Position", column_number);
|
|
||||||
}
|
|
||||||
|
|
||||||
map.add("Ascending", direction > 0);
|
map.add("Ascending", direction > 0);
|
||||||
map.add("With Fill", with_fill);
|
map.add("With Fill", with_fill);
|
||||||
}
|
}
|
||||||
@ -57,17 +38,17 @@ void SortColumnDescription::explain(JSONBuilder::JSONMap & map, const Block & he
|
|||||||
std::string dumpSortDescription(const SortDescription & description)
|
std::string dumpSortDescription(const SortDescription & description)
|
||||||
{
|
{
|
||||||
WriteBufferFromOwnString wb;
|
WriteBufferFromOwnString wb;
|
||||||
dumpSortDescription(description, Block{}, wb);
|
dumpSortDescription(description, wb);
|
||||||
return wb.str();
|
return wb.str();
|
||||||
}
|
}
|
||||||
|
|
||||||
JSONBuilder::ItemPtr explainSortDescription(const SortDescription & description, const Block & header)
|
JSONBuilder::ItemPtr explainSortDescription(const SortDescription & description)
|
||||||
{
|
{
|
||||||
auto json_array = std::make_unique<JSONBuilder::JSONArray>();
|
auto json_array = std::make_unique<JSONBuilder::JSONArray>();
|
||||||
for (const auto & descr : description)
|
for (const auto & descr : description)
|
||||||
{
|
{
|
||||||
auto json_map = std::make_unique<JSONBuilder::JSONMap>();
|
auto json_map = std::make_unique<JSONBuilder::JSONMap>();
|
||||||
descr.explain(*json_map, header);
|
descr.explain(*json_map);
|
||||||
json_array->add(std::move(json_map));
|
json_array->add(std::move(json_map));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -39,7 +39,6 @@ struct FillColumnDescription
|
|||||||
struct SortColumnDescription
|
struct SortColumnDescription
|
||||||
{
|
{
|
||||||
std::string column_name; /// The name of the column.
|
std::string column_name; /// The name of the column.
|
||||||
size_t column_number; /// Column number (used if no name is given).
|
|
||||||
int direction; /// 1 - ascending, -1 - descending.
|
int direction; /// 1 - ascending, -1 - descending.
|
||||||
int nulls_direction; /// 1 - NULLs and NaNs are greater, -1 - less.
|
int nulls_direction; /// 1 - NULLs and NaNs are greater, -1 - less.
|
||||||
/// To achieve NULLS LAST, set it equal to direction, to achieve NULLS FIRST, set it opposite.
|
/// To achieve NULLS LAST, set it equal to direction, to achieve NULLS FIRST, set it opposite.
|
||||||
@ -48,23 +47,24 @@ struct SortColumnDescription
|
|||||||
FillColumnDescription fill_description;
|
FillColumnDescription fill_description;
|
||||||
|
|
||||||
explicit SortColumnDescription(
|
explicit SortColumnDescription(
|
||||||
size_t column_number_, int direction_ = 1, int nulls_direction_ = 1,
|
const std::string & column_name_,
|
||||||
const std::shared_ptr<Collator> & collator_ = nullptr,
|
int direction_ = 1,
|
||||||
bool with_fill_ = false, const FillColumnDescription & fill_description_ = {})
|
int nulls_direction_ = 1,
|
||||||
: column_number(column_number_), direction(direction_), nulls_direction(nulls_direction_), collator(collator_)
|
const std::shared_ptr<Collator> & collator_ = nullptr,
|
||||||
, with_fill(with_fill_), fill_description(fill_description_) {}
|
bool with_fill_ = false,
|
||||||
|
const FillColumnDescription & fill_description_ = {})
|
||||||
explicit SortColumnDescription(
|
: column_name(column_name_)
|
||||||
const std::string & column_name_, int direction_ = 1, int nulls_direction_ = 1,
|
, direction(direction_)
|
||||||
const std::shared_ptr<Collator> & collator_ = nullptr,
|
, nulls_direction(nulls_direction_)
|
||||||
bool with_fill_ = false, const FillColumnDescription & fill_description_ = {})
|
, collator(collator_)
|
||||||
: column_name(column_name_), column_number(0), direction(direction_), nulls_direction(nulls_direction_)
|
, with_fill(with_fill_)
|
||||||
, collator(collator_), with_fill(with_fill_), fill_description(fill_description_) {}
|
, fill_description(fill_description_)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
bool operator == (const SortColumnDescription & other) const
|
bool operator == (const SortColumnDescription & other) const
|
||||||
{
|
{
|
||||||
return column_name == other.column_name && column_number == other.column_number
|
return column_name == other.column_name && direction == other.direction && nulls_direction == other.nulls_direction;
|
||||||
&& direction == other.direction && nulls_direction == other.nulls_direction;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool operator != (const SortColumnDescription & other) const
|
bool operator != (const SortColumnDescription & other) const
|
||||||
@ -72,22 +72,30 @@ struct SortColumnDescription
|
|||||||
return !(*this == other);
|
return !(*this == other);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string dump() const
|
std::string dump() const { return fmt::format("{}:dir {}nulls {}", column_name, direction, nulls_direction); }
|
||||||
{
|
|
||||||
return fmt::format("{}:{}:dir {}nulls ", column_name, column_number, direction, nulls_direction);
|
|
||||||
}
|
|
||||||
|
|
||||||
void explain(JSONBuilder::JSONMap & map, const Block & header) const;
|
void explain(JSONBuilder::JSONMap & map) const;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct SortColumnDescriptionWithColumnIndex
|
||||||
|
{
|
||||||
|
SortColumnDescription base;
|
||||||
|
size_t column_number;
|
||||||
|
|
||||||
|
SortColumnDescriptionWithColumnIndex(SortColumnDescription description_, size_t column_number_)
|
||||||
|
: base(std::move(description_)), column_number(column_number_)
|
||||||
|
{
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Description of the sorting rule for several columns.
|
/// Description of the sorting rule for several columns.
|
||||||
using SortDescription = std::vector<SortColumnDescription>;
|
using SortDescription = std::vector<SortColumnDescription>;
|
||||||
|
using SortDescriptionWithPositions = std::vector<SortColumnDescriptionWithColumnIndex>;
|
||||||
|
|
||||||
/// Outputs user-readable description into `out`.
|
/// Outputs user-readable description into `out`.
|
||||||
void dumpSortDescription(const SortDescription & description, const Block & header, WriteBuffer & out);
|
void dumpSortDescription(const SortDescription & description, WriteBuffer & out);
|
||||||
|
|
||||||
std::string dumpSortDescription(const SortDescription & description);
|
std::string dumpSortDescription(const SortDescription & description);
|
||||||
|
|
||||||
JSONBuilder::ItemPtr explainSortDescription(const SortDescription & description, const Block & header);
|
JSONBuilder::ItemPtr explainSortDescription(const SortDescription & description);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -36,7 +36,7 @@ DataTypePtr recursiveRemoveLowCardinality(const DataTypePtr & type)
|
|||||||
element = recursiveRemoveLowCardinality(element);
|
element = recursiveRemoveLowCardinality(element);
|
||||||
|
|
||||||
if (tuple_type->haveExplicitNames())
|
if (tuple_type->haveExplicitNames())
|
||||||
return std::make_shared<DataTypeTuple>(elements, tuple_type->getElementNames(), tuple_type->serializeNames());
|
return std::make_shared<DataTypeTuple>(elements, tuple_type->getElementNames());
|
||||||
else
|
else
|
||||||
return std::make_shared<DataTypeTuple>(elements);
|
return std::make_shared<DataTypeTuple>(elements);
|
||||||
}
|
}
|
||||||
|
@ -64,8 +64,8 @@ static std::optional<Exception> checkTupleNames(const Strings & names)
|
|||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
|
|
||||||
DataTypeTuple::DataTypeTuple(const DataTypes & elems_, const Strings & names_, bool serialize_names_)
|
DataTypeTuple::DataTypeTuple(const DataTypes & elems_, const Strings & names_)
|
||||||
: elems(elems_), names(names_), have_explicit_names(true), serialize_names(serialize_names_)
|
: elems(elems_), names(names_), have_explicit_names(true)
|
||||||
{
|
{
|
||||||
size_t size = elems.size();
|
size_t size = elems.size();
|
||||||
if (names.size() != size)
|
if (names.size() != size)
|
||||||
@ -75,11 +75,6 @@ DataTypeTuple::DataTypeTuple(const DataTypes & elems_, const Strings & names_, b
|
|||||||
throw std::move(*exception);
|
throw std::move(*exception);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool DataTypeTuple::canBeCreatedWithNames(const Strings & names)
|
|
||||||
{
|
|
||||||
return checkTupleNames(names) == std::nullopt;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string DataTypeTuple::doGetName() const
|
std::string DataTypeTuple::doGetName() const
|
||||||
{
|
{
|
||||||
size_t size = elems.size();
|
size_t size = elems.size();
|
||||||
@ -91,7 +86,7 @@ std::string DataTypeTuple::doGetName() const
|
|||||||
if (i != 0)
|
if (i != 0)
|
||||||
s << ", ";
|
s << ", ";
|
||||||
|
|
||||||
if (have_explicit_names && serialize_names)
|
if (have_explicit_names)
|
||||||
s << backQuoteIfNeed(names[i]) << ' ';
|
s << backQuoteIfNeed(names[i]) << ' ';
|
||||||
|
|
||||||
s << elems[i]->getName();
|
s << elems[i]->getName();
|
||||||
@ -206,7 +201,7 @@ bool DataTypeTuple::equals(const IDataType & rhs) const
|
|||||||
return false;
|
return false;
|
||||||
|
|
||||||
for (size_t i = 0; i < size; ++i)
|
for (size_t i = 0; i < size; ++i)
|
||||||
if (!elems[i]->equals(*rhs_tuple.elems[i]))
|
if (!elems[i]->equals(*rhs_tuple.elems[i]) || names[i] != rhs_tuple.names[i])
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
@ -265,31 +260,29 @@ size_t DataTypeTuple::getSizeOfValueInMemory() const
|
|||||||
SerializationPtr DataTypeTuple::doGetDefaultSerialization() const
|
SerializationPtr DataTypeTuple::doGetDefaultSerialization() const
|
||||||
{
|
{
|
||||||
SerializationTuple::ElementSerializations serializations(elems.size());
|
SerializationTuple::ElementSerializations serializations(elems.size());
|
||||||
bool use_explicit_names = have_explicit_names && serialize_names;
|
|
||||||
for (size_t i = 0; i < elems.size(); ++i)
|
for (size_t i = 0; i < elems.size(); ++i)
|
||||||
{
|
{
|
||||||
String elem_name = use_explicit_names ? names[i] : toString(i + 1);
|
String elem_name = have_explicit_names ? names[i] : toString(i + 1);
|
||||||
auto serialization = elems[i]->getDefaultSerialization();
|
auto serialization = elems[i]->getDefaultSerialization();
|
||||||
serializations[i] = std::make_shared<SerializationNamed>(serialization, elem_name);
|
serializations[i] = std::make_shared<SerializationNamed>(serialization, elem_name);
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::make_shared<SerializationTuple>(std::move(serializations), use_explicit_names);
|
return std::make_shared<SerializationTuple>(std::move(serializations), have_explicit_names);
|
||||||
}
|
}
|
||||||
|
|
||||||
SerializationPtr DataTypeTuple::getSerialization(const SerializationInfo & info) const
|
SerializationPtr DataTypeTuple::getSerialization(const SerializationInfo & info) const
|
||||||
{
|
{
|
||||||
SerializationTuple::ElementSerializations serializations(elems.size());
|
SerializationTuple::ElementSerializations serializations(elems.size());
|
||||||
const auto & info_tuple = assert_cast<const SerializationInfoTuple &>(info);
|
const auto & info_tuple = assert_cast<const SerializationInfoTuple &>(info);
|
||||||
bool use_explicit_names = have_explicit_names && serialize_names;
|
|
||||||
|
|
||||||
for (size_t i = 0; i < elems.size(); ++i)
|
for (size_t i = 0; i < elems.size(); ++i)
|
||||||
{
|
{
|
||||||
String elem_name = use_explicit_names ? names[i] : toString(i + 1);
|
String elem_name = have_explicit_names ? names[i] : toString(i + 1);
|
||||||
auto serialization = elems[i]->getSerialization(*info_tuple.getElementInfo(i));
|
auto serialization = elems[i]->getSerialization(*info_tuple.getElementInfo(i));
|
||||||
serializations[i] = std::make_shared<SerializationNamed>(serialization, elem_name);
|
serializations[i] = std::make_shared<SerializationNamed>(serialization, elem_name);
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::make_shared<SerializationTuple>(std::move(serializations), use_explicit_names);
|
return std::make_shared<SerializationTuple>(std::move(serializations), have_explicit_names);
|
||||||
}
|
}
|
||||||
|
|
||||||
MutableSerializationInfoPtr DataTypeTuple::createSerializationInfo(const SerializationInfo::Settings & settings) const
|
MutableSerializationInfoPtr DataTypeTuple::createSerializationInfo(const SerializationInfo::Settings & settings) const
|
||||||
|
@ -22,14 +22,11 @@ private:
|
|||||||
DataTypes elems;
|
DataTypes elems;
|
||||||
Strings names;
|
Strings names;
|
||||||
bool have_explicit_names;
|
bool have_explicit_names;
|
||||||
bool serialize_names = true;
|
|
||||||
public:
|
public:
|
||||||
static constexpr bool is_parametric = true;
|
static constexpr bool is_parametric = true;
|
||||||
|
|
||||||
explicit DataTypeTuple(const DataTypes & elems);
|
explicit DataTypeTuple(const DataTypes & elems);
|
||||||
DataTypeTuple(const DataTypes & elems, const Strings & names, bool serialize_names_ = true);
|
DataTypeTuple(const DataTypes & elems, const Strings & names);
|
||||||
|
|
||||||
static bool canBeCreatedWithNames(const Strings & names);
|
|
||||||
|
|
||||||
TypeIndex getTypeId() const override { return TypeIndex::Tuple; }
|
TypeIndex getTypeId() const override { return TypeIndex::Tuple; }
|
||||||
std::string doGetName() const override;
|
std::string doGetName() const override;
|
||||||
@ -66,7 +63,6 @@ public:
|
|||||||
String getNameByPosition(size_t i) const;
|
String getNameByPosition(size_t i) const;
|
||||||
|
|
||||||
bool haveExplicitNames() const { return have_explicit_names; }
|
bool haveExplicitNames() const { return have_explicit_names; }
|
||||||
bool serializeNames() const { return serialize_names; }
|
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -128,22 +128,21 @@ static auto extractVector(const std::vector<Tuple> & vec)
|
|||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
void convertObjectsToTuples(NamesAndTypesList & columns_list, Block & block, const NamesAndTypesList & extended_storage_columns)
|
void convertObjectsToTuples(Block & block, const NamesAndTypesList & extended_storage_columns)
|
||||||
{
|
{
|
||||||
std::unordered_map<String, DataTypePtr> storage_columns_map;
|
std::unordered_map<String, DataTypePtr> storage_columns_map;
|
||||||
for (const auto & [name, type] : extended_storage_columns)
|
for (const auto & [name, type] : extended_storage_columns)
|
||||||
storage_columns_map[name] = type;
|
storage_columns_map[name] = type;
|
||||||
|
|
||||||
for (auto & name_type : columns_list)
|
for (auto & column : block)
|
||||||
{
|
{
|
||||||
if (!isObject(name_type.type))
|
if (!isObject(column.type))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
auto & column = block.getByName(name_type.name);
|
|
||||||
if (!isObject(column.type))
|
if (!isObject(column.type))
|
||||||
throw Exception(ErrorCodes::TYPE_MISMATCH,
|
throw Exception(ErrorCodes::TYPE_MISMATCH,
|
||||||
"Type for column '{}' mismatch in columns list and in block. In list: {}, in block: {}",
|
"Type for column '{}' mismatch in columns list and in block. In list: {}, in block: {}",
|
||||||
name_type.name, name_type.type->getName(), column.type->getName());
|
column.name, column.type->getName(), column.type->getName());
|
||||||
|
|
||||||
const auto & column_object = assert_cast<const ColumnObject &>(*column.column);
|
const auto & column_object = assert_cast<const ColumnObject &>(*column.column);
|
||||||
const auto & subcolumns = column_object.getSubcolumns();
|
const auto & subcolumns = column_object.getSubcolumns();
|
||||||
@ -151,7 +150,7 @@ void convertObjectsToTuples(NamesAndTypesList & columns_list, Block & block, con
|
|||||||
if (!column_object.isFinalized())
|
if (!column_object.isFinalized())
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||||
"Cannot convert to tuple column '{}' from type {}. Column should be finalized first",
|
"Cannot convert to tuple column '{}' from type {}. Column should be finalized first",
|
||||||
name_type.name, name_type.type->getName());
|
column.name, column.type->getName());
|
||||||
|
|
||||||
PathsInData tuple_paths;
|
PathsInData tuple_paths;
|
||||||
DataTypes tuple_types;
|
DataTypes tuple_types;
|
||||||
@ -164,12 +163,11 @@ void convertObjectsToTuples(NamesAndTypesList & columns_list, Block & block, con
|
|||||||
tuple_columns.emplace_back(entry->data.getFinalizedColumnPtr());
|
tuple_columns.emplace_back(entry->data.getFinalizedColumnPtr());
|
||||||
}
|
}
|
||||||
|
|
||||||
auto it = storage_columns_map.find(name_type.name);
|
auto it = storage_columns_map.find(column.name);
|
||||||
if (it == storage_columns_map.end())
|
if (it == storage_columns_map.end())
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Column '{}' not found in storage", name_type.name);
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Column '{}' not found in storage", column.name);
|
||||||
|
|
||||||
std::tie(column.column, column.type) = unflattenTuple(tuple_paths, tuple_types, tuple_columns);
|
std::tie(column.column, column.type) = unflattenTuple(tuple_paths, tuple_types, tuple_columns);
|
||||||
name_type.type = column.type;
|
|
||||||
|
|
||||||
/// Check that constructed Tuple type and type in storage are compatible.
|
/// Check that constructed Tuple type and type in storage are compatible.
|
||||||
getLeastCommonTypeForObject({column.type, it->second}, true);
|
getLeastCommonTypeForObject({column.type, it->second}, true);
|
||||||
|
@ -38,7 +38,7 @@ DataTypePtr getDataTypeByColumn(const IColumn & column);
|
|||||||
|
|
||||||
/// Converts Object types and columns to Tuples in @columns_list and @block
|
/// Converts Object types and columns to Tuples in @columns_list and @block
|
||||||
/// and checks that types are consistent with types in @extended_storage_columns.
|
/// and checks that types are consistent with types in @extended_storage_columns.
|
||||||
void convertObjectsToTuples(NamesAndTypesList & columns_list, Block & block, const NamesAndTypesList & extended_storage_columns);
|
void convertObjectsToTuples(Block & block, const NamesAndTypesList & extended_storage_columns);
|
||||||
|
|
||||||
/// Checks that each path is not the prefix of any other path.
|
/// Checks that each path is not the prefix of any other path.
|
||||||
void checkObjectHasNoAmbiguosPaths(const PathsInData & paths);
|
void checkObjectHasNoAmbiguosPaths(const PathsInData & paths);
|
||||||
|
@ -6,6 +6,8 @@
|
|||||||
#include <Common/formatIPv6.h>
|
#include <Common/formatIPv6.h>
|
||||||
#include <IO/WriteBuffer.h>
|
#include <IO/WriteBuffer.h>
|
||||||
#include <IO/ReadBuffer.h>
|
#include <IO/ReadBuffer.h>
|
||||||
|
#include <Formats/FormatSettings.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -47,9 +49,11 @@ void SerializationIPv4::deserializeText(IColumn & column, ReadBuffer & istr, con
|
|||||||
char buffer[IPV4_MAX_TEXT_LENGTH + 1] = {'\0'};
|
char buffer[IPV4_MAX_TEXT_LENGTH + 1] = {'\0'};
|
||||||
istr.read(buffer, sizeof(buffer) - 1);
|
istr.read(buffer, sizeof(buffer) - 1);
|
||||||
UInt32 ipv4_value = 0;
|
UInt32 ipv4_value = 0;
|
||||||
if (!parseIPv4(buffer, reinterpret_cast<unsigned char *>(&ipv4_value)))
|
|
||||||
|
bool parse_result = parseIPv4(buffer, reinterpret_cast<unsigned char *>(&ipv4_value));
|
||||||
|
if (!parse_result && !settings.input_format_ipv4_default_on_conversion_error)
|
||||||
{
|
{
|
||||||
throw Exception("Invalid IPv4 value.", ErrorCodes::CANNOT_PARSE_DOMAIN_VALUE_FROM_STRING);
|
throw Exception("Invalid IPv4 value", ErrorCodes::CANNOT_PARSE_DOMAIN_VALUE_FROM_STRING);
|
||||||
}
|
}
|
||||||
|
|
||||||
col->insert(ipv4_value);
|
col->insert(ipv4_value);
|
||||||
@ -89,9 +93,11 @@ void SerializationIPv6::deserializeText(IColumn & column, ReadBuffer & istr, con
|
|||||||
istr.read(buffer, sizeof(buffer) - 1);
|
istr.read(buffer, sizeof(buffer) - 1);
|
||||||
|
|
||||||
std::string ipv6_value(IPV6_BINARY_LENGTH, '\0');
|
std::string ipv6_value(IPV6_BINARY_LENGTH, '\0');
|
||||||
if (!parseIPv6(buffer, reinterpret_cast<unsigned char *>(ipv6_value.data())))
|
|
||||||
|
bool parse_result = parseIPv6(buffer, reinterpret_cast<unsigned char *>(ipv6_value.data()));
|
||||||
|
if (!parse_result && !settings.input_format_ipv6_default_on_conversion_error)
|
||||||
{
|
{
|
||||||
throw Exception("Invalid IPv6 value.", ErrorCodes::CANNOT_PARSE_DOMAIN_VALUE_FROM_STRING);
|
throw Exception("Invalid IPv6 value", ErrorCodes::CANNOT_PARSE_DOMAIN_VALUE_FROM_STRING);
|
||||||
}
|
}
|
||||||
|
|
||||||
col->insertString(ipv6_value);
|
col->insertString(ipv6_value);
|
||||||
|
@ -63,7 +63,11 @@ void CachedReadBufferFromRemoteFS::initialize(size_t offset, size_t size)
|
|||||||
|
|
||||||
SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getCacheReadBuffer(size_t offset) const
|
SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getCacheReadBuffer(size_t offset) const
|
||||||
{
|
{
|
||||||
return std::make_shared<ReadBufferFromFile>(cache->getPathInLocalCache(cache_key, offset), settings.local_fs_buffer_size);
|
auto path = cache->getPathInLocalCache(cache_key, offset);
|
||||||
|
auto buf = std::make_shared<ReadBufferFromFile>(path, settings.local_fs_buffer_size);
|
||||||
|
if (buf->size() == 0)
|
||||||
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to read from an empty cache file: {}", path);
|
||||||
|
return buf;
|
||||||
}
|
}
|
||||||
|
|
||||||
SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getRemoteFSReadBuffer(FileSegmentPtr & file_segment, ReadType read_type_)
|
SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getRemoteFSReadBuffer(FileSegmentPtr & file_segment, ReadType read_type_)
|
||||||
@ -96,7 +100,6 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getRemoteFSReadBuffer(FileSe
|
|||||||
remote_fs_segment_reader = remote_file_reader_creator();
|
remote_fs_segment_reader = remote_file_reader_creator();
|
||||||
file_segment->setRemoteFileReader(remote_fs_segment_reader);
|
file_segment->setRemoteFileReader(remote_fs_segment_reader);
|
||||||
|
|
||||||
///TODO: add check for pending data
|
|
||||||
return remote_fs_segment_reader;
|
return remote_fs_segment_reader;
|
||||||
}
|
}
|
||||||
case ReadType::REMOTE_FS_READ_BYPASS_CACHE:
|
case ReadType::REMOTE_FS_READ_BYPASS_CACHE:
|
||||||
@ -119,7 +122,6 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getReadBufferForFileSegment(
|
|||||||
{
|
{
|
||||||
auto range = file_segment->range();
|
auto range = file_segment->range();
|
||||||
|
|
||||||
/// Each wait() call has a timeout of 1 second.
|
|
||||||
size_t wait_download_max_tries = settings.remote_fs_cache_max_wait_sec;
|
size_t wait_download_max_tries = settings.remote_fs_cache_max_wait_sec;
|
||||||
size_t wait_download_tries = 0;
|
size_t wait_download_tries = 0;
|
||||||
|
|
||||||
@ -296,17 +298,21 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getImplementationBuffer(File
|
|||||||
{
|
{
|
||||||
case ReadType::CACHED:
|
case ReadType::CACHED:
|
||||||
{
|
{
|
||||||
|
#ifndef NDEBUG
|
||||||
|
auto * file_reader = assert_cast<ReadBufferFromFile *>(read_buffer_for_file_segment.get());
|
||||||
|
size_t file_size = file_reader->size();
|
||||||
|
|
||||||
|
if (file_size == 0 || range.left + file_size <= file_offset_of_buffer_end)
|
||||||
|
throw Exception(
|
||||||
|
ErrorCodes::LOGICAL_ERROR,
|
||||||
|
"Unexpected state of cache file. Cache file size: {}, cache file offset: {}, "
|
||||||
|
"expected file size to be non-zero and file downloaded size to exceed current file read offset (expected: {} > {})",
|
||||||
|
file_size, range.left, range.left + file_size, file_offset_of_buffer_end);
|
||||||
|
#endif
|
||||||
|
|
||||||
size_t seek_offset = file_offset_of_buffer_end - range.left;
|
size_t seek_offset = file_offset_of_buffer_end - range.left;
|
||||||
read_buffer_for_file_segment->seek(seek_offset, SEEK_SET);
|
read_buffer_for_file_segment->seek(seek_offset, SEEK_SET);
|
||||||
|
|
||||||
auto * file_reader = assert_cast<ReadBufferFromFile *>(read_buffer_for_file_segment.get());
|
|
||||||
size_t file_size = file_reader->size();
|
|
||||||
auto state = file_segment->state();
|
|
||||||
|
|
||||||
LOG_TEST(log, "Cache file: {}. Cached seek to: {}, file size: {}, file segment state: {}, download offset: {}",
|
|
||||||
file_reader->getFileName(), seek_offset, file_size, state, file_segment->getDownloadOffset());
|
|
||||||
|
|
||||||
assert(file_size > 0);
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case ReadType::REMOTE_FS_READ_BYPASS_CACHE:
|
case ReadType::REMOTE_FS_READ_BYPASS_CACHE:
|
||||||
@ -328,15 +334,17 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getImplementationBuffer(File
|
|||||||
read_buffer_for_file_segment->seek(file_offset_of_buffer_end, SEEK_SET);
|
read_buffer_for_file_segment->seek(file_offset_of_buffer_end, SEEK_SET);
|
||||||
}
|
}
|
||||||
|
|
||||||
auto impl_range = read_buffer_for_file_segment->getRemainingReadRange();
|
|
||||||
auto download_offset = file_segment->getDownloadOffset();
|
auto download_offset = file_segment->getDownloadOffset();
|
||||||
if (download_offset != static_cast<size_t>(read_buffer_for_file_segment->getPosition()))
|
if (download_offset != static_cast<size_t>(read_buffer_for_file_segment->getPosition()))
|
||||||
|
{
|
||||||
|
auto impl_range = read_buffer_for_file_segment->getRemainingReadRange();
|
||||||
throw Exception(
|
throw Exception(
|
||||||
ErrorCodes::LOGICAL_ERROR,
|
ErrorCodes::LOGICAL_ERROR,
|
||||||
"Buffer's offsets mismatch; cached buffer offset: {}, download_offset: {}, position: {}, implementation buffer offset: {}, "
|
"Buffer's offsets mismatch; cached buffer offset: {}, download_offset: {}, position: {}, implementation buffer offset: {}, "
|
||||||
"implementation buffer reading until: {}, file segment info: {}",
|
"implementation buffer reading until: {}, file segment info: {}",
|
||||||
file_offset_of_buffer_end, download_offset, read_buffer_for_file_segment->getPosition(),
|
file_offset_of_buffer_end, download_offset, read_buffer_for_file_segment->getPosition(),
|
||||||
impl_range.left, *impl_range.right, file_segment->getInfoForLog());
|
impl_range.left, *impl_range.right, file_segment->getInfoForLog());
|
||||||
|
}
|
||||||
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -384,6 +392,7 @@ void CachedReadBufferFromRemoteFS::predownload(FileSegmentPtr & file_segment)
|
|||||||
LOG_TEST(log, "Bytes to predownload: {}, caller_id: {}", bytes_to_predownload, FileSegment::getCallerId());
|
LOG_TEST(log, "Bytes to predownload: {}, caller_id: {}", bytes_to_predownload, FileSegment::getCallerId());
|
||||||
|
|
||||||
assert(implementation_buffer->getFileOffsetOfBufferEnd() == file_segment->getDownloadOffset());
|
assert(implementation_buffer->getFileOffsetOfBufferEnd() == file_segment->getDownloadOffset());
|
||||||
|
size_t current_offset = file_segment->getDownloadOffset();
|
||||||
|
|
||||||
while (true)
|
while (true)
|
||||||
{
|
{
|
||||||
@ -423,7 +432,11 @@ void CachedReadBufferFromRemoteFS::predownload(FileSegmentPtr & file_segment)
|
|||||||
{
|
{
|
||||||
LOG_TEST(log, "Left to predownload: {}, buffer size: {}", bytes_to_predownload, implementation_buffer->buffer().size());
|
LOG_TEST(log, "Left to predownload: {}, buffer size: {}", bytes_to_predownload, implementation_buffer->buffer().size());
|
||||||
|
|
||||||
file_segment->write(implementation_buffer->buffer().begin(), current_predownload_size);
|
assert(file_segment->getDownloadOffset() == static_cast<size_t>(implementation_buffer->getPosition()));
|
||||||
|
|
||||||
|
file_segment->write(implementation_buffer->buffer().begin(), current_predownload_size, current_offset);
|
||||||
|
|
||||||
|
current_offset += current_predownload_size;
|
||||||
|
|
||||||
bytes_to_predownload -= current_predownload_size;
|
bytes_to_predownload -= current_predownload_size;
|
||||||
implementation_buffer->position() += current_predownload_size;
|
implementation_buffer->position() += current_predownload_size;
|
||||||
@ -537,13 +550,15 @@ bool CachedReadBufferFromRemoteFS::nextImpl()
|
|||||||
}
|
}
|
||||||
catch (Exception & e)
|
catch (Exception & e)
|
||||||
{
|
{
|
||||||
e.addMessage("Cache info: {}", getInfoForLog());
|
e.addMessage("Cache info: {}", nextimpl_step_log_info);
|
||||||
throw;
|
throw;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool CachedReadBufferFromRemoteFS::nextImplStep()
|
bool CachedReadBufferFromRemoteFS::nextImplStep()
|
||||||
{
|
{
|
||||||
|
last_caller_id = FileSegment::getCallerId();
|
||||||
|
|
||||||
if (IFileCache::shouldBypassCache())
|
if (IFileCache::shouldBypassCache())
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Using cache when not allowed");
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Using cache when not allowed");
|
||||||
|
|
||||||
@ -554,6 +569,9 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
|
|||||||
return false;
|
return false;
|
||||||
|
|
||||||
SCOPE_EXIT({
|
SCOPE_EXIT({
|
||||||
|
/// Save state of current file segment before it is completed.
|
||||||
|
nextimpl_step_log_info = getInfoForLog();
|
||||||
|
|
||||||
if (current_file_segment_it == file_segments_holder->file_segments.end())
|
if (current_file_segment_it == file_segments_holder->file_segments.end())
|
||||||
return;
|
return;
|
||||||
|
|
||||||
@ -623,6 +641,18 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
|
|||||||
|
|
||||||
if (!result)
|
if (!result)
|
||||||
{
|
{
|
||||||
|
#ifndef NDEBUG
|
||||||
|
if (auto * cache_file_reader = typeid_cast<ReadBufferFromFile *>(implementation_buffer.get()))
|
||||||
|
{
|
||||||
|
auto cache_file_size = cache_file_reader->size();
|
||||||
|
if (cache_file_size == 0)
|
||||||
|
throw Exception(
|
||||||
|
ErrorCodes::LOGICAL_ERROR,
|
||||||
|
"Attempt to read from an empty cache file: {} (just before actual read)",
|
||||||
|
cache_file_size);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
result = implementation_buffer->next();
|
result = implementation_buffer->next();
|
||||||
size = implementation_buffer->buffer().size();
|
size = implementation_buffer->buffer().size();
|
||||||
}
|
}
|
||||||
@ -635,7 +665,12 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
|
|||||||
|
|
||||||
if (file_segment->reserve(size))
|
if (file_segment->reserve(size))
|
||||||
{
|
{
|
||||||
file_segment->write(needed_to_predownload ? implementation_buffer->position() : implementation_buffer->buffer().begin(), size);
|
assert(file_segment->getDownloadOffset() == static_cast<size_t>(implementation_buffer->getPosition()));
|
||||||
|
|
||||||
|
file_segment->write(needed_to_predownload ? implementation_buffer->position() : implementation_buffer->buffer().begin(), size, file_offset_of_buffer_end);
|
||||||
|
|
||||||
|
assert(file_segment->getDownloadOffset() <= file_segment->range().right + 1);
|
||||||
|
assert(std::next(current_file_segment_it) == file_segments_holder->file_segments.end() || file_segment->getDownloadOffset() == implementation_buffer->getFileOffsetOfBufferEnd());
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
@ -665,10 +700,15 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// - If last file segment was read from remote fs, then we read up to segment->range().right, but
|
||||||
|
/// the requested right boundary could be segment->range().left < requested_right_boundary < segment->range().right.
|
||||||
|
/// Therefore need to resize to a smaller size. And resize must be done after write into cache.
|
||||||
|
/// - If last file segment was read from local fs, then we could read more than file_segemnt->range().right, so resize is also needed.
|
||||||
if (std::next(current_file_segment_it) == file_segments_holder->file_segments.end())
|
if (std::next(current_file_segment_it) == file_segments_holder->file_segments.end())
|
||||||
{
|
{
|
||||||
size_t remaining_size_to_read = std::min(current_read_range.right, read_until_position - 1) - file_offset_of_buffer_end + 1;
|
size_t remaining_size_to_read = std::min(current_read_range.right, read_until_position - 1) - file_offset_of_buffer_end + 1;
|
||||||
size = std::min(size, remaining_size_to_read);
|
size = std::min(size, remaining_size_to_read);
|
||||||
|
assert(implementation_buffer->buffer().size() >= nextimpl_working_buffer_offset + size);
|
||||||
implementation_buffer->buffer().resize(nextimpl_working_buffer_offset + size);
|
implementation_buffer->buffer().resize(nextimpl_working_buffer_offset + size);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -692,9 +732,16 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
|
|||||||
read_until_position, first_offset, file_segments_holder->toString());
|
read_until_position, first_offset, file_segments_holder->toString());
|
||||||
|
|
||||||
if (size == 0 && file_offset_of_buffer_end < read_until_position)
|
if (size == 0 && file_offset_of_buffer_end < read_until_position)
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
{
|
||||||
"Having zero bytes, but range is not finished: file offset: {}, reading until: {}",
|
std::optional<size_t> cache_file_size;
|
||||||
file_offset_of_buffer_end, read_until_position);
|
if (auto * cache_file_reader = assert_cast<ReadBufferFromFile *>(implementation_buffer.get()))
|
||||||
|
cache_file_size = cache_file_reader->size();
|
||||||
|
|
||||||
|
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||||
|
"Having zero bytes, but range is not finished: file offset: {}, reading until: {}, read type: {}, cache file size: {}",
|
||||||
|
file_offset_of_buffer_end, read_until_position, toString(read_type), cache_file_size ? std::to_string(*cache_file_size) : "None");
|
||||||
|
}
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -757,12 +804,26 @@ std::optional<size_t> CachedReadBufferFromRemoteFS::getLastNonDownloadedOffset()
|
|||||||
|
|
||||||
String CachedReadBufferFromRemoteFS::getInfoForLog()
|
String CachedReadBufferFromRemoteFS::getInfoForLog()
|
||||||
{
|
{
|
||||||
return fmt::format("Buffer path: {}, hash key: {}, file_offset_of_buffer_end: {}, internal buffer remaining read range: {}, file segment info: {}",
|
String implementation_buffer_read_range_str;
|
||||||
remote_fs_object_path, getHexUIntLowercase(cache_key), file_offset_of_buffer_end,
|
if (implementation_buffer)
|
||||||
(implementation_buffer ?
|
{
|
||||||
std::to_string(implementation_buffer->getRemainingReadRange().left) + '-' + (implementation_buffer->getRemainingReadRange().right ? std::to_string(*implementation_buffer->getRemainingReadRange().right) : "None")
|
auto read_range = implementation_buffer->getRemainingReadRange();
|
||||||
: "None"),
|
implementation_buffer_read_range_str = std::to_string(read_range.left) + '-' + (read_range.right ? std::to_string(*read_range.right) : "None");
|
||||||
(current_file_segment_it == file_segments_holder->file_segments.end() ? "None" : (*current_file_segment_it)->getInfoForLog()));
|
}
|
||||||
|
else
|
||||||
|
implementation_buffer_read_range_str = "None";
|
||||||
|
|
||||||
|
auto current_file_segment_info = current_file_segment_it == file_segments_holder->file_segments.end() ? "None" : (*current_file_segment_it)->getInfoForLog();
|
||||||
|
|
||||||
|
return fmt::format("Buffer path: {}, hash key: {}, file_offset_of_buffer_end: {}, internal buffer remaining read range: {}, "
|
||||||
|
"read_type: {}, last caller: {}, file segment info: {}",
|
||||||
|
remote_fs_object_path,
|
||||||
|
getHexUIntLowercase(cache_key),
|
||||||
|
file_offset_of_buffer_end,
|
||||||
|
implementation_buffer_read_range_str,
|
||||||
|
toString(read_type),
|
||||||
|
last_caller_id,
|
||||||
|
current_file_segment_info);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -98,7 +98,10 @@ private:
|
|||||||
}
|
}
|
||||||
__builtin_unreachable();
|
__builtin_unreachable();
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t first_offset = 0;
|
size_t first_offset = 0;
|
||||||
|
String nextimpl_step_log_info;
|
||||||
|
String last_caller_id;
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -68,16 +68,28 @@ std::future<IAsynchronousReader::Result> ThreadPoolRemoteFSReader::submit(Reques
|
|||||||
auto * remote_fs_fd = assert_cast<RemoteFSFileDescriptor *>(request.descriptor.get());
|
auto * remote_fs_fd = assert_cast<RemoteFSFileDescriptor *>(request.descriptor.get());
|
||||||
|
|
||||||
Stopwatch watch(CLOCK_MONOTONIC);
|
Stopwatch watch(CLOCK_MONOTONIC);
|
||||||
auto [bytes_read, offset] = remote_fs_fd->readInto(request.buf, request.size, request.offset, request.ignore);
|
|
||||||
|
ReadBufferFromRemoteFSGather::ReadResult result;
|
||||||
|
try
|
||||||
|
{
|
||||||
|
result = remote_fs_fd->readInto(request.buf, request.size, request.offset, request.ignore);
|
||||||
|
}
|
||||||
|
catch (...)
|
||||||
|
{
|
||||||
|
if (running_group)
|
||||||
|
CurrentThread::detachQuery();
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
|
||||||
watch.stop();
|
watch.stop();
|
||||||
|
|
||||||
ProfileEvents::increment(ProfileEvents::RemoteFSReadMicroseconds, watch.elapsedMicroseconds());
|
|
||||||
ProfileEvents::increment(ProfileEvents::RemoteFSReadBytes, bytes_read);
|
|
||||||
|
|
||||||
if (running_group)
|
if (running_group)
|
||||||
thread_status.detachQuery();
|
CurrentThread::detachQuery();
|
||||||
|
|
||||||
return Result{ .size = bytes_read, .offset = offset };
|
ProfileEvents::increment(ProfileEvents::RemoteFSReadMicroseconds, watch.elapsedMicroseconds());
|
||||||
|
ProfileEvents::increment(ProfileEvents::RemoteFSReadBytes, result.offset ? result.size - result.offset : result.size);
|
||||||
|
|
||||||
|
return Result{ .size = result.size, .offset = result.offset };
|
||||||
});
|
});
|
||||||
|
|
||||||
auto future = task->get_future();
|
auto future = task->get_future();
|
||||||
|
@ -77,6 +77,8 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
|
|||||||
format_settings.custom.row_between_delimiter = settings.format_custom_row_between_delimiter;
|
format_settings.custom.row_between_delimiter = settings.format_custom_row_between_delimiter;
|
||||||
format_settings.date_time_input_format = settings.date_time_input_format;
|
format_settings.date_time_input_format = settings.date_time_input_format;
|
||||||
format_settings.date_time_output_format = settings.date_time_output_format;
|
format_settings.date_time_output_format = settings.date_time_output_format;
|
||||||
|
format_settings.input_format_ipv4_default_on_conversion_error = settings.input_format_ipv4_default_on_conversion_error;
|
||||||
|
format_settings.input_format_ipv6_default_on_conversion_error = settings.input_format_ipv6_default_on_conversion_error;
|
||||||
format_settings.bool_true_representation = settings.bool_true_representation;
|
format_settings.bool_true_representation = settings.bool_true_representation;
|
||||||
format_settings.bool_false_representation = settings.bool_false_representation;
|
format_settings.bool_false_representation = settings.bool_false_representation;
|
||||||
format_settings.enable_streaming = settings.output_format_enable_streaming;
|
format_settings.enable_streaming = settings.output_format_enable_streaming;
|
||||||
|
@ -65,6 +65,9 @@ struct FormatSettings
|
|||||||
|
|
||||||
DateTimeOutputFormat date_time_output_format = DateTimeOutputFormat::Simple;
|
DateTimeOutputFormat date_time_output_format = DateTimeOutputFormat::Simple;
|
||||||
|
|
||||||
|
bool input_format_ipv4_default_on_conversion_error = false;
|
||||||
|
bool input_format_ipv6_default_on_conversion_error = false;
|
||||||
|
|
||||||
UInt64 input_allow_errors_num = 0;
|
UInt64 input_allow_errors_num = 0;
|
||||||
Float32 input_allow_errors_ratio = 0;
|
Float32 input_allow_errors_ratio = 0;
|
||||||
|
|
||||||
|
@ -271,13 +271,13 @@ struct JSONEachRowFieldsExtractor
|
|||||||
std::vector<String> column_names;
|
std::vector<String> column_names;
|
||||||
};
|
};
|
||||||
|
|
||||||
std::unordered_map<String, DataTypePtr> readRowAndGetNamesAndDataTypesForJSONEachRow(ReadBuffer & in, bool json_strings)
|
NamesAndTypesList readRowAndGetNamesAndDataTypesForJSONEachRow(ReadBuffer & in, bool json_strings)
|
||||||
{
|
{
|
||||||
JSONEachRowFieldsExtractor extractor;
|
JSONEachRowFieldsExtractor extractor;
|
||||||
auto data_types = determineColumnDataTypesFromJSONEachRowDataImpl<JSONEachRowFieldsExtractor, '{', '}'>(in, json_strings, extractor);
|
auto data_types = determineColumnDataTypesFromJSONEachRowDataImpl<JSONEachRowFieldsExtractor, '{', '}'>(in, json_strings, extractor);
|
||||||
std::unordered_map<String, DataTypePtr> result;
|
NamesAndTypesList result;
|
||||||
for (size_t i = 0; i != extractor.column_names.size(); ++i)
|
for (size_t i = 0; i != extractor.column_names.size(); ++i)
|
||||||
result[extractor.column_names[i]] = data_types[i];
|
result.emplace_back(extractor.column_names[i], data_types[i]);
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -20,9 +20,9 @@ std::pair<bool, size_t> fileSegmentationEngineJSONCompactEachRow(ReadBuffer & in
|
|||||||
DataTypePtr getDataTypeFromJSONField(const String & field);
|
DataTypePtr getDataTypeFromJSONField(const String & field);
|
||||||
|
|
||||||
/// Read row in JSONEachRow format and try to determine type for each field.
|
/// Read row in JSONEachRow format and try to determine type for each field.
|
||||||
/// Return map {column_name : type}.
|
/// Return list of names and types.
|
||||||
/// If cannot determine the type of some field, return nullptr for it.
|
/// If cannot determine the type of some field, return nullptr for it.
|
||||||
std::unordered_map<String, DataTypePtr> readRowAndGetNamesAndDataTypesForJSONEachRow(ReadBuffer & in, bool json_strings);
|
NamesAndTypesList readRowAndGetNamesAndDataTypesForJSONEachRow(ReadBuffer & in, bool json_strings);
|
||||||
|
|
||||||
/// Read row in JSONCompactEachRow format and try to determine type for each field.
|
/// Read row in JSONCompactEachRow format and try to determine type for each field.
|
||||||
/// If cannot determine the type of some field, return nullptr for it.
|
/// If cannot determine the type of some field, return nullptr for it.
|
||||||
|
@ -96,17 +96,6 @@ if (TARGET ch_contrib::rapidjson)
|
|||||||
target_link_libraries(clickhouse_functions PRIVATE ch_contrib::rapidjson)
|
target_link_libraries(clickhouse_functions PRIVATE ch_contrib::rapidjson)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# ClickHouse developers may use platform-dependent code under some macro (e.g. `#ifdef ENABLE_MULTITARGET`).
|
|
||||||
# If turned ON, this option defines such macro.
|
|
||||||
# See `src/Functions/TargetSpecific.h`
|
|
||||||
option(ENABLE_MULTITARGET_CODE "Enable platform-dependent code" ON)
|
|
||||||
|
|
||||||
if (ENABLE_MULTITARGET_CODE)
|
|
||||||
add_definitions(-DENABLE_MULTITARGET_CODE=1)
|
|
||||||
else()
|
|
||||||
add_definitions(-DENABLE_MULTITARGET_CODE=0)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
add_subdirectory(GatherUtils)
|
add_subdirectory(GatherUtils)
|
||||||
target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_gatherutils)
|
target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_gatherutils)
|
||||||
|
|
||||||
|
@ -179,6 +179,30 @@ struct ToStartOfMonthImpl
|
|||||||
using FactorTransform = ZeroTransform;
|
using FactorTransform = ZeroTransform;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct ToLastDayOfMonthImpl
|
||||||
|
{
|
||||||
|
static constexpr auto name = "toLastDayOfMonth";
|
||||||
|
|
||||||
|
static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone)
|
||||||
|
{
|
||||||
|
return time_zone.toLastDayNumOfMonth(time_zone.toDayNum(t));
|
||||||
|
}
|
||||||
|
static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone)
|
||||||
|
{
|
||||||
|
return time_zone.toLastDayNumOfMonth(time_zone.toDayNum(t));
|
||||||
|
}
|
||||||
|
static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone)
|
||||||
|
{
|
||||||
|
return time_zone.toLastDayNumOfMonth(ExtendedDayNum(d));
|
||||||
|
}
|
||||||
|
static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone)
|
||||||
|
{
|
||||||
|
return time_zone.toLastDayNumOfMonth(DayNum(d));
|
||||||
|
}
|
||||||
|
|
||||||
|
using FactorTransform = ZeroTransform;
|
||||||
|
};
|
||||||
|
|
||||||
struct ToStartOfQuarterImpl
|
struct ToStartOfQuarterImpl
|
||||||
{
|
{
|
||||||
static constexpr auto name = "toStartOfQuarter";
|
static constexpr auto name = "toStartOfQuarter";
|
||||||
|
@ -1,12 +1,12 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
#include <base/map.h>
|
#include <base/map.h>
|
||||||
|
|
||||||
|
#include <Common/TargetSpecific.h>
|
||||||
#include <Functions/FunctionHelpers.h>
|
#include <Functions/FunctionHelpers.h>
|
||||||
#include <Functions/GatherUtils/GatherUtils.h>
|
#include <Functions/GatherUtils/GatherUtils.h>
|
||||||
#include <Functions/GatherUtils/Sources.h>
|
#include <Functions/GatherUtils/Sources.h>
|
||||||
#include <Functions/IFunction.h>
|
#include <Functions/IFunction.h>
|
||||||
#include <Functions/PerformanceAdaptors.h>
|
#include <Functions/PerformanceAdaptors.h>
|
||||||
#include <Functions/TargetSpecific.h>
|
|
||||||
#include <DataTypes/DataTypeString.h>
|
#include <DataTypes/DataTypeString.h>
|
||||||
#include <DataTypes/DataTypesNumber.h>
|
#include <DataTypes/DataTypesNumber.h>
|
||||||
#include <DataTypes/getLeastSupertype.h>
|
#include <DataTypes/getLeastSupertype.h>
|
||||||
|
@ -2958,8 +2958,7 @@ private:
|
|||||||
/// For named tuples allow conversions for tuples with
|
/// For named tuples allow conversions for tuples with
|
||||||
/// different sets of elements. If element exists in @to_type
|
/// different sets of elements. If element exists in @to_type
|
||||||
/// and doesn't exist in @to_type it will be filled by default values.
|
/// and doesn't exist in @to_type it will be filled by default values.
|
||||||
if (from_type->haveExplicitNames() && from_type->serializeNames()
|
if (from_type->haveExplicitNames() && to_type->haveExplicitNames())
|
||||||
&& to_type->haveExplicitNames() && to_type->serializeNames())
|
|
||||||
{
|
{
|
||||||
const auto & from_names = from_type->getElementNames();
|
const auto & from_names = from_type->getElementNames();
|
||||||
std::unordered_map<String, size_t> from_positions;
|
std::unordered_map<String, size_t> from_positions;
|
||||||
|
@ -38,8 +38,8 @@
|
|||||||
#include <Columns/ColumnTuple.h>
|
#include <Columns/ColumnTuple.h>
|
||||||
#include <Functions/IFunction.h>
|
#include <Functions/IFunction.h>
|
||||||
#include <Functions/FunctionHelpers.h>
|
#include <Functions/FunctionHelpers.h>
|
||||||
#include <Functions/TargetSpecific.h>
|
|
||||||
#include <Functions/PerformanceAdaptors.h>
|
#include <Functions/PerformanceAdaptors.h>
|
||||||
|
#include <Common/TargetSpecific.h>
|
||||||
#include <base/range.h>
|
#include <base/range.h>
|
||||||
#include <base/bit_cast.h>
|
#include <base/bit_cast.h>
|
||||||
|
|
||||||
|
@ -1,9 +1,9 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
#include <Common/TargetSpecific.h>
|
||||||
#include <DataTypes/DataTypesNumber.h>
|
#include <DataTypes/DataTypesNumber.h>
|
||||||
#include <Columns/ColumnVector.h>
|
#include <Columns/ColumnVector.h>
|
||||||
#include <Functions/IFunction.h>
|
#include <Functions/IFunction.h>
|
||||||
#include <Functions/TargetSpecific.h>
|
|
||||||
#include <Functions/PerformanceAdaptors.h>
|
#include <Functions/PerformanceAdaptors.h>
|
||||||
#include <IO/WriteHelpers.h>
|
#include <IO/WriteHelpers.h>
|
||||||
|
|
||||||
|
@ -7,6 +7,8 @@
|
|||||||
#include <Core/AccurateComparison.h>
|
#include <Core/AccurateComparison.h>
|
||||||
#include <base/range.h>
|
#include <base/range.h>
|
||||||
#include "GatherUtils.h"
|
#include "GatherUtils.h"
|
||||||
|
#include "sliceEqualElements.h"
|
||||||
|
#include "sliceHasImplAnyAll.h"
|
||||||
|
|
||||||
|
|
||||||
namespace DB::ErrorCodes
|
namespace DB::ErrorCodes
|
||||||
@ -461,39 +463,19 @@ void NO_INLINE conditional(SourceA && src_a, SourceB && src_b, Sink && sink, con
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/// Methods to check if first array has elements from second array, overloaded for various combinations of types.
|
template <typename T>
|
||||||
template <
|
bool insliceEqualElements(const NumericArraySlice<T> & first [[maybe_unused]],
|
||||||
ArraySearchType search_type,
|
size_t first_ind [[maybe_unused]],
|
||||||
typename FirstSliceType,
|
size_t second_ind [[maybe_unused]])
|
||||||
typename SecondSliceType,
|
|
||||||
bool (*isEqual)(const FirstSliceType &, const SecondSliceType &, size_t, size_t)>
|
|
||||||
bool sliceHasImplAnyAll(const FirstSliceType & first, const SecondSliceType & second, const UInt8 * first_null_map, const UInt8 * second_null_map)
|
|
||||||
{
|
{
|
||||||
const bool has_first_null_map = first_null_map != nullptr;
|
if constexpr (is_decimal<T>)
|
||||||
const bool has_second_null_map = second_null_map != nullptr;
|
return accurate::equalsOp(first.data[first_ind].value, first.data[second_ind].value);
|
||||||
|
else
|
||||||
for (size_t i = 0; i < second.size; ++i)
|
return accurate::equalsOp(first.data[first_ind], first.data[second_ind]);
|
||||||
{
|
}
|
||||||
bool has = false;
|
inline ALWAYS_INLINE bool insliceEqualElements(const GenericArraySlice & first, size_t first_ind, size_t second_ind)
|
||||||
for (size_t j = 0; j < first.size && !has; ++j)
|
{
|
||||||
{
|
return first.elements->compareAt(first_ind + first.begin, second_ind + first.begin, *first.elements, -1) == 0;
|
||||||
const bool is_first_null = has_first_null_map && first_null_map[j];
|
|
||||||
const bool is_second_null = has_second_null_map && second_null_map[i];
|
|
||||||
|
|
||||||
if (is_first_null && is_second_null)
|
|
||||||
has = true;
|
|
||||||
|
|
||||||
if (!is_first_null && !is_second_null && isEqual(first, second, j, i))
|
|
||||||
has = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (has && search_type == ArraySearchType::Any)
|
|
||||||
return true;
|
|
||||||
|
|
||||||
if (!has && search_type == ArraySearchType::All)
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
return search_type == ArraySearchType::All;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
template <
|
template <
|
||||||
@ -620,55 +602,6 @@ bool sliceHasImpl(const FirstSliceType & first, const SecondSliceType & second,
|
|||||||
return sliceHasImplAnyAll<search_type, FirstSliceType, SecondSliceType, isEqual>(first, second, first_null_map, second_null_map);
|
return sliceHasImplAnyAll<search_type, FirstSliceType, SecondSliceType, isEqual>(first, second, first_null_map, second_null_map);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
template <typename T, typename U>
|
|
||||||
bool sliceEqualElements(const NumericArraySlice<T> & first [[maybe_unused]],
|
|
||||||
const NumericArraySlice<U> & second [[maybe_unused]],
|
|
||||||
size_t first_ind [[maybe_unused]],
|
|
||||||
size_t second_ind [[maybe_unused]])
|
|
||||||
{
|
|
||||||
/// TODO: Decimal scale
|
|
||||||
if constexpr (is_decimal<T> && is_decimal<U>)
|
|
||||||
return accurate::equalsOp(first.data[first_ind].value, second.data[second_ind].value);
|
|
||||||
else if constexpr (is_decimal<T> || is_decimal<U>)
|
|
||||||
return false;
|
|
||||||
else
|
|
||||||
return accurate::equalsOp(first.data[first_ind], second.data[second_ind]);
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename T>
|
|
||||||
bool sliceEqualElements(const NumericArraySlice<T> &, const GenericArraySlice &, size_t, size_t)
|
|
||||||
{
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename U>
|
|
||||||
bool sliceEqualElements(const GenericArraySlice &, const NumericArraySlice<U> &, size_t, size_t)
|
|
||||||
{
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline ALWAYS_INLINE bool sliceEqualElements(const GenericArraySlice & first, const GenericArraySlice & second, size_t first_ind, size_t second_ind)
|
|
||||||
{
|
|
||||||
return first.elements->compareAt(first_ind + first.begin, second_ind + second.begin, *second.elements, -1) == 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename T>
|
|
||||||
bool insliceEqualElements(const NumericArraySlice<T> & first [[maybe_unused]],
|
|
||||||
size_t first_ind [[maybe_unused]],
|
|
||||||
size_t second_ind [[maybe_unused]])
|
|
||||||
{
|
|
||||||
if constexpr (is_decimal<T>)
|
|
||||||
return accurate::equalsOp(first.data[first_ind].value, first.data[second_ind].value);
|
|
||||||
else
|
|
||||||
return accurate::equalsOp(first.data[first_ind], first.data[second_ind]);
|
|
||||||
}
|
|
||||||
|
|
||||||
inline ALWAYS_INLINE bool insliceEqualElements(const GenericArraySlice & first, size_t first_ind, size_t second_ind)
|
|
||||||
{
|
|
||||||
return first.elements->compareAt(first_ind + first.begin, second_ind + first.begin, *first.elements, -1) == 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <ArraySearchType search_type, typename T, typename U>
|
template <ArraySearchType search_type, typename T, typename U>
|
||||||
bool sliceHas(const NumericArraySlice<T> & first, const NumericArraySlice<U> & second)
|
bool sliceHas(const NumericArraySlice<T> & first, const NumericArraySlice<U> & second)
|
||||||
{
|
{
|
||||||
@ -854,4 +787,3 @@ void resizeConstantSize(ArraySource && array_source, ValueSource && value_source
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,4 +1,5 @@
|
|||||||
include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake")
|
include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake")
|
||||||
|
|
||||||
add_headers_and_sources(clickhouse_functions_gatherutils .)
|
add_headers_and_sources(clickhouse_functions_gatherutils .)
|
||||||
add_library(clickhouse_functions_gatherutils ${clickhouse_functions_gatherutils_sources} ${clickhouse_functions_gatherutils_headers})
|
add_library(clickhouse_functions_gatherutils ${clickhouse_functions_gatherutils_sources} ${clickhouse_functions_gatherutils_headers})
|
||||||
target_link_libraries(clickhouse_functions_gatherutils PRIVATE dbms)
|
target_link_libraries(clickhouse_functions_gatherutils PRIVATE dbms)
|
||||||
@ -14,3 +15,5 @@ endif()
|
|||||||
if (STRIP_DEBUG_SYMBOLS_FUNCTIONS)
|
if (STRIP_DEBUG_SYMBOLS_FUNCTIONS)
|
||||||
target_compile_options(clickhouse_functions_gatherutils PRIVATE "-g0")
|
target_compile_options(clickhouse_functions_gatherutils PRIVATE "-g0")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
set_target_properties(clickhouse_functions_gatherutils PROPERTIES COMPILE_FLAGS "${X86_INTRINSICS_FLAGS}")
|
||||||
|
Some files were not shown because too many files have changed in this diff.