diff --git a/.clang-tidy b/.clang-tidy index ca84a4834e5..6fd67876923 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -16,7 +16,6 @@ Checks: '-*, modernize-make-unique, modernize-raw-string-literal, modernize-redundant-void-arg, - modernize-replace-auto-ptr, modernize-replace-random-shuffle, modernize-use-bool-literals, modernize-use-nullptr, @@ -145,6 +144,7 @@ Checks: '-*, clang-analyzer-cplusplus.SelfAssignment, clang-analyzer-deadcode.DeadStores, clang-analyzer-cplusplus.Move, + clang-analyzer-optin.cplusplus.UninitializedObject, clang-analyzer-optin.cplusplus.VirtualCall, clang-analyzer-security.insecureAPI.UncheckedReturn, clang-analyzer-security.insecureAPI.bcmp, @@ -164,6 +164,8 @@ Checks: '-*, clang-analyzer-unix.cstring.NullArg, boost-use-to-string, + + alpha.security.cert.env.InvalidPtr, ' WarningsAsErrors: '*' diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 6540b60476f..2d8540b57ea 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,4 +1,4 @@ -Changelog category (leave one): +### Changelog category (leave one): - New Feature - Improvement - Bug Fix (user-visible misbehaviour in official stable or prestable release) @@ -9,7 +9,7 @@ Changelog category (leave one): - Not for changelog (changelog entry is not required) -Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md): +### Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md): ... diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index 75f8a63368d..417284f14d5 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -341,10 +341,15 @@ jobs: steps: - name: Set envs run: | + DEPENDENCIES=$(cat << 'EOF' | jq '. 
| length'
+          ${{ toJSON(needs) }}
+          EOF
+          )
+          echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV"
           cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/report_check
-          REPORTS_PATH=${{runner.temp}}/reports_dir
           CHECK_NAME=ClickHouse build check (actions)
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          TEMP_PATH=${{runner.temp}}/report_check
           EOF
      - name: Download json reports
        uses: actions/download-artifact@v2
@@ -360,7 +365,7 @@ jobs:
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 build_report_check.py "$CHECK_NAME"
+          python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES"
      - name: Cleanup
        if: always()
        run: |
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index cfa95b84ee5..eab7ce36eb7 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -947,6 +947,34 @@ jobs:
          docker rm -f "$(docker ps -a -q)" ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
+##################################### Docker images #######################################
+############################################################################################
+  DockerServerImages:
+    needs:
+      - BuilderDebRelease
+      - BuilderDebAarch64
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0 # otherwise we will have no version info
+      - name: Check docker clickhouse/clickhouse-server building
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 docker_server.py --release-type head
+          python3 docker_server.py --release-type head --no-ubuntu \
+            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill "$(docker ps -q)" ||:
+          docker rm -f "$(docker ps -a -q)" ||:
+          sudo rm -fr "$TEMP_PATH"
+############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
  BuilderReport:
@@ -964,10 +992,15 @@ jobs:
    steps:
      - name: Set envs
        run: |
+          DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
+          ${{ toJSON(needs) }}
+          EOF
+          )
+          echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV"
           cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/report_check
-          REPORTS_PATH=${{runner.temp}}/reports_dir
           CHECK_NAME=ClickHouse build check (actions)
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          TEMP_PATH=${{runner.temp}}/report_check
           EOF
      - name: Download json reports
        uses: actions/download-artifact@v2
@@ -983,7 +1017,7 @@ jobs:
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 build_report_check.py "$CHECK_NAME"
+          python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES"
      - name: Cleanup
        if: always()
        run: |
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index 5b47f94a324..1e70213adf5 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -72,3 +72,52 @@ jobs:
        with:
          name: changed_images
          path: ${{ runner.temp }}/changed_images.json
+  BuilderCoverity:
+    needs: DockerHubPush
+    runs-on: [self-hosted, builder]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/build_check
+          IMAGES_PATH=${{runner.temp}}/images_path
+          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
+          CACHES_PATH=${{runner.temp}}/../ccaches
+          CHECK_NAME=ClickHouse build check (actions)
+          BUILD_NAME=coverity
+          EOF
+      - name: Download changed images
+        uses: actions/download-artifact@v2
+        with:
+          name: changed_images
+          path: ${{ env.IMAGES_PATH }}
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        id: coverity-checkout
+        uses: actions/checkout@v2
+        with:
+          submodules: 'true'
+          fetch-depth: 0 # otherwise we will have no info about contributors
+      - name: Build
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME" "${{ secrets.COV_TOKEN }}"
+      - name: Upload Coverity Analysis
+        if: ${{ success() || failure() }}
+        run: |
+          curl --form token='${{ secrets.COV_TOKEN }}' \
+            --form email='${{ secrets.ROBOT_CLICKHOUSE_EMAIL }}' \
+            --form file="@$TEMP_PATH/$BUILD_NAME/coverity-scan.tgz" \
+            --form version="${GITHUB_REF#refs/heads/}-${GITHUB_SHA::6}" \
+            --form description="Nightly Scan: $(date +'%Y-%m-%dT%H:%M:%S')" \
+            https://scan.coverity.com/builds?project=ClickHouse%2FClickHouse
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill "$(docker ps -q)" ||:
+          docker rm -f "$(docker ps -a -q)" ||:
+          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index 87a31b9683c..8942cca391e 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -4,7 +4,7 @@ env:
   # Force the stdout and stderr streams to be unbuffered
   PYTHONUNBUFFERED: 1

-on: # yamllint disable-line rule:truthy
+on:  # yamllint disable-line rule:truthy
 pull_request:
   types:
     - synchronize
@@ -998,6 +998,34 @@ jobs:
          docker rm -f "$(docker ps -a -q)" ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
+##################################### Docker images #######################################
+############################################################################################
+  DockerServerImages:
+    needs:
+      - BuilderDebRelease
+      - BuilderDebAarch64
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0 # otherwise we will have no version info
+      - name: Check docker clickhouse/clickhouse-server building
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 docker_server.py --release-type head --no-push
+          python3 docker_server.py --release-type head --no-push --no-ubuntu \
+            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill "$(docker ps -q)" ||:
+          docker rm -f "$(docker ps -a -q)" ||:
+          sudo rm -fr "$TEMP_PATH"
+############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
  BuilderReport:
@@ -1016,10 +1044,15 @@ jobs:
    steps:
      - name: Set envs
        run: |
+          DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
+          ${{ toJSON(needs) }}
+          EOF
+          )
+          echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV"
           cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/report_check
-          REPORTS_PATH=${{runner.temp}}/reports_dir
           CHECK_NAME=ClickHouse build check (actions)
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          TEMP_PATH=${{runner.temp}}/report_check
           EOF
      - name: Download json reports
        uses: actions/download-artifact@v2
@@ -1035,7 +1069,7 @@ jobs:
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 build_report_check.py "$CHECK_NAME"
+          python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES"
      - name: Cleanup
        if: always()
        run: |
@@ -3138,6 +3172,7 @@ jobs:
    needs:
      - StyleCheck
      - DockerHubPush
+      - DockerServerImages
      - CheckLabels
      - BuilderReport
      - FastTest
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index bd62e64409f..29e3d0c4358 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -36,3 +36,28 @@ jobs:
          overwrite: true
          tag: ${{ github.ref }}
          file_glob: true
+  ############################################################################################
+  ##################################### Docker images #######################################
+  ############################################################################################
+  DockerServerImages:
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0 # otherwise we will have no version info
+      - name: Check docker clickhouse/clickhouse-server building
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 docker_server.py --release-type auto
+          python3 docker_server.py --release-type auto --no-ubuntu \
+            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill "$(docker ps -q)" ||:
+          docker rm -f "$(docker ps -a -q)" ||:
+          sudo rm -fr "$TEMP_PATH"
diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml
index d916699acc2..b2af465142b 100644
--- a/.github/workflows/release_branches.yml
+++ b/.github/workflows/release_branches.yml
@@ -436,10 +436,15 @@ jobs:
    steps:
      - name: Set envs
        run: |
+          DEPENDENCIES=$(cat << 'EOF' | jq '. | length'
+          ${{ toJSON(needs) }}
+          EOF
+          )
+          echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV"
           cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/report_check
-          REPORTS_PATH=${{runner.temp}}/reports_dir
           CHECK_NAME=ClickHouse build check (actions)
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          TEMP_PATH=${{runner.temp}}/report_check
           EOF
      - name: Download json reports
        uses: actions/download-artifact@v2
@@ -455,7 +461,7 @@ jobs:
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 build_report_check.py "$CHECK_NAME"
+          python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES"
      - name: Cleanup
        if: always()
        run: |
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5157f0f9903..d893ba773cc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -261,8 +261,8 @@ endif ()
 # Add a section with the hash of the compiled machine code for integrity checks.
 # Only for official builds, because adding a section can be time consuming (rewrite of several GB).
 # And cross compiled binaries are not supported (since you cannot execute clickhouse hash-binary)
-if (OBJCOPY_PATH AND CLICKHOUSE_OFFICIAL_BUILD AND (NOT CMAKE_TOOLCHAIN_FILE))
-    set (USE_BINARY_HASH 1)
+if (OBJCOPY_PATH AND CLICKHOUSE_OFFICIAL_BUILD AND (NOT CMAKE_TOOLCHAIN_FILE OR CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64.cmake$"))
+    set (USE_BINARY_HASH 1 CACHE STRING "Calculate binary hash and store it in the separate section")
 endif ()

 # Allows to build stripped binary in a separate directory
@@ -294,14 +294,19 @@ include(cmake/cpu_features.cmake)
 # Enable it explicitly.
 set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables")

-# Reproducible builds
-# If turned `ON`, remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE().
-option(ENABLE_BUILD_PATH_MAPPING "Enable remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE(). It's to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ON)
+# Reproducible builds.
+if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
+    set (ENABLE_BUILD_PATH_MAPPING_DEFAULT OFF)
+else ()
+    set (ENABLE_BUILD_PATH_MAPPING_DEFAULT ON)
+endif ()
+
+option (ENABLE_BUILD_PATH_MAPPING "Enable remapping of file source paths in debug info, predefined preprocessor macros, and __builtin_FILE(). It's used to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ${ENABLE_BUILD_PATH_MAPPING_DEFAULT})

 if (ENABLE_BUILD_PATH_MAPPING)
     set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
     set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
-endif()
+endif ()

 if (${CMAKE_VERSION} VERSION_LESS "3.12.4") # CMake < 3.12 doesn't support setting 20 as a C++ standard version.
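The `-ffile-prefix-map=${CMAKE_SOURCE_DIR}=.` flag above is what performs the actual remapping. A quick way to observe its effect (a hedged sketch, not part of the patch; it assumes clang and binutils are installed, and the file names are invented):

```bash
# Compile the same translation unit with and without the mapping and compare
# the compilation directory (DW_AT_comp_dir) recorded in the DWARF debug info.
mkdir -p /tmp/mapdemo && cd /tmp/mapdemo
printf 'int main() { return 0; }\n' > demo.c
clang -g -c demo.c -o plain.o
clang -g -ffile-prefix-map="$PWD"=. -c demo.c -o mapped.o
readelf --debug-dump=info plain.o  | grep -m1 DW_AT_comp_dir   # absolute build path
readelf --debug-dump=info mapped.o | grep -m1 DW_AT_comp_dir   # remapped to "."
```

With the path remapped, builds made from checkouts in different directories produce identical debug sections; the patch turns this off by default only for Debug builds, presumably because absolute paths are more convenient when debugging.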
diff --git a/base/base/CMakeLists.txt b/base/base/CMakeLists.txt
index 8a1ca6064cb..3cfd2f6906a 100644
--- a/base/base/CMakeLists.txt
+++ b/base/base/CMakeLists.txt
@@ -2,6 +2,7 @@ set (SRCS
     argsToConfig.cpp
     coverage.cpp
     demangle.cpp
+    getAvailableMemoryAmount.cpp
     getFQDNOrHostName.cpp
     getMemoryAmount.cpp
     getPageSize.cpp
diff --git a/base/base/getAvailableMemoryAmount.cpp b/base/base/getAvailableMemoryAmount.cpp
new file mode 100644
index 00000000000..d2f794e8952
--- /dev/null
+++ b/base/base/getAvailableMemoryAmount.cpp
@@ -0,0 +1,44 @@
+#include <stdexcept>
+#include <fstream>
+#include <base/getAvailableMemoryAmount.h>
+#include <base/getPageSize.h>
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/param.h>
+#if defined(BSD)
+#include <sys/sysctl.h>
+#include <sys/vmmeter.h>
+#endif
+
+
+uint64_t getAvailableMemoryAmountOrZero()
+{
+#if defined(_SC_AVPHYS_PAGES) // linux
+    return getPageSize() * sysconf(_SC_AVPHYS_PAGES);
+#elif defined(__FreeBSD__)
+    struct vmtotal vmt;
+    size_t vmt_size = sizeof(vmt);
+    if (sysctlbyname("vm.vmtotal", &vmt, &vmt_size, nullptr, 0) == 0)
+        return getPageSize() * vmt.t_avm;
+    else
+        return 0;
+#else // darwin
+    unsigned int usermem;
+    size_t len = sizeof(usermem);
+    static int mib[2] = { CTL_HW, HW_USERMEM };
+    if (sysctl(mib, 2, &usermem, &len, nullptr, 0) == 0 && len == sizeof(usermem))
+        return usermem;
+    else
+        return 0;
+#endif
+}
+
+
+uint64_t getAvailableMemoryAmount()
+{
+    auto res = getAvailableMemoryAmountOrZero();
+    if (!res)
+        throw std::runtime_error("Cannot determine available memory amount");
+    return res;
+}
diff --git a/base/base/getAvailableMemoryAmount.h b/base/base/getAvailableMemoryAmount.h
new file mode 100644
index 00000000000..44612945016
--- /dev/null
+++ b/base/base/getAvailableMemoryAmount.h
@@ -0,0 +1,12 @@
+#pragma once
+
+#include <stdint.h>
+
+/** Returns the amount of currently available physical memory (RAM), in bytes.
+  * Returns 0 on an unsupported platform or if the amount cannot be determined.
+  */
+uint64_t getAvailableMemoryAmountOrZero();
+
+/** Throws an exception if it cannot determine the amount of available memory.
+ */ +uint64_t getAvailableMemoryAmount(); diff --git a/base/loggers/Loggers.cpp b/base/loggers/Loggers.cpp index 7c627ad2272..512e44f79c7 100644 --- a/base/loggers/Loggers.cpp +++ b/base/loggers/Loggers.cpp @@ -197,7 +197,6 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log Poco::AutoPtr pf = new OwnPatternFormatter(color_enabled); Poco::AutoPtr log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel); - logger.warning("Logging " + console_log_level_string + " to console"); log->setLevel(console_log_level); split->addChannel(log, "console"); } diff --git a/contrib/krb5-cmake/CMakeLists.txt b/contrib/krb5-cmake/CMakeLists.txt index 685e8737ef0..214d23bc2a9 100644 --- a/contrib/krb5-cmake/CMakeLists.txt +++ b/contrib/krb5-cmake/CMakeLists.txt @@ -1,4 +1,4 @@ -set (ENABLE_KRB5_DEFAULT 1) +set (ENABLE_KRB5_DEFAULT ${ENABLE_LIBRARIES}) if (NOT CMAKE_SYSTEM_NAME MATCHES "Linux" AND NOT (CMAKE_SYSTEM_NAME MATCHES "Darwin" AND NOT CMAKE_CROSSCOMPILING)) message (WARNING "krb5 disabled in non-Linux and non-native-Darwin environments") set (ENABLE_KRB5_DEFAULT 0) @@ -16,6 +16,7 @@ if(NOT AWK_PROGRAM) endif() set(KRB5_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/krb5/src") +set(KRB5_ET_BIN_DIR "${CMAKE_CURRENT_BINARY_DIR}/include_private") set(ALL_SRCS "${KRB5_SOURCE_DIR}/util/et/et_name.c" @@ -90,7 +91,6 @@ set(ALL_SRCS "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/get_tkt_flags.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_allowable_enctypes.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealiov.c" - "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/canon_name.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_cred.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_sec_context.c" @@ -143,11 +143,12 @@ set(ALL_SRCS "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer_set.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_set.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_token.c" - "${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_err_generic.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_major_status.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_seqstate.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_errmap.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_buffer.c" + "${KRB5_ET_BIN_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c" + "${KRB5_ET_BIN_DIR}/lib/gssapi/generic/gssapi_err_generic.c" "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/spnego_mech.c" "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_util.c" @@ -256,8 +257,8 @@ set(ALL_SRCS "${KRB5_SOURCE_DIR}/util/profile/prof_parse.c" "${KRB5_SOURCE_DIR}/util/profile/prof_get.c" "${KRB5_SOURCE_DIR}/util/profile/prof_set.c" - "${KRB5_SOURCE_DIR}/util/profile/prof_err.c" "${KRB5_SOURCE_DIR}/util/profile/prof_init.c" + "${KRB5_ET_BIN_DIR}/util/profile/prof_err.c" "${KRB5_SOURCE_DIR}/lib/krb5/krb/fwd_tgt.c" "${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_creds.c" "${KRB5_SOURCE_DIR}/lib/krb5/krb/fast.c" @@ -450,13 +451,12 @@ set(ALL_SRCS - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.c" - + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/k5e1_err.c" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kdb5_err.c" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/asn1_err.c" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb5_err.c" + 
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb524_err.c" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kv5m_err.c" "${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_base.c" @@ -473,7 +473,7 @@ set(ALL_SRCS ) add_custom_command( - OUTPUT "${KRB5_SOURCE_DIR}/util/et/compile_et" + OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/compile_et" COMMAND /bin/sh ./config_script ./compile_et.sh @@ -481,50 +481,17 @@ add_custom_command( ${AWK_PROGRAM} sed > - compile_et + ${CMAKE_CURRENT_BINARY_DIR}/compile_et DEPENDS "${KRB5_SOURCE_DIR}/util/et/compile_et.sh" "${KRB5_SOURCE_DIR}/util/et/config_script" WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/util/et" ) -file(GLOB_RECURSE ET_FILES - "${KRB5_SOURCE_DIR}/*.et" -) - -function(preprocess_et out_var) - set(result) - foreach(in_f ${ARGN}) - string(REPLACE - .et - .c - F_C - ${in_f} - ) - string(REPLACE - .et - .h - F_H - ${in_f} - ) - - get_filename_component(ET_PATH ${in_f} DIRECTORY) - - add_custom_command(OUTPUT ${F_C} ${F_H} - COMMAND perl "${KRB5_SOURCE_DIR}/util/et/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${in_f} - DEPENDS ${in_f} "${KRB5_SOURCE_DIR}/util/et/compile_et" - WORKING_DIRECTORY ${ET_PATH} - VERBATIM - ) - list(APPEND result ${F_C}) - endforeach() - set(${out_var} "${result}" PARENT_SCOPE) -endfunction() - add_custom_command( - OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h" + OUTPUT "${KRB5_ET_BIN_DIR}/error_map.h" COMMAND perl -I../../../util ../../../util/gen-map.pl - -oerror_map.h + -o${KRB5_ET_BIN_DIR}/error_map.h NAME=gsserrmap KEY=OM_uint32 VALUE=char* @@ -536,22 +503,21 @@ add_custom_command( add_custom_target( ERROR_MAP_H - DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h" + DEPENDS "${KRB5_ET_BIN_DIR}/error_map.h" VERBATIM ) add_custom_command( - OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h" - COMMAND perl -w -I../../../util ../../../util/gen.pl bimap errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp + OUTPUT "${KRB5_ET_BIN_DIR}/errmap.h" + COMMAND perl -w -I../../../util ../../../util/gen.pl bimap ${KRB5_ET_BIN_DIR}/errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/lib/gssapi/generic" ) add_custom_target( ERRMAP_H - DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h" + DEPENDS "${KRB5_ET_BIN_DIR}/errmap.h" VERBATIM ) - add_custom_target( KRB_5_H DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h" @@ -567,7 +533,40 @@ add_dependencies( KRB_5_H ) -preprocess_et(processed_et_files ${ET_FILES}) +# +# Generate error tables +# +function(preprocess_et et_path) + string(REPLACE .et .c F_C ${et_path}) + string(REPLACE .et .h F_H ${et_path}) + get_filename_component(et_dir ${et_path} DIRECTORY) + get_filename_component(et_name ${et_path} NAME_WLE) + + add_custom_command(OUTPUT ${F_C} ${F_H} ${KRB5_ET_BIN_DIR}/${et_name}.h + COMMAND perl "${CMAKE_CURRENT_BINARY_DIR}/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${et_path} + # for #include w/o path (via -iquote) + COMMAND ${CMAKE_COMMAND} -E create_symlink ${F_H} ${KRB5_ET_BIN_DIR}/${et_name}.h + DEPENDS ${et_path} "${CMAKE_CURRENT_BINARY_DIR}/compile_et" + WORKING_DIRECTORY ${et_dir} + VERBATIM + ) +endfunction() + +function(generate_error_tables) + file(GLOB_RECURSE ET_FILES "${KRB5_SOURCE_DIR}/*.et") + foreach(et_path ${ET_FILES}) + string(REPLACE ${KRB5_SOURCE_DIR} ${KRB5_ET_BIN_DIR} et_bin_path ${et_path}) + 
string(REPLACE / _ et_target_name ${et_path}) + get_filename_component(et_bin_dir ${et_bin_path} DIRECTORY) + add_custom_command(OUTPUT ${et_bin_path} + COMMAND ${CMAKE_COMMAND} -E make_directory ${et_bin_dir} + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${et_path} ${et_bin_path} + VERBATIM + ) + preprocess_et(${et_bin_path}) + endforeach() +endfunction() +generate_error_tables() if(CMAKE_SYSTEM_NAME MATCHES "Darwin") add_custom_command( @@ -634,12 +633,12 @@ file(MAKE_DIRECTORY SET(KRBHDEP "${KRB5_SOURCE_DIR}/include/krb5/krb5.hin" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb5_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/k5e1_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kdb5_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kv5m_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb524_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/asn1_err.h" ) # cmake < 3.18 does not have 'cat' command @@ -656,6 +655,11 @@ target_include_directories(_krb5 SYSTEM BEFORE PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/include" ) +target_compile_options(_krb5 PRIVATE + # For '#include "file.h"' + -iquote "${CMAKE_CURRENT_BINARY_DIR}/include_private" +) + target_include_directories(_krb5 PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/include_private" # For autoconf.h and other generated headers. ${KRB5_SOURCE_DIR} diff --git a/contrib/llvm-cmake/CMakeLists.txt b/contrib/llvm-cmake/CMakeLists.txt index 6ff07f0e016..87c8a65510f 100644 --- a/contrib/llvm-cmake/CMakeLists.txt +++ b/contrib/llvm-cmake/CMakeLists.txt @@ -1,12 +1,9 @@ -# During cross-compilation in our CI we have to use llvm-tblgen and other building tools -# tools to be build for host architecture and everything else for target architecture (e.g. AArch64) -# Possible workaround is to use llvm-tblgen from some package... 
-# But lets just enable LLVM for native builds
-if (CMAKE_CROSSCOMPILING OR SANITIZE STREQUAL "undefined")
-    set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
+if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
+    set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
 else()
-    set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
+    set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
 endif()
+
 option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})

 if (NOT ENABLE_EMBEDDED_COMPILER)
diff --git a/contrib/poco b/contrib/poco
index 520a90e02e3..008b1646947 160000
--- a/contrib/poco
+++ b/contrib/poco
@@ -1 +1 @@
-Subproject commit 520a90e02e3e5cb90afeae1846d161dbc508a6f1
+Subproject commit 008b16469471d55b176db181756c94e3f14dd2dc
diff --git a/contrib/unixodbc b/contrib/unixodbc
index b0ad30f7f62..a2cd5395e8c 160000
--- a/contrib/unixodbc
+++ b/contrib/unixodbc
@@ -1 +1 @@
-Subproject commit b0ad30f7f6289c12b76f04bfb9d466374bb32168
+Subproject commit a2cd5395e8c7f7390025ec93af5bfebef3fb5fcd
diff --git a/debian/clickhouse-server.service b/debian/clickhouse-server.service
index a9400b24270..028b4fbf8ab 100644
--- a/debian/clickhouse-server.service
+++ b/debian/clickhouse-server.service
@@ -20,7 +20,7 @@ ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml
 EnvironmentFile=-/etc/default/clickhouse
 LimitCORE=infinity
 LimitNOFILE=500000
-CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE
+CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE

 [Install]
 # ClickHouse should not start from the rescue shell (rescue.target).
diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile
new file mode 100644
index 00000000000..068377e8f8c
--- /dev/null
+++ b/docker/keeper/Dockerfile
@@ -0,0 +1,80 @@
+FROM ubuntu:20.04 AS glibc-donor
+
+ARG TARGETARCH
+RUN arch=${TARGETARCH:-amd64} \
+    && case $arch in \
+        amd64) rarch=x86_64 ;; \
+        arm64) rarch=aarch64 ;; \
+    esac \
+    && ln -s "${rarch}-linux-gnu" /lib/linux-gnu
+
+
+FROM alpine
+
+ENV LANG=en_US.UTF-8 \
+    LANGUAGE=en_US:en \
+    LC_ALL=en_US.UTF-8 \
+    TZ=UTC \
+    CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml
+
+COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/
+COPY --from=glibc-donor /etc/nsswitch.conf /etc/
+COPY entrypoint.sh /entrypoint.sh
+
+ARG TARGETARCH
+RUN arch=${TARGETARCH:-amd64} \
+    && case $arch in \
+        amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \
+        arm64) ln -sf /lib/ld-2.31.so /lib/ld-linux-aarch64.so.1 ;; \
+    esac
+
+ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
+ARG VERSION="22.4.1.917"
+ARG PACKAGES="clickhouse-keeper"
+
+# user/group precreated explicitly with fixed uid/gid on purpose.
+# It is especially important for rootless containers: in that case the entrypoint
+# can't do chown, and the owners of mounted volumes should be configured externally.
+# We do that in advance, at the beginning of the Dockerfile, before any packages are
+# installed, to prevent some unrelated software from picking those uid / gid.
+# The same uid / gid (101) is used both for alpine and ubuntu.
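+# Illustration only (an editorial sketch, not part of the image; the host paths
+# are invented): with uid/gid fixed at build time, volume ownership for a
+# rootless or --user run can be prepared entirely on the host, e.g.:
+#   mkdir -p ./keeper-data && sudo chown -R 101:101 ./keeper-data
+#   docker run --user 101:101 -v "$PWD/keeper-data:/var/lib/clickhouse" \
+#       clickhouse/clickhouse-keeper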
+ + +ARG TARGETARCH +RUN arch=${TARGETARCH:-amd64} \ + && for package in ${PACKAGES}; do \ + { \ + { echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \ + && wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" -O "/tmp/${package}-${VERSION}-${arch}.tgz" \ + && tar xvzf "/tmp/${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / ; \ + } || \ + { echo "Fallback to ${REPOSITORY}/${package}-${VERSION}.tgz" \ + && wget -c -q "${REPOSITORY}/${package}-${VERSION}.tgz" -O "/tmp/${package}-${VERSION}.tgz" \ + && tar xvzf "/tmp/${package}-${VERSION}.tgz" --strip-components=2 -C / ; \ + } ; \ + } || exit 1 \ + ; done \ + && rm /tmp/*.tgz /install -r \ + && addgroup -S -g 101 clickhouse \ + && adduser -S -h /var/lib/clickhouse -s /bin/bash -G clickhouse -g "ClickHouse keeper" -u 101 clickhouse \ + && mkdir -p /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper \ + && chown clickhouse:clickhouse /var/lib/clickhouse \ + && chown root:clickhouse /var/log/clickhouse-keeper \ + && chmod +x /entrypoint.sh \ + && apk add --no-cache su-exec bash tzdata \ + && cp /usr/share/zoneinfo/UTC /etc/localtime \ + && echo "UTC" > /etc/timezone \ + && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper + + +EXPOSE 2181 10181 44444 + +VOLUME /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/keeper/Dockerfile.alpine b/docker/keeper/Dockerfile.alpine new file mode 120000 index 00000000000..1d1fe94df49 --- /dev/null +++ b/docker/keeper/Dockerfile.alpine @@ -0,0 +1 @@ +Dockerfile \ No newline at end of file diff --git a/docker/keeper/entrypoint.sh b/docker/keeper/entrypoint.sh new file mode 100644 index 00000000000..3aacf655c28 --- /dev/null +++ b/docker/keeper/entrypoint.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +set +x +set -eo pipefail +shopt -s nullglob + +DO_CHOWN=1 +if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then + DO_CHOWN=0 +fi + +CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" +CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" + +# support --user +if [ "$(id -u)" = "0" ]; then + USER=$CLICKHOUSE_UID + GROUP=$CLICKHOUSE_GID + if command -v gosu &> /dev/null; then + gosu="gosu $USER:$GROUP" + elif command -v su-exec &> /dev/null; then + gosu="su-exec $USER:$GROUP" + else + echo "No gosu/su-exec detected!" + exit 1 + fi +else + USER="$(id -u)" + GROUP="$(id -g)" + gosu="" + DO_CHOWN=0 +fi + +KEEPER_CONFIG="${KEEPER_CONFIG:-/etc/clickhouse-keeper/config.yaml}" + +if [ -f "$KEEPER_CONFIG" ] && ! $gosu test -f "$KEEPER_CONFIG" -a -r "$KEEPER_CONFIG"; then + echo "Configuration file '$KEEPER_CONFIG' isn't readable by user with id '$USER'" + exit 1 +fi + +DATA_DIR="${CLICKHOUSE_DATA_DIR:-/var/lib/clickhouse}" +LOG_DIR="${LOG_DIR:-/var/log/clickhouse-keeper}" +LOG_PATH="${LOG_DIR}/clickhouse-keeper.log" +ERROR_LOG_PATH="${LOG_DIR}/clickhouse-keeper.err.log" +COORDINATION_LOG_DIR="${DATA_DIR}/coordination/log" +COORDINATION_SNAPSHOT_DIR="${DATA_DIR}/coordination/snapshots" +CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0} + +for dir in "$DATA_DIR" \ + "$LOG_DIR" \ + "$TMP_DIR" \ + "$COORDINATION_LOG_DIR" \ + "$COORDINATION_SNAPSHOT_DIR" +do + # check if variable not empty + [ -z "$dir" ] && continue + # ensure directories exist + if ! 
mkdir -p "$dir"; then
+        echo "Couldn't create necessary directory: $dir"
+        exit 1
+    fi
+
+    if [ "$DO_CHOWN" = "1" ]; then
+        # ensure proper directory permissions,
+        # but skip it if the directory already has the proper permissions, because recursive chown may be slow
+        if [ "$(stat -c %u "$dir")" != "$USER" ] || [ "$(stat -c %g "$dir")" != "$GROUP" ]; then
+            chown -R "$USER:$GROUP" "$dir"
+        fi
+    elif ! $gosu test -d "$dir" -a -w "$dir" -a -r "$dir"; then
+        echo "Necessary directory '$dir' isn't accessible by user with id '$USER'"
+        exit 1
+    fi
+done
+
+# if no args are passed to `docker run`, or the first argument starts with `--`, then the user is passing clickhouse-keeper arguments
+if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
+    # Watchdog is launched by default, but does not send SIGINT to the main process,
+    # so the container can't be stopped with Ctrl+C
+    export CLICKHOUSE_WATCHDOG_ENABLE
+
+    cd /var/lib/clickhouse
+
+    # There is a config file. It has already been tested with gosu above (whether it is readable by the keeper user)
+    if [ -f "$KEEPER_CONFIG" ]; then
+        exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
+    fi
+
+    # There is no config file. Will use the embedded one
+    exec $gosu /usr/bin/clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
+fi
+
+# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image
+exec "$@"
diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh
index 31416e1a0ee..269d3eb52c6 100755
--- a/docker/packager/binary/build.sh
+++ b/docker/packager/binary/build.sh
@@ -25,13 +25,22 @@ read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}"
 env
 cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" ..

+if [ "coverity" == "$COMBINED_OUTPUT" ]
+then
+    mkdir -p /opt/cov-analysis
+    wget --post-data "token=$COV_TOKEN&project=ClickHouse%2FClickHouse" -qO- https://scan.coverity.com/download/linux64 | tar xz -C /opt/cov-analysis --strip-components 1
+    export PATH=$PATH:/opt/cov-analysis/bin
+    cov-configure --config ./coverity.config --template --comptype clangcc --compiler "$CC"
+    SCAN_WRAPPER="cov-build --config ./coverity.config --dir cov-int"
+fi
+
 cache_status
 # clear cache stats
 ccache --zero-stats ||:

 # No quotes because I want it to expand to nothing if empty.
-# shellcheck disable=SC2086
-ninja $NINJA_FLAGS clickhouse-bundle
+# shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty.
+$SCAN_WRAPPER ninja $NINJA_FLAGS clickhouse-bundle

 cache_status

@@ -91,6 +99,12 @@ then
     mv "$COMBINED_OUTPUT.tgz" /output
 fi

+if [ "coverity" == "$COMBINED_OUTPUT" ]
+then
+    tar -cv -I pigz -f "coverity-scan.tgz" cov-int
+    mv "coverity-scan.tgz" /output
+fi
+
 # Also build fuzzers if any sanitizer specified
 # if [ -n "$SANITIZER" ]
 # then
diff --git a/docker/packager/packager b/docker/packager/packager
index f82d402d613..1a79b497fa2 100755
--- a/docker/packager/packager
+++ b/docker/packager/packager
@@ -86,6 +86,7 @@ def parse_env_variables(
     additional_pkgs,
     with_coverage,
     with_binaries,
+    cov_token,
 ):
     DARWIN_SUFFIX = "-darwin"
     DARWIN_ARM_SUFFIX = "-darwin-aarch64"
@@ -176,6 +177,9 @@ def parse_env_variables(
     if package_type == "performance":
         result.append("COMBINED_OUTPUT=performance")
         cmake_flags.append("-DENABLE_TESTS=0")
+    elif package_type == "coverity":
+        result.append("COMBINED_OUTPUT=coverity")
+        result.append("COV_TOKEN={}".format(cov_token))
     elif split_binary:
         result.append("COMBINED_OUTPUT=shared_build")

@@ -262,9 +266,8 @@ if __name__ == "__main__":
     # and configs to be used for performance test.
     parser.add_argument(
         "--package-type",
-        choices=("deb", "binary", "performance"),
+        choices=["deb", "binary", "performance", "coverity"],
         required=True,
-        help="a build type",
     )
     parser.add_argument(
         "--clickhouse-repo-path",
@@ -325,12 +328,13 @@ if __name__ == "__main__":
     parser.add_argument(
         "--docker-image-version", default="latest", help="docker image tag to use"
     )
+    parser.add_argument("--cov_token", default="")

     args = parser.parse_args()
     if not os.path.isabs(args.output_dir):
         args.output_dir = os.path.abspath(os.path.join(os.getcwd(), args.output_dir))

-    image_type = "binary" if args.package_type == "performance" else args.package_type
+    image_type = "binary" if args.package_type in ("performance", "coverity") else args.package_type
     image_name = "clickhouse/binary-builder"

     if not os.path.isabs(args.clickhouse_repo_path):
@@ -372,6 +376,7 @@ if __name__ == "__main__":
         args.additional_pkgs,
         args.with_coverage,
         args.with_binaries,
+        args.cov_token,
     )

     run_docker_image_with_env(
diff --git a/docker/server/.gitignore b/docker/server/.gitignore
deleted file mode 100644
index 692758d55aa..00000000000
--- a/docker/server/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-alpine-root/*
-tgz-packages/*
diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile
deleted file mode 100644
index 5b7990ab030..00000000000
--- a/docker/server/Dockerfile
+++ /dev/null
@@ -1,122 +0,0 @@
-FROM ubuntu:20.04
-
-# ARG for quick switch to a given ubuntu mirror
-ARG apt_archive="http://archive.ubuntu.com"
-RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
-
-ARG repository="deb https://packages.clickhouse.com/deb stable main"
-ARG version=22.1.1.*
-
-# set non-empty deb_location_url url to create a docker image
-# from debs created by CI build, for example:
-# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://clickhouse-builds.s3.yandex.net/21852/069cfbff388b3d478d1a16dc7060b48073f5d522/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/" -t filimonovq/clickhouse-server:pr21852
-ARG deb_location_url=""
-
-# set non-empty single_binary_location_url to create docker image
-# from a single binary url (useful for non-standard builds - with sanitizers, for arm64).
-# for example (run on aarch64 server):
-# docker build . 
--network host --build-arg single_binary_location_url="https://builds.clickhouse.com/master/aarch64/clickhouse" -t altinity/clickhouse-server:master-testing-arm -# note: clickhouse-odbc-bridge is not supported there. -ARG single_binary_location_url="" - -# see https://github.com/moby/moby/issues/4032#issuecomment-192327844 -ARG DEBIAN_FRONTEND=noninteractive - -# user/group precreated explicitly with fixed uid/gid on purpose. -# It is especially important for rootless containers: in that case entrypoint -# can't do chown and owners of mounted volumes should be configured externally. -# We do that in advance at the begining of Dockerfile before any packages will be -# installed to prevent picking those uid / gid by some unrelated software. -# The same uid / gid (101) is used both for alpine and ubuntu. - -# To drop privileges, we need 'su' command, that simply changes uid and gid. -# In fact, the 'su' command from Linux is not so simple, due to inherent vulnerability in Linux: -# https://ruderich.org/simon/notes/su-sudo-from-root-tty-hijacking -# It has to mitigate this drawback of Linux, and to do this, 'su' command is creating it's own pseudo-terminal -# and forwarding commands. Due to some ridiculous curcumstances, it does not work in Docker (or it does) -# and for these reasons people are using alternatives to the 'su' command in Docker, -# that don't mess with the terminal, don't care about closing the opened files, etc... -# but can only be safe to drop privileges inside Docker. -# The question - what implementation of 'su' command to use. -# It should be a simple script doing about just two syscalls. -# Some people tend to use 'gosu' tool that is written in Go. -# It is not used for several reasons: -# 1. Dependency on some foreign code in yet another programming language - does not sound alright. -# 2. Anselmo D. Adams suggested not to use it due to false positive alarms in some undisclosed security scanners. 
- -COPY su-exec.c /su-exec.c - -RUN groupadd -r clickhouse --gid=101 \ - && useradd -r -g clickhouse --uid=101 --home-dir=/var/lib/clickhouse --shell=/bin/bash clickhouse \ - && apt-get update \ - && apt-get install --yes --no-install-recommends \ - apt-transport-https \ - ca-certificates \ - dirmngr \ - gnupg \ - locales \ - wget \ - tzdata \ - && mkdir -p /etc/apt/sources.list.d \ - && apt-key adv --keyserver keyserver.ubuntu.com --recv 8919F6BD2B48D754 \ - && echo $repository > /etc/apt/sources.list.d/clickhouse.list \ - && if [ -n "$deb_location_url" ]; then \ - echo "installing from custom url with deb packages: $deb_location_url" \ - rm -rf /tmp/clickhouse_debs \ - && mkdir -p /tmp/clickhouse_debs \ - && wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-common-static_${version}_amd64.deb" -P /tmp/clickhouse_debs \ - && wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-client_${version}_all.deb" -P /tmp/clickhouse_debs \ - && wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-server_${version}_all.deb" -P /tmp/clickhouse_debs \ - && dpkg -i /tmp/clickhouse_debs/*.deb ; \ - elif [ -n "$single_binary_location_url" ]; then \ - echo "installing from single binary url: $single_binary_location_url" \ - && rm -rf /tmp/clickhouse_binary \ - && mkdir -p /tmp/clickhouse_binary \ - && wget --progress=bar:force:noscroll "$single_binary_location_url" -O /tmp/clickhouse_binary/clickhouse \ - && chmod +x /tmp/clickhouse_binary/clickhouse \ - && /tmp/clickhouse_binary/clickhouse install --user "clickhouse" --group "clickhouse" ; \ - else \ - echo "installing from repository: $repository" \ - && apt-get update \ - && apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \ - && apt-get install --allow-unauthenticated --yes --no-install-recommends \ - clickhouse-common-static=$version \ - clickhouse-client=$version \ - clickhouse-server=$version ; \ - fi \ - && apt-get install -y --no-install-recommends tcc libc-dev && \ - tcc /su-exec.c -o /bin/su-exec && \ - chown root:root /bin/su-exec && \ - chmod 0755 /bin/su-exec && \ - rm /su-exec.c && \ - apt-get purge -y --auto-remove tcc libc-dev libc-dev-bin libc6-dev linux-libc-dev \ - && clickhouse-local -q 'SELECT * FROM system.build_options' \ - && rm -rf \ - /var/lib/apt/lists/* \ - /var/cache/debconf \ - /tmp/* \ - && apt-get clean \ - && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \ - && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client - -# we need to allow "others" access to clickhouse folder, because docker container -# can be started with arbitrary uid (openshift usecase) - -RUN locale-gen en_US.UTF-8 -ENV LANG en_US.UTF-8 -ENV LANGUAGE en_US:en -ENV LC_ALL en_US.UTF-8 -ENV TZ UTC - -RUN mkdir /docker-entrypoint-initdb.d - -COPY docker_related_config.xml /etc/clickhouse-server/config.d/ -COPY entrypoint.sh /entrypoint.sh -RUN chmod +x /entrypoint.sh - -EXPOSE 9000 8123 9009 -VOLUME /var/lib/clickhouse - -ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml - -ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile new file mode 120000 index 00000000000..fd45f0f7c7c --- /dev/null +++ b/docker/server/Dockerfile @@ -0,0 +1 @@ +Dockerfile.ubuntu \ No newline at end of file diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index cd192c0c9da..5aaf5dd5511 100644 --- 
a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -1,3 +1,14 @@ +FROM ubuntu:20.04 AS glibc-donor +ARG TARGETARCH + +RUN arch=${TARGETARCH:-amd64} \ + && case $arch in \ + amd64) rarch=x86_64 ;; \ + arm64) rarch=aarch64 ;; \ + esac \ + && ln -s "${rarch}-linux-gnu" /lib/linux-gnu + + FROM alpine ENV LANG=en_US.UTF-8 \ @@ -6,7 +17,24 @@ ENV LANG=en_US.UTF-8 \ TZ=UTC \ CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml -COPY alpine-root/ / +COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/ +COPY --from=glibc-donor /etc/nsswitch.conf /etc/ +COPY docker_related_config.xml /etc/clickhouse-server/config.d/ +COPY entrypoint.sh /entrypoint.sh + +ARG TARGETARCH + +RUN arch=${TARGETARCH:-amd64} \ + && case $arch in \ + amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \ + arm64) ln -sf /lib/ld-2.31.so /lib/ld-linux-aarch64.so.1 ;; \ + esac + +# lts / testing / prestable / etc +ARG REPO_CHANNEL="stable" +ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" +ARG VERSION="20.9.3.45" +ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # user/group precreated explicitly with fixed uid/gid on purpose. # It is especially important for rootless containers: in that case entrypoint @@ -15,9 +43,23 @@ COPY alpine-root/ / # installed to prevent picking those uid / gid by some unrelated software. # The same uid / gid (101) is used both for alpine and ubuntu. -RUN addgroup -S -g 101 clickhouse \ +RUN arch=${TARGETARCH:-amd64} \ + && for package in ${PACKAGES}; do \ + { \ + { echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \ + && wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" -O "/tmp/${package}-${VERSION}-${arch}.tgz" \ + && tar xvzf "/tmp/${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / ; \ + } || \ + { echo "Fallback to ${REPOSITORY}/${package}-${VERSION}.tgz" \ + && wget -c -q "${REPOSITORY}/${package}-${VERSION}.tgz" -O "/tmp/${package}-${VERSION}.tgz" \ + && tar xvzf "/tmp/${package}-${VERSION}.tgz" --strip-components=2 -C / ; \ + } ; \ + } || exit 1 \ + ; done \ + && rm /tmp/*.tgz /install -r \ + && addgroup -S -g 101 clickhouse \ && adduser -S -h /var/lib/clickhouse -s /bin/bash -G clickhouse -g "ClickHouse server" -u 101 clickhouse \ - && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \ + && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server/config.d /etc/clickhouse-server/users.d /etc/clickhouse-client /docker-entrypoint-initdb.d \ && chown clickhouse:clickhouse /var/lib/clickhouse \ && chown root:clickhouse /var/log/clickhouse-server \ && chmod +x /entrypoint.sh \ diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu new file mode 100644 index 00000000000..6e93bd97036 --- /dev/null +++ b/docker/server/Dockerfile.ubuntu @@ -0,0 +1,129 @@ +FROM ubuntu:20.04 + +# see https://github.com/moby/moby/issues/4032#issuecomment-192327844 +ARG DEBIAN_FRONTEND=noninteractive + +COPY su-exec.c /su-exec.c + +# ARG for quick switch to a given ubuntu mirror +ARG apt_archive="http://archive.ubuntu.com" +RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list \ + && groupadd -r clickhouse --gid=101 \ + && useradd -r -g clickhouse 
--uid=101 --home-dir=/var/lib/clickhouse --shell=/bin/bash clickhouse \
+    && apt-get update \
+    && apt-get install --yes --no-install-recommends \
+        apt-transport-https \
+        ca-certificates \
+        dirmngr \
+        gnupg \
+        locales \
+        wget \
+        tzdata \
+    && apt-get install -y --no-install-recommends tcc libc-dev && \
+        tcc /su-exec.c -o /bin/su-exec && \
+        chown root:root /bin/su-exec && \
+        chmod 0755 /bin/su-exec && \
+        rm /su-exec.c && \
+        apt-get purge -y --auto-remove tcc libc-dev libc-dev-bin libc6-dev linux-libc-dev \
+    && apt-get clean

+ARG REPO_CHANNEL="stable"
+ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
+ARG VERSION=22.1.1.*
+ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
+
+# set non-empty deb_location_url url to create a docker image
+# from debs created by CI build, for example:
+# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://clickhouse-builds.s3.yandex.net/21852/069cfbff388b3d478d1a16dc7060b48073f5d522/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/" -t filimonovq/clickhouse-server:pr21852
+ARG deb_location_url=""
+
+# set non-empty single_binary_location_url to create docker image
+# from a single binary url (useful for non-standard builds - with sanitizers, for arm64).
+# for example (run on aarch64 server):
+# docker build . --network host --build-arg single_binary_location_url="https://builds.clickhouse.com/master/aarch64/clickhouse" -t altinity/clickhouse-server:master-testing-arm
+# note: clickhouse-odbc-bridge is not supported there.
+ARG single_binary_location_url=""
+
+# user/group precreated explicitly with fixed uid/gid on purpose.
+# It is especially important for rootless containers: in that case the entrypoint
+# can't do chown, and the owners of mounted volumes should be configured externally.
+# We do that in advance, at the beginning of the Dockerfile, before any packages are
+# installed, to prevent some unrelated software from picking those uid / gid.
+# The same uid / gid (101) is used both for alpine and ubuntu.
+
+# To drop privileges, we need a 'su' command that simply changes the uid and gid.
+# In fact, the 'su' command on Linux is not so simple, due to an inherent vulnerability in Linux:
+# https://ruderich.org/simon/notes/su-sudo-from-root-tty-hijacking
+# To mitigate this drawback of Linux, the 'su' command creates its own pseudo-terminal
+# and forwards commands through it. Due to some ridiculous circumstances, this does not work
+# in Docker (or it does), and for these reasons people use alternatives to the 'su' command
+# in Docker that don't mess with the terminal and don't care about closing the opened files,
+# but are only safe for dropping privileges inside Docker.
+# The question is which implementation of the 'su' command to use.
+# It should be a simple program doing just about two syscalls.
+# Some people tend to use the 'gosu' tool, which is written in Go.
+# It is not used here, for several reasons:
+# 1. A dependency on foreign code in yet another programming language does not sound right.
+# 2. Anselmo D. Adams suggested not to use it due to false-positive alarms in some undisclosed security scanners.
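+# Illustration only (a sketch, not executed during the build): the whole job of
+# such a tool is setgid/setuid followed by exec, so it is invoked later roughly as
+#   su-exec clickhouse:clickhouse clickhouse-server --config-file="$CLICKHOUSE_CONFIG"
+# with no pseudo-terminal allocated and no intermediate process left behind.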
+
+ARG TARGETARCH
+
+RUN arch=${TARGETARCH:-amd64} \
+    && if [ -n "${deb_location_url}" ]; then \
+        echo "installing from custom url with deb packages: ${deb_location_url}" \
+        && rm -rf /tmp/clickhouse_debs \
+        && mkdir -p /tmp/clickhouse_debs \
+        && for package in ${PACKAGES}; do \
+            { wget --progress=bar:force:noscroll "${deb_location_url}/${package}_${VERSION}_${arch}.deb" -P /tmp/clickhouse_debs || \
+                wget --progress=bar:force:noscroll "${deb_location_url}/${package}_${VERSION}_all.deb" -P /tmp/clickhouse_debs ; } \
+            || exit 1 \
+        ; done \
+        && dpkg -i /tmp/clickhouse_debs/*.deb ; \
+    elif [ -n "${single_binary_location_url}" ]; then \
+        echo "installing from single binary url: ${single_binary_location_url}" \
+        && rm -rf /tmp/clickhouse_binary \
+        && mkdir -p /tmp/clickhouse_binary \
+        && wget --progress=bar:force:noscroll "${single_binary_location_url}" -O /tmp/clickhouse_binary/clickhouse \
+        && chmod +x /tmp/clickhouse_binary/clickhouse \
+        && /tmp/clickhouse_binary/clickhouse install --user "clickhouse" --group "clickhouse" ; \
+    else \
+        mkdir -p /etc/apt/sources.list.d \
+        && apt-key adv --keyserver keyserver.ubuntu.com --recv 8919F6BD2B48D754 \
+        && echo ${REPOSITORY} > /etc/apt/sources.list.d/clickhouse.list \
+        && echo "installing from repository: ${REPOSITORY}" \
+        && apt-get update \
+        && apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \
+        && for package in ${PACKAGES}; do \
+            packages="${packages} ${package}=${VERSION}" \
+        ; done \
+        && apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \
+    ; fi \
+    && clickhouse-local -q 'SELECT * FROM system.build_options' \
+    && rm -rf \
+        /var/lib/apt/lists/* \
+        /var/cache/debconf \
+        /tmp/* \
+    && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
+    && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client
+
+# we need to allow "others" access to the clickhouse folder, because the docker container
+# can be started with an arbitrary uid (openshift use case)
+
+RUN locale-gen en_US.UTF-8
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US:en
+ENV LC_ALL en_US.UTF-8
+ENV TZ UTC
+
+RUN mkdir /docker-entrypoint-initdb.d
+
+COPY docker_related_config.xml /etc/clickhouse-server/config.d/
+COPY entrypoint.sh /entrypoint.sh
+RUN chmod +x /entrypoint.sh
+
+EXPOSE 9000 8123 9009
+VOLUME /var/lib/clickhouse
+
+ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml
+
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/docker/server/alpine-build.sh b/docker/server/alpine-build.sh
deleted file mode 100755
index 1b448c61fbb..00000000000
--- a/docker/server/alpine-build.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash
-set -x
-
-REPO_CHANNEL="${REPO_CHANNEL:-stable}" # lts / testing / prestable / etc
-REPO_URL="${REPO_URL:-"https://repo.yandex.ru/clickhouse/tgz/${REPO_CHANNEL}"}"
-VERSION="${VERSION:-20.9.3.45}"
-DOCKER_IMAGE="${DOCKER_IMAGE:-clickhouse/clickhouse-server}"
-
-# where original files live
-DOCKER_BUILD_FOLDER="${BASH_SOURCE%/*}"
-
-# we will create root for our image here
-CONTAINER_ROOT_FOLDER="${DOCKER_BUILD_FOLDER}/alpine-root"
-
-# clean up the root from old runs, it's reconstructed each time
-rm -rf "$CONTAINER_ROOT_FOLDER"
-mkdir -p "$CONTAINER_ROOT_FOLDER"
-
-# where to put downloaded tgz
-TGZ_PACKAGES_FOLDER="${DOCKER_BUILD_FOLDER}/tgz-packages"
-mkdir -p "$TGZ_PACKAGES_FOLDER"
-
-PACKAGES=( "clickhouse-client" "clickhouse-server" "clickhouse-common-static" )
-
-# download tars from the repo -for package in "${PACKAGES[@]}" -do - wget -c -q --show-progress "${REPO_URL}/${package}-${VERSION}.tgz" -O "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz" -done - -# unpack tars -for package in "${PACKAGES[@]}" -do - tar xvzf "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz" --strip-components=2 -C "$CONTAINER_ROOT_FOLDER" -done - -# prepare few more folders -mkdir -p "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/users.d" \ - "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d" \ - "${CONTAINER_ROOT_FOLDER}/var/log/clickhouse-server" \ - "${CONTAINER_ROOT_FOLDER}/var/lib/clickhouse" \ - "${CONTAINER_ROOT_FOLDER}/docker-entrypoint-initdb.d" \ - "${CONTAINER_ROOT_FOLDER}/lib64" - -cp "${DOCKER_BUILD_FOLDER}/docker_related_config.xml" "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d/" -cp "${DOCKER_BUILD_FOLDER}/entrypoint.sh" "${CONTAINER_ROOT_FOLDER}/entrypoint.sh" - -## get glibc components from ubuntu 20.04 and put them to expected place -docker pull ubuntu:20.04 -ubuntu20image=$(docker create --rm ubuntu:20.04) -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libc.so.6 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libdl.so.2 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libm.so.6 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libpthread.so.0 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/librt.so.1 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libnss_dns.so.2 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libnss_files.so.2 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libresolv.so.2 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib64/ld-linux-x86-64.so.2 "${CONTAINER_ROOT_FOLDER}/lib64" -docker cp -L "${ubuntu20image}":/etc/nsswitch.conf "${CONTAINER_ROOT_FOLDER}/etc" - -docker build "$DOCKER_BUILD_FOLDER" -f Dockerfile.alpine -t "${DOCKER_IMAGE}:${VERSION}-alpine" --pull -rm -rf "$CONTAINER_ROOT_FOLDER" diff --git a/docker/server/local.Dockerfile b/docker/server/local.Dockerfile deleted file mode 100644 index 0d86c9ce45a..00000000000 --- a/docker/server/local.Dockerfile +++ /dev/null @@ -1,47 +0,0 @@ -# Since right now we can't set volumes to the docker during build, we split building container in stages: -# 1. build base container -# 2. run base conatiner with mounted volumes -# 3. commit container as image -# 4. build final container atop that image -# Middle steps are performed by the bash script. - -FROM ubuntu:18.04 as clickhouse-server-base -ARG gosu_ver=1.14 - -VOLUME /packages/ - -# update to allow installing dependencies of clickhouse automatically -RUN apt update; \ - DEBIAN_FRONTEND=noninteractive \ - apt install -y locales; - -ADD https://github.com/tianon/gosu/releases/download/${gosu_ver}/gosu-amd64 /bin/gosu - -RUN locale-gen en_US.UTF-8 -ENV LANG en_US.UTF-8 -ENV LANGUAGE en_US:en -ENV LC_ALL en_US.UTF-8 - -# installing via apt to simulate real-world scenario, where user installs deb package and all it's dependecies automatically. 
-CMD DEBIAN_FRONTEND=noninteractive \
-    apt install -y \
-        /packages/clickhouse-common-static_*.deb \
-        /packages/clickhouse-server_*.deb ;
-
-FROM clickhouse-server-base:postinstall as clickhouse-server
-
-RUN mkdir /docker-entrypoint-initdb.d
-
-COPY docker_related_config.xml /etc/clickhouse-server/config.d/
-COPY entrypoint.sh /entrypoint.sh
-
-RUN chmod +x \
-    /entrypoint.sh \
-    /bin/gosu
-
-EXPOSE 9000 8123 9009
-VOLUME /var/lib/clickhouse
-
-ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml
-
-ENTRYPOINT ["/entrypoint.sh"]
diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh
index 74711f476f8..32799a669eb 100755
--- a/docker/test/fuzzer/run-fuzzer.sh
+++ b/docker/test/fuzzer/run-fuzzer.sh
@@ -226,7 +226,6 @@ quit
             --receive_data_timeout_ms=10000 \
             --stacktrace \
             --query-fuzzer-runs=1000 \
-            --testmode \
             --queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) \
             $NEW_TESTS_OPT \
             > >(tail -n 100000 > fuzzer.log) \
diff --git a/docker/test/integration/mysql_js_client/Dockerfile b/docker/test/integration/mysql_js_client/Dockerfile
index b1397b40d38..4c9df10ace1 100644
--- a/docker/test/integration/mysql_js_client/Dockerfile
+++ b/docker/test/integration/mysql_js_client/Dockerfile
@@ -1,8 +1,10 @@
 # docker build -t clickhouse/mysql-js-client .
 # MySQL JavaScript client docker container
 
-FROM node:8
+FROM node:16.14.2
+
+WORKDIR /usr/app
 
 RUN npm install mysql
 
-COPY ./test.js test.js
+COPY ./test.js ./test.js
diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh
index 5fd78502337..f8b73791388 100755
--- a/docker/test/stateless/run.sh
+++ b/docker/test/stateless/run.sh
@@ -131,8 +131,23 @@ clickhouse-client -q "system flush logs" ||:
 grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
 pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
-clickhouse-client -q "select * from system.query_log format TSVWithNamesAndTypes" | pigz > /test_output/query-log.tsv.gz &
-clickhouse-client -q "select * from system.query_thread_log format TSVWithNamesAndTypes" | pigz > /test_output/query-thread-log.tsv.gz &
+
+# Compress tables.
+#
+# NOTE:
+# - due to tests with s3 storage we cannot use /var/lib/clickhouse/data
+#   directly
+# - even though the CI auto-compresses some files (but not *.tsv), it does this
+#   only for files >64MB, and we want these files to be compressed explicitly
+for table in query_log zookeeper_log trace_log
+do
+    clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.tsv.gz &
+    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+        clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.1.tsv.gz &
+        clickhouse-client --port 29000 -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.2.tsv.gz &
+    fi
+done
+wait ||:
 
 # Also export trace log in flamegraph-friendly format.
 for trace_type in CPU Memory Real
@@ -161,14 +176,6 @@ fi
 
 tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
 
-# Replace the engine with Ordinary to avoid extra symlinks stuff in artifacts.
-# (so that clickhouse-local --path can read it w/o extra care).
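Because the dumps above are written as `TSVWithNamesAndTypes`, they can later be inspected without a running server. A minimal sketch follows; the file name is whichever dump you downloaded, and it assumes a clickhouse-local build that infers the schema from the names-and-types header:

```bash
# Decompress a dumped system table and query it with clickhouse-local;
# the TSVWithNamesAndTypes header carries both column names and types.
pigz -cd query_log.tsv.gz | clickhouse-local \
    --input-format TSVWithNamesAndTypes \
    --query "SELECT count() FROM table"
```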
-sed -i -e "s/ATTACH DATABASE _ UUID '[^']*'/ATTACH DATABASE system/" -e "s/Atomic/Ordinary/" /var/lib/clickhouse/metadata/system.sql -for table in text_log query_log zookeeper_log trace_log; do - sed -i "s/ATTACH TABLE _ UUID '[^']*'/ATTACH TABLE $table/" /var/lib/clickhouse/metadata/system/${table}.sql - tar -chf /test_output/${table}_dump.tar /var/lib/clickhouse/metadata/system.sql /var/lib/clickhouse/metadata/system/${table}.sql /var/lib/clickhouse/data/system/${table} ||: -done - if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server1.log ||: grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server2.log ||: @@ -179,8 +186,6 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] rm /var/log/clickhouse-server/clickhouse-server2.log mv /var/log/clickhouse-server/stderr1.log /test_output/ ||: mv /var/log/clickhouse-server/stderr2.log /test_output/ ||: - tar -chf /test_output/zookeeper_log_dump1.tar /var/lib/clickhouse1/data/system/zookeeper_log ||: - tar -chf /test_output/zookeeper_log_dump2.tar /var/lib/clickhouse2/data/system/zookeeper_log ||: tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||: tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination ||: fi diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 3cef5b008db..e56afcbfd7a 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -348,13 +348,13 @@ then rm -f /test_output/tmp # OOM - zgrep -Fa " Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ + zgrep -Fa " Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \ && echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv # Logical errors echo "Check for Logical errors in server log:" - zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log > /test_output/bc_check_logical_errors.txt \ + zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log* > /test_output/bc_check_logical_errors.txt \ && echo -e 'Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No logical errors\tOK' >> /test_output/test_results.tsv @@ -362,13 +362,13 @@ then [ -s /test_output/bc_check_logical_errors.txt ] || rm /test_output/bc_check_logical_errors.txt # Crash - zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ + zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \ && echo -e 'Backward compatibility check: Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: Not crashed\tOK' >> /test_output/test_results.tsv # It also checks for crash without stacktrace (printed by watchdog) echo "Check for Fatal message in server log:" - zgrep -Fa " " /var/log/clickhouse-server/clickhouse-server.log > /test_output/bc_check_fatal_messages.txt \ + zgrep 
-Fa " " /var/log/clickhouse-server/clickhouse-server.log* > /test_output/bc_check_fatal_messages.txt \ && echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv diff --git a/docker/test/test_runner.sh b/docker/test/test_runner.sh deleted file mode 100755 index 0c99c8c2b32..00000000000 --- a/docker/test/test_runner.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/sh - -set -e -x - -# Not sure why shellcheck complains that rc is not assigned before it is referenced. -# shellcheck disable=SC2154 -trap 'rc=$?; echo EXITED WITH: $rc; exit $rc' EXIT - -# CLI option to prevent rebuilding images, just re-run tests with images leftover from previuos time -readonly NO_REBUILD_FLAG="--no-rebuild" - -readonly CLICKHOUSE_DOCKER_DIR="$(realpath "${1}")" -readonly CLICKHOUSE_PACKAGES_ARG="${2}" -CLICKHOUSE_SERVER_IMAGE="${3}" - -if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then - readonly CLICKHOUSE_PACKAGES_DIR="$(realpath "${2}")" # or --no-rebuild -fi - - -# In order to allow packages directory to be anywhere, and to reduce amount of context sent to the docker daemon, -# all images are built in multiple stages: -# 1. build base image, install dependencies -# 2. run image with volume mounted, install what needed from those volumes -# 3. tag container as image -# 4. [optional] build another image atop of tagged. - -# TODO: optionally mount most recent clickhouse-test and queries directory from local machine - -if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then - docker build --network=host \ - -f "${CLICKHOUSE_DOCKER_DIR}/test/stateless/clickhouse-statelest-test-runner.Dockerfile" \ - --target clickhouse-test-runner-base \ - -t clickhouse-test-runner-base:preinstall \ - "${CLICKHOUSE_DOCKER_DIR}/test/stateless" - - docker rm -f clickhouse-test-runner-installing-packages || true - docker run --network=host \ - -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \ - --name clickhouse-test-runner-installing-packages \ - clickhouse-test-runner-base:preinstall - docker commit clickhouse-test-runner-installing-packages clickhouse-statelest-test-runner:local - docker rm -f clickhouse-test-runner-installing-packages || true -fi - -# # Create a bind-volume to the clickhouse-test script file -# docker volume create --driver local --opt type=none --opt device=/home/enmk/proj/ClickHouse_master/tests/clickhouse-test --opt o=bind clickhouse-test-script-volume -# docker volume create --driver local --opt type=none --opt device=/home/enmk/proj/ClickHouse_master/tests/queries --opt o=bind clickhouse-test-queries-dir-volume - -# Build server image (optional) from local packages -if [ -z "${CLICKHOUSE_SERVER_IMAGE}" ]; then - CLICKHOUSE_SERVER_IMAGE="clickhouse/server:local" - - if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then - docker build --network=host \ - -f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \ - --target clickhouse-server-base \ - -t clickhouse-server-base:preinstall \ - "${CLICKHOUSE_DOCKER_DIR}/server" - - docker rm -f clickhouse_server_base_installing_server || true - docker run --network=host -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \ - --name clickhouse_server_base_installing_server \ - clickhouse-server-base:preinstall - docker commit clickhouse_server_base_installing_server clickhouse-server-base:postinstall - - docker build --network=host \ - 
-f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \ - --target clickhouse-server \ - -t "${CLICKHOUSE_SERVER_IMAGE}" \ - "${CLICKHOUSE_DOCKER_DIR}/server" - fi -fi - -docker rm -f test-runner || true -docker-compose down -CLICKHOUSE_SERVER_IMAGE="${CLICKHOUSE_SERVER_IMAGE}" \ - docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \ - create \ - --build --force-recreate - -CLICKHOUSE_SERVER_IMAGE="${CLICKHOUSE_SERVER_IMAGE}" \ - docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \ - run \ - --name test-runner \ - test-runner diff --git a/docker/test/test_runner_docker_compose.yaml b/docker/test/test_runner_docker_compose.yaml deleted file mode 100644 index 2aef6a48d77..00000000000 --- a/docker/test/test_runner_docker_compose.yaml +++ /dev/null @@ -1,34 +0,0 @@ -version: "2" - -services: - clickhouse-server: - image: ${CLICKHOUSE_SERVER_IMAGE} - expose: - - "8123" # HTTP - - "9000" # TCP - - "9009" # HTTP-interserver - restart: "no" - - test-runner: - image: clickhouse-statelest-test-runner:local - - restart: "no" - depends_on: - - clickhouse-server - environment: - # these are used by clickhouse-test to point clickhouse-client to the right server - - CLICKHOUSE_HOST=clickhouse-server - - CLICKHOUSE_PORT=9009 - - CLICKHOUSE_TEST_HOST_EXPOSED_PORT=51234 - expose: - # port for any test to serve data to clickhouse-server on rare occasion (like URL-engine tables in 00646), - # should match value of CLICKHOUSE_TEST_HOST_EXPOSED_PORT above - - "51234" - - # NOTE: Dev-mode: mount newest versions of the queries and clickhouse-test script into container. - # volumes: - # - /home/enmk/proj/ClickHouse_master/tests/queries:/usr/share/clickhouse-test/queries:ro - # - /home/enmk/proj/ClickHouse_master/tests/clickhouse-test:/usr/bin/clickhouse-test:ro - - # String-form instead of list-form to allow multiple arguments in "${CLICKHOUSE_TEST_ARGS}" - entrypoint: "clickhouse-test ${CLICKHOUSE_TEST_ARGS}" diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index a0acda5d5c6..b70cd225cdd 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -688,7 +688,7 @@ Tags: - `volume_name_N` — Volume name. Volume names must be unique. - `disk` — a disk within a volume. - `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume’s disks. If the a size of a merged part estimated to be bigger than `max_data_part_size_bytes` then this part will be written to a next volume. Basically this feature allows to keep new/small parts on a hot (SSD) volume and move them to a cold (HDD) volume when they reach large size. Do not use this setting if your policy has only one volume. -- `move_factor` — when the amount of available space gets lower than this factor, data automatically start to move on the next volume if any (by default, 0.1). +- `move_factor` — when the amount of available space gets lower than this factor, data automatically starts to move on the next volume if any (by default, 0.1). ClickHouse sorts existing parts by size from largest to smallest (in descending order) and selects parts with the total size that is sufficient to meet the `move_factor` condition. If the total size of all parts is insufficient, all parts will be moved. - `prefer_not_to_merge` — Disables merging of data parts on this volume. 
 When this setting is enabled, merging data on this volume is not allowed. This allows controlling how ClickHouse works with slow disks.
 
 Configuration examples:
diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md
index ad199ce452e..20d6b20feb6 100644
--- a/docs/en/introduction/adopters.md
+++ b/docs/en/introduction/adopters.md
@@ -43,7 +43,7 @@ toc_title: Adopters
 | Citymobil | Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) |
 | Cloudflare | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) |
 | Comcast | Media | CDN Traffic Analysis | — | — | [ApacheCon 2019 Talk](https://www.youtube.com/watch?v=e9TZ6gFDjNg) |
-| ContentSquare | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) |
+| Contentsquare | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) |
 | Corunet | Analytics | Main product | — | — | [Slides in English, April 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) |
 | CraiditX 氪信 | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) |
 | Crazypanda | Games | | — | — | Live session on ClickHouse meetup |
diff --git a/docs/en/operations/named-collections.md b/docs/en/operations/named-collections.md
index dce7938f98b..ab972c72345 100644
--- a/docs/en/operations/named-collections.md
+++ b/docs/en/operations/named-collections.md
@@ -36,6 +36,7 @@ Example of configuration:
             <access_key_id>AKIAIOSFODNN7EXAMPLE</access_key_id>
             <secret_access_key>wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY</secret_access_key>
             <format>CSV</format>
+            <url>https://s3.us-east-1.amazonaws.com/yourbucket/mydata/</url>
         </s3_mydata>
     </named_collections>
 </clickhouse>
@@ -44,12 +45,12 @@
 ### Example of using named connections with the s3 function
 
 ```sql
-INSERT INTO FUNCTION s3(s3_mydata, url = 'https://s3.us-east-1.amazonaws.com/yourbucket/mydata/test_file.tsv.gz',
+INSERT INTO FUNCTION s3(s3_mydata, filename = 'test_file.tsv.gz',
     format = 'TSV', structure = 'number UInt64', compression_method = 'gzip')
 SELECT * FROM numbers(10000);
 
 SELECT count()
-FROM s3(s3_mydata, url = 'https://s3.us-east-1.amazonaws.com/yourbucket/mydata/test_file.tsv.gz')
+FROM s3(s3_mydata, filename = 'test_file.tsv.gz')
 
 ┌─count()─┐
 │   10000 │
 └─────────┘
 ```
diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md
index d535a516b3a..fc48c97bb61 100644
--- a/docs/en/sql-reference/functions/date-time-functions.md
+++ b/docs/en/sql-reference/functions/date-time-functions.md
@@ -393,6 +393,13 @@ This is a generalization of other functions named `toStartOf*`. For example,
 `toStartOfInterval(t, INTERVAL 1 day)` returns the same as `toStartOfDay(t)`,
 `toStartOfInterval(t, INTERVAL 15 minute)` returns the same as `toStartOfFifteenMinutes(t)` etc.
 
+## toLastDayOfMonth {#toLastDayOfMonth}
+
+Rounds up a date or date with time to the last day of the month.
+Returns the date.
+
+Alias: `LAST_DAY`.
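The new function is easy to spot-check from the shell; a quick sketch, assuming a build that already includes `toLastDayOfMonth`:

```bash
# 2024-02 is a leap month, so both calls should print 2024-02-29.
clickhouse-local --query "
    SELECT
        toLastDayOfMonth(toDate('2024-02-15')),
        LAST_DAY(toDate('2024-02-15'))"
```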
+
 ## toTime {#totime}
 
 Converts a date with time to a certain fixed date, while preserving the time.
diff --git a/docs/en/sql-reference/functions/index.md b/docs/en/sql-reference/functions/index.md
index 7cceec889bd..572aa7f632e 100644
--- a/docs/en/sql-reference/functions/index.md
+++ b/docs/en/sql-reference/functions/index.md
@@ -77,7 +77,7 @@ A function configuration contains the following settings:
 - `argument` - argument description with the `type`, and optional `name` of an argument. Each argument is described in a separate setting. Specifying name is necessary if argument names are part of serialization for user defined function format like [Native](../../interfaces/formats.md#native) or [JSONEachRow](../../interfaces/formats.md#jsoneachrow). Default argument name value is `c` + argument_number.
 - `format` - a [format](../../interfaces/formats.md) in which arguments are passed to the command.
 - `return_type` - the type of a returned value.
-- `return_name` - name of retuned value. Specifying return name is necessary if return name is part of serialization for user defined function format like [Native](../../interfaces/formats.md#native) or [JSONEachRow](../../interfaces/formats.md#jsoneachrow). Optional. Default value is `result`.
+- `return_name` - name of returned value. Specifying return name is necessary if return name is part of serialization for user defined function format like [Native](../../interfaces/formats.md#native) or [JSONEachRow](../../interfaces/formats.md#jsoneachrow). Optional. Default value is `result`.
 - `type` - an executable type. If `type` is set to `executable` then single command is started. If it is set to `executable_pool` then a pool of commands is created.
 - `max_command_execution_time` - maximum execution time in seconds for processing block of data. This setting is valid for `executable_pool` commands only. Optional. Default value is `10`.
 - `command_termination_timeout` - time in seconds during which a command should finish after its pipe is closed. After that time `SIGTERM` is sent to the process executing the command. Optional. Default value is `10`.
diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md
index bce3f9144b1..cedde8a7f35 100644
--- a/docs/en/sql-reference/functions/other-functions.md
+++ b/docs/en/sql-reference/functions/other-functions.md
@@ -2499,3 +2499,41 @@ Result:
 │                      286 │
 └──────────────────────────┘
 ```
+
+## getTypeSerializationStreams {#getTypeSerializationStreams}
+
+Returns the serialization streams of a data type.
+
+**Syntax**
+``` sql
+getTypeSerializationStreams(type_name)
+
+getTypeSerializationStreams(column)
+```
+
+**Arguments**
+- `type_name` - Name of the data type to get its serialization paths for. [String](../../sql-reference/data-types/string.md#string).
+- `column` - Any column which has a data type.
+
+**Returned value**
+- List of serialization streams.
+
+Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).
+
+
+**Example**
+
+Query:
+
+``` sql
+SELECT getTypeSerializationStreams('Array(Array(Int8))')
+```
+
+Result:
+
+``` text
+┌───────────────────────getTypeSerializationStreams('Array(Array(Int8))')─────────────────────────────┐
+│ ['{ArraySizes}','{ArrayElements, ArraySizes}','{ArrayElements, ArrayElements, Regular}'] │
+└─────────────────────────────────────────────────────────────────────────────────────────────────────┘
+```
diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md
index 409ec422ade..ee663c92695 100644
--- a/docs/en/sql-reference/statements/create/table.md
+++ b/docs/en/sql-reference/statements/create/table.md
@@ -114,9 +114,9 @@ In addition, this column is not substituted when using an asterisk in a SELECT q
 
 ### EPHEMERAL {#ephemeral}
 
-`EPHEMERAL expr`
+`EPHEMERAL [expr]`
 
-Ephemeral column. Such a column isn't stored in the table and cannot be SELECTed, but can be referenced in the defaults of CREATE statement.
+Ephemeral column. Such a column isn't stored in the table and cannot be SELECTed, but can be referenced in the defaults of the CREATE statement. If `expr` is omitted, the type of the column must be specified.
 INSERT without list of columns will skip such column, so SELECT/INSERT invariant is preserved - the dump obtained using `SELECT *` can be inserted back into the table using INSERT without specifying the list of columns.
 
 ### ALIAS {#alias}
diff --git a/docs/ja/development/developer-instruction.md b/docs/ja/development/developer-instruction.md
index c95dc0e2ea4..48afc77237c 100644
--- a/docs/ja/development/developer-instruction.md
+++ b/docs/ja/development/developer-instruction.md
@@ -273,7 +273,7 @@ GitHubのUIでforkリポジトリに移動します。 ブランチで開発し
 
 プル要求は、作業がまだ完了していない場合でも作成できます。 この場合、単語を入れてください “WIP” (進行中の作業)タイトルの先頭に、それは後で変更することができます。 これは、変更の協調的なレビューと議論、および利用可能なすべてのテストの実行に役立ちます。 変更の簡単な説明を提供することが重要です。
 
-Yandexの従業員がタグであなたのPRにラベルを付けるとすぐにテストが開始されます “can be tested”. The results of some first checks (e.g. code style) will come in within several minutes. Build check results will arrive within half an hour. And the main set of tests will report itself within an hour.
+ClickHouseの従業員がタグであなたのPRにラベルを付けるとすぐにテストが開始されます “can be tested”. The results of some first checks (e.g. code style) will come in within several minutes. Build check results will arrive within half an hour. And the main set of tests will report itself within an hour.
システムは、プル要求用にClickHouseバイナリビルドを個別に準備します。 これらのビルドを取得するには “Details” 次のリンク “ClickHouse build check” 小切手のリストのエントリ。 そこには、ビルドへの直接リンクがあります。ClickHouseのdebパッケージは、本番サーバーにも展開できます(恐れがない場合)。 diff --git a/docs/ja/introduction/adopters.md b/docs/ja/introduction/adopters.md index 6f878bf1dfe..3372bb74f12 100644 --- a/docs/ja/introduction/adopters.md +++ b/docs/ja/introduction/adopters.md @@ -27,7 +27,7 @@ toc_title: "\u30A2\u30C0\u30D7\u30BF\u30FC" | Cisco | ネットワーク | トラフィック分析 | — | — | [ライトニングトーク2019](https://youtu.be/-hI1vDR2oPY?t=5057) | | Citadel Securities | 金融 | — | — | — | [2019年の貢献](https://github.com/ClickHouse/ClickHouse/pull/4774) | | シティモービル | タクシー | 分析 | — | — | [ロシア語でのブログ投稿,月2020](https://habr.com/en/company/citymobil/blog/490660/) | -| ContentSquare | ウェブ分析 | 主な製品 | — | — | [フランス語でのブログ投稿,November2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | +| Contentsquare | ウェブ分析 | 主な製品 | — | — | [フランス語でのブログ投稿,November2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | | Cloudflare | CDN | トラフィック分析 | 36台のサーバー | — | [ブログ投稿,月2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [ブログ投稿,月2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | | コルネット | 分析 | 主な製品 | — | — | [2019年英語スライド](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | | CraiditX 氪信 | ファイナンスAI | 分析 | — | — | [2019年のスライド](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | diff --git a/docs/ru/development/developer-instruction.md b/docs/ru/development/developer-instruction.md index 964d39163d8..5b6740e88bb 100644 --- a/docs/ru/development/developer-instruction.md +++ b/docs/ru/development/developer-instruction.md @@ -72,11 +72,11 @@ ClickHouse не работает и не собирается на 32-битны Этот вариант не подходит для отправки изменений на сервер. Вы можете временно его использовать, а затем добавить ssh ключи и заменить адрес репозитория с помощью команды `git remote`. -Вы можете также добавить для своего локального репозитория адрес оригинального репозитория Яндекса, чтобы притягивать оттуда обновления: +Вы можете также добавить для своего локального репозитория адрес оригинального репозитория, чтобы притягивать оттуда обновления: git remote add upstream git@github.com:ClickHouse/ClickHouse.git -После этого, вы сможете добавлять в свой репозиторий обновления из репозитория Яндекса с помощью команды `git pull upstream master`. +После этого, вы сможете добавлять в свой репозиторий обновления из репозитория ClickHouse с помощью команды `git pull upstream master`. ### Работа с сабмодулями Git {#rabota-s-sabmoduliami-git} @@ -288,7 +288,7 @@ sudo ./llvm.sh 12 Pull request можно создать, даже если работа над задачей ещё не завершена. В этом случае, добавьте в его название слово «WIP» (work in progress). Название можно будет изменить позже. Это полезно для совместного просмотра и обсуждения изменений, а также для запуска всех имеющихся тестов. Введите краткое описание изменений - впоследствии, оно будет использовано для релизных changelog. -Тесты будут запущены, как только сотрудники Яндекса поставят для pull request тег «Can be tested». Результаты первых проверок (стиль кода) появятся уже через несколько минут. Результаты сборки появятся примерно через пол часа. 
Результаты основного набора тестов будут доступны в пределах часа. +Тесты будут запущены, как только сотрудники ClickHouse поставят для pull request тег «Can be tested». Результаты первых проверок (стиль кода) появятся уже через несколько минут. Результаты сборки появятся примерно через пол часа. Результаты основного набора тестов будут доступны в пределах часа. Система подготовит сборки ClickHouse специально для вашего pull request. Для их получения, нажмите на ссылку «Details» у проверки «Clickhouse build check». Там вы сможете найти прямые ссылки на собранные .deb пакеты ClickHouse, которые, при желании, вы даже сможете установить на свои продакшен серверы (если не страшно). diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index 3f140f85396..c75fa8e92ce 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -678,7 +678,7 @@ TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y); - `volume_name_N` — название тома. Названия томов должны быть уникальны. - `disk` — диск, находящийся внутри тома. - `max_data_part_size_bytes` — максимальный размер куска данных, который может находится на любом из дисков этого тома. Если в результате слияния размер куска ожидается больше, чем max_data_part_size_bytes, то этот кусок будет записан в следующий том. В основном эта функция позволяет хранить новые / мелкие куски на горячем (SSD) томе и перемещать их на холодный (HDD) том, когда они достигают большого размера. Не используйте этот параметр, если политика имеет только один том. -- `move_factor` — доля доступного свободного места на томе, если места становится меньше, то данные начнут перемещение на следующий том, если он есть (по умолчанию 0.1). +- `move_factor` — доля доступного свободного места на томе, если места становится меньше, то данные начнут перемещение на следующий том, если он есть (по умолчанию 0.1). Для перемещения куски сортируются по размеру от большего к меньшему (по убыванию) и выбираются куски, совокупный размер которых достаточен для соблюдения условия `move_factor`, если совокупный размер всех партов недостаточен, будут перемещены все парты. - `prefer_not_to_merge` — Отключает слияние кусков данных, хранящихся на данном томе. Если данная настройка включена, то слияние данных, хранящихся на данном томе, не допускается. Это позволяет контролировать работу ClickHouse с медленными дисками. Примеры конфигураций: diff --git a/docs/ru/sql-reference/statements/create/table.md b/docs/ru/sql-reference/statements/create/table.md index b9c2a4f0f0b..48cce437b8d 100644 --- a/docs/ru/sql-reference/statements/create/table.md +++ b/docs/ru/sql-reference/statements/create/table.md @@ -110,9 +110,9 @@ SELECT x, toTypeName(x) FROM t1; ### EPHEMERAL {#ephemeral} -`EPHEMERAL expr` +`EPHEMERAL [expr]` -Эфемерное выражение. Такой столбец не хранится в таблице и не может быть получен в запросе SELECT, но на него можно ссылаться в выражениях по умолчанию запроса CREATE. +Эфемерное выражение. Такой столбец не хранится в таблице и не может быть получен в запросе SELECT, но на него можно ссылаться в выражениях по умолчанию запроса CREATE. Если значение по умолчанию `expr` не указано, то тип колонки должен быть специфицирован. INSERT без списка столбцов игнорирует этот столбец, таким образом сохраняется инвариант - т.е. дамп, полученный путём `SELECT *`, можно вставить обратно в таблицу INSERT-ом без указания списка столбцов. 
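A minimal sketch of the `EPHEMERAL` behaviour described above, using the expression-less form this change introduces (hypothetical table name; assumes a server new enough to accept `EPHEMERAL` without an expression):

```bash
# 'raw' is EPHEMERAL: usable in other columns' defaults at INSERT time,
# but never stored and absent from SELECT *.
clickhouse-client -n --query "
    CREATE TABLE ephemeral_demo
    (
        id UInt64,
        raw String EPHEMERAL,              -- no default expression, so the type is mandatory
        normalized String DEFAULT lower(raw)
    )
    ENGINE = MergeTree ORDER BY id;

    INSERT INTO ephemeral_demo (id, raw) VALUES (1, 'MiXeD');
    SELECT * FROM ephemeral_demo;          -- returns: 1  mixed
    DROP TABLE ephemeral_demo;
"
```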
### ALIAS {#alias} diff --git a/docs/ru/sql-reference/table-functions/postgresql.md b/docs/ru/sql-reference/table-functions/postgresql.md index a8ae7cfb80b..e61ca69d78c 100644 --- a/docs/ru/sql-reference/table-functions/postgresql.md +++ b/docs/ru/sql-reference/table-functions/postgresql.md @@ -126,7 +126,7 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32) **См. также** -- [Движок таблиц PostgreSQL](../../sql-reference/table-functions/postgresql.md) +- [Движок таблиц PostgreSQL](../../engines/table-engines/integrations/postgresql.md) - [Использование PostgreSQL как источника данных для внешнего словаря](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql) [Оригинальная статья](https://clickhouse.com/docs/ru/sql-reference/table-functions/postgresql/) diff --git a/docs/tools/requirements.txt b/docs/tools/requirements.txt index c48a70b0909..dd641c13629 100644 --- a/docs/tools/requirements.txt +++ b/docs/tools/requirements.txt @@ -16,7 +16,7 @@ jsmin==3.0.0 livereload==2.6.3 Markdown==3.3.2 MarkupSafe==2.1.0 -mkdocs==1.1.2 +mkdocs==1.3.0 mkdocs-htmlproofer-plugin==0.0.3 mkdocs-macros-plugin==0.4.20 nltk==3.7 diff --git a/docs/zh/development/developer-instruction.md b/docs/zh/development/developer-instruction.md index bd7a197f926..7ade3ad57fb 100644 --- a/docs/zh/development/developer-instruction.md +++ b/docs/zh/development/developer-instruction.md @@ -259,7 +259,7 @@ ClickHouse的架构描述可以在此处查看:https://clickhouse.com/docs/en/ 即使工作尚未完成,也可以创建拉取请求。在这种情况下,请在标题的开头加上«WIP»(正在进行中),以便后续更改。这对于协同审查和讨论更改以及运行所有可用测试用例很有用。提供有关变更的简短描述很重要,这将在后续用于生成重新发布变更日志。 -Yandex成员一旦在您的拉取请求上贴上«可以测试»标签,就会开始测试。一些初始检查项(例如,代码类型)的结果会在几分钟内反馈。构建的检查结果将在半小时内完成。而主要的测试用例集结果将在一小时内报告给您。 +ClickHouse成员一旦在您的拉取请求上贴上«可以测试»标签,就会开始测试。一些初始检查项(例如,代码类型)的结果会在几分钟内反馈。构建的检查结果将在半小时内完成。而主要的测试用例集结果将在一小时内报告给您。 系统将分别为您的拉取请求准备ClickHouse二进制版本。若要检索这些构建信息,请在检查列表中单击« ClickHouse构建检查»旁边的«详细信息»链接。在这里,您会找到指向ClickHouse的.deb软件包的直接链接,此外,甚至可以将其部署在生产服务器上(如果您不担心)。 diff --git a/packages/clickhouse-server.service b/packages/clickhouse-server.service index a9400b24270..028b4fbf8ab 100644 --- a/packages/clickhouse-server.service +++ b/packages/clickhouse-server.service @@ -20,7 +20,7 @@ ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml EnvironmentFile=-/etc/default/clickhouse LimitCORE=infinity LimitNOFILE=500000 -CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE +CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE [Install] # ClickHouse should not start from the rescue shell (rescue.target). diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index c2094b3b00d..a34ce02b293 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -163,10 +163,24 @@ void Client::initialize(Poco::Util::Application & self) configReadClient(config(), home_path); + /** getenv is thread-safe in Linux glibc and in all sane libc implementations. + * But the standard does not guarantee that subsequent calls will not rewrite the value by returned pointer. + * + * man getenv: + * + * As typically implemented, getenv() returns a pointer to a string within the environment list. + * The caller must take care not to modify this string, since that would change the environment of + * the process. + * + * The implementation of getenv() is not required to be reentrant. 
The string pointed to by the return value of getenv()
+     * may be statically allocated, and can be modified by a subsequent call to getenv(), putenv(3), setenv(3), or unsetenv(3).
+     */
+
     const char * env_user = getenv("CLICKHOUSE_USER");
-    const char * env_password = getenv("CLICKHOUSE_PASSWORD");
     if (env_user)
         config().setString("user", env_user);
+
+    const char * env_password = getenv("CLICKHOUSE_PASSWORD");
     if (env_password)
         config().setString("password", env_password);
 
@@ -810,7 +824,7 @@ void Client::addOptions(OptionsDescription & options_description)
         ("quota_key", po::value<std::string>(), "A string to differentiate quotas when the user have keyed quotas configured on server")
 
         ("max_client_network_bandwidth", po::value<int>(), "the maximum speed of data exchange over the network for the client in bytes per second.")
-        ("compression", po::value<bool>(), "enable or disable compression")
+        ("compression", po::value<bool>(), "enable or disable compression (enabled by default for remote communication and disabled for localhost communication).")
 
         ("query-fuzzer-runs", po::value<int>()->default_value(0), "After executing every SELECT query, do random mutations in it and run again specified number of times. This is used for testing to discover unexpected corner cases.")
         ("interleave-queries-file", po::value<std::vector<std::string>>()->multitoken(),
@@ -1005,6 +1019,7 @@ void Client::processConfig()
         global_context->setCurrentQueryId(query_id);
     }
     print_stack_trace = config().getBool("stacktrace", false);
+    logging_initialized = true;
 
     if (config().has("multiquery"))
         is_multiquery = true;
diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp
index bb6684ca137..18b62e65765 100644
--- a/programs/local/LocalServer.cpp
+++ b/programs/local/LocalServer.cpp
@@ -434,6 +434,14 @@ catch (...)
     return getCurrentExceptionCode();
 }
 
+void LocalServer::updateLoggerLevel(const String & logs_level)
+{
+    if (!logging_initialized)
+        return;
+
+    config().setString("logger.level", logs_level);
+    updateLevels(config(), logger());
+}
 
 void LocalServer::processConfig()
 {
@@ -460,30 +468,31 @@ void LocalServer::processConfig()
     auto logging = (config().has("logger.console")
                     || config().has("logger.level")
                     || config().has("log-level")
+                    || config().has("send_logs_level")
                     || config().has("logger.log"));
 
-    auto file_logging = config().has("server_logs_file");
-    if (is_interactive && logging && !file_logging)
-        throw Exception("For interactive mode logging is allowed only with --server_logs_file option",
-                        ErrorCodes::BAD_ARGUMENTS);
+    auto level = config().getString("log-level", "trace");
 
-    if (file_logging)
+    if (config().has("server_logs_file"))
     {
-        auto level = Poco::Logger::parseLevel(config().getString("log-level", "trace"));
-        Poco::Logger::root().setLevel(level);
+        auto poco_logs_level = Poco::Logger::parseLevel(level);
+        Poco::Logger::root().setLevel(poco_logs_level);
         Poco::Logger::root().setChannel(Poco::AutoPtr<Poco::SimpleFileChannel>(new Poco::SimpleFileChannel(server_logs_file)));
+        logging_initialized = true;
     }
-    else if (logging)
+    else if (logging || is_interactive)
     {
-        // force enable logging
         config().setString("logger", "logger");
-        // sensitive data rules are not used here
+        auto log_level_default = is_interactive && !logging ? "none" : level;
"none" : level; + config().setString("logger.level", config().getString("log-level", config().getString("send_logs_level", log_level_default))); buildLoggers(config(), logger(), "clickhouse-local"); + logging_initialized = true; } else { Poco::Logger::root().setLevel("none"); Poco::Logger::root().setChannel(Poco::AutoPtr(new Poco::NullChannel())); + logging_initialized = false; } shared_context = Context::createShared(); @@ -713,6 +722,8 @@ void LocalServer::processOptions(const OptionsDescription &, const CommandLineOp config().setString("logger.log", options["logger.log"].as()); if (options.count("logger.level")) config().setString("logger.level", options["logger.level"].as()); + if (options.count("send_logs_level")) + config().setString("send_logs_level", options["send_logs_level"].as()); } } diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h index 969af7f1b77..e96fb211554 100644 --- a/programs/local/LocalServer.h +++ b/programs/local/LocalServer.h @@ -46,6 +46,8 @@ protected: void processConfig() override; + void updateLoggerLevel(const String & logs_level) override; + private: /** Composes CREATE subquery based on passed arguments (--structure --file --table and --input-format) * This query will be executed first, before queries passed through --query argument diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 8604fee4aea..c12abda9594 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -45,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -80,6 +82,7 @@ #include #include #include +#include #include #include #include @@ -505,6 +508,101 @@ void checkForUsersNotInMainConfig( } } +/// Unused in other builds +#if defined(OS_LINUX) +static String readString(const String & path) +{ + ReadBufferFromFile in(path); + String contents; + readStringUntilEOF(contents, in); + return contents; +} + +static int readNumber(const String & path) +{ + ReadBufferFromFile in(path); + int result; + readText(result, in); + return result; +} + +#endif + +static void sanityChecks(Server * server) +{ + std::string data_path = getCanonicalPath(server->config().getString("path", DBMS_DEFAULT_PATH)); + std::string logs_path = server->config().getString("logger.log", ""); + +#if defined(OS_LINUX) + try + { + if (readString("/sys/devices/system/clocksource/clocksource0/current_clocksource").find("tsc") == std::string::npos) + server->context()->addWarningMessage("Linux is not using fast TSC clock source. Performance can be degraded."); + } + catch (...) + { + } + + try + { + if (readNumber("/proc/sys/vm/overcommit_memory") == 2) + server->context()->addWarningMessage("Linux memory overcommit is disabled."); + } + catch (...) + { + } + + try + { + if (readString("/sys/kernel/mm/transparent_hugepage/enabled").find("[always]") != std::string::npos) + server->context()->addWarningMessage("Linux transparent hugepage are set to \"always\"."); + } + catch (...) + { + } + + try + { + if (readNumber("/proc/sys/kernel/pid_max") < 30000) + server->context()->addWarningMessage("Linux max PID is too low."); + } + catch (...) + { + } + + try + { + if (readNumber("/proc/sys/kernel/threads-max") < 30000) + server->context()->addWarningMessage("Linux threads max count is too low."); + } + catch (...) 
+    {
+    }
+
+    std::string dev_id = getBlockDeviceId(data_path);
+    if (getBlockDeviceType(dev_id) == BlockDeviceType::ROT && getBlockDeviceReadAheadBytes(dev_id) == 0)
+        server->context()->addWarningMessage("Rotational disk with disabled readahead is in use. Performance can be degraded.");
+#endif
+
+    try
+    {
+        if (getAvailableMemoryAmount() < (2l << 30))
+            server->context()->addWarningMessage("Available memory at server startup is too low (2GiB).");
+
+        if (!enoughSpaceInDirectory(data_path, 1ull << 30))
+            server->context()->addWarningMessage("Available disk space at server startup is too low (1GiB).");
+
+        if (!logs_path.empty())
+        {
+            if (!enoughSpaceInDirectory(fs::path(logs_path).parent_path(), 1ull << 30))
+                server->context()->addWarningMessage("Available disk space at server startup is too low (1GiB).");
+        }
+    }
+    catch (...)
+    {
+    }
+}
+
 int Server::main(const std::vector<std::string> & /*args*/)
 {
     Poco::Logger * log = &logger();
@@ -538,13 +636,14 @@ int Server::main(const std::vector<std::string> & /*args*/)
     global_context->addWarningMessage("Server was built in debug mode. It will work slowly.");
 #endif
 
-if (ThreadFuzzer::instance().isEffective())
-    global_context->addWarningMessage("ThreadFuzzer is enabled. Application will run slowly and unstable.");
+    if (ThreadFuzzer::instance().isEffective())
+        global_context->addWarningMessage("ThreadFuzzer is enabled. Application will run slowly and unstable.");
 
 #if defined(SANITIZER)
     global_context->addWarningMessage("Server was built with sanitizer. It will work slowly.");
 #endif
 
+    sanityChecks(this);
 
     // Initialize global thread pool. Do it before we fetch configs from zookeeper
     // nodes (`from_zk`), because ZooKeeper interface uses the pool. We will
@@ -766,6 +865,38 @@
         }
     }
 
+    /// Try to increase limit on number of threads.
+    {
+        rlimit rlim;
+        if (getrlimit(RLIMIT_NPROC, &rlim))
+            throw Poco::Exception("Cannot getrlimit");
+
+        if (rlim.rlim_cur == rlim.rlim_max)
+        {
+            LOG_DEBUG(log, "rlimit on number of threads is {}", rlim.rlim_cur);
+        }
+        else
+        {
+            rlim_t old = rlim.rlim_cur;
+            rlim.rlim_cur = rlim.rlim_max;
+            int rc = setrlimit(RLIMIT_NPROC, &rlim);
+            if (rc != 0)
+            {
+                LOG_WARNING(log, "Cannot set max number of threads to {}. error: {}", rlim.rlim_cur, strerror(errno));
+                rlim.rlim_cur = old;
+            }
+            else
+            {
+                LOG_DEBUG(log, "Set max number of threads to {} (was {}).", rlim.rlim_cur, old);
+            }
+        }
+
+        if (rlim.rlim_cur < 30000)
+        {
+            global_context->addWarningMessage("Maximum number of threads is lower than 30000. There could be problems with handling a lot of simultaneous queries.");
+        }
+    }
+
     static ServerErrorHandler error_handler;
     Poco::ErrorHandler::set(&error_handler);
diff --git a/programs/server/config.xml b/programs/server/config.xml
index 1de379b0b2a..3b035fb39ac 100644
--- a/programs/server/config.xml
+++ b/programs/server/config.xml
@@ -148,13 +148,13 @@
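The startup sanity checks added above only read plain procfs/sysfs files, so the same values can be inspected by hand before starting the server. A sketch mirroring the thresholds in the C++ code (Linux only):

```bash
#!/usr/bin/env bash
# Inspect the values sanityChecks() warns about.
cat /sys/devices/system/clocksource/clocksource0/current_clocksource  # warning unless it contains "tsc"
cat /proc/sys/vm/overcommit_memory                 # 2 => overcommit disabled => warning
cat /sys/kernel/mm/transparent_hugepage/enabled    # "[always]" => warning
cat /proc/sys/kernel/pid_max                       # < 30000 => warning
cat /proc/sys/kernel/threads-max                   # < 30000 => warning
ulimit -u                                          # cf. the RLIMIT_NPROC bump in Server::main
```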