diff --git a/.clang-tidy b/.clang-tidy index 687b3741b1c..6fd67876923 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -144,6 +144,7 @@ Checks: '-*, clang-analyzer-cplusplus.SelfAssignment, clang-analyzer-deadcode.DeadStores, clang-analyzer-cplusplus.Move, + clang-analyzer-optin.cplusplus.UninitializedObject, clang-analyzer-optin.cplusplus.VirtualCall, clang-analyzer-security.insecureAPI.UncheckedReturn, clang-analyzer-security.insecureAPI.bcmp, @@ -163,6 +164,8 @@ Checks: '-*, clang-analyzer-unix.cstring.NullArg, boost-use-to-string, + + alpha.security.cert.env.InvalidPtr, ' WarningsAsErrors: '*' diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 6540b60476f..2d8540b57ea 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,4 +1,4 @@ -Changelog category (leave one): +### Changelog category (leave one): - New Feature - Improvement - Bug Fix (user-visible misbehaviour in official stable or prestable release) @@ -9,7 +9,7 @@ Changelog category (leave one): - Not for changelog (changelog entry is not required) -Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md): +### Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md): ... diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index 75f8a63368d..417284f14d5 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -341,10 +341,15 @@ jobs: steps: - name: Set envs run: | + DEPENDENCIES=$(cat << 'EOF' | jq '. | length' + ${{ toJSON(needs) }} + EOF + ) + echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV" cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/report_check - REPORTS_PATH=${{runner.temp}}/reports_dir CHECK_NAME=ClickHouse build check (actions) + REPORTS_PATH=${{runner.temp}}/reports_dir + TEMP_PATH=${{runner.temp}}/report_check EOF - name: Download json reports uses: actions/download-artifact@v2 @@ -360,7 +365,7 @@ jobs: sudo rm -fr "$TEMP_PATH" mkdir -p "$TEMP_PATH" cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" + python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES" - name: Cleanup if: always() run: | diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 081fa165c68..eab7ce36eb7 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -992,10 +992,16 @@ jobs: steps: - name: Set envs run: | + DEPENDENCIES=$(cat << 'EOF' | jq '. 
| length' + ${{ toJSON(needs) }} + EOF + ) + echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV" cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/report_check - REPORTS_PATH=${{runner.temp}}/reports_dir CHECK_NAME=ClickHouse build check (actions) + REPORTS_PATH=${{runner.temp}}/reports_dir + REPORTS_PATH=${{runner.temp}}/reports_dir + TEMP_PATH=${{runner.temp}}/report_check EOF - name: Download json reports uses: actions/download-artifact@v2 @@ -1011,7 +1017,7 @@ jobs: sudo rm -fr "$TEMP_PATH" mkdir -p "$TEMP_PATH" cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" + python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES" - name: Cleanup if: always() run: | diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 5b47f94a324..1e70213adf5 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -72,3 +72,52 @@ jobs: with: name: changed_images path: ${{ runner.temp }}/changed_images.json + BuilderCoverity: + needs: DockerHubPush + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + CHECK_NAME=ClickHouse build check (actions) + BUILD_NAME=coverity + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + id: coverity-checkout + uses: actions/checkout@v2 + with: + submodules: 'true' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME" "${{ secrets.COV_TOKEN }}" + - name: Upload Coverity Analysis + if: ${{ success() || failure() }} + run: | + curl --form token='${{ secrets.COV_TOKEN }}' \ + --form email='${{ secrets.ROBOT_CLICKHOUSE_EMAIL }}' \ + --form file="@$TEMP_PATH/$BUILD_NAME/coverity-scan.tgz" \ + --form version="${GITHUB_REF#refs/heads/}-${GITHUB_SHA::6}" \ + --form description="Nightly Scan: $(date +'%Y-%m-%dT%H:%M:%S')" \ + https://scan.coverity.com/builds?project=ClickHouse%2FClickHouse + - name: Cleanup + if: always() + run: | + docker kill "$(docker ps -q)" ||: + docker rm -f "$(docker ps -a -q)" ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index c01d1821d0f..8942cca391e 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -1044,10 +1044,16 @@ jobs: steps: - name: Set envs run: | + DEPENDENCIES=$(cat << 'EOF' | jq '. 
| length' + ${{ toJSON(needs) }} + EOF + ) + echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV" cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/report_check - REPORTS_PATH=${{runner.temp}}/reports_dir CHECK_NAME=ClickHouse build check (actions) + REPORTS_PATH=${{runner.temp}}/reports_dir + REPORTS_PATH=${{runner.temp}}/reports_dir + TEMP_PATH=${{runner.temp}}/report_check EOF - name: Download json reports uses: actions/download-artifact@v2 @@ -1063,7 +1069,7 @@ jobs: sudo rm -fr "$TEMP_PATH" mkdir -p "$TEMP_PATH" cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" + python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES" - name: Cleanup if: always() run: | diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index d916699acc2..b2af465142b 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -436,10 +436,16 @@ jobs: steps: - name: Set envs run: | + DEPENDENCIES=$(cat << 'EOF' | jq '. | length' + ${{ toJSON(needs) }} + EOF + ) + echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV" cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/report_check - REPORTS_PATH=${{runner.temp}}/reports_dir CHECK_NAME=ClickHouse build check (actions) + REPORTS_PATH=${{runner.temp}}/reports_dir + REPORTS_PATH=${{runner.temp}}/reports_dir + TEMP_PATH=${{runner.temp}}/report_check EOF - name: Download json reports uses: actions/download-artifact@v2 @@ -455,7 +461,7 @@ jobs: sudo rm -fr "$TEMP_PATH" mkdir -p "$TEMP_PATH" cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" + python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES" - name: Cleanup if: always() run: | diff --git a/CMakeLists.txt b/CMakeLists.txt index a9ce64b87ba..d893ba773cc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -294,14 +294,19 @@ include(cmake/cpu_features.cmake) # Enable it explicitly. set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables") -# Reproducible builds -# If turned `ON`, remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE(). -option(ENABLE_BUILD_PATH_MAPPING "Enable remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE(). It's to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ON) +# Reproducible builds. +if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") + set (ENABLE_BUILD_PATH_MAPPING_DEFAULT OFF) +else () + set (ENABLE_BUILD_PATH_MAPPING_DEFAULT ON) +endif () + +option (ENABLE_BUILD_PATH_MAPPING "Enable remapping of file source paths in debug info, predefined preprocessor macros, and __builtin_FILE(). It's used to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ${ENABLE_BUILD_PATH_MAPPING_DEFAULT}) if (ENABLE_BUILD_PATH_MAPPING) set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.") -endif() +endif () if (${CMAKE_VERSION} VERSION_LESS "3.12.4") # CMake < 3.12 doesn't support setting 20 as a C++ standard version. 
diff --git a/base/loggers/Loggers.cpp b/base/loggers/Loggers.cpp index 7c627ad2272..512e44f79c7 100644 --- a/base/loggers/Loggers.cpp +++ b/base/loggers/Loggers.cpp @@ -197,7 +197,6 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log Poco::AutoPtr pf = new OwnPatternFormatter(color_enabled); Poco::AutoPtr log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel); - logger.warning("Logging " + console_log_level_string + " to console"); log->setLevel(console_log_level); split->addChannel(log, "console"); } diff --git a/contrib/arrow b/contrib/arrow index 1d9cc51daa4..efdcd015cfd 160000 --- a/contrib/arrow +++ b/contrib/arrow @@ -1 +1 @@ -Subproject commit 1d9cc51daa4e7e9fc6926320ef73759818bd736e +Subproject commit efdcd015cfdee1b6aa349c9ca227ca12c3d697f5 diff --git a/contrib/krb5-cmake/CMakeLists.txt b/contrib/krb5-cmake/CMakeLists.txt index 685e8737ef0..214d23bc2a9 100644 --- a/contrib/krb5-cmake/CMakeLists.txt +++ b/contrib/krb5-cmake/CMakeLists.txt @@ -1,4 +1,4 @@ -set (ENABLE_KRB5_DEFAULT 1) +set (ENABLE_KRB5_DEFAULT ${ENABLE_LIBRARIES}) if (NOT CMAKE_SYSTEM_NAME MATCHES "Linux" AND NOT (CMAKE_SYSTEM_NAME MATCHES "Darwin" AND NOT CMAKE_CROSSCOMPILING)) message (WARNING "krb5 disabled in non-Linux and non-native-Darwin environments") set (ENABLE_KRB5_DEFAULT 0) @@ -16,6 +16,7 @@ if(NOT AWK_PROGRAM) endif() set(KRB5_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/krb5/src") +set(KRB5_ET_BIN_DIR "${CMAKE_CURRENT_BINARY_DIR}/include_private") set(ALL_SRCS "${KRB5_SOURCE_DIR}/util/et/et_name.c" @@ -90,7 +91,6 @@ set(ALL_SRCS "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/get_tkt_flags.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_allowable_enctypes.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealiov.c" - "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/canon_name.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_cred.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_sec_context.c" @@ -143,11 +143,12 @@ set(ALL_SRCS "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer_set.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_set.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_token.c" - "${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_err_generic.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_major_status.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_seqstate.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_errmap.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_buffer.c" + "${KRB5_ET_BIN_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c" + "${KRB5_ET_BIN_DIR}/lib/gssapi/generic/gssapi_err_generic.c" "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/spnego_mech.c" "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_util.c" @@ -256,8 +257,8 @@ set(ALL_SRCS "${KRB5_SOURCE_DIR}/util/profile/prof_parse.c" "${KRB5_SOURCE_DIR}/util/profile/prof_get.c" "${KRB5_SOURCE_DIR}/util/profile/prof_set.c" - "${KRB5_SOURCE_DIR}/util/profile/prof_err.c" "${KRB5_SOURCE_DIR}/util/profile/prof_init.c" + "${KRB5_ET_BIN_DIR}/util/profile/prof_err.c" "${KRB5_SOURCE_DIR}/lib/krb5/krb/fwd_tgt.c" "${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_creds.c" "${KRB5_SOURCE_DIR}/lib/krb5/krb/fast.c" @@ -450,13 +451,12 @@ set(ALL_SRCS - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.c" - + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/k5e1_err.c" + 
"${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kdb5_err.c" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/asn1_err.c" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb5_err.c" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb524_err.c" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kv5m_err.c" "${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_base.c" @@ -473,7 +473,7 @@ set(ALL_SRCS ) add_custom_command( - OUTPUT "${KRB5_SOURCE_DIR}/util/et/compile_et" + OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/compile_et" COMMAND /bin/sh ./config_script ./compile_et.sh @@ -481,50 +481,17 @@ add_custom_command( ${AWK_PROGRAM} sed > - compile_et + ${CMAKE_CURRENT_BINARY_DIR}/compile_et DEPENDS "${KRB5_SOURCE_DIR}/util/et/compile_et.sh" "${KRB5_SOURCE_DIR}/util/et/config_script" WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/util/et" ) -file(GLOB_RECURSE ET_FILES - "${KRB5_SOURCE_DIR}/*.et" -) - -function(preprocess_et out_var) - set(result) - foreach(in_f ${ARGN}) - string(REPLACE - .et - .c - F_C - ${in_f} - ) - string(REPLACE - .et - .h - F_H - ${in_f} - ) - - get_filename_component(ET_PATH ${in_f} DIRECTORY) - - add_custom_command(OUTPUT ${F_C} ${F_H} - COMMAND perl "${KRB5_SOURCE_DIR}/util/et/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${in_f} - DEPENDS ${in_f} "${KRB5_SOURCE_DIR}/util/et/compile_et" - WORKING_DIRECTORY ${ET_PATH} - VERBATIM - ) - list(APPEND result ${F_C}) - endforeach() - set(${out_var} "${result}" PARENT_SCOPE) -endfunction() - add_custom_command( - OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h" + OUTPUT "${KRB5_ET_BIN_DIR}/error_map.h" COMMAND perl -I../../../util ../../../util/gen-map.pl - -oerror_map.h + -o${KRB5_ET_BIN_DIR}/error_map.h NAME=gsserrmap KEY=OM_uint32 VALUE=char* @@ -536,22 +503,21 @@ add_custom_command( add_custom_target( ERROR_MAP_H - DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h" + DEPENDS "${KRB5_ET_BIN_DIR}/error_map.h" VERBATIM ) add_custom_command( - OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h" - COMMAND perl -w -I../../../util ../../../util/gen.pl bimap errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp + OUTPUT "${KRB5_ET_BIN_DIR}/errmap.h" + COMMAND perl -w -I../../../util ../../../util/gen.pl bimap ${KRB5_ET_BIN_DIR}/errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/lib/gssapi/generic" ) add_custom_target( ERRMAP_H - DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h" + DEPENDS "${KRB5_ET_BIN_DIR}/errmap.h" VERBATIM ) - add_custom_target( KRB_5_H DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h" @@ -567,7 +533,40 @@ add_dependencies( KRB_5_H ) -preprocess_et(processed_et_files ${ET_FILES}) +# +# Generate error tables +# +function(preprocess_et et_path) + string(REPLACE .et .c F_C ${et_path}) + string(REPLACE .et .h F_H ${et_path}) + get_filename_component(et_dir ${et_path} DIRECTORY) + get_filename_component(et_name ${et_path} NAME_WLE) + + add_custom_command(OUTPUT ${F_C} ${F_H} ${KRB5_ET_BIN_DIR}/${et_name}.h + COMMAND perl "${CMAKE_CURRENT_BINARY_DIR}/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${et_path} + # for #include w/o path (via -iquote) + COMMAND ${CMAKE_COMMAND} -E create_symlink ${F_H} ${KRB5_ET_BIN_DIR}/${et_name}.h + DEPENDS ${et_path} "${CMAKE_CURRENT_BINARY_DIR}/compile_et" + WORKING_DIRECTORY ${et_dir} + VERBATIM + ) +endfunction() + +function(generate_error_tables) + 
file(GLOB_RECURSE ET_FILES "${KRB5_SOURCE_DIR}/*.et") + foreach(et_path ${ET_FILES}) + string(REPLACE ${KRB5_SOURCE_DIR} ${KRB5_ET_BIN_DIR} et_bin_path ${et_path}) + string(REPLACE / _ et_target_name ${et_path}) + get_filename_component(et_bin_dir ${et_bin_path} DIRECTORY) + add_custom_command(OUTPUT ${et_bin_path} + COMMAND ${CMAKE_COMMAND} -E make_directory ${et_bin_dir} + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${et_path} ${et_bin_path} + VERBATIM + ) + preprocess_et(${et_bin_path}) + endforeach() +endfunction() +generate_error_tables() if(CMAKE_SYSTEM_NAME MATCHES "Darwin") add_custom_command( @@ -634,12 +633,12 @@ file(MAKE_DIRECTORY SET(KRBHDEP "${KRB5_SOURCE_DIR}/include/krb5/krb5.hin" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb5_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/k5e1_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kdb5_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kv5m_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb524_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/asn1_err.h" ) # cmake < 3.18 does not have 'cat' command @@ -656,6 +655,11 @@ target_include_directories(_krb5 SYSTEM BEFORE PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/include" ) +target_compile_options(_krb5 PRIVATE + # For '#include "file.h"' + -iquote "${CMAKE_CURRENT_BINARY_DIR}/include_private" +) + target_include_directories(_krb5 PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/include_private" # For autoconf.h and other generated headers. 
${KRB5_SOURCE_DIR} diff --git a/contrib/poco b/contrib/poco index 520a90e02e3..008b1646947 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit 520a90e02e3e5cb90afeae1846d161dbc508a6f1 +Subproject commit 008b16469471d55b176db181756c94e3f14dd2dc diff --git a/contrib/replxx b/contrib/replxx index 6f0b6f151ae..3fd0e3c9364 160000 --- a/contrib/replxx +++ b/contrib/replxx @@ -1 +1 @@ -Subproject commit 6f0b6f151ae2a044625ae93acd19ca365fcea64d +Subproject commit 3fd0e3c9364a589447453d9906d854ebd8d385c5 diff --git a/contrib/unixodbc b/contrib/unixodbc index b0ad30f7f62..a2cd5395e8c 160000 --- a/contrib/unixodbc +++ b/contrib/unixodbc @@ -1 +1 @@ -Subproject commit b0ad30f7f6289c12b76f04bfb9d466374bb32168 +Subproject commit a2cd5395e8c7f7390025ec93af5bfebef3fb5fcd diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index 207dddce1bb..068377e8f8c 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -20,6 +20,8 @@ ENV LANG=en_US.UTF-8 \ COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/ COPY --from=glibc-donor /etc/nsswitch.conf /etc/ COPY entrypoint.sh /entrypoint.sh + +ARG TARGETARCH RUN arch=${TARGETARCH:-amd64} \ && case $arch in \ amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \ diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index 31416e1a0ee..269d3eb52c6 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -25,13 +25,21 @@ read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}" env cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" .. +if [ "coverity" == "$COMBINED_OUTPUT" ] +then + wget --post-data "token=$COV_TOKEN&project=ClickHouse%2FClickHouse" -qO- https://scan.coverity.com/download/linux64 | tar xz -C /opt/cov-analysis --strip-components 1 + export PATH=$PATH:/opt/cov-analysis/bin + cov-configure --config ./coverity.config --template --comptype clangcc --compiler "$CC" + SCAN_WRAPPER="cov-build --config ./coverity.config --dir cov-int" +fi + cache_status # clear cache stats ccache --zero-stats ||: # No quotes because I want it to expand to nothing if empty. -# shellcheck disable=SC2086 -ninja $NINJA_FLAGS clickhouse-bundle +# shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty. 
+$SCAN_WRAPPER ninja $NINJA_FLAGS clickhouse-bundle cache_status @@ -91,6 +99,12 @@ then mv "$COMBINED_OUTPUT.tgz" /output fi +if [ "coverity" == "$COMBINED_OUTPUT" ] +then + tar -cv -I pigz -f "coverity-scan.tgz" cov-int + mv "coverity-scan.tgz" /output +fi + # Also build fuzzers if any sanitizer specified # if [ -n "$SANITIZER" ] # then diff --git a/docker/packager/packager b/docker/packager/packager index f82d402d613..1a79b497fa2 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -86,6 +86,7 @@ def parse_env_variables( additional_pkgs, with_coverage, with_binaries, + cov_token, ): DARWIN_SUFFIX = "-darwin" DARWIN_ARM_SUFFIX = "-darwin-aarch64" @@ -176,6 +177,9 @@ def parse_env_variables( if package_type == "performance": result.append("COMBINED_OUTPUT=performance") cmake_flags.append("-DENABLE_TESTS=0") + elif package_type == "coverity": + result.append("COMBINED_OUTPUT=coverity") + result.append("COV_TOKEN={}".format(cov_token)) elif split_binary: result.append("COMBINED_OUTPUT=shared_build") @@ -262,9 +266,8 @@ if __name__ == "__main__": # and configs to be used for performance test. parser.add_argument( "--package-type", - choices=("deb", "binary", "performance"), + choices=["deb", "binary", "performance", "coverity"], required=True, - help="a build type", ) parser.add_argument( "--clickhouse-repo-path", @@ -325,12 +328,13 @@ if __name__ == "__main__": parser.add_argument( "--docker-image-version", default="latest", help="docker image tag to use" ) + parser.add_argument("--cov_token", default="") args = parser.parse_args() if not os.path.isabs(args.output_dir): args.output_dir = os.path.abspath(os.path.join(os.getcwd(), args.output_dir)) - image_type = "binary" if args.package_type == "performance" else args.package_type + image_type = "binary" if args.package_type in ("performance", "coverity") else args.package_type image_name = "clickhouse/binary-builder" if not os.path.isabs(args.clickhouse_repo_path): @@ -372,6 +376,7 @@ if __name__ == "__main__": args.additional_pkgs, args.with_coverage, args.with_binaries, + args.cov_token, ) run_docker_image_with_env( diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index cc198772251..6e93bd97036 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -94,8 +94,9 @@ RUN arch=${TARGETARCH:-amd64} \ && apt-get update \ && apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \ && for package in ${PACKAGES}; do \ - apt-get install --allow-unauthenticated --yes --no-install-recommends "${package}=${VERSION}" || exit 1 \ + packages="${packages} ${package}=${VERSION}" \ ; done \ + && apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \ ; fi \ && clickhouse-local -q 'SELECT * FROM system.build_options' \ && rm -rf \ diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 74711f476f8..32799a669eb 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -226,7 +226,6 @@ quit --receive_data_timeout_ms=10000 \ --stacktrace \ --query-fuzzer-runs=1000 \ - --testmode \ --queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) \ $NEW_TESTS_OPT \ > >(tail -n 100000 > fuzzer.log) \ diff --git a/docker/test/integration/mysql_js_client/Dockerfile b/docker/test/integration/mysql_js_client/Dockerfile index b1397b40d38..4c9df10ace1 100644 --- a/docker/test/integration/mysql_js_client/Dockerfile +++ 
b/docker/test/integration/mysql_js_client/Dockerfile @@ -1,8 +1,10 @@ # docker build -t clickhouse/mysql-js-client . # MySQL JavaScript client docker container -FROM node:8 +FROM node:16.14.2 + +WORKDIR /usr/app RUN npm install mysql -COPY ./test.js test.js +COPY ./test.js ./test.js diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 3cef5b008db..ba85999caa5 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -106,17 +106,6 @@ function stop() function start() { - # Rename existing log file - it will be more convenient to read separate files for separate server runs. - if [ -f '/var/log/clickhouse-server/clickhouse-server.log' ] - then - log_file_counter=1 - while [ -f "/var/log/clickhouse-server/clickhouse-server.log.${log_file_counter}" ] - do - log_file_counter=$((log_file_counter + 1)) - done - mv '/var/log/clickhouse-server/clickhouse-server.log' "/var/log/clickhouse-server/clickhouse-server.log.${log_file_counter}" - fi - counter=0 until clickhouse-client --query "SELECT 1" do @@ -190,6 +179,8 @@ clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordin clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test" stop +mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log + start clickhouse-client --query "SHOW TABLES FROM datasets" @@ -205,6 +196,8 @@ clickhouse-client --query "SHOW TABLES FROM test" || echo -e 'Test script failed\tFAIL' >> /test_output/test_results.tsv stop +mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.stress.log + start clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_output/test_results.tsv \ @@ -263,10 +256,12 @@ mkdir previous_release_package_folder clickhouse-client --query="SELECT version()" | ./download_previous_release && echo -e 'Download script exit code\tOK' >> /test_output/test_results.tsv \ || echo -e 'Download script failed\tFAIL' >> /test_output/test_results.tsv +stop +mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.clean.log + if [ "$(ls -A previous_release_package_folder/clickhouse-common-static_*.deb && ls -A previous_release_package_folder/clickhouse-server_*.deb)" ] then echo -e "Successfully downloaded previous release packets\tOK" >> /test_output/test_results.tsv - stop # Uninstall current packages dpkg --remove clickhouse-client @@ -289,7 +284,7 @@ then install_packages package_folder mkdir tmp_stress_output - + ./stress --backward-compatibility-check --output-folder tmp_stress_output --global-time-limit=1200 \ && echo -e 'Backward compatibility check: Test script exit code\tOK' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: Test script failed\tFAIL' >> /test_output/test_results.tsv @@ -297,8 +292,9 @@ then clickhouse-client --query="SELECT 'Tables count:', count() FROM system.tables" - stop - + stop + mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.stress.log + # Start new server configure start 500 @@ -310,8 +306,9 @@ then # Let the server run for a while before checking log. 
sleep 60 - + stop + mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.clean.log # Error messages (we should ignore some errors) echo "Check for Error messages in server log:" @@ -332,7 +329,7 @@ then -e "Code: 1000, e.code() = 111, Connection refused" \ -e "UNFINISHED" \ -e "Renaming unexpected part" \ - /var/log/clickhouse-server/clickhouse-server.log | zgrep -Fa "" > /test_output/bc_check_error_messages.txt \ + /var/log/clickhouse-server/clickhouse-server.backward.*.log | zgrep -Fa "" > /test_output/bc_check_error_messages.txt \ && echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv @@ -348,13 +345,13 @@ then rm -f /test_output/tmp # OOM - zgrep -Fa " Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ + zgrep -Fa " Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \ && echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv # Logical errors echo "Check for Logical errors in server log:" - zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log > /test_output/bc_check_logical_errors.txt \ + zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_logical_errors.txt \ && echo -e 'Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No logical errors\tOK' >> /test_output/test_results.tsv @@ -362,19 +359,18 @@ then [ -s /test_output/bc_check_logical_errors.txt ] || rm /test_output/bc_check_logical_errors.txt # Crash - zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ + zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \ && echo -e 'Backward compatibility check: Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: Not crashed\tOK' >> /test_output/test_results.tsv # It also checks for crash without stacktrace (printed by watchdog) echo "Check for Fatal message in server log:" - zgrep -Fa " " /var/log/clickhouse-server/clickhouse-server.log > /test_output/bc_check_fatal_messages.txt \ + zgrep -Fa " " /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_fatal_messages.txt \ && echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv # Remove file bc_check_fatal_messages.txt if it's empty [ -s /test_output/bc_check_fatal_messages.txt ] || rm /test_output/bc_check_fatal_messages.txt - else echo -e "Backward compatibility check: Failed to download previous 
release packets\tFAIL" >> /test_output/test_results.tsv fi diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index ad199ce452e..20d6b20feb6 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -43,7 +43,7 @@ toc_title: Adopters | Citymobil | Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) | | Cloudflare | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | | Comcast | Media | CDN Traffic Analysis | — | — | [ApacheCon 2019 Talk](https://www.youtube.com/watch?v=e9TZ6gFDjNg) | -| ContentSquare | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | +| Contentsquare | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | | Corunet | Analytics | Main product | — | — | [Slides in English, April 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | | CraiditX 氪信 | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | | Crazypanda | Games | | — | — | Live session on ClickHouse meetup | diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 88c43c9c3c2..301b348925f 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -1467,6 +1467,18 @@ The update is performed asynchronously, in a separate system thread. - [background_schedule_pool_size](../../operations/settings/settings.md#background_schedule_pool_size) + +## dns_max_consecutive_failures {#server-settings-dns-max-consecutive-failures} + +The number of consecutive failures accepted when updating a DNS cache entry before it is dropped. +Use `0` to disable cache dropping (entries will only be cleaned by `SYSTEM DROP DNS CACHE`). + +**Default value**: 5. + +**See also** + +- [`SYSTEM DROP DNS CACHE`](../../sql-reference/statements/system.md#query_language-system-drop-dns-cache) + ## distributed_ddl {#server-settings-distributed_ddl} Manage executing [distributed ddl queries](../../sql-reference/distributed-ddl.md) (CREATE, DROP, ALTER, RENAME) on cluster. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 91bf0812de4..f9996cbfb0b 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1062,6 +1062,15 @@ Result: └─────────────┴───────────┘ ``` +## log_processors_profiles {#settings-log_processors_profiles} + +Writes the time that each processor spent executing and waiting for data into the `system.processors_profile_log` table. 
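A usage sketch for the `log_processors_profiles` setting described above (not part of the patch; the table and column names come from the `system.processors_profile_log` documentation added in this same change, and the `query_id` literal is a placeholder to substitute by hand):

``` sql
-- Enable per-processor profiling for subsequent queries in this session.
SET log_processors_profiles = 1;

-- Run any query; its processors are then logged.
SELECT sum(number) FROM numbers(1000000) FORMAT Null;

-- Log tables are flushed in the background; force a flush to inspect immediately.
SYSTEM FLUSH LOGS;

-- Substitute the real query_id of the query above.
SELECT name, elapsed_us, input_wait_elapsed_us, output_wait_elapsed_us
FROM system.processors_profile_log
WHERE query_id = '<query_id>'
ORDER BY elapsed_us DESC;
```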
+ +See also: + +- [`system.processors_profile_log`](../../operations/system-tables/processors_profile_log.md#system-processors_profile_log) +- [`EXPLAIN PIPELINE`](../../sql-reference/statements/explain.md#explain-pipeline) + ## max_insert_block_size {#settings-max_insert_block_size} The size of blocks (in a count of rows) to form for insertion into a table. diff --git a/docs/en/operations/system-tables/processors_profile_log.md b/docs/en/operations/system-tables/processors_profile_log.md new file mode 100644 index 00000000000..2d76edb5dd7 --- /dev/null +++ b/docs/en/operations/system-tables/processors_profile_log.md @@ -0,0 +1,75 @@ +# system.processors_profile_log {#system-processors_profile_log} + +This table contains profiling information at the level of individual processors (the units that you can see in [`EXPLAIN PIPELINE`](../../sql-reference/statements/explain.md#explain-pipeline)). + +Columns: + +- `event_date` ([Date](../../sql-reference/data-types/date.md)) — The date when the event happened. +- `event_time` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — The date and time when the event happened. +- `id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — ID of the processor. +- `parent_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — IDs of the parent processors. +- `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query. +- `name` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) — Name of the processor. +- `elapsed_us` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of microseconds this processor spent executing. +- `input_wait_elapsed_us` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of microseconds this processor was waiting for data (from another processor). +- `output_wait_elapsed_us` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of microseconds this processor was waiting because its output port was full. + +**Example** + +Query: + +``` sql +EXPLAIN PIPELINE +SELECT sleep(1) + +┌─explain─────────────────────────┐ +│ (Expression) │ +│ ExpressionTransform │ +│ (SettingQuotaAndLimits) │ +│ (ReadFromStorage) │ +│ SourceFromSingleChunk 0 → 1 │ +└─────────────────────────────────┘ + +SELECT sleep(1) +SETTINGS log_processors_profiles = 1 + +Query id: feb5ed16-1c24-4227-aa54-78c02b3b27d4 + +┌─sleep(1)─┐ +│ 0 │ +└──────────┘ + +1 rows in set. Elapsed: 1.018 sec. + +SELECT + name, + elapsed_us, + input_wait_elapsed_us, + output_wait_elapsed_us +FROM system.processors_profile_log +WHERE query_id = 'feb5ed16-1c24-4227-aa54-78c02b3b27d4' +ORDER BY name ASC +``` + +Result: + +``` text +┌─name────────────────────┬─elapsed_us─┬─input_wait_elapsed_us─┬─output_wait_elapsed_us─┐ +│ ExpressionTransform │ 1000497 │ 2823 │ 197 │ +│ LazyOutputFormat │ 36 │ 1002188 │ 0 │ +│ LimitsCheckingTransform │ 10 │ 1002994 │ 106 │ +│ NullSource │ 5 │ 1002074 │ 0 │ +│ NullSource │ 1 │ 1002084 │ 0 │ +│ SourceFromSingleChunk │ 45 │ 4736 │ 1000819 │ └─────────────────────────┴────────────┴───────────────────────┴────────────────────────┘ ``` + +Here you can see: + +- `ExpressionTransform` was executing the `sleep(1)` function, so its `work` method takes about 1e6 microseconds, and therefore `elapsed_us` > 1e6. +- `SourceFromSingleChunk` needs to wait, because `ExpressionTransform` does not accept any data during the execution of `sleep(1)`, so it stays in the `PortFull` state for about 1e6 microseconds, and therefore `output_wait_elapsed_us` > 1e6. 
+- `LimitsCheckingTransform`/`NullSource`/`LazyOutputFormat` have to wait until `ExpressionTransform` finishes executing `sleep(1)` before they can process the result, so `input_wait_elapsed_us` > 1e6. + +**See Also** + +- [`EXPLAIN PIPELINE`](../../sql-reference/statements/explain.md#explain-pipeline) diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index d535a516b3a..fc48c97bb61 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -393,6 +393,13 @@ This is a generalization of other functions named `toStartOf*`. For example, `toStartOfInterval(t, INTERVAL 1 day)` returns the same as `toStartOfDay(t)`, `toStartOfInterval(t, INTERVAL 15 minute)` returns the same as `toStartOfFifteenMinutes(t)` etc. +## toLastDayOfMonth {#toLastDayOfMonth} + +Rounds up a date or date with time to the last day of the month. +Returns the date. + +Alias: `LAST_DAY`. + ## toTime {#totime} Converts a date with time to a certain fixed date, while preserving the time. diff --git a/docs/en/sql-reference/functions/index.md b/docs/en/sql-reference/functions/index.md index 7cceec889bd..572aa7f632e 100644 --- a/docs/en/sql-reference/functions/index.md +++ b/docs/en/sql-reference/functions/index.md @@ -77,7 +77,7 @@ A function configuration contains the following settings: - `argument` - argument description with the `type`, and optional `name` of an argument. Each argument is described in a separate setting. Specifying name is necessary if argument names are part of serialization for user defined function format like [Native](../../interfaces/formats.md#native) or [JSONEachRow](../../interfaces/formats.md#jsoneachrow). Default argument name value is `c` + argument_number. - `format` - a [format](../../interfaces/formats.md) in which arguments are passed to the command. - `return_type` - the type of a returned value. -- `return_name` - name of retuned value. Specifying return name is necessary if return name is part of serialization for user defined function format like [Native](../../interfaces/formats.md#native) or [JSONEachRow](../../interfaces/formats.md#jsoneachrow). Optional. Default value is `result`. +- `return_name` - name of returned value. Specifying return name is necessary if return name is part of serialization for user defined function format like [Native](../../interfaces/formats.md#native) or [JSONEachRow](../../interfaces/formats.md#jsoneachrow). Optional. Default value is `result`. - `type` - an executable type. If `type` is set to `executable` then single command is started. If it is set to `executable_pool` then a pool of commands is created. - `max_command_execution_time` - maximum execution time in seconds for processing block of data. This setting is valid for `executable_pool` commands only. Optional. Default value is `10`. - `command_termination_timeout` - time in seconds during which a command should finish after its pipe is closed. After that time `SIGTERM` is sent to the process executing the command. Optional. Default value is `10`. 
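To illustrate the `toLastDayOfMonth` function added in the date-time-functions hunk above, a short sketch (the expected values in the comments follow from the documented rounding semantics; they are not captured from a real session):

``` sql
SELECT
    toLastDayOfMonth(toDate('2022-03-15')) AS last_day,        -- 2022-03-31
    LAST_DAY(toDateTime('2022-02-03 10:00:00')) AS via_alias;  -- 2022-02-28
```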
diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index bce3f9144b1..cedde8a7f35 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -2499,3 +2499,41 @@ Result: │ 286 │ └──────────────────────────┘ ``` + +## getTypeSerializationStreams {#getTypeSerializationStreams} + +Returns the names of the serialization streams of a data type. + +**Syntax** +``` sql +getTypeSerializationStreams(type_name) + +getTypeSerializationStreams(column) +``` + +**Arguments** +- `type_name` - Name of the data type whose serialization streams to return. [String](../../sql-reference/data-types/string.md#string). +- `column` - Any column; the streams of its data type are returned. + +**Returned value** +- List of serialization stream names. + +Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). + + + +**Example** + +Query: + +``` sql +SELECT getTypeSerializationStreams('Array(Array(Int8))') +``` + +Result: + +``` text +┌───────────────────────getTypeSerializationStreams('Array(Array(Int8))')─────────────────────────────┐ +│ ['{ArraySizes}','{ArrayElements, ArraySizes}','{ArrayElements, ArrayElements, Regular}'] │ +└─────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` diff --git a/docs/ja/introduction/adopters.md b/docs/ja/introduction/adopters.md index 6f878bf1dfe..3372bb74f12 100644 --- a/docs/ja/introduction/adopters.md +++ b/docs/ja/introduction/adopters.md @@ -27,7 +27,7 @@ toc_title: "\u30A2\u30C0\u30D7\u30BF\u30FC" | Cisco | ネットワーク | トラフィック分析 | — | — | [ライトニングトーク2019](https://youtu.be/-hI1vDR2oPY?t=5057) | | Citadel Securities | 金融 | — | — | — | [2019年の貢献](https://github.com/ClickHouse/ClickHouse/pull/4774) | | シティモービル | タクシー | 分析 | — | — | [ロシア語でのブログ投稿,月2020](https://habr.com/en/company/citymobil/blog/490660/) | -| ContentSquare | ウェブ分析 | 主な製品 | — | — | [フランス語でのブログ投稿,November2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | +| Contentsquare | ウェブ分析 | 主な製品 | — | — | [フランス語でのブログ投稿,November2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | | Cloudflare | CDN | トラフィック分析 | 36台のサーバー | — | [ブログ投稿,月2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [ブログ投稿,月2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | | コルネット | 分析 | 主な製品 | — | — | [2019年英語スライド](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | | CraiditX 氪信 | ファイナンスAI | 分析 | — | — | [2019年のスライド](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | diff --git a/docs/ru/sql-reference/table-functions/postgresql.md b/docs/ru/sql-reference/table-functions/postgresql.md index a8ae7cfb80b..e61ca69d78c 100644 --- a/docs/ru/sql-reference/table-functions/postgresql.md +++ b/docs/ru/sql-reference/table-functions/postgresql.md @@ -126,7 +126,7 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32) **См. 
также** -- [Движок таблиц PostgreSQL](../../sql-reference/table-functions/postgresql.md) +- [Движок таблиц PostgreSQL](../../engines/table-engines/integrations/postgresql.md) - [Использование PostgreSQL как источника данных для внешнего словаря](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql) [Оригинальная статья](https://clickhouse.com/docs/ru/sql-reference/table-functions/postgresql/) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index c2094b3b00d..a34ce02b293 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -163,10 +163,24 @@ void Client::initialize(Poco::Util::Application & self) configReadClient(config(), home_path); + /** getenv is thread-safe in Linux glibc and in all sane libc implementations. + * But the standard does not guarantee that subsequent calls will not rewrite the value by returned pointer. + * + * man getenv: + * + * As typically implemented, getenv() returns a pointer to a string within the environment list. + * The caller must take care not to modify this string, since that would change the environment of + * the process. + * + * The implementation of getenv() is not required to be reentrant. The string pointed to by the return value of getenv() + * may be statically allocated, and can be modified by a subsequent call to getenv(), putenv(3), setenv(3), or unsetenv(3). + */ + const char * env_user = getenv("CLICKHOUSE_USER"); - const char * env_password = getenv("CLICKHOUSE_PASSWORD"); if (env_user) config().setString("user", env_user); + + const char * env_password = getenv("CLICKHOUSE_PASSWORD"); if (env_password) config().setString("password", env_password); @@ -810,7 +824,7 @@ void Client::addOptions(OptionsDescription & options_description) ("quota_key", po::value(), "A string to differentiate quotas when the user have keyed quotas configured on server") ("max_client_network_bandwidth", po::value(), "the maximum speed of data exchange over the network for the client in bytes per second.") - ("compression", po::value(), "enable or disable compression") + ("compression", po::value(), "enable or disable compression (enabled by default for remote communication and disabled for localhost communication).") ("query-fuzzer-runs", po::value()->default_value(0), "After executing every SELECT query, do random mutations in it and run again specified number of times. This is used for testing to discover unexpected corner cases.") ("interleave-queries-file", po::value>()->multitoken(), @@ -1005,6 +1019,7 @@ void Client::processConfig() global_context->setCurrentQueryId(query_id); } print_stack_trace = config().getBool("stacktrace", false); + logging_initialized = true; if (config().has("multiquery")) is_multiquery = true; diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index bb6684ca137..18b62e65765 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -434,6 +434,14 @@ catch (...) 
return getCurrentExceptionCode(); } +void LocalServer::updateLoggerLevel(const String & logs_level) +{ + if (!logging_initialized) + return; + + config().setString("logger.level", logs_level); + updateLevels(config(), logger()); +} void LocalServer::processConfig() { @@ -460,30 +468,31 @@ void LocalServer::processConfig() auto logging = (config().has("logger.console") || config().has("logger.level") || config().has("log-level") + || config().has("send_logs_level") || config().has("logger.log")); - auto file_logging = config().has("server_logs_file"); - if (is_interactive && logging && !file_logging) - throw Exception("For interactive mode logging is allowed only with --server_logs_file option", - ErrorCodes::BAD_ARGUMENTS); + auto level = config().getString("log-level", "trace"); - if (file_logging) + if (config().has("server_logs_file")) { - auto level = Poco::Logger::parseLevel(config().getString("log-level", "trace")); - Poco::Logger::root().setLevel(level); + auto poco_logs_level = Poco::Logger::parseLevel(level); + Poco::Logger::root().setLevel(poco_logs_level); Poco::Logger::root().setChannel(Poco::AutoPtr(new Poco::SimpleFileChannel(server_logs_file))); + logging_initialized = true; } - else if (logging) + else if (logging || is_interactive) { - // force enable logging config().setString("logger", "logger"); - // sensitive data rules are not used here + auto log_level_default = is_interactive && !logging ? "none" : level; + config().setString("logger.level", config().getString("log-level", config().getString("send_logs_level", log_level_default))); buildLoggers(config(), logger(), "clickhouse-local"); + logging_initialized = true; } else { Poco::Logger::root().setLevel("none"); Poco::Logger::root().setChannel(Poco::AutoPtr(new Poco::NullChannel())); + logging_initialized = false; } shared_context = Context::createShared(); @@ -713,6 +722,8 @@ void LocalServer::processOptions(const OptionsDescription &, const CommandLineOp config().setString("logger.log", options["logger.log"].as()); if (options.count("logger.level")) config().setString("logger.level", options["logger.level"].as()); + if (options.count("send_logs_level")) + config().setString("send_logs_level", options["send_logs_level"].as()); } } diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h index 969af7f1b77..e96fb211554 100644 --- a/programs/local/LocalServer.h +++ b/programs/local/LocalServer.h @@ -46,6 +46,8 @@ protected: void processConfig() override; + void updateLoggerLevel(const String & logs_level) override; + private: /** Composes CREATE subquery based on passed arguments (--structure --file --table and --input-format) * This query will be executed first, before queries passed through --query argument diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index c12abda9594..0b5a7724fe5 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1503,7 +1503,8 @@ int Server::main(const std::vector & /*args*/) else { /// Initialize a watcher periodically updating DNS cache - dns_cache_updater = std::make_unique(global_context, config().getInt("dns_cache_update_period", 15)); + dns_cache_updater = std::make_unique( + global_context, config().getInt("dns_cache_update_period", 15), config().getUInt("dns_max_consecutive_failures", 5)); } #if defined(OS_LINUX) diff --git a/programs/server/config.xml b/programs/server/config.xml index 4e4cabdb03b..3bb26a3a368 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -1030,14 +1030,26 @@ 1000 - - + + + + + 
+ <processors_profile_log> + <database>system</database> + <table>processors_profile_log</table> + + <partition_by>toYYYYMM(event_date)</partition_by> + <flush_interval_milliseconds>7500</flush_interval_milliseconds> + </processors_profile_log>
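To check that the `processors_profile_log` block above is picked up, a hedged sketch (assuming default settings elsewhere; system log tables are created lazily, so the table appears after the first flush):

``` sql
SYSTEM FLUSH LOGS;
EXISTS TABLE system.processors_profile_log;  -- returns 1 once the table has been created
SELECT count() FROM system.processors_profile_log WHERE event_date = today();
```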
diff --git a/tests/integration/test_system_merges/test.py b/tests/integration/test_system_merges/test.py index 9239cb11065..775706f4df6 100644 --- a/tests/integration/test_system_merges/test.py +++ b/tests/integration/test_system_merges/test.py @@ -124,7 +124,7 @@ def test_merge_simple(started_cluster, replicated): assert ( node_check.query( - "SELECT * FROM system.merges WHERE table = '{name}'".format( + "SELECT * FROM system.merges WHERE table = '{name}' and progress < 1".format( name=table_name ) ) diff --git a/tests/performance/function_calculation_after_sorting_and_limit.xml b/tests/performance/function_calculation_after_sorting_and_limit.xml new file mode 100644 index 00000000000..ddb8f860600 --- /dev/null +++ b/tests/performance/function_calculation_after_sorting_and_limit.xml @@ -0,0 +1,4 @@ +<test> + <query>SELECT sipHash64(number) FROM numbers(1e8) ORDER BY number LIMIT 5</query> + <query>SELECT sipHash64(number) FROM numbers(1e8) ORDER BY number + 1 LIMIT 5</query> +</test> diff --git a/tests/performance/has_all.xml b/tests/performance/has_all.xml new file mode 100644 index 00000000000..331442cbfee --- /dev/null +++ b/tests/performance/has_all.xml @@ -0,0 +1,53 @@ +<test> + <substitutions> + <substitution> + <name>array_type</name> + <values> + <value>Int8</value> + <value>Int16</value> + <value>Int32</value> + <value>Int64</value> + </values> + </substitution> + </substitutions> + + <create_query> + CREATE TABLE test_table_small_{array_type} + ( + `set` Array({array_type}), + `subset` Array ({array_type}) + ) + ENGINE = MergeTree ORDER BY set; + </create_query> + + <create_query> + CREATE TABLE test_table_medium_{array_type} + ( + `set` Array({array_type}), + `subset` Array ({array_type}) + ) + ENGINE = MergeTree ORDER BY set; + </create_query> + + <create_query> + CREATE TABLE test_table_large_{array_type} + ( + `set` Array({array_type}), + `subset` Array ({array_type}) + ) + ENGINE = MergeTree ORDER BY set; + </create_query> + + <fill_query>INSERT INTO test_table_small_{array_type} SELECT groupArraySample(5000)(rand64()) AS set, groupArraySample(500)(rand64()) AS subset FROM numbers(10000000) GROUP BY number % 5000;</fill_query> + <fill_query>INSERT INTO test_table_medium_{array_type} SELECT groupArraySample(50000)(rand64()) AS set, groupArraySample(5000)(rand64()) AS subset FROM numbers(25000000) GROUP BY number % 50000;</fill_query> + <fill_query>INSERT INTO test_table_large_{array_type} SELECT groupArraySample(500000)(rand64()) AS set, groupArraySample(500000)(rand64()) AS subset FROM numbers(50000000) GROUP BY number % 500000;</fill_query> + + <query>SELECT hasAll(set, subset) FROM test_table_small_{array_type} FORMAT Null</query> + <query>SELECT hasAll(set, subset) FROM test_table_medium_{array_type} FORMAT Null</query> + <query>SELECT hasAll(set, subset) FROM test_table_large_{array_type} FORMAT Null</query> + + <drop_query>DROP TABLE IF EXISTS test_table_small_{array_type}</drop_query> + <drop_query>DROP TABLE IF EXISTS test_table_medium_{array_type}</drop_query> + <drop_query>DROP TABLE IF EXISTS test_table_large_{array_type}</drop_query> +</test> diff --git a/tests/performance/scalar2.xml b/tests/performance/scalar2.xml new file mode 100644 index 00000000000..eb427536646 --- /dev/null +++ b/tests/performance/scalar2.xml @@ -0,0 +1,17 @@ + + CREATE TABLE tbl0 (`ds` Date, `x1` String, `x2` UInt32, `x3` UInt32, `x4` UInt32, `bm` AggregateFunction(groupBitmap, UInt32)) ENGINE = MergeTree PARTITION BY (ds, x1) ORDER BY (x2, x3, x4) SETTINGS index_granularity = 1 + CREATE TABLE tbl (`ds` Date, `y1` UInt32, `x4` UInt32, `y2` UInt32, `y3` UInt32, `bm` AggregateFunction(groupBitmap, UInt32), `y4` UInt32 DEFAULT 0) ENGINE = MergeTree PARTITION BY (ds) ORDER BY (x4, y2, y3) SETTINGS index_granularity = 8192, max_parts_in_total = 10000000 + insert into tbl0 with murmurHash3_32(toUInt32(rand())) as uid select toDate('2022-03-01')+rand()%7 as ds, concat('xx',toString(rand()%10+1)) as x1, 1 as x2, 2 as x3, bitShiftRight(uid, 22) as x4, groupBitmapState(uid) as bm 
from numbers(100000000) where x4%40=0 group by ds, x1, x2, x3, x4 + + insert into tbl with murmurHash3_32(toUInt32(rand())) as uid select toDate('2022-03-01')+rand()%7 as ds, rand()%1000+5000 as y1, bitShiftRight(uid, 22) as x4, rand()%100 as y2, rand()%2000 as y3, groupBitmapState(uid) as bm, rand()%1 as y4 from numbers(100000000) where x4%40=0 group by ds, y1, x4, y2, y3, y4 + + CREATE TABLE tmp_acc_hit engine Memory AS SELECT x1, x2, x3, arrayReduceInRanges('groupBitmapMergeState', [(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7)], bs) AS bs FROM (SELECT x1, x2, x3, groupArrayInsertAt(b, multiIf(ds = '2022-03-01', 0, ds = '2022-03-02', 1, ds = '2022-03-03', 2, ds = '2022-03-04', 3, ds = '2022-03-05', 4, ds = '2022-03-06', 5, ds = '2022-03-07', 6, 7)) AS bs FROM (SELECT x1, x2, x3, ds, groupBitmapOrState(bm) AS b FROM tbl0 WHERE ((ds >= '2022-03-01') AND (ds <= '2022-03-07')) AND (((x1 = 'xx1') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx2') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx3') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx4') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx5') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx6') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx7') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx8') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx9') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx10') AND (x2 = 1) AND (x3 = 2))) AND (x4 IN (0, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680, 720, 760, 800, 840, 880, 920, 960, 1000)) GROUP BY x1, x2, x3, ds) AS t_hit GROUP BY x1, x2, x3) + + WITH (SELECT groupArrayInsertAt(b, multiIf((x1 = 'xx1') AND (x2 = 1) AND (x3 = 2), 0, (x1 = 'xx2') AND (x2 = 1) AND (x3 = 2), 1, (x1 = 'xx3') AND (x2 = 1) AND (x3 = 2), 2, (x1 = 'xx4') AND (x2 = 1) AND (x3 = 2), 3, (x1 = 'xx5') AND (x2 = 1) AND (x3 = 2), 4, (x1 = 'xx6') AND (x2 = 1) AND (x3 = 2), 5, (x1 = 'xx7') AND (x2 = 1) AND (x3 = 2), 6, (x1 = 'xx8') AND (x2 = 1) AND (x3 = 2), 7, (x1 = 'xx9') AND (x2 = 1) AND (x3 = 2), 8, (x1 = 'xx10') AND (x2 = 1) AND (x3 = 2), 9, 10)) FROM (SELECT x1, x2, x3, bs AS b FROM tmp_acc_hit)) AS bs SELECT y1, x4, toString(flat_arr) AS flat_arr, toString([bitmapAndCardinality(bmor1, (bs[1])[1]), bitmapAndCardinality(bmor2, (bs[1])[1]), bitmapAndCardinality(bmor3, (bs[1])[1]), bitmapAndCardinality(bmor1, (bs[2])[1]), bitmapAndCardinality(bmor2, (bs[2])[1]), bitmapAndCardinality(bmor3, (bs[2])[1]), bitmapAndCardinality(bmor1, (bs[3])[1]), bitmapAndCardinality(bmor2, (bs[3])[1]), bitmapAndCardinality(bmor3, (bs[3])[1]), bitmapAndCardinality(bmor1, (bs[4])[1]), bitmapAndCardinality(bmor2, (bs[4])[1]), bitmapAndCardinality(bmor3, (bs[4])[1]), bitmapAndCardinality(bmor1, (bs[5])[1]), bitmapAndCardinality(bmor2, (bs[5])[1]), bitmapAndCardinality(bmor3, (bs[5])[1]), bitmapAndCardinality(bmor1, (bs[6])[1]), bitmapAndCardinality(bmor2, (bs[6])[1]), bitmapAndCardinality(bmor3, (bs[6])[1]), bitmapAndCardinality(bmor1, (bs[7])[1]), bitmapAndCardinality(bmor2, (bs[7])[1]), bitmapAndCardinality(bmor3, (bs[7])[1]), bitmapAndCardinality(bmor1, (bs[8])[1]), bitmapAndCardinality(bmor2, (bs[8])[1]), bitmapAndCardinality(bmor3, (bs[8])[1]), bitmapAndCardinality(bmor1, (bs[9])[1]), bitmapAndCardinality(bmor2, (bs[9])[1]), bitmapAndCardinality(bmor3, (bs[9])[1]), bitmapAndCardinality(bmor1, (bs[10])[1]), bitmapAndCardinality(bmor2, (bs[10])[1]), bitmapAndCardinality(bmor3, (bs[10])[1])]) AS flat_arr_2 from (SELECT toString(y1) AS y1, toString(x4) AS x4, arrayFlatten(groupArrayInsertAt(flat_arr, multiIf(date_ = '2022-03-01', 0, 1))) AS flat_arr, groupBitmapOrState(bmor1) 
AS bmor1, groupBitmapOrState(bmor2) AS bmor2, groupBitmapOrState(bmor3) AS bmor3 FROM (WITH '2022-03-01' AS start_ds SELECT y1, x4, groupBitmapOrState(bm) AS bmor1, groupBitmapOrStateIf(bm, y2 > 0) AS bmor2, groupBitmapOrStateIf(bm, y4 = 1) AS bmor3, [sum(y2 * bitmapAndCardinality(bm, (bs[1])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[2])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[3])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[4])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[5])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[6])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[7])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[8])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[9])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[10])[1]))] AS flat_arr, start_ds AS date_ FROM tbl WHERE (ds = start_ds) AND (y1 IN (7063, 5010, 5006, 6788, 6176, 6203, 6769, 6555, 7062, 5119, 5007, 5212, 6814, 6177, 6789, 5095, 4942, 6243, 7061, 6744, 6201, 7196, 6181, 7195, 6178, 5004, 6790, 5008, 6877, 7281, 6791, 6179, 5214, 5005, 7146, 6980, 6322, 5222, 5217, 5137, 6561, 5133, 6937, 5142, 5130, 6885, 7250, 5103, 6867, 7066, 5096, 6868, 6199, 7269, 5131, 6414, 6884, 6560, 5136, 6883, 5158, 6869, 5097, 5132, 5102, 7251, 5219, 4695, 5220, 5202, 4203, 4204, 5098, 6870, 7064, 5101, 5105, 5140, 5135, 5139, 6880, 6194, 5218, 4202, 6655, 5104, 5183, 7245, 5100, 7065, 5099, 6938, 5138, 6881, 5134, 6886, 5141, 5129)) AND (x4 IN (0, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680, 720, 760, 800, 840, 880, 920, 960, 1000)) AND (y4 IN (0, 1)) GROUP BY y1, x4) GROUP BY y1, x4) LIMIT 1 + + DROP TABLE IF EXISTS tbl + DROP TABLE IF EXISTS tbl0 + DROP TABLE IF EXISTS tmp_acc_hit + diff --git a/tests/queries/0_stateless/00155_long_merges.sh b/tests/queries/0_stateless/00155_long_merges.sh index f2d9cd1dade..15ad0892a42 100755 --- a/tests/queries/0_stateless/00155_long_merges.sh +++ b/tests/queries/0_stateless/00155_long_merges.sh @@ -32,7 +32,7 @@ function test { SUM=$(( $1 + $2 )) MAX=$(( $1 > $2 ? 
$1 : $2 ))
 
-    SETTINGS="--min_insert_block_size_rows=0 --min_insert_block_size_bytes=0"
+    SETTINGS="--min_insert_block_size_rows=0 --min_insert_block_size_bytes=0 --max_block_size=65505"
 
     $CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO summing_00155 (x) SELECT number AS x FROM system.numbers LIMIT $1"
     $CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO summing_00155 (x) SELECT number AS x FROM system.numbers LIMIT $2"
diff --git a/tests/queries/0_stateless/00753_alter_attach.reference b/tests/queries/0_stateless/00753_alter_attach.reference
index 007b99d4748..b0d2a3d031c 100644
--- a/tests/queries/0_stateless/00753_alter_attach.reference
+++ b/tests/queries/0_stateless/00753_alter_attach.reference
@@ -10,3 +10,15 @@
 5 2
 6 3
 7 3
+4 2
+5 2
+1 1
+2 1
+3 1
+1 1
+2 1
+3 1
+1 1
+2 2
+1 1
+1 1
diff --git a/tests/queries/0_stateless/00753_alter_attach.sql b/tests/queries/0_stateless/00753_alter_attach.sql
index ca43fb3aeae..2910bcc222b 100644
--- a/tests/queries/0_stateless/00753_alter_attach.sql
+++ b/tests/queries/0_stateless/00753_alter_attach.sql
@@ -19,4 +19,53 @@ INSERT INTO alter_attach VALUES (6, 3), (7, 3);
 ALTER TABLE alter_attach ATTACH PARTITION 2;
 SELECT * FROM alter_attach ORDER BY x;
+ALTER TABLE alter_attach DETACH PARTITION ALL;
+SELECT * FROM alter_attach ORDER BY x;
+
+ALTER TABLE alter_attach ATTACH PARTITION 2;
+SELECT * FROM alter_attach ORDER BY x;
+
+DROP TABLE IF EXISTS detach_all_no_partition;
+CREATE TABLE detach_all_no_partition (x UInt64, p UInt8) ENGINE = MergeTree ORDER BY tuple();
+INSERT INTO detach_all_no_partition VALUES (1, 1), (2, 1), (3, 1);
+SELECT * FROM detach_all_no_partition ORDER BY x;
+
+ALTER TABLE detach_all_no_partition DETACH PARTITION ALL;
+SELECT * FROM detach_all_no_partition ORDER BY x;
+
+ALTER TABLE detach_all_no_partition ATTACH PARTITION tuple();
+SELECT * FROM detach_all_no_partition ORDER BY x;
+
 DROP TABLE alter_attach;
+DROP TABLE detach_all_no_partition;
+
+DROP TABLE IF EXISTS replicated_table_detach_all1;
+DROP TABLE IF EXISTS replicated_table_detach_all2;
+
+CREATE TABLE replicated_table_detach_all1 (
+    id UInt64,
+    Data String
+) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00753_{database}/replicated_table_detach_all', '1') ORDER BY id PARTITION BY id;
+
+CREATE TABLE replicated_table_detach_all2 (
+    id UInt64,
+    Data String
+) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00753_{database}/replicated_table_detach_all', '2') ORDER BY id PARTITION BY id;
+
+
+INSERT INTO replicated_table_detach_all1 VALUES (1, '1'), (2, '2');
+select * from replicated_table_detach_all1 order by id;
+
+ALTER TABLE replicated_table_detach_all1 DETACH PARTITION ALL;
+select * from replicated_table_detach_all1 order by id;
+SYSTEM SYNC REPLICA replicated_table_detach_all2;
+select * from replicated_table_detach_all2 order by id;
+
+ALTER TABLE replicated_table_detach_all1 ATTACH PARTITION tuple(1);
+select * from replicated_table_detach_all1 order by id;
+SYSTEM SYNC REPLICA replicated_table_detach_all2;
+select * from replicated_table_detach_all2 order by id;
+
+DROP TABLE replicated_table_detach_all1;
+DROP TABLE replicated_table_detach_all2;
+
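For context: DETACH PARTITION ALL, exercised by the tests above, detaches every partition of a table in one statement; data can then be re-attached partition by partition (or via ATTACH PARTITION tuple() for unpartitioned tables). A minimal sketch of the flow, with a hypothetical table name (illustration only, not part of the patch):

-- Hypothetical table t; sketch of the DETACH PARTITION ALL flow tested above.
CREATE TABLE t (x UInt64, p UInt8) ENGINE = MergeTree PARTITION BY p ORDER BY tuple();
INSERT INTO t VALUES (1, 1), (2, 2);
ALTER TABLE t DETACH PARTITION ALL; -- all parts move to detached/
SELECT count() FROM t;              -- 0
ALTER TABLE t ATTACH PARTITION 1;   -- bring back a single partition
SELECT count() FROM t;              -- 1
DROP TABLE t;
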
"SELECT * FROM roundtrip_no_length_delimiter_protobuf rm "$BINARY_FILE_PATH" # The ProtobufSingle format can't be used to write multiple rows because this format doesn't have any row delimiter. -$CLICKHOUSE_CLIENT --multiquery --testmode > /dev/null < /dev/null <&1 \ + | ${CLICKHOUSE_CLIENT} --ignore-error -nm --calculate_text_stack_trace 0 --log-level 'error' 2>&1 \ | grep -v -e 'Received exception .*$' -e '^(query: ' | sed 's/^\(Code: [0-9]\+\).*$/\1/g' diff --git a/tests/queries/0_stateless/00980_merge_alter_settings.sql b/tests/queries/0_stateless/00980_merge_alter_settings.sql index c0d18f6d453..f595a09970d 100644 --- a/tests/queries/0_stateless/00980_merge_alter_settings.sql +++ b/tests/queries/0_stateless/00980_merge_alter_settings.sql @@ -91,8 +91,8 @@ SHOW CREATE TABLE table_for_reset_setting; ALTER TABLE table_for_reset_setting RESET SETTING index_granularity; -- { serverError 472 } --- ignore undefined setting -ALTER TABLE table_for_reset_setting RESET SETTING merge_with_ttl_timeout, unknown_setting; +-- don't execute alter with incorrect setting +ALTER TABLE table_for_reset_setting RESET SETTING merge_with_ttl_timeout, unknown_setting; -- { serverError 36 } ALTER TABLE table_for_reset_setting MODIFY SETTING merge_with_ttl_timeout = 300, max_concurrent_queries = 1; @@ -102,4 +102,4 @@ ALTER TABLE table_for_reset_setting RESET SETTING max_concurrent_queries, merge_ SHOW CREATE TABLE table_for_reset_setting; -DROP TABLE IF EXISTS table_for_reset_setting; \ No newline at end of file +DROP TABLE IF EXISTS table_for_reset_setting; diff --git a/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql index dfb91eb3b0a..1b291bf84d2 100644 --- a/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql +++ b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql @@ -108,8 +108,8 @@ ATTACH TABLE replicated_table_for_reset_setting1; SHOW CREATE TABLE replicated_table_for_reset_setting1; SHOW CREATE TABLE replicated_table_for_reset_setting2; --- ignore undefined setting -ALTER TABLE replicated_table_for_reset_setting1 RESET SETTING check_delay_period, unknown_setting; +-- don't execute alter with incorrect setting +ALTER TABLE replicated_table_for_reset_setting1 RESET SETTING check_delay_period, unknown_setting; -- { serverError 36 } ALTER TABLE replicated_table_for_reset_setting1 RESET SETTING merge_with_ttl_timeout; ALTER TABLE replicated_table_for_reset_setting2 RESET SETTING merge_with_ttl_timeout; diff --git a/tests/queries/0_stateless/01015_attach_part.reference b/tests/queries/0_stateless/01015_attach_part.reference index b6cd514cd25..81c49e654ac 100644 --- a/tests/queries/0_stateless/01015_attach_part.reference +++ b/tests/queries/0_stateless/01015_attach_part.reference @@ -1,3 +1,4 @@ 1000 0 1000 +0 diff --git a/tests/queries/0_stateless/01015_attach_part.sql b/tests/queries/0_stateless/01015_attach_part.sql index 6b786bfbab9..a2f949d3499 100644 --- a/tests/queries/0_stateless/01015_attach_part.sql +++ b/tests/queries/0_stateless/01015_attach_part.sql @@ -21,4 +21,8 @@ ALTER TABLE table_01 ATTACH PART '20191001_1_1_0'; SELECT COUNT() FROM table_01; +ALTER TABLE table_01 DETACH PARTITION ALL; + +SELECT COUNT() FROM table_01; + DROP TABLE IF EXISTS table_01; diff --git a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh index 
diff --git a/tests/queries/0_stateless/01015_attach_part.reference b/tests/queries/0_stateless/01015_attach_part.reference
index b6cd514cd25..81c49e654ac 100644
--- a/tests/queries/0_stateless/01015_attach_part.reference
+++ b/tests/queries/0_stateless/01015_attach_part.reference
@@ -1,3 +1,4 @@
 1000
 0
 1000
+0
diff --git a/tests/queries/0_stateless/01015_attach_part.sql b/tests/queries/0_stateless/01015_attach_part.sql
index 6b786bfbab9..a2f949d3499 100644
--- a/tests/queries/0_stateless/01015_attach_part.sql
+++ b/tests/queries/0_stateless/01015_attach_part.sql
@@ -21,4 +21,8 @@ ALTER TABLE table_01 ATTACH PART '20191001_1_1_0';
 
 SELECT COUNT() FROM table_01;
 
+ALTER TABLE table_01 DETACH PARTITION ALL;
+
+SELECT COUNT() FROM table_01;
+
 DROP TABLE IF EXISTS table_01;
diff --git a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh
index dde6b8ccadb..0e258bbbb09 100755
--- a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh
+++ b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh
@@ -88,9 +88,9 @@ from numbers(100000); -- { serverError 241; }" > /dev/null 2>&1
 
 # fails
 echo "Should throw 1"
-execute_insert --testmode
+execute_insert
 echo "Should throw 2"
-execute_insert --testmode --min_insert_block_size_rows=1 --min_insert_block_size_rows_for_materialized_views=$((1<<20))
+execute_insert --min_insert_block_size_rows=1 --min_insert_block_size_rows_for_materialized_views=$((1<<20))
 
 # passes
 echo "Should pass 1"
diff --git a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh
index d5cae099f36..0de8b3a1a25 100755
--- a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh
+++ b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh
@@ -41,7 +41,7 @@ $CLICKHOUSE_CLIENT -n --query="
     LIFETIME(MIN 1000 MAX 2000)
     LAYOUT(COMPLEX_KEY_SSD_CACHE(FILE_SIZE 8192 PATH '$USER_FILES_PATH/0d'));"
 
-$CLICKHOUSE_CLIENT --testmode -nq "SELECT dictHas('01280_db.ssd_dict', 'a', tuple('1')); -- { serverError 43 }"
+$CLICKHOUSE_CLIENT -nq "SELECT dictHas('01280_db.ssd_dict', 'a', tuple('1')); -- { serverError 43 }"
 
 $CLICKHOUSE_CLIENT -n --query="
     SELECT 'TEST_SMALL';
@@ -65,7 +65,7 @@ $CLICKHOUSE_CLIENT -n --query="
     SELECT dictGetInt32('01280_db.ssd_dict', 'b', tuple('10', toInt32(-20)));
     SELECT dictGetString('01280_db.ssd_dict', 'c', tuple('10', toInt32(-20)));"
 
-$CLICKHOUSE_CLIENT --testmode -nq "SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple(toInt32(3))); -- { serverError 53 }"
+$CLICKHOUSE_CLIENT -nq "SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple(toInt32(3))); -- { serverError 53 }"
 
 $CLICKHOUSE_CLIENT -n --query="DROP DICTIONARY 01280_db.ssd_dict;
 DROP TABLE IF EXISTS 01280_db.keys_table;
diff --git a/tests/queries/0_stateless/01428_nullable_asof_join.sql b/tests/queries/0_stateless/01428_nullable_asof_join.sql
index 30e5c51eb1c..e1b00158d68 100644
--- a/tests/queries/0_stateless/01428_nullable_asof_join.sql
+++ b/tests/queries/0_stateless/01428_nullable_asof_join.sql
@@ -109,3 +109,8 @@ FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM n
 ASOF JOIN (SELECT 1 as pk, toNullable(0) as dt) b
 ON a.dt >= b.dt AND a.pk = b.pk
 ORDER BY a.dt; -- { serverError 48 }
+
+SELECT *
+FROM (SELECT NULL AS y, 1 AS x, '2020-01-01 10:10:10' :: DateTime64 AS t) AS t1
+ASOF LEFT JOIN (SELECT NULL AS y, 1 AS x, '2020-01-01 10:10:10' :: DateTime64 AS t) AS t2
+ON t1.t <= t2.t AND t1.x == t2.x FORMAT Null;
diff --git a/tests/queries/0_stateless/01576_alias_column_rewrite.reference b/tests/queries/0_stateless/01576_alias_column_rewrite.reference
index 11cc146dd62..68875735110 100644
--- a/tests/queries/0_stateless/01576_alias_column_rewrite.reference
+++ b/tests/queries/0_stateless/01576_alias_column_rewrite.reference
@@ -35,10 +35,11 @@ Expression (Projection)
         ReadFromMergeTree (default.test_table)
 Expression (Projection)
   Limit (preliminary LIMIT (without OFFSET))
-    Sorting
-      Expression (Before ORDER BY)
-        SettingQuotaAndLimits (Set limits and quota after reading from storage)
-          ReadFromMergeTree (default.test_table)
+    Expression (Before ORDER BY [lifted up part])
+      Sorting
+        Expression (Before ORDER BY)
+          SettingQuotaAndLimits (Set limits and quota after reading from storage)
+            ReadFromMergeTree (default.test_table)
 optimize_aggregation_in_order
 Expression ((Projection +
Before ORDER BY)) Aggregating diff --git a/tests/queries/0_stateless/01591_window_functions.reference b/tests/queries/0_stateless/01591_window_functions.reference index 655232fcdd4..c766bf16f19 100644 --- a/tests/queries/0_stateless/01591_window_functions.reference +++ b/tests/queries/0_stateless/01591_window_functions.reference @@ -925,10 +925,11 @@ Expression ((Projection + Before ORDER BY)) Window (Window step for window \'ORDER BY o ASC, number ASC\') Sorting (Sorting for window \'ORDER BY o ASC, number ASC\') Window (Window step for window \'ORDER BY number ASC\') - Sorting (Sorting for window \'ORDER BY number ASC\') - Expression ((Before window functions + (Projection + Before ORDER BY))) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemNumbers) + Expression ((Before window functions + (Projection + Before ORDER BY)) [lifted up part]) + Sorting (Sorting for window \'ORDER BY number ASC\') + Expression ((Before window functions + (Projection + Before ORDER BY))) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) -- A test case for the sort comparator found by fuzzer. SELECT max(number) OVER (ORDER BY number DESC NULLS FIRST), diff --git a/tests/queries/0_stateless/01600_remerge_sort_lowered_memory_bytes_ratio.sql b/tests/queries/0_stateless/01600_remerge_sort_lowered_memory_bytes_ratio.sql index 5de4210d3f2..6e23ab9cdb9 100644 --- a/tests/queries/0_stateless/01600_remerge_sort_lowered_memory_bytes_ratio.sql +++ b/tests/queries/0_stateless/01600_remerge_sort_lowered_memory_bytes_ratio.sql @@ -10,8 +10,8 @@ set max_block_size=40960; -- MergeSortingTransform: Re-merging intermediate ORDER BY data (20 blocks with 819200 rows) to save memory consumption -- MergeSortingTransform: Memory usage is lowered from 186.25 MiB to 95.00 MiB -- MergeSortingTransform: Re-merging is not useful (memory usage was not lowered by remerge_sort_lowered_memory_bytes_ratio=2.0) -select number k, repeat(toString(number), 11) v1, repeat(toString(number), 12) v2 from numbers(3e6) order by k limit 400e3 format Null; -- { serverError 241 } -select number k, repeat(toString(number), 11) v1, repeat(toString(number), 12) v2 from numbers(3e6) order by k limit 400e3 settings remerge_sort_lowered_memory_bytes_ratio=2. format Null; -- { serverError 241 } +select number k, repeat(toString(number), 11) v1, repeat(toString(number), 12) v2 from numbers(3e6) order by v1, v2 limit 400e3 format Null; -- { serverError 241 } +select number k, repeat(toString(number), 11) v1, repeat(toString(number), 12) v2 from numbers(3e6) order by v1, v2 limit 400e3 settings remerge_sort_lowered_memory_bytes_ratio=2. 
format Null; -- { serverError 241 }
 
 -- remerge_sort_lowered_memory_bytes_ratio 1.9 is good (need at least 1.91/0.98=1.94)
 -- MergeSortingTransform: Re-merging intermediate ORDER BY data (20 blocks with 819200 rows) to save memory consumption
diff --git a/tests/queries/0_stateless/01655_plan_optimizations.reference b/tests/queries/0_stateless/01655_plan_optimizations.reference
index 33a7ff44b74..bb9c614f728 100644
--- a/tests/queries/0_stateless/01655_plan_optimizations.reference
+++ b/tests/queries/0_stateless/01655_plan_optimizations.reference
@@ -142,3 +142,12 @@ Filter
 Filter
 2 3
 2 3
+> function calculation should be done after sorting and limit (if possible)
+> Expression should be divided into two subexpressions and only one of them should be moved after Sorting
+Expression (Before ORDER BY [lifted up part])
+FUNCTION sipHash64
+Sorting
+Expression (Before ORDER BY)
+FUNCTION plus
+> this query should be executed without throwing an exception
+0
diff --git a/tests/queries/0_stateless/01655_plan_optimizations.sh b/tests/queries/0_stateless/01655_plan_optimizations.sh
index b66d788a338..0b7f004a2ce 100755
--- a/tests/queries/0_stateless/01655_plan_optimizations.sh
+++ b/tests/queries/0_stateless/01655_plan_optimizations.sh
@@ -196,3 +196,12 @@ $CLICKHOUSE_CLIENT -q "
     select a, b from (
         select number + 1 as a, number + 2 as b from numbers(2) union all select number + 1 as b, number + 2 as a from numbers(2)
     ) where a != 1 settings enable_optimize_predicate_expression = 0"
+
+echo "> function calculation should be done after sorting and limit (if possible)"
+echo "> Expression should be divided into two subexpressions and only one of them should be moved after Sorting"
+$CLICKHOUSE_CLIENT -q "
+    explain actions = 1 select number as n, sipHash64(n) from numbers(100) order by number + 1 limit 5" |
+    sed 's/^ *//g' | grep -o "^ *\(Expression (Before ORDER BY.*)\|Sorting\|FUNCTION \w\+\)"
+echo "> this query should be executed without throwing an exception"
+$CLICKHOUSE_CLIENT -q "
+    select throwIf(number = 5) from (select * from numbers(10)) order by number limit 1"
diff --git a/tests/queries/0_stateless/01691_parser_data_type_exponential.sh b/tests/queries/0_stateless/01691_parser_data_type_exponential.sh
index 2b1d34982a2..f8004f9350d 100755
--- a/tests/queries/0_stateless/01691_parser_data_type_exponential.sh
+++ b/tests/queries/0_stateless/01691_parser_data_type_exponential.sh
@@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
 
 # Check that DataType parser does not have exponential complexity in the case found by fuzzer.
-for _ in {1..10}; do ${CLICKHOUSE_CLIENT} -n --testmode --query "SELECT CAST(1 AS A2222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222220000000000000000000000000000000000000000000000000000000000000000000000000000002260637443813394204 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggre222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 22222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 2222222222222eFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 
222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 22222222222222222222222222222222222222222222222222222222222222222222222222222222222222222, 222222222222222ggregateFuncpion(groupBitmap222222222222222222222222222222222222222222222222222222222222222222222222000000000000000000001788596394540167623 222222222222222222ggregateFu22222222222222222222222222 222222222, UInt33)); -- { clientError 62 }"; done +for _ in {1..10}; do ${CLICKHOUSE_CLIENT} -n --query "SELECT CAST(1 AS A2222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222220000000000000000000000000000000000000000000000000000000000000000000000000000002260637443813394204 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 
222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggre222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 22222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 2222222222222eFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 22222222222222222222222222222222222222222222222222222222222222222222222222222222222222222, 222222222222222ggregateFuncpion(groupBitmap222222222222222222222222222222222222222222222222222222222222222222222222000000000000000000001788596394540167623 222222222222222222ggregateFu22222222222222222222222222 222222222, UInt33)); -- { clientError 62 }"; done diff --git a/tests/queries/0_stateless/01710_minmax_count_projection.reference b/tests/queries/0_stateless/01710_minmax_count_projection.reference index b13738a66de..259d320a38a 100644 --- a/tests/queries/0_stateless/01710_minmax_count_projection.reference +++ b/tests/queries/0_stateless/01710_minmax_count_projection.reference @@ -9,6 +9,7 @@ 1 9999 3 2021-10-25 10:00:00 2021-10-27 10:00:00 3 +2021-10-25 10:00:00 2021-10-27 10:00:00 3 1 1 1 @@ -17,3 +18,5 @@ 0 2021-10-24 10:00:00 0 +1000 +1000 diff --git 
a/tests/queries/0_stateless/01710_minmax_count_projection.sql b/tests/queries/0_stateless/01710_minmax_count_projection.sql
index 0792fe331bb..a6c04725583 100644
--- a/tests/queries/0_stateless/01710_minmax_count_projection.sql
+++ b/tests/queries/0_stateless/01710_minmax_count_projection.sql
@@ -50,6 +50,8 @@ drop table if exists d;
 create table d (dt DateTime, j int) engine MergeTree partition by (toDate(dt), ceiling(j), toDate(dt), CEILING(j)) order by tuple();
 insert into d values ('2021-10-24 10:00:00', 10), ('2021-10-25 10:00:00', 10), ('2021-10-26 10:00:00', 10), ('2021-10-27 10:00:00', 10);
 select min(dt), max(dt), count() from d where toDate(dt) >= '2021-10-25';
+-- fuzz crash
+select min(dt), max(dt), count(toDate(dt) >= '2021-10-25') from d where toDate(dt) >= '2021-10-25';
 select count() from d group by toDate(dt);
 
 -- fuzz crash
@@ -59,3 +61,15 @@ SELECT min(dt) FROM d PREWHERE ((0.9998999834060669 AND 1023) AND 255) <= ceil(j
 SELECT count('') AND NULL FROM d PREWHERE ceil(j) <= NULL;
 
 drop table d;
+
+-- count variant optimization
+
+drop table if exists test;
+create table test (id Int64, d Int64, projection dummy(select * order by id)) engine MergeTree order by id;
+insert into test select number, number from numbers(1e3);
+
+select count(if(d=4, d, 1)) from test settings force_optimize_projection = 1;
+select count(d/3) from test settings force_optimize_projection = 1;
+select count(if(d=4, Null, 1)) from test settings force_optimize_projection = 1; -- { serverError 584 }
+
+drop table test;
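The "count variant optimization" block above relies on count(expr) being rewritable to a plain count(), and thus answerable from the implicit minmax_count projection, only when expr is provably non-NULL; with force_optimize_projection = 1, the nullable variant fails with serverError 584 (the "projection not used" error the test expects). A sketch of the distinction, reusing the test's own table (illustration only, not part of the patch):

-- count(if(d = 4, d, 1)) never yields NULL, so it equals count() and the projection applies.
select count(if(d = 4, d, 1)) from test settings force_optimize_projection = 1;
-- count(if(d = 4, Null, 1)) skips NULL rows, so it is not plain count(); error 584.
select count(if(d = 4, Null, 1)) from test settings force_optimize_projection = 1;
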
diff --git a/tests/queries/0_stateless/01825_type_json_9.reference b/tests/queries/0_stateless/01825_type_json_9.reference
new file mode 100644
index 00000000000..a426b09a100
--- /dev/null
+++ b/tests/queries/0_stateless/01825_type_json_9.reference
@@ -0,0 +1 @@
+Tuple(foo Int8, k1 Int8, k2 Int8)
diff --git a/tests/queries/0_stateless/01825_type_json_9.sql b/tests/queries/0_stateless/01825_type_json_9.sql
new file mode 100644
index 00000000000..8fa4b335578
--- /dev/null
+++ b/tests/queries/0_stateless/01825_type_json_9.sql
@@ -0,0 +1,16 @@
+-- Tags: no-fasttest
+
+DROP TABLE IF EXISTS t_json;
+
+SET allow_experimental_object_type = 1;
+
+CREATE TABLE t_json(id UInt64, obj JSON) ENGINE = MergeTree ORDER BY id;
+
+INSERT INTO t_json format JSONEachRow {"id": 1, "obj": {"foo": 1, "k1": 2}};
+INSERT INTO t_json format JSONEachRow {"id": 2, "obj": {"foo": 1, "k2": 2}};
+
+OPTIMIZE TABLE t_json FINAL;
+
+SELECT any(toTypeName(obj)) from t_json;
+
+DROP TABLE IF EXISTS t_json;
diff --git a/tests/queries/0_stateless/01825_type_json_parallel_insert.sql b/tests/queries/0_stateless/01825_type_json_parallel_insert.sql
index f54004a6630..93d1eecfbd7 100644
--- a/tests/queries/0_stateless/01825_type_json_parallel_insert.sql
+++ b/tests/queries/0_stateless/01825_type_json_parallel_insert.sql
@@ -1,4 +1,4 @@
--- Tags: long
+-- Tags: long, no-backward-compatibility-check:22.3.2.1
 DROP TABLE IF EXISTS t_json_parallel;
 
 SET allow_experimental_object_type = 1, max_insert_threads = 20, max_threads = 20;
diff --git a/tests/queries/0_stateless/01825_type_json_partitions.reference b/tests/queries/0_stateless/01825_type_json_partitions.reference
new file mode 100644
index 00000000000..5a7ba251572
--- /dev/null
+++ b/tests/queries/0_stateless/01825_type_json_partitions.reference
@@ -0,0 +1,2 @@
+{"id":1,"obj":{"k1":"v1","k2":""}}
+{"id":2,"obj":{"k1":"","k2":"v2"}}
diff --git a/tests/queries/0_stateless/01825_type_json_partitions.sql b/tests/queries/0_stateless/01825_type_json_partitions.sql
new file mode 100644
index 00000000000..27804e7edae
--- /dev/null
+++ b/tests/queries/0_stateless/01825_type_json_partitions.sql
@@ -0,0 +1,15 @@
+-- Tags: no-fasttest
+
+DROP TABLE IF EXISTS t_json_partitions;
+
+SET allow_experimental_object_type = 1;
+SET output_format_json_named_tuples_as_objects = 1;
+
+CREATE TABLE t_json_partitions (id UInt32, obj JSON)
+ENGINE MergeTree ORDER BY id PARTITION BY id;
+
+INSERT INTO t_json_partitions FORMAT JSONEachRow {"id": 1, "obj": {"k1": "v1"}} {"id": 2, "obj": {"k2": "v2"}};
+
+SELECT * FROM t_json_partitions ORDER BY id FORMAT JSONEachRow;
+
+DROP TABLE t_json_partitions;
diff --git a/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh b/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh
index b846136ae58..972ff3ba73f 100755
--- a/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh
+++ b/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh
@@ -5,4 +5,4 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
 
-$CLICKHOUSE_CLIENT --testmode -n -q 'select 1 -- { clientError FOOBAR }' |& grep -o 'No error code with name:.*'
+$CLICKHOUSE_CLIENT -n -q 'select 1 -- { clientError FOOBAR }' |& grep -o 'No error code with name:.*'
diff --git a/tests/queries/0_stateless/02008_tuple_to_name_value_pairs.sql b/tests/queries/0_stateless/02008_tuple_to_name_value_pairs.sql
index 9204975b579..59987a86590 100644
--- a/tests/queries/0_stateless/02008_tuple_to_name_value_pairs.sql
+++ b/tests/queries/0_stateless/02008_tuple_to_name_value_pairs.sql
@@ -4,7 +4,7 @@ DROP TABLE IF EXISTS test02008;
 CREATE TABLE test02008 (
     col Tuple(
         a Tuple(key1 int, key2 int),
-        b Tuple(key1 int, key3 int)
+        b Tuple(key1 int, key2 int)
     )
 ) ENGINE=Memory();
 INSERT INTO test02008 VALUES (tuple(tuple(1, 2), tuple(3, 4)));
diff --git a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.reference b/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.reference
index d7d3ee8f362..72d9eb2928a 100644
--- a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.reference
+++ b/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.reference
@@ -1,8 +1,8 @@
-1
-1
-10
-10
-100
-100
-10000
-10000
+0 00000
+0 00000
+9 99999
+9 99999
+99 9999999999
+99 9999999999
+9999 99999999999999999999
+9999 99999999999999999999
diff --git a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2 b/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2
index 465aa22beb3..53d970496b2 100644
--- a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2
+++ b/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2
@@ -11,8 +11,8 @@ settings
 as select number, repeat(toString(number), 5) from numbers({{ rows_in_table }});
 
 -- avoid any optimizations with ignore(*)
-select count(ignore(*)) from data_02052_{{ rows_in_table }}_wide{{ wide }} settings max_read_buffer_size=1, max_threads=1;
-select count(ignore(*)) from data_02052_{{ rows_in_table }}_wide{{ wide }} settings max_read_buffer_size=0, max_threads=1; -- { serverError CANNOT_READ_ALL_DATA }
+select * apply max from data_02052_{{ rows_in_table }}_wide{{ wide }} settings max_read_buffer_size=1, max_threads=1;
+select * apply max from data_02052_{{ rows_in_table }}_wide{{ wide }} settings max_read_buffer_size=0, max_threads=1; -- { serverError CANNOT_READ_ALL_DATA }
 
 drop table data_02052_{{ rows_in_table }}_wide{{ wide
}}; {% endfor %} diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference index 9e24b7c6ea6..67a043d6646 100644 --- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference +++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference @@ -7,13 +7,15 @@ ExpressionTransform (Limit) Limit - (Sorting) - MergingSortedTransform 2 → 1 - (Expression) - ExpressionTransform × 2 - (SettingQuotaAndLimits) - (ReadFromMergeTree) - MergeTreeInOrder × 2 0 → 1 + (Expression) + ExpressionTransform + (Sorting) + MergingSortedTransform 2 → 1 + (Expression) + ExpressionTransform × 2 + (SettingQuotaAndLimits) + (ReadFromMergeTree) + MergeTreeInOrder × 2 0 → 1 2020-10-01 9 2020-10-01 9 2020-10-01 9 @@ -23,16 +25,18 @@ ExpressionTransform ExpressionTransform (Limit) Limit - (Sorting) - MergingSortedTransform 2 → 1 - (Expression) - ExpressionTransform × 2 - (SettingQuotaAndLimits) - (ReadFromMergeTree) - ReverseTransform - MergeTreeReverse 0 → 1 - ReverseTransform - MergeTreeReverse 0 → 1 + (Expression) + ExpressionTransform + (Sorting) + MergingSortedTransform 2 → 1 + (Expression) + ExpressionTransform × 2 + (SettingQuotaAndLimits) + (ReadFromMergeTree) + ReverseTransform + MergeTreeReverse 0 → 1 + ReverseTransform + MergeTreeReverse 0 → 1 2020-10-01 9 2020-10-01 9 2020-10-01 9 @@ -42,15 +46,17 @@ ExpressionTransform ExpressionTransform (Limit) Limit - (Sorting) - FinishSortingTransform - PartialSortingTransform - MergingSortedTransform 2 → 1 - (Expression) - ExpressionTransform × 2 - (SettingQuotaAndLimits) - (ReadFromMergeTree) - MergeTreeInOrder × 2 0 → 1 + (Expression) + ExpressionTransform + (Sorting) + FinishSortingTransform + PartialSortingTransform + MergingSortedTransform 2 → 1 + (Expression) + ExpressionTransform × 2 + (SettingQuotaAndLimits) + (ReadFromMergeTree) + MergeTreeInOrder × 2 0 → 1 2020-10-11 0 2020-10-11 0 2020-10-11 0 diff --git a/tests/queries/0_stateless/02149_schema_inference.reference b/tests/queries/0_stateless/02149_schema_inference.reference index f46e3bee101..e4a0c3c3602 100644 --- a/tests/queries/0_stateless/02149_schema_inference.reference +++ b/tests/queries/0_stateless/02149_schema_inference.reference @@ -38,49 +38,49 @@ JSONCompactEachRow c1 Nullable(Float64) c2 Array(Tuple(Nullable(Float64), Nullable(String))) c3 Map(String, Nullable(Float64)) -c4 Nullable(UInt8) -42.42 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} 1 +c4 Nullable(Bool) +42.42 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} true c1 Nullable(Float64) c2 Array(Tuple(Nullable(Float64), Nullable(String))) c3 Map(String, Nullable(Float64)) -c4 Nullable(UInt8) +c4 Nullable(Bool) \N [(1,'String'),(2,NULL)] {'key':NULL,'key2':24} \N -32 [(2,'String 2'),(3,'hello')] {'key3':4242,'key4':2424} 1 +32 [(2,'String 2'),(3,'hello')] {'key3':4242,'key4':2424} true JSONCompactEachRowWithNames a Nullable(Float64) b Array(Tuple(Nullable(Float64), Nullable(String))) c Map(String, Nullable(Float64)) -d Nullable(UInt8) -42.42 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} 1 +d Nullable(Bool) +42.42 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} true JSONEachRow -d Nullable(UInt8) +a Nullable(Float64) b Array(Tuple(Nullable(Float64), Nullable(String))) c Map(String, Nullable(Float64)) +d Nullable(Bool) +42.42 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} true a Nullable(Float64) -1 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} 42.42 -d Nullable(UInt8) b Array(Tuple(Nullable(Float64), Nullable(String))) c 
Map(String, Nullable(Float64))
-a Nullable(Float64)
+d Nullable(Bool)
 \N [(1,'String'),(2,NULL)] {'key':NULL,'key2':24} \N
-1 [(2,'String 2'),(3,'hello')] {'key3':4242,'key4':2424} 32
+32 [(2,'String 2'),(3,'hello')] {'key3':4242,'key4':2424} true
+a Nullable(Float64)
 b Nullable(String)
 c Array(Nullable(Float64))
-a Nullable(Float64)
-s1 [] 1
-\N [2] 2
-\N [] \N
-\N [] \N
-\N [3] \N
+1 s1 []
+2 \N [2]
+\N \N []
+\N \N []
+\N \N [3]
 TSKV
+a Nullable(String)
 b Nullable(String)
 c Nullable(String)
-a Nullable(String)
-s1 \N 1
-} [2] 2
+1 s1 \N
+2 } [2]
 \N \N \N
 \N \N \N
-\N [3] \N
+\N \N [3]
 Values
 c1 Nullable(Float64)
 c2 Nullable(String)
diff --git a/tests/queries/0_stateless/02165_insert_from_infile.reference b/tests/queries/0_stateless/02165_insert_from_infile.reference
index 2a00a8faa31..f8c205ecc0f 100644
--- a/tests/queries/0_stateless/02165_insert_from_infile.reference
+++ b/tests/queries/0_stateless/02165_insert_from_infile.reference
@@ -1,5 +1,5 @@
-INSERT INTO test FROM INFILE data.file SELECT x
+INSERT INTO test FROM INFILE \'data.file\' SELECT x
 FROM input(\'x UInt32\')
-INSERT INTO test FROM INFILE data.file WITH number AS x
+INSERT INTO test FROM INFILE \'data.file\' WITH number AS x
 SELECT number
 FROM input(\'number UInt32\')
diff --git a/tests/queries/0_stateless/02174_cte_scalar_cache_mv.reference b/tests/queries/0_stateless/02174_cte_scalar_cache_mv.reference
index 246706164df..055c88160ad 100644
--- a/tests/queries/0_stateless/02174_cte_scalar_cache_mv.reference
+++ b/tests/queries/0_stateless/02174_cte_scalar_cache_mv.reference
@@ -18,7 +18,7 @@
 89 89 89 89 5
 94 94 94 94 5
 99 99 99 99 5
-02177_MV 7 80 22
+02177_MV 3 80 26
 10
 40
 70
diff --git a/tests/queries/0_stateless/02174_cte_scalar_cache_mv.sql b/tests/queries/0_stateless/02174_cte_scalar_cache_mv.sql
index 4d4447c7f31..742d72fe2b2 100644
--- a/tests/queries/0_stateless/02174_cte_scalar_cache_mv.sql
+++ b/tests/queries/0_stateless/02174_cte_scalar_cache_mv.sql
@@ -39,13 +39,13 @@ SYSTEM FLUSH LOGS;
 -- The main query should have a cache miss and 3 global hits
 -- The MV is executed 20 times (100 / 5) and each run does 1 miss and 4 hits to the LOCAL cache
 -- In addition to this, to prepare the MV, there is an extra preparation to get the list of columns via
--- InterpreterSelectQuery, which adds 1 miss and 4 global hits (since it uses the global cache)
+-- InterpreterSelectQuery, which adds 5 misses (since the cache is not used during preparation)
 -- So in total we have:
 -- Main query: 1 miss, 3 global
--- Preparation: 1 miss, 4 global
+-- Preparation: 5 misses
 -- Blocks (20): 20 miss, 0 global, 80 local hits
--- TOTAL: 22 miss, 7 global, 80 local
+-- TOTAL: 26 miss, 3 global, 80 local
 SELECT
     '02177_MV',
     ProfileEvents['ScalarSubqueriesGlobalCacheHit'] as scalar_cache_global_hit,
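To make the accounting above easier to audit: the scalar subquery cache counters are exposed as ProfileEvents in system.query_log, which is exactly what this test reads back. A minimal sketch of inspecting them for the most recent query (illustration only; ScalarSubqueriesGlobalCacheHit appears in the test itself, while the LocalCacheHit and CacheMiss counter names are assumed from the same ProfileEvents family):

-- Read scalar-cache counters for the latest finished query in this database.
SYSTEM FLUSH LOGS;
SELECT
    ProfileEvents['ScalarSubqueriesGlobalCacheHit'] AS global_hits,
    ProfileEvents['ScalarSubqueriesLocalCacheHit'] AS local_hits,
    ProfileEvents['ScalarSubqueriesCacheMiss'] AS misses
FROM system.query_log
WHERE current_database = currentDatabase() AND type = 'QueryFinish'
ORDER BY event_time_microseconds DESC
LIMIT 1;
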
diff --git a/tests/queries/0_stateless/02188_table_function_format.reference b/tests/queries/0_stateless/02188_table_function_format.reference
index ab568fb9fe5..403a4044544 100644
--- a/tests/queries/0_stateless/02188_table_function_format.reference
+++ b/tests/queries/0_stateless/02188_table_function_format.reference
@@ -1,52 +1,52 @@
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
-111 Hello
-123 World
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
+Hello 111
+World 123
 1 2 [1,2,3] [['abc'],[],['d','e']]
 c1 Nullable(Float64)
 c2 Nullable(Float64)
 c3 Array(Nullable(Float64))
 c4 Array(Array(Nullable(String)))
-111 Hello
-123 World
-111 Hello
-131 Hello
-123 World
-b Nullable(Float64)
+Hello 111
+World 123
+Hello 111
+Hello 131
+World 123
 a Nullable(String)
+b Nullable(Float64)
diff --git a/tests/queries/0_stateless/02210_processors_profile_log.reference b/tests/queries/0_stateless/02210_processors_profile_log.reference
new file mode 100644
index 00000000000..a056b445bbd
--- /dev/null
+++ b/tests/queries/0_stateless/02210_processors_profile_log.reference
@@ -0,0 +1,38 @@
+-- { echo }
+EXPLAIN PIPELINE SELECT sleep(1);
+(Expression)
+ExpressionTransform
+  (SettingQuotaAndLimits)
+    (ReadFromStorage)
+    SourceFromSingleChunk 0 → 1
+SELECT sleep(1) SETTINGS log_processors_profiles=true, log_queries=1, log_queries_min_type='QUERY_FINISH';
+0
+SYSTEM FLUSH LOGS;
+WITH
+    (
+        SELECT query_id
+        FROM system.query_log
+        WHERE current_database = currentDatabase() AND Settings['log_processors_profiles']='1'
+    ) AS query_id_
+SELECT
+    name,
+    multiIf(
+        -- ExpressionTransform executes sleep(),
+        -- so IProcessor::work() will spend 1 sec.
+        name = 'ExpressionTransform', elapsed_us>1e6,
+        -- SourceFromSingleChunk, which feeds data to ExpressionTransform,
+        -- will feed the first block and then wait in PortFull.
+        name = 'SourceFromSingleChunk', output_wait_elapsed_us>1e6,
+        -- NullSource/LazyOutputFormat are the outputs,
+        -- so they cannot start to execute before sleep(1) has been executed.
+        input_wait_elapsed_us>1e6)
+    elapsed
+FROM system.processors_profile_log
+WHERE query_id = query_id_
+ORDER BY name;
+ExpressionTransform 1
+LazyOutputFormat 1
+LimitsCheckingTransform 1
+NullSource 1
+NullSource 1
+SourceFromSingleChunk 1
diff --git a/tests/queries/0_stateless/02210_processors_profile_log.sql b/tests/queries/0_stateless/02210_processors_profile_log.sql
new file mode 100644
index 00000000000..160f8009262
--- /dev/null
+++ b/tests/queries/0_stateless/02210_processors_profile_log.sql
@@ -0,0 +1,28 @@
+-- { echo }
+EXPLAIN PIPELINE SELECT sleep(1);
+
+SELECT sleep(1) SETTINGS log_processors_profiles=true, log_queries=1, log_queries_min_type='QUERY_FINISH';
+SYSTEM FLUSH LOGS;
+
+WITH
+    (
+        SELECT query_id
+        FROM system.query_log
+        WHERE current_database = currentDatabase() AND Settings['log_processors_profiles']='1'
+    ) AS query_id_
+SELECT
+    name,
+    multiIf(
+        -- ExpressionTransform executes sleep(),
+        -- so IProcessor::work() will spend 1 sec.
+        name = 'ExpressionTransform', elapsed_us>1e6,
+        -- SourceFromSingleChunk, which feeds data to ExpressionTransform,
+        -- will feed the first block and then wait in PortFull.
+        name = 'SourceFromSingleChunk', output_wait_elapsed_us>1e6,
+        -- NullSource/LazyOutputFormat are the outputs,
+        -- so they cannot start to execute before sleep(1) has been executed.
+        input_wait_elapsed_us>1e6)
+    elapsed
+FROM system.processors_profile_log
+WHERE query_id = query_id_
+ORDER BY name;
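The new system.processors_profile_log table introduced by this test records, per processor and query, busy time (elapsed_us) and time spent waiting for input or output (input_wait_elapsed_us, output_wait_elapsed_us), once log_processors_profiles is enabled for the query. A minimal sketch of ad-hoc use outside the test (illustration only; the query id is hypothetical):

-- Per-processor timing for one query; column names as used in the test above.
SELECT name, elapsed_us, input_wait_elapsed_us, output_wait_elapsed_us
FROM system.processors_profile_log
WHERE query_id = 'my-query-id' -- hypothetical id
ORDER BY elapsed_us DESC;
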
diff --git a/tests/queries/0_stateless/02221_parallel_replicas_bug.sh b/tests/queries/0_stateless/02221_parallel_replicas_bug.sh
index b4ac6817a54..cce32bf8272 100755
--- a/tests/queries/0_stateless/02221_parallel_replicas_bug.sh
+++ b/tests/queries/0_stateless/02221_parallel_replicas_bug.sh
@@ -4,4 +4,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
 
-${CLICKHOUSE_CLIENT} --allow_experimental_parallel_reading_from_replicas=1 -nmT < "$CURDIR"/01099_parallel_distributed_insert_select.sql > /dev/null
+${CLICKHOUSE_CLIENT} --allow_experimental_parallel_reading_from_replicas=1 -nm < "$CURDIR"/01099_parallel_distributed_insert_select.sql > /dev/null
diff --git a/tests/queries/0_stateless/02222_create_table_without_columns_metadata.reference b/tests/queries/0_stateless/02222_create_table_without_columns_metadata.reference
index 9e9e0082cb3..f32b0eb8a92 100644
--- a/tests/queries/0_stateless/02222_create_table_without_columns_metadata.reference
+++ b/tests/queries/0_stateless/02222_create_table_without_columns_metadata.reference
@@ -1,3 +1,3 @@
-CREATE TABLE default.test\n(\n    `y` Nullable(String),\n    `x` Nullable(Float64)\n)\nENGINE = File(\'JSONEachRow\', \'data.jsonl\')
+CREATE TABLE default.test\n(\n    `x` Nullable(Float64),\n    `y` Nullable(String)\n)\nENGINE = File(\'JSONEachRow\', \'data.jsonl\')
 OK
 OK
diff --git a/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh b/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh
index 6abe1e30334..f736751726d 100755
--- a/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh
+++ b/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh
@@ -6,5 +6,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 
 $CLICKHOUSE_LOCAL --query="SELECT n" 2>&1 | grep -q "Code: 47. DB::Exception: Missing columns:" && echo 'OK' || echo 'FAIL' ||:
-$CLICKHOUSE_LOCAL --testmode --query="SELECT n -- { serverError 47 }"
-
+$CLICKHOUSE_LOCAL --query="SELECT n -- { serverError 47 }"
diff --git a/tests/queries/0_stateless/02236_explain_pipeline_join.reference b/tests/queries/0_stateless/02236_explain_pipeline_join.reference
new file mode 100644
index 00000000000..ed993e2a1e7
--- /dev/null
+++ b/tests/queries/0_stateless/02236_explain_pipeline_join.reference
@@ -0,0 +1,19 @@
+(Expression)
+ExpressionTransform
+  (Join)
+  JoiningTransform 2 → 1
+    (Expression)
+    ExpressionTransform
+      (SettingQuotaAndLimits)
+        (Limit)
+        Limit
+          (ReadFromStorage)
+          Numbers 0 → 1
+    (Expression)
+    FillingRightJoinSide
+    ExpressionTransform
+      (SettingQuotaAndLimits)
+        (Limit)
+        Limit
+          (ReadFromStorage)
+          Numbers 0 → 1
diff --git a/tests/queries/0_stateless/02236_explain_pipeline_join.sql b/tests/queries/0_stateless/02236_explain_pipeline_join.sql
new file mode 100644
index 00000000000..de885ed74ee
--- /dev/null
+++ b/tests/queries/0_stateless/02236_explain_pipeline_join.sql
@@ -0,0 +1,10 @@
+EXPLAIN PIPELINE
+SELECT * FROM
+(
+    SELECT * FROM system.numbers LIMIT 10
+) t1
+ALL LEFT JOIN
+(
+    SELECT * FROM system.numbers LIMIT 10
+) t2
+USING number;
diff --git a/tests/queries/0_stateless/02240_get_type_serialization_streams.reference b/tests/queries/0_stateless/02240_get_type_serialization_streams.reference
new file mode 100644
index 00000000000..3537720214f
--- /dev/null
+++ b/tests/queries/0_stateless/02240_get_type_serialization_streams.reference
@@ -0,0 +1,8 @@
+['{ArraySizes}','{ArrayElements, Regular}']
+['{ArraySizes}','{ArrayElements, TupleElement(keys, escape_tuple_delimiter = true), Regular}','{ArrayElements, TupleElement(values, escape_tuple_delimiter = true), Regular}']
+['{TupleElement(1, escape_tuple_delimiter = true), Regular}','{TupleElement(2, escape_tuple_delimiter = true), Regular}','{TupleElement(3, escape_tuple_delimiter = true), Regular}']
+['{DictionaryKeys, Regular}','{DictionaryIndexes}']
+['{NullMap}','{NullableElements, Regular}']
+['{ArraySizes}','{ArrayElements, Regular}']
+['{ArraySizes}','{ArrayElements, TupleElement(keys, escape_tuple_delimiter = true), Regular}','{ArrayElements, TupleElement(values, escape_tuple_delimiter = true), Regular}']
+['{TupleElement(1, escape_tuple_delimiter = true), Regular}','{TupleElement(2, escape_tuple_delimiter = true), Regular}','{TupleElement(3, escape_tuple_delimiter = true), Regular}','{TupleElement(4, escape_tuple_delimiter = true), Regular}']
diff --git a/tests/queries/0_stateless/02240_get_type_serialization_streams.sql b/tests/queries/0_stateless/02240_get_type_serialization_streams.sql
new file mode 100644
index 00000000000..72a66269e22
--- /dev/null
+++ b/tests/queries/0_stateless/02240_get_type_serialization_streams.sql
@@ -0,0 +1,8 @@
+select getTypeSerializationStreams('Array(Int8)');
+select getTypeSerializationStreams('Map(String, Int64)');
+select getTypeSerializationStreams('Tuple(String, Int64, Float64)');
+select getTypeSerializationStreams('LowCardinality(String)');
+select getTypeSerializationStreams('Nullable(String)');
+select getTypeSerializationStreams([1,2,3]);
+select getTypeSerializationStreams(map('a', 1, 'b', 2));
+select getTypeSerializationStreams(tuple('a', 1, 'b', 2));
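getTypeSerializationStreams, added by the test above, accepts either a type name as a string or an example value (whose type is then inspected) and returns the substream paths that serializing the type produces; the reference file shows the exact outputs. For instance, matching the fifth line of the reference:

select getTypeSerializationStreams('Nullable(String)');
-- ['{NullMap}','{NullableElements, Regular}']
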
diff --git a/tests/queries/0_stateless/02240_tskv_schema_inference_bug.reference b/tests/queries/0_stateless/02240_tskv_schema_inference_bug.reference
index a8abc33648e..69ed3536951 100644
--- a/tests/queries/0_stateless/02240_tskv_schema_inference_bug.reference
+++ b/tests/queries/0_stateless/02240_tskv_schema_inference_bug.reference
@@ -1,8 +1,8 @@
+a Nullable(String)
 b Nullable(String)
 c Nullable(String)
-a Nullable(String)
-s1 \N 1
-} [2] 2
+1 s1 \N
+2 } [2]
 \N \N \N
 \N \N \N
-\N [3] \N
+\N \N [3]
diff --git a/tests/queries/0_stateless/02242_optimize_to_subcolumns_no_storage.sql b/tests/queries/0_stateless/02242_optimize_to_subcolumns_no_storage.sql
index e6e4663c5aa..8f8485eb58f 100644
--- a/tests/queries/0_stateless/02242_optimize_to_subcolumns_no_storage.sql
+++ b/tests/queries/0_stateless/02242_optimize_to_subcolumns_no_storage.sql
@@ -1,3 +1,4 @@
+-- Tags: no-backward-compatibility-check:22.3.2.1
 SET optimize_functions_to_subcolumns = 1;
 SELECT count(*) FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3
 WHERE (n1.number = n2.number) AND (n2.number = n3.number);
diff --git a/tests/queries/0_stateless/02244_ip_address_invalid_insert.reference b/tests/queries/0_stateless/02244_ip_address_invalid_insert.reference
new file mode 100644
index 00000000000..60e6a5da083
--- /dev/null
+++ b/tests/queries/0_stateless/02244_ip_address_invalid_insert.reference
@@ -0,0 +1,10 @@
+1.1.1.1 1.1.1.1
+ 0.0.0.0
+1.1.1.1 1.1.1.1
+ 0.0.0.0
+fe80::9801:43ff:fe1f:7690 fe80::9801:43ff:fe1f:7690
+1.1.1.1 ::
+ ::
+fe80::9801:43ff:fe1f:7690 fe80::9801:43ff:fe1f:7690
+1.1.1.1 ::ffff:1.1.1.1
+ ::
diff --git a/tests/queries/0_stateless/02244_ip_address_invalid_insert.sql b/tests/queries/0_stateless/02244_ip_address_invalid_insert.sql
new file mode 100644
index 00000000000..4057b9b2d98
--- /dev/null
+++ b/tests/queries/0_stateless/02244_ip_address_invalid_insert.sql
@@ -0,0 +1,81 @@
+DROP TABLE IF EXISTS test_table_ipv4;
+CREATE TABLE test_table_ipv4
+(
+    ip String,
+    ipv4 IPv4
+) ENGINE = TinyLog;
+
+INSERT INTO test_table_ipv4 VALUES ('1.1.1.1', '1.1.1.1'), ('', ''); --{clientError 441}
+
+SET input_format_ipv4_default_on_conversion_error = 1;
+
+INSERT INTO test_table_ipv4 VALUES ('1.1.1.1', '1.1.1.1'), ('', '');
+SELECT ip, ipv4 FROM test_table_ipv4;
+
+SET input_format_ipv4_default_on_conversion_error = 0;
+
+DROP TABLE test_table_ipv4;
+
+DROP TABLE IF EXISTS test_table_ipv4_materialized;
+CREATE TABLE test_table_ipv4_materialized
+(
+    ip String,
+    ipv6 IPv4 MATERIALIZED toIPv4(ip)
+) ENGINE = TinyLog;
+
+INSERT INTO test_table_ipv4_materialized(ip) VALUES ('1.1.1.1'), (''); --{serverError 441}
+
+SET input_format_ipv4_default_on_conversion_error = 1;
+
+INSERT INTO test_table_ipv4_materialized(ip) VALUES ('1.1.1.1'), (''); --{serverError 441}
+
+SET cast_ipv4_ipv6_default_on_conversion_error = 1;
+
+INSERT INTO test_table_ipv4_materialized(ip) VALUES ('1.1.1.1'), ('');
+SELECT ip, ipv6 FROM test_table_ipv4_materialized;
+
+SET input_format_ipv4_default_on_conversion_error = 0;
+SET cast_ipv4_ipv6_default_on_conversion_error = 0;
+
+DROP TABLE test_table_ipv4_materialized;
+
+DROP TABLE IF EXISTS test_table_ipv6;
+CREATE TABLE test_table_ipv6
+(
+    ip String,
+    ipv6 IPv6
+) ENGINE = TinyLog;
+
+INSERT INTO test_table_ipv6 VALUES ('fe80::9801:43ff:fe1f:7690', 'fe80::9801:43ff:fe1f:7690'), ('1.1.1.1', '1.1.1.1'), ('', ''); --{clientError 441}
+
+SET input_format_ipv6_default_on_conversion_error = 1;
+
+INSERT INTO test_table_ipv6 VALUES ('fe80::9801:43ff:fe1f:7690', 'fe80::9801:43ff:fe1f:7690'), ('1.1.1.1', '1.1.1.1'), ('', '');
+SELECT ip, ipv6 FROM test_table_ipv6;
+
+SET input_format_ipv6_default_on_conversion_error = 0;
+
+DROP TABLE test_table_ipv6;
+
+DROP TABLE IF EXISTS
test_table_ipv6_materialized; +CREATE TABLE test_table_ipv6_materialized +( + ip String, + ipv6 IPv6 MATERIALIZED toIPv6(ip) +) ENGINE = TinyLog; + +INSERT INTO test_table_ipv6_materialized(ip) VALUES ('fe80::9801:43ff:fe1f:7690'), ('1.1.1.1'), (''); --{serverError 441} + +SET input_format_ipv6_default_on_conversion_error = 1; + +INSERT INTO test_table_ipv6_materialized(ip) VALUES ('fe80::9801:43ff:fe1f:7690'), ('1.1.1.1'), (''); --{serverError 441} + +SET cast_ipv4_ipv6_default_on_conversion_error = 1; + +INSERT INTO test_table_ipv6_materialized(ip) VALUES ('fe80::9801:43ff:fe1f:7690'), ('1.1.1.1'), (''); +SELECT ip, ipv6 FROM test_table_ipv6_materialized; + +SET input_format_ipv6_default_on_conversion_error = 0; +SET cast_ipv4_ipv6_default_on_conversion_error = 0; + +DROP TABLE test_table_ipv6_materialized; diff --git a/tests/queries/0_stateless/02245_format_string_stack_overflow.sql b/tests/queries/0_stateless/02245_format_string_stack_overflow.sql index 1ee3606d3a6..40053fd0d9b 100644 --- a/tests/queries/0_stateless/02245_format_string_stack_overflow.sql +++ b/tests/queries/0_stateless/02245_format_string_stack_overflow.sql @@ -1 +1,2 @@ +-- Tags: no-backward-compatibility-check:22.3 select format('{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0
}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0
}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}', toString(number)) str from numbers(1); diff --git a/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.reference b/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.reference new file mode 100644 index 00000000000..12c61d9c54e --- /dev/null +++ b/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.reference @@ -0,0 +1,2 @@ +usa + diff --git a/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.sql b/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.sql new file mode 100644 index 00000000000..abc2ee41402 --- /dev/null +++ b/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.sql @@ -0,0 +1,20 @@ +drop table if exists with_nullable; +drop table if exists without_nullable; + +CREATE TABLE with_nullable +( timestamp UInt32, + country LowCardinality(Nullable(String)) ) ENGINE = Memory; + +CREATE TABLE without_nullable +( timestamp UInt32, + country LowCardinality(String)) ENGINE = Memory; + +insert into with_nullable values(0,'f'),(0,'usa'); +insert into without_nullable values(0,'usa'),(0,'us2a'); + +select if(t0.country is null ,t2.country,t0.country) "country" +from without_nullable t0 right outer join with_nullable t2 on t0.country=t2.country; + +drop table with_nullable; +drop table without_nullable; + diff --git a/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.reference b/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.reference new file mode 100644 index 00000000000..49a285dc11a --- /dev/null +++ b/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.reference @@ -0,0 +1,34 @@ +a Nullable(String) +b Nullable(String) +c Nullable(String) +1 s1 \N +2 } [2] +\N \N \N +\N \N \N +\N \N [3] +b Nullable(String) +a Nullable(String) +c Nullable(String) +e Nullable(String) +1 \N \N \N +\N 2 3 \N +\N \N \N \N +\N \N \N 3 +3 3 1 \N +a Nullable(Float64) +b Nullable(String) +c Array(Nullable(Float64)) +1 s1 [] +2 \N [2] +\N \N [] +\N \N [] +\N \N [3] +b Nullable(Float64) +a Nullable(Float64) +c Nullable(Float64) +e Nullable(Float64) +1 \N \N \N +\N 2 3 \N +\N \N \N \N +\N \N \N 3 +3 3 1 \N diff --git a/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.sh b/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.sh new file mode 100755 index 00000000000..0be26371585 --- /dev/null +++ b/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Tags: no-parallel, no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + + +USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +FILE_NAME=test_02247.data +DATA_FILE=${USER_FILES_PATH:?}/$FILE_NAME + +touch $DATA_FILE + +echo -e 'a=1\tb=s1\tc=\N +c=[2]\ta=2\tb=\N} +a=\N + +c=[3]\ta=\N' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSKV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSKV')" + +echo -e 'b=1 +a=2\tc=3 + +e=3 +c=1\tb=3\ta=3' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSKV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSKV')" + + +echo -e '{"a" : 1, "b" : "s1", "c" : null} +{"c" : [2], "a" : 2, "b" : null} +{} +{"a" : null} +{"c" : [3], "a" : null}' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONEachRow')" + +echo -e '{"b" : 1} +{"a" : 2, "c" : 3} +{} +{"e" : 3} +{"c" : 1, "b" : 3, "a" : 3}' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONEachRow')" + + +rm $DATA_FILE diff --git a/tests/queries/0_stateless/02247_read_bools_as_numbers_json.reference b/tests/queries/0_stateless/02247_read_bools_as_numbers_json.reference new file mode 100644 index 00000000000..a7609bdd86b --- /dev/null +++ b/tests/queries/0_stateless/02247_read_bools_as_numbers_json.reference @@ -0,0 +1,18 @@ +x Nullable(Bool) +true +false +x Nullable(Float64) +42.42 +0 +x Nullable(Float64) +1 +0.42 +c1 Nullable(Bool) +true +false +c1 Nullable(Float64) +42.42 +0 +c1 Nullable(Float64) +1 +0.42 diff --git a/tests/queries/0_stateless/02247_read_bools_as_numbers_json.sh b/tests/queries/0_stateless/02247_read_bools_as_numbers_json.sh new file mode 100755 index 00000000000..10f050ea6d1 --- /dev/null +++ b/tests/queries/0_stateless/02247_read_bools_as_numbers_json.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash +# Tags: no-parallel, no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + + +USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +FILE_NAME=test_02247.data +DATA_FILE=${USER_FILES_PATH:?}/$FILE_NAME + +touch $DATA_FILE + +echo -e '{"x" : true} +{"x" : false}' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONEachRow')" + +echo -e '{"x" : 42.42} +{"x" : false}' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONEachRow')" + +echo -e '{"x" : true} +{"x" : 0.42}' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONEachRow')" + + +echo -e '[true] +[false]' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONCompactEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONCompactEachRow')" + +echo -e '[42.42] +[false]' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONCompactEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONCompactEachRow')" + +echo -e '[true] +[0.42]' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONCompactEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONCompactEachRow')" + + +rm $DATA_FILE diff --git a/tests/queries/0_stateless/02248_nullable_custom_types_to_string.sql b/tests/queries/0_stateless/02248_nullable_custom_types_to_string.sql index 313f703fd03..605500ee840 100644 --- a/tests/queries/0_stateless/02248_nullable_custom_types_to_string.sql +++ b/tests/queries/0_stateless/02248_nullable_custom_types_to_string.sql @@ -1,3 +1,4 @@ +-- Tags: no-backward-compatibility-check:22.3.4.44 select toString(toNullable(true)); select toString(CAST(NULL, 'Nullable(Bool)')); select toString(toNullable(toIPv4('0.0.0.0'))); diff --git a/tests/queries/0_stateless/02249_parse_date_time_basic.reference b/tests/queries/0_stateless/02249_parse_date_time_basic.reference new file mode 100644 index 00000000000..eb030a8fd3d --- /dev/null +++ b/tests/queries/0_stateless/02249_parse_date_time_basic.reference @@ -0,0 +1,5 @@ +2022-03-31T00:00:00Z 1 +2022-04-01T09:10:24Z 2 +2022-03-31T10:18:56Z 3 +2022-03-31T10:18:56Z 4 +2022-04-01T09:10:24Z 5 diff --git a/tests/queries/0_stateless/02249_parse_date_time_basic.sql b/tests/queries/0_stateless/02249_parse_date_time_basic.sql new file mode 100644 index 00000000000..7146462fb74 --- /dev/null +++ b/tests/queries/0_stateless/02249_parse_date_time_basic.sql @@ -0,0 +1,10 @@ +SET date_time_output_format='iso'; +drop table if exists t; +CREATE TABLE t (a DateTime('UTC'), b String, c String, d String, e Int32) ENGINE = Memory; +INSERT INTO t(a, b, c, d ,e) VALUES ('2022-03-31','','','',1); +INSERT INTO t(a, b, c, d ,e) VALUES (1648804224,'','','',2); +INSERT INTO t(a, b, c, d ,e) VALUES ('2022-03-31 10:18:56','','','',3); +INSERT INTO t(a, b, c, d ,e) VALUES ('2022-03-31T10:18:56','','','',4); +INSERT INTO t(a, b, c, d ,e) VALUES ('1648804224','','','',5); +select a, e from t order by e; +drop table if exists t; diff --git a/tests/queries/0_stateless/02250_hints_for_columns.reference b/tests/queries/0_stateless/02250_hints_for_columns.reference new file mode 100644 index 00000000000..0eabe367130 --- /dev/null +++ b/tests/queries/0_stateless/02250_hints_for_columns.reference @@ -0,0 +1,3 @@ +OK +OK +OK diff 
--git a/tests/queries/0_stateless/02250_hints_for_columns.sh b/tests/queries/0_stateless/02250_hints_for_columns.sh new file mode 100755 index 00000000000..45fd2f238b1 --- /dev/null +++ b/tests/queries/0_stateless/02250_hints_for_columns.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS t" + +$CLICKHOUSE_CLIENT --query="CREATE TABLE t (CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) ENGINE = MergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192)" + +$CLICKHOUSE_CLIENT --query="ALTER TABLE t DROP COLUMN ToDro" 2>&1 | grep -q "Maybe you meant: \['ToDrop'\]" && echo 'OK' || echo 'FAIL' + +$CLICKHOUSE_CLIENT --query="ALTER TABLE t MODIFY COLUMN ToDro UInt64" 2>&1 | grep -q "Maybe you meant: \['ToDrop'\]" && echo 'OK' || echo 'FAIL' + +$CLICKHOUSE_CLIENT --query="ALTER TABLE t RENAME COLUMN ToDro to ToDropp" 2>&1 | grep -q "Maybe you meant: \['ToDrop'\]" && echo 'OK' || echo 'FAIL' + +$CLICKHOUSE_CLIENT --query="DROP TABLE t" diff --git a/tests/queries/0_stateless/02250_hints_for_projections.reference b/tests/queries/0_stateless/02250_hints_for_projections.reference new file mode 100644 index 00000000000..d86bac9de59 --- /dev/null +++ b/tests/queries/0_stateless/02250_hints_for_projections.reference @@ -0,0 +1 @@ +OK diff --git a/tests/queries/0_stateless/02250_hints_for_projections.sh b/tests/queries/0_stateless/02250_hints_for_projections.sh new file mode 100755 index 00000000000..7db8b243ae4 --- /dev/null +++ b/tests/queries/0_stateless/02250_hints_for_projections.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS t" + +$CLICKHOUSE_CLIENT --query="create table t (x Int32, y Int32, projection pToDrop (select x, y order by x)) engine = MergeTree order by y;" + +$CLICKHOUSE_CLIENT --query="ALTER TABLE t DROP PROJECTION pToDro" 2>&1 | grep -q "Maybe you meant: \['pToDrop'\]" && echo 'OK' || echo 'FAIL' + +$CLICKHOUSE_CLIENT --query="DROP TABLE t" diff --git a/tests/queries/0_stateless/02251_alter_enum_nested_struct.reference b/tests/queries/0_stateless/02251_alter_enum_nested_struct.reference new file mode 100644 index 00000000000..ada5f47c230 --- /dev/null +++ b/tests/queries/0_stateless/02251_alter_enum_nested_struct.reference @@ -0,0 +1,7 @@ +1 ['Option2','Option1'] +2 ['Option1'] +3 ['Option1','Option3'] +1 ['Option2','Option1'] +2 ['Option1'] +3 ['Option1','Option3'] +0 diff --git a/tests/queries/0_stateless/02251_alter_enum_nested_struct.sql b/tests/queries/0_stateless/02251_alter_enum_nested_struct.sql new file mode 100644 index 00000000000..ad2dab3631f --- /dev/null +++ b/tests/queries/0_stateless/02251_alter_enum_nested_struct.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS alter_enum_array; + +CREATE TABLE alter_enum_array( + Key UInt64, + Value Array(Enum8('Option1'=1, 'Option2'=2)) +) +ENGINE=MergeTree() +ORDER BY tuple(); + +INSERT INTO alter_enum_array VALUES (1, ['Option2', 'Option1']), (2, ['Option1']); + +ALTER TABLE alter_enum_array MODIFY COLUMN Value Array(Enum8('Option1'=1, 'Option2'=2, 'Option3'=3)) SETTINGS mutations_sync=2; + +INSERT INTO alter_enum_array VALUES (3, ['Option1','Option3']); + +SELECT * FROM alter_enum_array ORDER BY Key; + +DETACH TABLE alter_enum_array; +ATTACH TABLE alter_enum_array; + +SELECT * FROM alter_enum_array ORDER BY Key; + +OPTIMIZE TABLE alter_enum_array FINAL; + +SELECT COUNT() FROM system.mutations where table='alter_enum_array' and database=currentDatabase(); + +DROP TABLE IF EXISTS alter_enum_array; diff --git a/tests/queries/0_stateless/02251_last_day_of_month.reference b/tests/queries/0_stateless/02251_last_day_of_month.reference new file mode 100644 index 00000000000..0b83aff1e42 --- /dev/null +++ b/tests/queries/0_stateless/02251_last_day_of_month.reference @@ -0,0 +1,7 @@ +2021-09-30 2021-09-30 2021-09-30 +2021-03-31 2021-03-31 2021-03-31 +2021-02-28 2021-02-28 2021-02-28 +2020-02-29 2020-02-29 2020-02-29 +2021-12-31 2021-12-31 2021-12-31 +2020-12-31 2020-12-31 2020-12-31 +2020-12-31 2020-12-31 diff --git a/tests/queries/0_stateless/02251_last_day_of_month.sql b/tests/queries/0_stateless/02251_last_day_of_month.sql new file mode 100644 index 00000000000..1261f051e17 --- /dev/null +++ b/tests/queries/0_stateless/02251_last_day_of_month.sql @@ -0,0 +1,46 @@ +-- month with 30 days +WITH + toDate('2021-09-12') AS date_value, + toDateTime('2021-09-12 11:22:33') AS date_time_value, + toDateTime64('2021-09-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- month with 31 days +WITH + toDate('2021-03-12') AS date_value, + toDateTime('2021-03-12 11:22:33') AS date_time_value, + toDateTime64('2021-03-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- non leap year February +WITH + toDate('2021-02-12') AS date_value, + toDateTime('2021-02-12 11:22:33') AS date_time_value, + toDateTime64('2021-02-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), 
toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- leap year February +WITH + toDate('2020-02-12') AS date_value, + toDateTime('2020-02-12 11:22:33') AS date_time_value, + toDateTime64('2020-02-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- December 31 for non-leap year +WITH + toDate('2021-12-12') AS date_value, + toDateTime('2021-12-12 11:22:33') AS date_time_value, + toDateTime64('2021-12-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- December 31 for leap year +WITH + toDate('2020-12-12') AS date_value, + toDateTime('2020-12-12 11:22:33') AS date_time_value, + toDateTime64('2020-12-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- aliases +WITH + toDate('2020-12-12') AS date_value +SELECT last_day(date_value), LAST_DAY(date_value); diff --git a/tests/queries/0_stateless/02252_executable_user_defined_function_short_circuit.reference b/tests/queries/0_stateless/02252_executable_user_defined_function_short_circuit.reference new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/tests/queries/0_stateless/02252_executable_user_defined_function_short_circuit.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/02252_executable_user_defined_function_short_circuit.sql b/tests/queries/0_stateless/02252_executable_user_defined_function_short_circuit.sql new file mode 100644 index 00000000000..a475ba33740 --- /dev/null +++ b/tests/queries/0_stateless/02252_executable_user_defined_function_short_circuit.sql @@ -0,0 +1,10 @@ +SELECT number FROM numbers(10) WHERE number > 15 and test_function(number, number) == 4; + +SYSTEM FLUSH LOGS; + +SELECT ProfileEvents['ExecuteShellCommand'] FROM system.query_log WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query == 'SELECT number FROM numbers(10) WHERE number > 15 and test_function(number, number) == 4;' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute + LIMIT 1; diff --git a/tests/queries/0_stateless/02252_jit_profile_events.reference b/tests/queries/0_stateless/02252_jit_profile_events.reference new file mode 100644 index 00000000000..12d82114f75 --- /dev/null +++ b/tests/queries/0_stateless/02252_jit_profile_events.reference @@ -0,0 +1,4 @@ +0 +1 +0 1 2 +1 diff --git a/tests/queries/0_stateless/02252_jit_profile_events.sql b/tests/queries/0_stateless/02252_jit_profile_events.sql new file mode 100644 index 00000000000..ddb95d4fa37 --- /dev/null +++ b/tests/queries/0_stateless/02252_jit_profile_events.sql @@ -0,0 +1,31 @@ +-- Tags: no-fasttest, no-ubsan, no-cpu-aarch64 + +SET compile_expressions = 1; +SET min_count_to_compile_expression = 0; + +SYSTEM DROP COMPILED EXPRESSION CACHE; + +SELECT number + number + number FROM numbers(1); + +SYSTEM FLUSH LOGS; + +SELECT ProfileEvents['CompileFunction'] FROM system.query_log WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query == 'SELECT number + number + number FROM numbers(1);' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute + LIMIT 1; + +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT sum(number), sum(number + 1), sum(number + 2) FROM numbers(1) GROUP BY number; + 
+SYSTEM FLUSH LOGS;
+
+SELECT ProfileEvents['CompileFunction'] FROM system.query_log WHERE
+    current_database = currentDatabase()
+    AND type = 'QueryFinish'
+    AND query == 'SELECT sum(number), sum(number + 1), sum(number + 2) FROM numbers(1) GROUP BY number;'
+    AND event_date >= yesterday() AND event_time > now() - interval 10 minute
+    LIMIT 1;
diff --git a/tests/queries/0_stateless/02252_reset_non_existing_setting.reference b/tests/queries/0_stateless/02252_reset_non_existing_setting.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02252_reset_non_existing_setting.sql b/tests/queries/0_stateless/02252_reset_non_existing_setting.sql
new file mode 100644
index 00000000000..362388c4a10
--- /dev/null
+++ b/tests/queries/0_stateless/02252_reset_non_existing_setting.sql
@@ -0,0 +1,13 @@
+DROP TABLE IF EXISTS most_ordinary_mt;
+
+CREATE TABLE most_ordinary_mt
+(
+    Key UInt64
+)
+ENGINE = MergeTree()
+ORDER BY tuple();
+
+ALTER TABLE most_ordinary_mt RESET SETTING ttl; --{serverError 36}
+ALTER TABLE most_ordinary_mt RESET SETTING allow_remote_fs_zero_copy_replication, xxx; --{serverError 36}
+
+DROP TABLE IF EXISTS most_ordinary_mt;
diff --git a/tests/queries/0_stateless/02262_column_ttl.reference b/tests/queries/0_stateless/02262_column_ttl.reference
new file mode 100644
index 00000000000..f59cb48c5f5
--- /dev/null
+++ b/tests/queries/0_stateless/02262_column_ttl.reference
@@ -0,0 +1 @@
+1 0
diff --git a/tests/queries/0_stateless/02262_column_ttl.sh b/tests/queries/0_stateless/02262_column_ttl.sh
new file mode 100755
index 00000000000..b5e29c9b2a1
--- /dev/null
+++ b/tests/queries/0_stateless/02262_column_ttl.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+# Tags: no-parallel, no-ordinary-database
+# ^^^^^^^^^^^
+# Since the underlying view may disappear while the log is being flushed, which leads to:
+#
+# DB::Exception: Table test_x449vo..inner_id.9c14fb82-e6b1-4d1a-85a6-935c3a2a2029 is dropped. (TABLE_IS_DROPPED)
+#
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
+# regression test for column TTLs
+# note that this should be written in .sh, since we need $CLICKHOUSE_DATABASE
+# (not 'default') to catch text_log
+
+$CLICKHOUSE_CLIENT -nm -q "
+    drop table if exists ttl_02262;
+    drop table if exists this_text_log;
+
+    create table ttl_02262 (date Date, key Int, value String TTL date + interval 1 month) engine=MergeTree order by key;
+    insert into ttl_02262 values ('2010-01-01', 2010, 'foo');
+    optimize table ttl_02262 final;
+
+    detach table ttl_02262;
+    attach table ttl_02262;
+
+    -- create system.text_log
+    system flush logs;
+"

ttl_02262_uuid=$($CLICKHOUSE_CLIENT -q "select uuid from system.tables where database = '$CLICKHOUSE_DATABASE' and name = 'ttl_02262'")

$CLICKHOUSE_CLIENT -nm -q "
+    -- OPTIMIZE TABLE x FINAL will be done in the background;
+    -- attach to its log via the table UUID in query_id (see merger/mutator code).
+    create materialized view this_text_log engine=Memory() as
+        select * from system.text_log where query_id like '%${ttl_02262_uuid}%';
+
+    optimize table ttl_02262 final;
+    system flush logs;
+    -- If the TTL is applied again (during OPTIMIZE TABLE FINAL), it will produce the following message:
+    --
+    --     Some TTL values were not calculated for part 201701_487_641_3. Will calculate them forcefully during merge.
+    --
+    -- Let's ensure that this does not happen anymore:
+    select count()>0, countIf(message LIKE '%TTL%') from this_text_log;
+
+    drop table ttl_02262;
+    drop table this_text_log;
+"
diff --git a/tests/queries/0_stateless/02264_format_insert_compression.reference b/tests/queries/0_stateless/02264_format_insert_compression.reference
new file mode 100644
index 00000000000..107b7fcb3e9
--- /dev/null
+++ b/tests/queries/0_stateless/02264_format_insert_compression.reference
@@ -0,0 +1,3 @@
+-- { echo }
+EXPLAIN SYNTAX INSERT INTO foo FROM INFILE '/dev/null' COMPRESSION 'gz';
+INSERT INTO foo FROM INFILE \'/dev/null\' COMPRESSION \'gz\'
diff --git a/tests/queries/0_stateless/02264_format_insert_compression.sql b/tests/queries/0_stateless/02264_format_insert_compression.sql
new file mode 100644
index 00000000000..c095a8fbbb7
--- /dev/null
+++ b/tests/queries/0_stateless/02264_format_insert_compression.sql
@@ -0,0 +1,2 @@
+-- { echo }
+EXPLAIN SYNTAX INSERT INTO foo FROM INFILE '/dev/null' COMPRESSION 'gz';
diff --git a/tests/queries/0_stateless/02264_format_insert_infile.reference b/tests/queries/0_stateless/02264_format_insert_infile.reference
new file mode 100644
index 00000000000..338ea6fbfc6
--- /dev/null
+++ b/tests/queries/0_stateless/02264_format_insert_infile.reference
@@ -0,0 +1,3 @@
+-- { echo }
+EXPLAIN SYNTAX INSERT INTO foo FROM INFILE '/dev/null';
+INSERT INTO foo FROM INFILE \'/dev/null\'
diff --git a/tests/queries/0_stateless/02264_format_insert_infile.sql b/tests/queries/0_stateless/02264_format_insert_infile.sql
new file mode 100644
index 00000000000..38ee39d932d
--- /dev/null
+++ b/tests/queries/0_stateless/02264_format_insert_infile.sql
@@ -0,0 +1,2 @@
+-- { echo }
+EXPLAIN SYNTAX INSERT INTO foo FROM INFILE '/dev/null';
diff --git a/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.reference b/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.reference
new file mode 100644
index 00000000000..58c9bdf9d01
--- /dev/null
+++ b/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.reference
@@ -0,0 +1 @@
+111
diff --git a/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.sql b/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.sql
new file mode 100644
index 00000000000..3ec995a6a24
--- /dev/null
+++ b/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.sql
@@ -0,0 +1,17 @@
+-- Tags: no-parallel
+
+DROP DATABASE IF EXISTS 02265_atomic_db;
+DROP DATABASE IF EXISTS 02265_ordinary_db;
+
+CREATE DATABASE 02265_atomic_db ENGINE = Atomic;
+CREATE DATABASE 02265_ordinary_db ENGINE = Ordinary;
+
+CREATE TABLE 02265_ordinary_db.join_table ( `a` Int64 ) ENGINE = Join(`ALL`, LEFT, a);
+INSERT INTO 02265_ordinary_db.join_table VALUES (111);
+
+RENAME TABLE 02265_ordinary_db.join_table TO 02265_atomic_db.join_table;
+
+SELECT * FROM 02265_atomic_db.join_table;
+
+DROP DATABASE IF EXISTS 02265_atomic_db;
+DROP DATABASE IF EXISTS 02265_ordinary_db;
diff --git a/tests/queries/0_stateless/02265_test_dns_profile_events.reference b/tests/queries/0_stateless/02265_test_dns_profile_events.reference
new file mode 100644
index 00000000000..97ca33b311f
--- /dev/null
+++ b/tests/queries/0_stateless/02265_test_dns_profile_events.reference
@@ -0,0 +1,2 @@
+first_check 1
+second_check 1
diff --git a/tests/queries/0_stateless/02265_test_dns_profile_events.sh b/tests/queries/0_stateless/02265_test_dns_profile_events.sh
new file mode 100755
index 00000000000..756a761a0ae
--- /dev/null
+++
b/tests/queries/0_stateless/02265_test_dns_profile_events.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Tags: no-parallel + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + + +current_dns_errors=$($CLICKHOUSE_CLIENT --query "SELECT sum(value) FROM system.events where event = 'DNSError';") +${CLICKHOUSE_CLIENT} --query "SELECT * FROM remote('ThisHostNameDoesNotExistSoItShouldFail', system, one)" 2>/dev/null +${CLICKHOUSE_CLIENT} --query "SELECT 'first_check', sum(value) > ${current_dns_errors} FROM system.events where event = 'DNSError';" + +current_dns_errors=$($CLICKHOUSE_CLIENT --query "SELECT sum(value) FROM system.events where event = 'DNSError';") +${CLICKHOUSE_CLIENT} --query "SELECT * FROM remote('ThisHostNameDoesNotExistSoItShouldFail2', system, one)" 2>/dev/null +${CLICKHOUSE_CLIENT} --query "SELECT 'second_check', sum(value) > ${current_dns_errors} FROM system.events where event = 'DNSError';" + +${CLICKHOUSE_CLIENT} --query "SYSTEM DROP DNS CACHE" diff --git a/tests/queries/1_stateful/00159_parallel_formatting_tsv_and_friends.sh b/tests/queries/1_stateful/00159_parallel_formatting_tsv_and_friends.sh index 9d48774dd2d..02441190b91 100755 --- a/tests/queries/1_stateful/00159_parallel_formatting_tsv_and_friends.sh +++ b/tests/queries/1_stateful/00159_parallel_formatting_tsv_and_friends.sh @@ -10,10 +10,10 @@ FORMATS=('TSV' 'TSVWithNames' 'TSKV') for format in "${FORMATS[@]}" do echo "$format, false"; - $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \ + $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=false -q \ "SELECT ClientEventTime::DateTime('Asia/Dubai') as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits ORDER BY a, b, c Format $format" | md5sum echo "$format, true"; - $CLICKHOUSE_CLIENT --output_format_parallel_formatting=true -q \ + $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=true -q \ "SELECT ClientEventTime::DateTime('Asia/Dubai') as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits ORDER BY a, b, c Format $format" | md5sum done diff --git a/tests/queries/1_stateful/00168_parallel_processing_on_replicas_part_1.sh b/tests/queries/1_stateful/00168_parallel_processing_on_replicas_part_1.sh index 276fc0274c2..58ce66056af 100755 --- a/tests/queries/1_stateful/00168_parallel_processing_on_replicas_part_1.sh +++ b/tests/queries/1_stateful/00168_parallel_processing_on_replicas_part_1.sh @@ -68,8 +68,8 @@ do TESTNAME_RESULT="/tmp/result_$TESTNAME" NEW_TESTNAME_RESULT="/tmp/result_dist_$TESTNAME" - $CLICKHOUSE_CLIENT $SETTINGS -nm --testmode < $TESTPATH > $TESTNAME_RESULT - $CLICKHOUSE_CLIENT $SETTINGS -nm --testmode < $NEW_TESTNAME > $NEW_TESTNAME_RESULT + $CLICKHOUSE_CLIENT $SETTINGS -nm < $TESTPATH > $TESTNAME_RESULT + $CLICKHOUSE_CLIENT $SETTINGS -nm < $NEW_TESTNAME > $NEW_TESTNAME_RESULT expected=$(cat $TESTNAME_RESULT | md5sum) actual=$(cat $NEW_TESTNAME_RESULT | md5sum) diff --git a/tests/queries/0_stateless/01747_system_session_log_long.reference b/tests/queries/bugs/01747_system_session_log_long.reference similarity index 100% rename from tests/queries/0_stateless/01747_system_session_log_long.reference rename to tests/queries/bugs/01747_system_session_log_long.reference diff --git a/tests/queries/0_stateless/01747_system_session_log_long.sh b/tests/queries/bugs/01747_system_session_log_long.sh similarity index 100% rename from tests/queries/0_stateless/01747_system_session_log_long.sh rename to 
tests/queries/bugs/01747_system_session_log_long.sh diff --git a/tests/testflows/aes_encryption/requirements/requirements.md b/tests/testflows/aes_encryption/requirements/requirements.md index 80cb614268c..23906f797d0 100644 --- a/tests/testflows/aes_encryption/requirements/requirements.md +++ b/tests/testflows/aes_encryption/requirements/requirements.md @@ -311,7 +311,7 @@ version: 1.0 of the `encrypt` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as +mode and SHALL accept [CBC], [CFB128], or [OFB] as well as [CTR] and [GCM] as the values. For example, `aes-256-ofb`. #### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.Invalid @@ -327,9 +327,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `encrypt` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -403,9 +400,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `encrypt` function is called with the following parameter values when using non-GCM modes -* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified -* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified -* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified * `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified @@ -476,7 +470,7 @@ version: 1.0 of the `decrypt` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as +mode and SHALL accept [CBC], [CFB128], or [OFB] as well as [CTR] and [GCM] as the values. For example, `aes-256-ofb`. 
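To make the `aes-[key_length]-[mode]` value format described above concrete, a minimal `encrypt`/`decrypt` round trip might look as follows. This is a sketch only, not part of this diff; the 32-byte key and 16-byte IV are made-up illustrative values of the lengths these modes require.

```sql
-- A minimal sketch: aes-256-ofb round trip with example (non-secret) key and IV.
WITH
    '12345678901234567890123456789012' AS key, -- 32 bytes, as aes-256-* requires
    '1234567890123456' AS iv                   -- 16 bytes
SELECT
    hex(encrypt('aes-256-ofb', 'plaintext', key, iv)) AS ciphertext_hex,
    decrypt('aes-256-ofb', encrypt('aes-256-ofb', 'plaintext', key, iv), key, iv) AS round_trip;
```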
#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.Invalid @@ -492,9 +486,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `decrypt` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -570,9 +561,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `decrypt` function is called with the following parameter values when using non-GCM modes -* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified -* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified -* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified * `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified @@ -644,7 +632,7 @@ version: 1.0 of the `aes_encrypt_mysql` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`. +mode and SHALL accept [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`. #### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.Invalid version: 1.0 @@ -659,9 +647,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `aes_encrypt_mysql` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -750,9 +735,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `aes_encrypt_mysql` function is called with the following parameter values -* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified -* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is specified -* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified * `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes * `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes * `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes @@ -810,7 +792,7 @@ version: 1.0 of the `aes_decrypt_mysql` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`. 
+mode and SHALL accept [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`. #### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.Invalid version: 1.0 @@ -825,9 +807,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `aes_decrypt_mysql` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -916,9 +895,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `aes_decrypt_mysql` function is called with the following parameter values -* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified -* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is specified -* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified * `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes * `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes * `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes @@ -954,7 +930,6 @@ version: 1.0 [GCM]: https://en.wikipedia.org/wiki/Galois/Counter_Mode [CTR]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_(CTR) [CBC]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_block_chaining_(CBC) -[ECB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Electronic_codebook_(ECB) [CFB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_feedback_(CFB) [CFB128]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_feedback_(CFB) [OFB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Output_feedback_(OFB) diff --git a/tests/testflows/aes_encryption/requirements/requirements.py b/tests/testflows/aes_encryption/requirements/requirements.py index 0fbbea7e85a..4523f2d820f 100644 --- a/tests/testflows/aes_encryption/requirements/requirements.py +++ b/tests/testflows/aes_encryption/requirements/requirements.py @@ -429,7 +429,7 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_ValuesFormat = Requirement( "of the `encrypt` function where\n" "the `key_length` SHALL specifies the length of the key and SHALL accept\n" "`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n" - "mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as\n" + "mode and SHALL accept [CBC], [CFB128], or [OFB] as well as\n" "[CTR] and [GCM] as the values. 
For example, `aes-256-ofb`.\n" "\n" ), @@ -467,9 +467,6 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Values = Requirement( "[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n" "of the `encrypt` function:\n" "\n" - "* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n" - "* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n" - "* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n" "* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n" @@ -642,9 +639,6 @@ RQ_SRS008_AES_Encrypt_Function_NonGCMMode_KeyAndInitializationVector_Length = Re "[ClickHouse] SHALL return an error when the `encrypt` function is called with the following parameter values\n" "when using non-GCM modes\n" "\n" - "* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified\n" - "* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified\n" - "* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified\n" "* `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" "* `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" "* `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" @@ -790,7 +784,7 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_ValuesFormat = Requirement( "of the `decrypt` function where\n" "the `key_length` SHALL specifies the length of the key and SHALL accept\n" "`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n" - "mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as\n" + "mode and SHALL accept [CBC], [CFB128], or [OFB] as well as\n" "[CTR] and [GCM] as the values. 
For example, `aes-256-ofb`.\n" "\n" ), @@ -828,9 +822,6 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Values = Requirement( "[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n" "of the `decrypt` function:\n" "\n" - "* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n" - "* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n" - "* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n" "* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n" @@ -1005,9 +996,6 @@ RQ_SRS008_AES_Decrypt_Function_NonGCMMode_KeyAndInitializationVector_Length = Re "[ClickHouse] SHALL return an error when the `decrypt` function is called with the following parameter values\n" "when using non-GCM modes\n" "\n" - "* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified\n" - "* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified\n" - "* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified\n" "* `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" "* `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" "* `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" @@ -1154,7 +1142,7 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_ValuesFormat = Requirement( "of the `aes_encrypt_mysql` function where\n" "the `key_length` SHALL specifies the length of the key and SHALL accept\n" "`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n" - "mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`.\n" + "mode and SHALL accept [CBC], [CFB128], or [OFB]. 
For example, `aes-256-ofb`.\n" "\n" ), link=None, @@ -1191,9 +1179,6 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Values = Requirement( "[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n" "of the `aes_encrypt_mysql` function:\n" "\n" - "* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n" - "* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n" - "* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n" "* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n" @@ -1392,9 +1377,6 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Mode_KeyAndInitializationVector_Length = Re description=( "[ClickHouse] SHALL return an error when the `aes_encrypt_mysql` function is called with the following parameter values\n" "\n" - "* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified\n" - "* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is specified\n" - "* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified\n" "* `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n" "* `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n" "* `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n" @@ -1516,7 +1498,7 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_ValuesFormat = Requirement( "of the `aes_decrypt_mysql` function where\n" "the `key_length` SHALL specifies the length of the key and SHALL accept\n" "`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n" - "mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`.\n" + "mode and SHALL accept [CBC], [CFB128], or [OFB]. 
For example, `aes-256-ofb`.\n" "\n" ), link=None, @@ -1553,9 +1535,6 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Values = Requirement( "[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n" "of the `aes_decrypt_mysql` function:\n" "\n" - "* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n" - "* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n" - "* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n" "* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n" @@ -1754,9 +1733,6 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Mode_KeyAndInitializationVector_Length = Re description=( "[ClickHouse] SHALL return an error when the `aes_decrypt_mysql` function is called with the following parameter values\n" "\n" - "* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified\n" - "* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is specified\n" - "* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified\n" "* `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n" "* `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n" "* `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n" @@ -2606,7 +2582,7 @@ version: 1.0 of the `encrypt` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as +mode and SHALL accept [CBC], [CFB128], or [OFB] as well as [CTR] and [GCM] as the values. For example, `aes-256-ofb`. 
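Since the requirement also admits [CTR] and [GCM] as `mode` values for `encrypt`, a brief GCM sketch may help; again, the key, IV, and AAD below are made-up illustrative values, and the example is not part of this change.

```sql
-- A sketch of aes-256-gcm, the mode family that accepts the optional `aad` argument;
-- the authentication tag travels inside the returned ciphertext.
WITH
    '12345678901234567890123456789012' AS key, -- 32 bytes
    '123456789012' AS iv,                      -- 12 bytes, the conventional GCM IV size
    'extra auth data' AS aad
SELECT decrypt('aes-256-gcm', encrypt('aes-256-gcm', 'plaintext', key, iv, aad), key, iv, aad) AS round_trip;
```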
#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.Invalid @@ -2622,9 +2598,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `encrypt` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -2698,9 +2671,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `encrypt` function is called with the following parameter values when using non-GCM modes -* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified -* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified -* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified * `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified @@ -2771,7 +2741,7 @@ version: 1.0 of the `decrypt` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as +mode and SHALL accept [CBC], [CFB128], or [OFB] as well as [CTR] and [GCM] as the values. For example, `aes-256-ofb`. #### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.Invalid @@ -2787,9 +2757,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `decrypt` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -2865,9 +2832,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `decrypt` function is called with the following parameter values when using non-GCM modes -* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified -* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified -* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified * `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified @@ -2939,7 +2903,7 @@ version: 1.0 of the `aes_encrypt_mysql` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. 
For example, `aes-256-ofb`. +mode and SHALL accept [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`. #### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.Invalid version: 1.0 @@ -2954,9 +2918,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `aes_encrypt_mysql` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -3045,9 +3006,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `aes_encrypt_mysql` function is called with the following parameter values -* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified -* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is specified -* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified * `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes * `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes * `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes @@ -3105,7 +3063,7 @@ version: 1.0 of the `aes_decrypt_mysql` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`. +mode and SHALL accept [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`. 
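For the MySQL-compatible functions this requirement describes, a hedged sketch of the round trip follows; the key and IV values are again made up, and these functions support only the non-authenticated modes listed above.

```sql
-- A sketch of aes_encrypt_mysql/aes_decrypt_mysql with aes-192-cbc,
-- using a 24-byte example key and a 16-byte example IV.
WITH
    '123456789012345678901234' AS key, -- 24 bytes, as aes-192-* requires
    '1234567890123456' AS iv           -- 16 bytes
SELECT aes_decrypt_mysql('aes-192-cbc', aes_encrypt_mysql('aes-192-cbc', 'plaintext', key, iv), key, iv) AS round_trip;
```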
#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.Invalid version: 1.0 @@ -3120,9 +3078,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `aes_decrypt_mysql` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -3211,9 +3166,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `aes_decrypt_mysql` function is called with the following parameter values -* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified -* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is specified -* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified * `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes * `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes * `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes @@ -3249,7 +3201,6 @@ version: 1.0 [GCM]: https://en.wikipedia.org/wiki/Galois/Counter_Mode [CTR]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_(CTR) [CBC]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_block_chaining_(CBC) -[ECB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Electronic_codebook_(ECB) [CFB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_feedback_(CFB) [CFB128]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_feedback_(CFB) [OFB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Output_feedback_(OFB) diff --git a/tests/testflows/ldap/external_user_directory/tests/common.py b/tests/testflows/ldap/external_user_directory/tests/common.py index 871be815a35..c0b6e72cd8e 100644 --- a/tests/testflows/ldap/external_user_directory/tests/common.py +++ b/tests/testflows/ldap/external_user_directory/tests/common.py @@ -84,16 +84,6 @@ def rbac_roles(*roles, node=None): node.query(f"DROP ROLE IF EXISTS {role}") -def verify_ldap_user_exists(server, username, password): - """Check that LDAP user is defined on the LDAP server.""" - with By("searching LDAP database"): - ldap_node = current().context.cluster.node(server) - r = ldap_node.command( - f"ldapwhoami -H ldap://localhost -D 'cn={user_name},ou=users,dc=company,dc=com' -w {password}" - ) - assert r.exitcode == 0, error() - - def create_ldap_external_user_directory_config_content( server=None, roles=None, **kwargs ): diff --git a/utils/db-generator/query_db_generator.cpp b/utils/db-generator/query_db_generator.cpp index dec1f6fe60f..6455bc045d6 100644 --- a/utils/db-generator/query_db_generator.cpp +++ b/utils/db-generator/query_db_generator.cpp @@ -229,7 +229,7 @@ std::map func_to_return_type = { {"torelativeweeknum", FuncRet(Type::i, "")}, {"torelativedaynum", FuncRet(Type::i, "")}, {"torelativehournum", FuncRet(Type::i, "")}, {"torelativeminutenum", FuncRet(Type::i, "")}, {"torelativesecondsnum", FuncRet(Type::i, "")}, {"datediff", FuncRet(Type::d | Type::dt, "")}, {"formatdatetime", FuncRet(Type::s, "")}, {"now", FuncRet(Type::dt | Type::d, "now()")}, 
{"today", FuncRet(Type::d | Type::dt, "today()")}, - {"yesterday", FuncRet(Type::d | Type::dt, "yesterday()")} + {"yesterday", FuncRet(Type::d | Type::dt, "yesterday()")}, {"tolastdayofmonth", FuncRet(Type::dt | Type::d, "")} }; std::set func_args_same_types = { @@ -253,7 +253,7 @@ std::map func_to_param_type = { {"tostartofinterval", Type::d | Type::dt}, {"totime", Type::d | Type::dt}, {"torelativehonthnum", Type::d | Type::dt}, {"torelativeweeknum", Type::d | Type::dt}, {"torelativedaynum", Type::d | Type::dt}, {"torelativehournum", Type::d | Type::dt}, {"torelativeminutenum", Type::d | Type::dt}, {"torelativesecondnum", Type::d | Type::dt}, {"datediff", Type::d | Type::dt}, - {"formatdatetime", Type::dt} + {"formatdatetime", Type::dt}, {"tolastdayofmonth", Type::d | Type::dt} }; diff --git a/utils/keeper-data-dumper/main.cpp b/utils/keeper-data-dumper/main.cpp index 0f86d34d334..df6083e4bd7 100644 --- a/utils/keeper-data-dumper/main.cpp +++ b/utils/keeper-data-dumper/main.cpp @@ -32,9 +32,9 @@ void dumpMachine(std::shared_ptr machine) ", numChildren: " << value.stat.numChildren << ", dataLength: " << value.stat.dataLength << "}" << std::endl; - std::cout << "\tData: " << storage.container.getValue(key).data << std::endl; + std::cout << "\tData: " << storage.container.getValue(key).getData() << std::endl; - for (const auto & child : value.children) + for (const auto & child : value.getChildren()) { if (key == "/") keys.push(key + child.toString()); diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index e87c4ea2b46..6366aef19ce 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v22.3.3.44-lts 2022-04-06 v22.3.2.2-lts 2022-03-17 v22.2.3.5-stable 2022-02-25 v22.2.2.1-stable 2022-02-17 diff --git a/utils/tests-visualizer/index.html b/utils/tests-visualizer/index.html index 00076f683fa..15ee221aa8e 100644 --- a/utils/tests-visualizer/index.html +++ b/utils/tests-visualizer/index.html @@ -144,7 +144,7 @@ let test_names_query = ` async function loadDataByQuery(query) { const response = await fetch( - "https://play-ci.clickhouse.com?user=play&add_http_cors_header=1", + "https://play.clickhouse.com?user=play&add_http_cors_header=1", { method: "POST", body: query } ) if (!response.ok) throw new Error(`Data download failed\nHTTP status ${response.status}`); diff --git a/website/blog/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018.md b/website/blog/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018.md index a8c5c2a92dd..f94d2de411c 100644 --- a/website/blog/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018.md +++ b/website/blog/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018.md @@ -7,7 +7,7 @@ tags: ['meetup', 'Paris', 'France', 'events'] Agenda of Paris ClickHouse Meetup was full of use cases, mostly from France-based companies which are actively using ClickHouse. Slides for all talks are [available on the GitHub](https://github.com/clickhouse/clickhouse-presentations/tree/master/meetup18). 
-Christophe Kalenzaga and Vianney Foucault, engineers from ContentSquare, company that provided the meetup venue:
+Christophe Kalenzaga and Vianney Foucault, engineers from Contentsquare, the company that provided the meetup venue:
 ![Christophe Kalenzaga and Vianney Foucault](https://blog-images.clickhouse.com/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018/1.jpg)

 Matthieu Jacquet from Storetail (Criteo):
diff --git a/website/js/base.js b/website/js/base.js
index 9389028f1ef..1ab8f841dbe 100644
--- a/website/js/base.js
+++ b/website/js/base.js
@@ -67,22 +67,17 @@
         });
     }

-    (function (d, w, c) {
-        (w[c] = w[c] || []).push(function() {
-            var is_single_page = $('html').attr('data-single-page') === 'true';
-
-            if (!is_single_page) {
-                $('head').each(function(_, element) {
-                    $(element).append(
-                        ''
-                    );
-                    $(element).append(
-                        ''
-                    );
-                });
-            }
+    var is_single_page = $('html').attr('data-single-page') === 'true';
+    if (!is_single_page) {
+        $('head').each(function (_, element) {
+            $(element).append(
+                ''
+            );
+            $(element).append(
+                ''
+            );
         });
-    })(document, window, "");
+    }

     var beforePrint = function() {
         var details = document.getElementsByTagName("details");