diff --git a/.clang-tidy b/.clang-tidy
index 896052915f7..de19059d09e 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -37,7 +37,6 @@ Checks: [
     '-cert-oop54-cpp',
     '-cert-oop57-cpp',
 
-    '-clang-analyzer-optin.core.EnumCastOutOfRange', # https://github.com/abseil/abseil-cpp/issues/1667
     '-clang-analyzer-optin.performance.Padding',
 
     '-clang-analyzer-unix.Malloc',
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 51a1a6e2df8..74a6f95dbb3 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -48,19 +48,16 @@ At a minimum, the following information should be added (but add more as needed)
 - [ ] Allow: Stateful tests
 - [ ] Allow: Integration Tests
 - [ ] Allow: Performance tests
+- [ ] Allow: All Builds
 - [ ] Allow: All NOT Required Checks
 - [ ] Allow: batch 1, 2 for multi-batch jobs
 - [ ] Allow: batch 3, 4, 5, 6 for multi-batch jobs
 ---
 - [ ] Exclude: Style check
 - [ ] Exclude: Fast test
-- [ ] Exclude: Integration Tests
-- [ ] Exclude: Stateless tests
-- [ ] Exclude: Stateful tests
-- [ ] Exclude: Performance tests
 - [ ] Exclude: All with ASAN
-- [ ] Exclude: All with Aarch64
 - [ ] Exclude: All with TSAN, MSAN, UBSAN, Coverage
+- [ ] Exclude: All with aarch64, release, debug
 ---
 - [ ] Do not test
 - [ ] Upload binaries for special builds
diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml
index b0380b939bb..e1980ec9ef2 100644
--- a/.github/workflows/backport_branches.yml
+++ b/.github/workflows/backport_branches.yml
@@ -70,7 +70,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Compatibility check (amd64)
+      test_name: Compatibility check (release)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
   CompatibilityCheckAarch64:
@@ -194,7 +194,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Install packages (amd64)
+      test_name: Install packages (release)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
       run_command: |
@@ -204,7 +204,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Install packages (arm64)
+      test_name: Install packages (aarch64)
       runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
       run_command: |
@@ -273,5 +273,5 @@ jobs:
       - name: Finish label
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 finish_check.py
+          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
           python3 merge_pr.py
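The `--wf-status` value passed to `finish_check.py` here (and in the workflows below) is computed with a standard GitHub Actions expression: `needs.*.result` is an object filter that expands to the result of every job listed in `needs`, `contains()` scans that list for `'failure'`, and the `&&`/`||` chain emulates a ternary operator. A minimal sketch of the idiom, with hypothetical job names:

```yaml
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - run: exit 0
  finish:
    if: ${{ !cancelled() }}   # run even when a dependency failed
    needs: [build]
    runs-on: ubuntu-latest
    steps:
      # prints 'failure' if any job in `needs` failed, otherwise 'success'
      - run: echo "${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}"
```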
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index 91dcb6a4968..88bc50a729d 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -115,25 +115,16 @@ jobs:
       data: ${{ needs.RunConfig.outputs.data }}
 
   ################################# Reports #################################
-  # Reports should be run even if Builds_1/2 failed - put them separately in wf (not in Tests_1/2)
-  Builds_1_Report:
+  # Reports should run even if Builds_1/2 fail - run them separately, not in Tests_1/2/3
+  Builds_Report:
     # run report check for failed builds to indicate the CI error
    if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse build check') }}
-    needs: [RunConfig, Builds_1]
+    needs: [RunConfig, Builds_1, Builds_2]
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: ClickHouse build check
       runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
-  Builds_2_Report:
-    # run report check for failed builds to indicate the CI error
-    if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse special build check') }}
-    needs: [RunConfig, Builds_2]
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: ClickHouse special build check
-      runner_type: style-checker-aarch64
-      data: ${{ needs.RunConfig.outputs.data }}
 
   MarkReleaseReady:
     if: ${{ !failure() && !cancelled() }}
@@ -165,7 +156,7 @@ jobs:
 
   FinishCheck:
     if: ${{ !cancelled() }}
-    needs: [RunConfig, Builds_1, Builds_2, Builds_1_Report, Builds_2_Report, Tests_1, Tests_2, Tests_3]
+    needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
       - name: Check out repository code
@@ -173,4 +164,4 @@ jobs:
       - name: Finish label
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 finish_check.py
+          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
diff --git a/.github/workflows/merge_queue.yml b/.github/workflows/merge_queue.yml
index c8b2452829b..cfa01b0e8f3 100644
--- a/.github/workflows/merge_queue.yml
+++ b/.github/workflows/merge_queue.yml
@@ -99,7 +99,7 @@ jobs:
   ################################# Stage Final #################################
   #
   FinishCheck:
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !cancelled() }}
     needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Tests_1]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
@@ -112,4 +112,4 @@ jobs:
       - name: Finish label
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 finish_check.py ${{ (contains(needs.*.result, 'failure') && github.event_name == 'merge_group') && '--pipeline-failure' || '' }}
+          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
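The `if:` change in `FinishCheck` above is what makes the new `--wf-status` reporting work: with GitHub Actions' status-check functions, `!failure() && !cancelled()` skips a job as soon as any dependency fails, while `!cancelled()` lets it run anyway so it can report the failure itself. A sketch with hypothetical dependency names:

```yaml
finish:
  # '!failure() && !cancelled()' would skip this job when a dependency fails;
  # '!cancelled()' keeps it running so it can report the red status itself,
  # forwarding the aggregated result to finish_check.py via --wf-status.
  if: ${{ !cancelled() }}
  needs: [build, tests]
```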
diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index e4deaf9f35e..70b71da8fa5 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -143,29 +143,20 @@ jobs:
       data: ${{ needs.RunConfig.outputs.data }}
 
   ################################# Reports #################################
-  # Reports should by run even if Builds_1/2 fail, so put them separately in wf (not in Tests_1/2)
-  Builds_1_Report:
+  # Reports should run even if Builds_1/2 fail - run them separately (not in Tests_1/2/3)
+  Builds_Report:
     # run report check for failed builds to indicate the CI error
-    if: ${{ !cancelled() && needs.StyleCheck.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse build check') }}
-    needs: [RunConfig, StyleCheck, Builds_1]
+    if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse build check') }}
+    needs: [RunConfig, StyleCheck, Builds_1, Builds_2]
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: ClickHouse build check
       runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
-  Builds_2_Report:
-    # run report check for failed builds to indicate the CI error
-    if: ${{ !cancelled() && needs.StyleCheck.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse special build check') }}
-    needs: [RunConfig, StyleCheck, Builds_2]
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: ClickHouse special build check
-      runner_type: style-checker-aarch64
-      data: ${{ needs.RunConfig.outputs.data }}
 
   CheckReadyForMerge:
     if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
-    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_1_Report, Builds_2_Report, Tests_1, Tests_2]
+    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
       - name: Check out repository code
@@ -181,7 +172,7 @@ jobs:
   #
   FinishCheck:
     if: ${{ !cancelled() }}
-    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_1_Report, Builds_2_Report, Tests_1, Tests_2, Tests_3]
+    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
       - name: Check out repository code
@@ -191,7 +182,7 @@ jobs:
       - name: Finish label
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 finish_check.py
+          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
 
 #############################################################################################
 ###################################### JEPSEN TESTS #########################################
diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml
index 4d45c8d8d4b..45eb7431bb4 100644
--- a/.github/workflows/release_branches.yml
+++ b/.github/workflows/release_branches.yml
@@ -65,7 +65,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Compatibility check (amd64)
+      test_name: Compatibility check (release)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
   CompatibilityCheckAarch64:
@@ -244,7 +244,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Install packages (amd64)
+      test_name: Install packages (release)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
       run_command: |
@@ -254,7 +254,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
    with:
-      test_name: Install packages (arm64)
+      test_name: Install packages (aarch64)
       runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
       run_command: |
@@ -496,4 +496,4 @@ jobs:
       - name: Finish label
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 finish_check.py
+          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
diff --git a/.github/workflows/tags_stable.yml b/.github/workflows/tags_stable.yml
index e4fc9f0b1d3..2aa7694bc41 100644
--- a/.github/workflows/tags_stable.yml
+++ b/.github/workflows/tags_stable.yml
@@ -46,9 +46,10 @@ jobs:
           ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
           ./utils/list-versions/update-docker-version.sh
           GID=$(id -g "${UID}")
-          docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 \
+          # --network=host and CI=1 are required for the S3 access from a container
+          docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
             --volume="${GITHUB_WORKSPACE}:/ClickHouse" clickhouse/style-test \
-            /ClickHouse/utils/changelog/changelog.py -v --debug-helpers \
+            /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
             --gh-user-or-token="$GITHUB_TOKEN" --jobs=5 \
            --output="/ClickHouse/docs/changelogs/${GITHUB_TAG}.md" "${GITHUB_TAG}"
           git add "./docs/changelogs/${GITHUB_TAG}.md"
diff --git a/.gitmodules b/.gitmodules
index 28696428e8c..12d865307d8 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -91,13 +91,13 @@
 [submodule "contrib/aws"]
     path = contrib/aws
     url = https://github.com/ClickHouse/aws-sdk-cpp
-[submodule "aws-c-event-stream"]
+[submodule "contrib/aws-c-event-stream"]
     path = contrib/aws-c-event-stream
     url = https://github.com/awslabs/aws-c-event-stream
-[submodule "aws-c-common"]
+[submodule "contrib/aws-c-common"]
     path = contrib/aws-c-common
     url = https://github.com/awslabs/aws-c-common.git
-[submodule "aws-checksums"]
+[submodule "contrib/aws-checksums"]
     path = contrib/aws-checksums
     url = https://github.com/awslabs/aws-checksums
 [submodule "contrib/curl"]
@@ -163,7 +163,7 @@
     url = https://github.com/xz-mirror/xz
 [submodule "contrib/abseil-cpp"]
     path = contrib/abseil-cpp
-    url = https://github.com/abseil/abseil-cpp
+    url = https://github.com/ClickHouse/abseil-cpp.git
 [submodule "contrib/dragonbox"]
     path = contrib/dragonbox
     url = https://github.com/ClickHouse/dragonbox
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 455adc24182..c4f093b1c99 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -399,7 +399,7 @@ option (ENABLE_GWP_ASAN "Enable Gwp-Asan" ON)
 # but GWP-ASan also wants to use mmap frequently,
 # and due to a large number of memory mappings,
 # it does not work together well.
-if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
+if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") OR SANITIZE)
     set(ENABLE_GWP_ASAN OFF)
 endif ()
diff --git a/base/base/EnumReflection.h b/base/base/EnumReflection.h
index 4a9de4d17a3..e4e0ef672fd 100644
--- a/base/base/EnumReflection.h
+++ b/base/base/EnumReflection.h
@@ -32,7 +32,7 @@ constexpr void static_for(F && f)
 template <is_enum T>
 struct fmt::formatter<T> : fmt::formatter<std::string_view>
 {
-    constexpr auto format(T value, auto& format_context)
+    constexpr auto format(T value, auto& format_context) const
     {
         return formatter<string_view>::format(magic_enum::enum_name(value), format_context);
     }
diff --git a/base/base/StringRef.h b/base/base/StringRef.h
index 24af84626de..fc0674b8440 100644
--- a/base/base/StringRef.h
+++ b/base/base/StringRef.h
@@ -12,6 +12,8 @@
 #include
 #include
 #include
+#include <fmt/core.h>
+#include <fmt/ostream.h>
 
 #include
 
@@ -376,3 +378,5 @@ namespace PackedZeroTraits
 
 
 std::ostream & operator<<(std::ostream & os, const StringRef & str);
+
+template<> struct fmt::formatter<StringRef> : fmt::ostream_formatter {};
diff --git a/base/base/wide_integer_to_string.h b/base/base/wide_integer_to_string.h
index c2cbe8d82e3..f703a722afa 100644
--- a/base/base/wide_integer_to_string.h
+++ b/base/base/wide_integer_to_string.h
@@ -62,7 +62,7 @@ struct fmt::formatter<wide::integer<Bits, Signed>>
     }
 
     template <typename FormatContext>
-    auto format(const wide::integer<Bits, Signed> & value, FormatContext & ctx)
+    auto format(const wide::integer<Bits, Signed> & value, FormatContext & ctx) const
     {
         return fmt::format_to(ctx.out(), "{}", to_string(value));
     }
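The three header hunks above follow from the fmtlib submodule upgrade later in this diff: newer fmt invokes user formatters through a const reference, so `format()` must be const-qualified, and a type that is only streamable via `operator<<` can opt in with `fmt::ostream_formatter`. A minimal, self-contained sketch of the const-qualified pattern, using a made-up `Point` type:

```cpp
#include <string_view>
#include <fmt/format.h>

struct Point { int x; int y; };

template <>
struct fmt::formatter<Point> : fmt::formatter<std::string_view>
{
    // 'const' is required once fmt calls formatters through a const reference.
    auto format(const Point & p, format_context & ctx) const
    {
        return fmt::format_to(ctx.out(), "({}, {})", p.x, p.y);
    }
};

// fmt::print("{}", Point{1, 2}) then renders "(1, 2)".
```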
"${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp") set(ABSL_COMMON_INCLUDE_DIRS "${ABSL_ROOT_DIR}") +# This is a minimized version of the function definition in CMake/AbseilHelpers.cmake + # # Copyright 2017 The Abseil Authors. # @@ -16,7 +18,6 @@ set(ABSL_COMMON_INCLUDE_DIRS "${ABSL_ROOT_DIR}") # See the License for the specific language governing permissions and # limitations under the License. # - function(absl_cc_library) cmake_parse_arguments(ABSL_CC_LIB "DISABLE_INSTALL;PUBLIC;TESTONLY" @@ -76,6 +77,12 @@ function(absl_cc_library) add_library(absl::${ABSL_CC_LIB_NAME} ALIAS ${_NAME}) endfunction() +# The following definitions are an amalgamation of the CMakeLists.txt files in absl/*/ +# To refresh them when upgrading to a new version: +# - copy them over from upstream +# - remove calls of 'absl_cc_test' +# - remove calls of `absl_cc_library` that contain `TESTONLY` +# - append '${DIR}' to the file definitions set(DIR ${ABSL_ROOT_DIR}/absl/algorithm) @@ -102,12 +109,12 @@ absl_cc_library( absl::algorithm absl::core_headers absl::meta + absl::nullability PUBLIC ) set(DIR ${ABSL_ROOT_DIR}/absl/base) -# Internal-only target, do not depend on directly. absl_cc_library( NAME atomic_hook @@ -146,6 +153,18 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} ) +absl_cc_library( + NAME + no_destructor + HDRS + "${DIR}/no_destructor.h" + DEPS + absl::config + absl::nullability + COPTS + ${ABSL_DEFAULT_COPTS} +) + absl_cc_library( NAME nullability @@ -305,6 +324,8 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} LINKOPTS ${ABSL_DEFAULT_LINKOPTS} + $<$:-lrt> + $<$:-ladvapi32> DEPS absl::atomic_hook absl::base_internal @@ -312,6 +333,7 @@ absl_cc_library( absl::core_headers absl::dynamic_annotations absl::log_severity + absl::nullability absl::raw_logging_internal absl::spinlock_wait absl::type_traits @@ -357,6 +379,7 @@ absl_cc_library( absl::base absl::config absl::core_headers + absl::nullability PUBLIC ) @@ -467,10 +490,11 @@ absl_cc_library( LINKOPTS ${ABSL_DEFAULT_LINKOPTS} DEPS - absl::container_common absl::common_policy_traits absl::compare absl::compressed_tuple + absl::config + absl::container_common absl::container_memory absl::cord absl::core_headers @@ -480,7 +504,6 @@ absl_cc_library( absl::strings absl::throw_delegate absl::type_traits - absl::utility ) # Internal-only target, do not depend on directly. @@ -523,7 +546,9 @@ absl_cc_library( COPTS ${ABSL_DEFAULT_COPTS} DEPS + absl::base_internal absl::compressed_tuple + absl::config absl::core_headers absl::memory absl::span @@ -548,18 +573,6 @@ absl_cc_library( PUBLIC ) -# Internal-only target, do not depend on directly. 
@@ -548,18 +573,6 @@ absl_cc_library(
     PUBLIC
 )
 
-# Internal-only target, do not depend on directly.
-absl_cc_library(
-  NAME
-    counting_allocator
-  HDRS
-    "${DIR}/internal/counting_allocator.h"
-  COPTS
-    ${ABSL_DEFAULT_COPTS}
-  DEPS
-    absl::config
-)
-
 absl_cc_library(
   NAME
     flat_hash_map
@@ -570,7 +583,7 @@ absl_cc_library(
   DEPS
     absl::container_memory
     absl::core_headers
-    absl::hash_function_defaults
+    absl::hash_container_defaults
     absl::raw_hash_map
     absl::algorithm_container
     absl::memory
@@ -586,7 +599,7 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::container_memory
-    absl::hash_function_defaults
+    absl::hash_container_defaults
     absl::raw_hash_set
     absl::algorithm_container
     absl::core_headers
@@ -604,7 +617,7 @@ absl_cc_library(
   DEPS
     absl::container_memory
     absl::core_headers
-    absl::hash_function_defaults
+    absl::hash_container_defaults
     absl::node_slot_policy
     absl::raw_hash_map
     absl::algorithm_container
@@ -620,8 +633,9 @@ absl_cc_library(
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::container_memory
     absl::core_headers
-    absl::hash_function_defaults
+    absl::hash_container_defaults
     absl::node_slot_policy
     absl::raw_hash_set
     absl::algorithm_container
@@ -629,6 +643,19 @@ absl_cc_library(
     PUBLIC
 )
 
+absl_cc_library(
+  NAME
+    hash_container_defaults
+  HDRS
+    "${DIR}/hash_container_defaults.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    absl::hash_function_defaults
+  PUBLIC
+)
+
 # Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
@@ -655,9 +682,11 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::config
+    absl::container_common
     absl::cord
     absl::hash
     absl::strings
+    absl::type_traits
     PUBLIC
 )
 
@@ -703,6 +732,7 @@ absl_cc_library(
     absl::base
     absl::config
     absl::exponential_biased
+    absl::no_destructor
     absl::raw_logging_internal
     absl::sample_recorder
     absl::synchronization
@@ -756,7 +786,9 @@ absl_cc_library(
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::config
     absl::container_memory
+    absl::core_headers
     absl::raw_hash_set
     absl::throw_delegate
     PUBLIC
@@ -817,6 +849,7 @@ absl_cc_library(
   DEPS
     absl::config
     absl::core_headers
+    absl::debugging_internal
     absl::meta
     absl::strings
     absl::span
@@ -931,6 +964,7 @@ absl_cc_library(
     absl::crc32c
     absl::config
     absl::strings
+    absl::no_destructor
 )
 
 set(DIR ${ABSL_ROOT_DIR}/absl/debugging)
@@ -954,6 +988,8 @@ absl_cc_library(
     "${DIR}/stacktrace.cc"
   COPTS
     ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    $<$<BOOL:${EXECINFO_LIBRARY}>:${EXECINFO_LIBRARY}>
   DEPS
     absl::debugging_internal
     absl::config
@@ -980,6 +1016,7 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
+    $<$<BOOL:${MINGW}>:-ldbghelp>
   DEPS
     absl::debugging_internal
     absl::demangle_internal
@@ -1058,8 +1095,10 @@ absl_cc_library(
     demangle_internal
   HDRS
     "${DIR}/internal/demangle.h"
+    "${DIR}/internal/demangle_rust.h"
   SRCS
     "${DIR}/internal/demangle.cc"
+    "${DIR}/internal/demangle_rust.cc"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
@@ -1252,6 +1291,7 @@ absl_cc_library(
     absl::strings
     absl::synchronization
     absl::flat_hash_map
+    absl::no_destructor
 )
 
 # Internal-only target, do not depend on directly.
@@ -1283,12 +1323,9 @@ absl_cc_library(
 absl_cc_library(
   NAME
     flags
-  SRCS
-    "${DIR}/flag.cc"
  HDRS
     "${DIR}/declare.h"
     "${DIR}/flag.h"
-    "${DIR}/internal/flag_msvc.inc"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   LINKOPTS
@@ -1299,7 +1336,6 @@ absl_cc_library(
     absl::flags_config
     absl::flags_internal
     absl::flags_reflection
-    absl::base
    absl::core_headers
     absl::strings
 )
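Several targets in this file gain a dependency on the new `absl::no_destructor` target added above. The header it carries provides `absl::NoDestructor<T>`, a wrapper for function-local statics that is constructed on first use and intentionally never destroyed, sidestepping shutdown-order problems. A minimal usage sketch:

```cpp
#include <string>
#include "absl/base/no_destructor.h"

const std::string & DefaultUserName()
{
    // Constructed on first call, never destructed at process exit.
    static const absl::NoDestructor<std::string> name("default");
    return *name;
}
```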
@@ -1379,6 +1415,9 @@ absl_cc_library(
     absl::synchronization
 )
 
+############################################################################
+# Unit tests in alphabetical order.
+
 set(DIR ${ABSL_ROOT_DIR}/absl/functional)
 
 absl_cc_library(
@@ -1431,6 +1470,18 @@ absl_cc_library(
     PUBLIC
 )
 
+absl_cc_library(
+  NAME
+    overload
+  HDRS
+    "${DIR}/overload.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::meta
+  PUBLIC
+)
+
 set(DIR ${ABSL_ROOT_DIR}/absl/hash)
 
 absl_cc_library(
@@ -1640,6 +1691,7 @@ absl_cc_library(
     absl::log_internal_conditions
     absl::log_internal_message
     absl::log_internal_strip
+    absl::absl_vlog_is_on
 )
 
 absl_cc_library(
@@ -1721,6 +1773,7 @@ absl_cc_library(
     absl::log_entry
     absl::log_severity
     absl::log_sink
+    absl::no_destructor
     absl::raw_logging_internal
     absl::synchronization
     absl::span
@@ -1771,6 +1824,7 @@ absl_cc_library(
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::core_headers
     absl::log_internal_message
     absl::log_internal_nullstream
     absl::log_severity
@@ -1876,6 +1930,11 @@ absl_cc_library(
     PUBLIC
 )
 
+# Warning: Many linkers will strip the contents of this library because its
+# symbols are only used in a global constructor. A workaround is for clients
+# to link this using $<LINK_LIBRARY:WHOLE_ARCHIVE,absl::log_flags> instead of
+# the plain absl::log_flags.
+# TODO(b/320467376): Implement the equivalent of Bazel's alwayslink=True.
 absl_cc_library(
   NAME
     log_flags
@@ -1897,6 +1956,7 @@ absl_cc_library(
     absl::flags
     absl::flags_marshalling
     absl::strings
+    absl::vlog_config_internal
     PUBLIC
 )
 
@@ -1919,6 +1979,7 @@ absl_cc_library(
     absl::log_severity
     absl::raw_logging_internal
     absl::strings
+    absl::vlog_config_internal
 )
 
 absl_cc_library(
@@ -1952,6 +2013,7 @@ absl_cc_library(
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::log_internal_log_impl
+    absl::vlog_is_on
     PUBLIC
 )
 
@@ -2064,21 +2126,75 @@ absl_cc_library(
 )
 
 absl_cc_library(
-  NAME
-    log_internal_fnmatch
-  SRCS
-    "${DIR}/internal/fnmatch.cc"
-  HDRS
-    "${DIR}/internal/fnmatch.h"
-  COPTS
-    ${ABSL_DEFAULT_COPTS}
-  LINKOPTS
-    ${ABSL_DEFAULT_LINKOPTS}
-  DEPS
-    absl::config
-    absl::strings
+  NAME
+    vlog_config_internal
+  SRCS
+    "${DIR}/internal/vlog_config.cc"
+  HDRS
+    "${DIR}/internal/vlog_config.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::core_headers
+    absl::log_internal_fnmatch
+    absl::memory
+    absl::no_destructor
+    absl::strings
+    absl::synchronization
+    absl::optional
 )
 
+absl_cc_library(
+  NAME
+    absl_vlog_is_on
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  HDRS
+    "${DIR}/absl_vlog_is_on.h"
+  DEPS
+    absl::vlog_config_internal
+    absl::config
+    absl::core_headers
+    absl::strings
+)
+
+absl_cc_library(
+  NAME
+    vlog_is_on
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  HDRS
+    "${DIR}/vlog_is_on.h"
+  DEPS
+    absl::absl_vlog_is_on
+)
+
+absl_cc_library(
+  NAME
+    log_internal_fnmatch
+  SRCS
+    "${DIR}/internal/fnmatch.cc"
+  HDRS
+    "${DIR}/internal/fnmatch.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::strings
+)
+
+# Test targets
+
 set(DIR ${ABSL_ROOT_DIR}/absl/memory)
 
 absl_cc_library(
@@ -2147,6 +2263,7 @@ absl_cc_library(
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::compare
     absl::config
     absl::core_headers
     absl::bits
@@ -2176,6 +2293,8 @@ absl_cc_library(
     PUBLIC
 )
 
+set(DIR ${ABSL_ROOT_DIR}/absl/profiling)
+
 absl_cc_library(
   NAME
     sample_recorder
@@ -2188,8 +2307,6 @@ absl_cc_library(
     absl::synchronization
 )
 
-set(DIR ${ABSL_ROOT_DIR}/absl/profiling)
-
 absl_cc_library(
   NAME
     exponential_biased
@@ -2265,6 +2382,7 @@ absl_cc_library(
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::config
     absl::fast_type_id
     absl::optional
 )
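The warning added above `log_flags` refers to CMake's `LINK_LIBRARY` generator expression (available since CMake 3.24). A sketch of the suggested workaround, with a hypothetical consumer target:

```cmake
add_executable(log_flags_demo main.cpp)
# Force the whole archive to be linked so the flag-registering global
# constructors in absl::log_flags are not stripped by the linker.
target_link_libraries(log_flags_demo PRIVATE
    "$<LINK_LIBRARY:WHOLE_ARCHIVE,absl::log_flags>")
```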
@@ -2336,11 +2454,13 @@ absl_cc_library(
   DEPS
     absl::config
     absl::inlined_vector
+    absl::nullability
     absl::random_internal_pool_urbg
     absl::random_internal_salted_seed_seq
     absl::random_internal_seed_material
     absl::random_seed_gen_exception
     absl::span
+    absl::string_view
 )
 
 # Internal-only target, do not depend on directly.
@@ -2399,6 +2519,7 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
+    $<$<BOOL:${MINGW}>:-lbcrypt>
  DEPS
     absl::core_headers
     absl::optional
@@ -2658,6 +2779,29 @@ absl_cc_library(
     absl::config
 )
 
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    random_internal_distribution_test_util
+  SRCS
+    "${DIR}/internal/chi_square.cc"
+    "${DIR}/internal/distribution_test_util.cc"
+  HDRS
+    "${DIR}/internal/chi_square.h"
+    "${DIR}/internal/distribution_test_util.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::raw_logging_internal
+    absl::strings
+    absl::str_format
+    absl::span
+)
+
 # Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
@@ -2699,6 +2843,8 @@ absl_cc_library(
     absl::function_ref
     absl::inlined_vector
     absl::memory
+    absl::no_destructor
+    absl::nullability
     absl::optional
     absl::raw_logging_internal
     absl::span
@@ -2724,8 +2870,11 @@ absl_cc_library(
     absl::base
     absl::config
     absl::core_headers
+    absl::has_ostream_operator
+    absl::nullability
     absl::raw_logging_internal
     absl::status
+    absl::str_format
     absl::strings
     absl::type_traits
     absl::utility
@@ -2748,6 +2897,7 @@ absl_cc_library(
     absl::base
     absl::config
     absl::core_headers
+    absl::nullability
     absl::throw_delegate
     PUBLIC
 )
@@ -2762,6 +2912,7 @@ absl_cc_library(
     "${DIR}/has_absl_stringify.h"
     "${DIR}/internal/damerau_levenshtein_distance.h"
     "${DIR}/internal/string_constant.h"
+    "${DIR}/internal/has_absl_stringify.h"
     "${DIR}/match.h"
     "${DIR}/numbers.h"
     "${DIR}/str_cat.h"
@@ -2805,6 +2956,7 @@ absl_cc_library(
     absl::endian
     absl::int128
     absl::memory
+    absl::nullability
     absl::raw_logging_internal
     absl::throw_delegate
     absl::type_traits
@@ -2824,6 +2976,18 @@ absl_cc_library(
     PUBLIC
 )
 
+absl_cc_library(
+  NAME
+    has_ostream_operator
+  HDRS
+    "${DIR}/has_ostream_operator.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+  PUBLIC
+)
+
 # Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
@@ -2855,7 +3019,12 @@ absl_cc_library(
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::config
+    absl::core_headers
+    absl::nullability
+    absl::span
     absl::str_format_internal
+    absl::string_view
     PUBLIC
 )
 
@@ -2886,6 +3055,7 @@ absl_cc_library(
     absl::strings
     absl::config
     absl::core_headers
+    absl::fixed_array
     absl::inlined_vector
     absl::numeric_representation
     absl::type_traits
@@ -2989,6 +3159,7 @@ absl_cc_library(
   DEPS
     absl::base
     absl::config
+    absl::no_destructor
     absl::raw_logging_internal
     absl::synchronization
 )
@@ -3079,6 +3250,7 @@ absl_cc_library(
     absl::endian
     absl::function_ref
     absl::inlined_vector
+    absl::nullability
     absl::optional
     absl::raw_logging_internal
     absl::span
@@ -3246,6 +3418,8 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   DEPS
     Threads::Threads
+    # TODO(#1495): Use $<LINK_LIBRARY:FRAMEWORK,CoreFoundation> once our
+    # minimum CMake version >= 3.24
     $<$<PLATFORM_ID:Darwin>:-Wl,-framework,CoreFoundation>
 )
 
@@ -3286,8 +3460,8 @@ absl_cc_library(
   NAME
     bad_any_cast_impl
   SRCS
-   "${DIR}/bad_any_cast.h"
-   "${DIR}/bad_any_cast.cc"
+    "${DIR}/bad_any_cast.h"
+    "${DIR}/bad_any_cast.cc"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
@@ -3307,6 +3481,7 @@ absl_cc_library(
   DEPS
     absl::algorithm
     absl::core_headers
+    absl::nullability
     absl::throw_delegate
     absl::type_traits
     PUBLIC
@@ -3327,6 +3502,7 @@ absl_cc_library(
     absl::config
     absl::core_headers
     absl::memory
+    absl::nullability
     absl::type_traits
     absl::utility
     PUBLIC
@@ -3389,6 +3565,7 @@ absl_cc_library(
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::config
     absl::core_headers
     absl::type_traits
     PUBLIC
 )
diff --git a/contrib/aws b/contrib/aws
index deeaa9e7c5f..1c2946bfcb7 160000
--- a/contrib/aws
+++ b/contrib/aws
@@ -1 +1 @@
-Subproject commit deeaa9e7c5fe690e3dacc4005d7ecfa7a66a32bb
+Subproject commit 1c2946bfcb7f1e3ae0a858de0b59d4f1a7b4ccaf
diff --git a/contrib/cld2 b/contrib/cld2
index bc6d493a2f6..217ba8b8805 160000
--- a/contrib/cld2
+++ b/contrib/cld2
@@ -1 +1 @@
-Subproject commit bc6d493a2f64ed1fc1c4c4b4294a542a04e04217
+Subproject commit 217ba8b8805b41557faadaa47bb6e99f2242eea3
diff --git a/contrib/fmtlib b/contrib/fmtlib
index b6f4ceaed0a..a33701196ad 160000
--- a/contrib/fmtlib
+++ b/contrib/fmtlib
@@ -1 +1 @@
-Subproject commit b6f4ceaed0a0a24ccf575fab6c56dd50ccf6f1a9
+Subproject commit a33701196adfad74917046096bf5a2aa0ab0bb50
diff --git a/contrib/fmtlib-cmake/CMakeLists.txt b/contrib/fmtlib-cmake/CMakeLists.txt
index fe399ddc6e1..6625e411295 100644
--- a/contrib/fmtlib-cmake/CMakeLists.txt
+++ b/contrib/fmtlib-cmake/CMakeLists.txt
@@ -13,7 +13,6 @@ set (SRCS
     ${FMT_SOURCE_DIR}/include/fmt/core.h
     ${FMT_SOURCE_DIR}/include/fmt/format.h
     ${FMT_SOURCE_DIR}/include/fmt/format-inl.h
-    ${FMT_SOURCE_DIR}/include/fmt/locale.h
     ${FMT_SOURCE_DIR}/include/fmt/os.h
     ${FMT_SOURCE_DIR}/include/fmt/ostream.h
     ${FMT_SOURCE_DIR}/include/fmt/printf.h
diff --git a/contrib/googletest b/contrib/googletest
index e47544ad31c..a7f443b80b1 160000
--- a/contrib/googletest
+++ b/contrib/googletest
@@ -1 +1 @@
-Subproject commit e47544ad31cb3ceecd04cc13e8fe556f8df9fe0b
+Subproject commit a7f443b80b105f940225332ed3c31f2790092f47
diff --git a/contrib/openssl b/contrib/openssl
index f7b8721dfc6..67c0b63e578 160000
--- a/contrib/openssl
+++ b/contrib/openssl
@@ -1 +1 @@
-Subproject commit f7b8721dfc66abb147f24ca07b9c9d1d64f40f71
+Subproject commit 67c0b63e578e4c751ac9edf490f5a96124fff8dc
diff --git a/docker/images.json b/docker/images.json
index 7439517379b..716b76ee217 100644
--- a/docker/images.json
+++ b/docker/images.json
@@ -41,8 +41,7 @@
     "docker/test/stateless": {
         "name": "clickhouse/stateless-test",
         "dependent": [
-
"docker/test/stateful", - "docker/test/unit" + "docker/test/stateful" ] }, "docker/test/stateful": { @@ -122,15 +121,16 @@ "docker/test/base": { "name": "clickhouse/test-base", "dependent": [ + "docker/test/clickbench", "docker/test/fuzzer", - "docker/test/libfuzzer", "docker/test/integration/base", "docker/test/keeper-jepsen", + "docker/test/libfuzzer", "docker/test/server-jepsen", "docker/test/sqllogic", "docker/test/sqltest", - "docker/test/clickbench", - "docker/test/stateless" + "docker/test/stateless", + "docker/test/unit" ] }, "docker/test/integration/kerberized_hadoop": { diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index b3271d94184..24f38740ff5 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.5.1.1763" +ARG VERSION="24.5.3.5" ARG PACKAGES="clickhouse-keeper" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 3f3b880c8f3..c71319a2a7e 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.5.1.1763" +ARG VERSION="24.5.3.5" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 5fd22ee9b51..ed8cf3d657d 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="24.5.1.1763" +ARG VERSION="24.5.3.5" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" #docker-official-library:off diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index b8f967ed9c2..6191aeaf304 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -208,6 +208,7 @@ handle SIGPIPE nostop noprint pass handle SIGTERM nostop noprint pass handle SIGUSR1 nostop noprint pass handle SIGUSR2 nostop noprint pass +handle SIGSEGV nostop pass handle SIG$RTMIN nostop noprint pass info signals continue diff --git a/docker/test/stateless/attach_gdb.lib b/docker/test/stateless/attach_gdb.lib index d288288bb17..eb54f920b98 100644 --- a/docker/test/stateless/attach_gdb.lib +++ b/docker/test/stateless/attach_gdb.lib @@ -20,6 +20,7 @@ handle SIGPIPE nostop noprint pass handle SIGTERM nostop noprint pass handle SIGUSR1 nostop noprint pass handle SIGUSR2 nostop noprint pass +handle SIGSEGV nostop pass handle SIG$RTMIN nostop noprint pass info signals continue diff --git a/docker/test/stateless/stress_tests.lib b/docker/test/stateless/stress_tests.lib index 3b6ad244c82..c069ccbdd8d 100644 --- a/docker/test/stateless/stress_tests.lib +++ b/docker/test/stateless/stress_tests.lib @@ -89,10 +89,6 @@ function configure() # since we run clickhouse from root sudo chown root: /var/lib/clickhouse - # Set more frequent update period of asynchronous metrics to more frequently update information about real memory usage (less chance of 
OOM). - echo "1" \ - > /etc/clickhouse-server/config.d/asynchronous_metrics_update_period_s.xml - local total_mem total_mem=$(awk '/MemTotal/ { print $(NF-1) }' /proc/meminfo) # KiB total_mem=$(( total_mem*1024 )) # bytes diff --git a/docker/test/style/Dockerfile b/docker/test/style/Dockerfile index 91768c8328d..6ad03852b66 100644 --- a/docker/test/style/Dockerfile +++ b/docker/test/style/Dockerfile @@ -10,14 +10,15 @@ RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \ aspell \ curl \ - git \ - gh \ file \ + gh \ + git \ libxml2-utils \ + locales \ moreutils \ python3-pip \ yamllint \ - locales \ + zstd \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* @@ -33,6 +34,7 @@ RUN pip3 install \ flake8==4.0.1 \ requests \ thefuzz \ + tqdm==4.66.4 \ types-requests \ unidiff \ && rm -rf /root/.cache/pip diff --git a/docker/test/unit/Dockerfile b/docker/test/unit/Dockerfile index cf5ba1eec7f..af44dc930b2 100644 --- a/docker/test/unit/Dockerfile +++ b/docker/test/unit/Dockerfile @@ -1,9 +1,7 @@ # rebuild in #33610 # docker build -t clickhouse/unit-test . ARG FROM_TAG=latest -FROM clickhouse/stateless-test:$FROM_TAG - -RUN apt-get install gdb +FROM clickhouse/test-base:$FROM_TAG COPY run.sh / CMD ["/bin/bash", "/run.sh"] diff --git a/docs/changelogs/v23.8.15.35-lts.md b/docs/changelogs/v23.8.15.35-lts.md new file mode 100644 index 00000000000..bab5c507fe8 --- /dev/null +++ b/docs/changelogs/v23.8.15.35-lts.md @@ -0,0 +1,40 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v23.8.15.35-lts (060ff8e813a) FIXME as compared to v23.8.14.6-lts (967e51c1d6b) + +#### Build/Testing/Packaging Improvement +* Backported in [#63621](https://github.com/ClickHouse/ClickHouse/issues/63621): The Dockerfile is reviewed by the docker official library in https://github.com/docker-library/official-images/pull/15846. [#63400](https://github.com/ClickHouse/ClickHouse/pull/63400) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#65153](https://github.com/ClickHouse/ClickHouse/issues/65153): Decrease the `unit-test` image a few times. [#65102](https://github.com/ClickHouse/ClickHouse/pull/65102) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Backported in [#64422](https://github.com/ClickHouse/ClickHouse/issues/64422): Fixes [#59989](https://github.com/ClickHouse/ClickHouse/issues/59989): runs init scripts when force-enabled or when no database exists, rather than the inverse. [#59991](https://github.com/ClickHouse/ClickHouse/pull/59991) ([jktng](https://github.com/jktng)). +* Backported in [#64016](https://github.com/ClickHouse/ClickHouse/issues/64016): Fix "Invalid storage definition in metadata file" for parameterized views. [#60708](https://github.com/ClickHouse/ClickHouse/pull/60708) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#63456](https://github.com/ClickHouse/ClickHouse/issues/63456): Fix the issue where the function `addDays` (and similar functions) reports an error when the first parameter is `DateTime64`. [#61561](https://github.com/ClickHouse/ClickHouse/pull/61561) ([Shuai li](https://github.com/loneylee)). +* Backported in [#63289](https://github.com/ClickHouse/ClickHouse/issues/63289): Fix crash with untuple and unresolved lambda. 
[#63131](https://github.com/ClickHouse/ClickHouse/pull/63131) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#63512](https://github.com/ClickHouse/ClickHouse/issues/63512): Fix `X-ClickHouse-Timezone` header returning wrong timezone when using `session_timezone` as query level setting. [#63377](https://github.com/ClickHouse/ClickHouse/pull/63377) ([Andrey Zvonov](https://github.com/zvonand)).
+* Backported in [#63902](https://github.com/ClickHouse/ClickHouse/issues/63902): `query_plan_remove_redundant_distinct` can break queries with WINDOW FUNCTIONS (when `allow_experimental_analyzer` is on). Fixes [#62820](https://github.com/ClickHouse/ClickHouse/issues/62820). [#63776](https://github.com/ClickHouse/ClickHouse/pull/63776) ([Igor Nikonov](https://github.com/devcrafter)).
+* Backported in [#64104](https://github.com/ClickHouse/ClickHouse/issues/64104): Deserialize untrusted binary inputs in a safer way. [#64024](https://github.com/ClickHouse/ClickHouse/pull/64024) ([Robert Schulze](https://github.com/rschu1ze)).
+* Backported in [#64265](https://github.com/ClickHouse/ClickHouse/issues/64265): Prevent LOGICAL_ERROR on CREATE TABLE as MaterializedView. [#64174](https://github.com/ClickHouse/ClickHouse/pull/64174) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#64867](https://github.com/ClickHouse/ClickHouse/issues/64867): Fixed possible incorrect memory tracking in several kinds of queries: queries that read any data from S3, queries via http protocol, asynchronous inserts. [#64844](https://github.com/ClickHouse/ClickHouse/pull/64844) ([Anton Popov](https://github.com/CurtizJ)).
+
+#### NO CL CATEGORY
+
+* Backported in [#63704](https://github.com/ClickHouse/ClickHouse/issues/63704):. [#63415](https://github.com/ClickHouse/ClickHouse/pull/63415) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+
+#### NO CL ENTRY
+
+* NO CL ENTRY: 'Installation test has wrong check_state'. [#63994](https://github.com/ClickHouse/ClickHouse/pull/63994) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Backported in [#63343](https://github.com/ClickHouse/ClickHouse/issues/63343): The commit URL has a different pattern. [#63331](https://github.com/ClickHouse/ClickHouse/pull/63331) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Backported in [#63965](https://github.com/ClickHouse/ClickHouse/issues/63965): fix 02124_insert_deduplication_token_multiple_blocks. [#63950](https://github.com/ClickHouse/ClickHouse/pull/63950) ([Han Fei](https://github.com/hanfei1991)).
+* Backported in [#64043](https://github.com/ClickHouse/ClickHouse/issues/64043): Do not create new release in release branch automatically. [#64039](https://github.com/ClickHouse/ClickHouse/pull/64039) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Pin requests to fix the integration tests. [#65183](https://github.com/ClickHouse/ClickHouse/pull/65183) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
diff --git a/docs/changelogs/v24.1.6.52-stable.md b/docs/changelogs/v24.1.6.52-stable.md
new file mode 100644
index 00000000000..341561e9a64
--- /dev/null
+++ b/docs/changelogs/v24.1.6.52-stable.md
@@ -0,0 +1,45 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.1.6.52-stable (fa09f677bc9) FIXME as compared to v24.1.5.6-stable (7f67181ff31)
+
+#### Improvement
+* Backported in [#60292](https://github.com/ClickHouse/ClickHouse/issues/60292): Copy S3 file GCP fallback to buffer copy in case GCP returned `Internal Error` with `GATEWAY_TIMEOUT` HTTP error code. [#60164](https://github.com/ClickHouse/ClickHouse/pull/60164) ([Maksim Kita](https://github.com/kitaisreal)).
+* Backported in [#60832](https://github.com/ClickHouse/ClickHouse/issues/60832): Update tzdata to 2024a. [#60768](https://github.com/ClickHouse/ClickHouse/pull/60768) ([Raúl Marín](https://github.com/Algunenano)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Backported in [#60413](https://github.com/ClickHouse/ClickHouse/issues/60413): Fix segmentation fault in KQL parser when the input query exceeds the `max_query_size`. Also re-enable the KQL dialect. Fixes [#59036](https://github.com/ClickHouse/ClickHouse/issues/59036) and [#59037](https://github.com/ClickHouse/ClickHouse/issues/59037). [#59626](https://github.com/ClickHouse/ClickHouse/pull/59626) ([Yong Wang](https://github.com/kashwy)).
+* Backported in [#60074](https://github.com/ClickHouse/ClickHouse/issues/60074): Fix error `Read beyond last offset` for `AsynchronousBoundedReadBuffer`. [#59630](https://github.com/ClickHouse/ClickHouse/pull/59630) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Backported in [#60299](https://github.com/ClickHouse/ClickHouse/issues/60299): Fix having neither acked nor nacked messages. If an exception happens during the read-write phase, messages will be nacked. [#59775](https://github.com/ClickHouse/ClickHouse/pull/59775) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Backported in [#60066](https://github.com/ClickHouse/ClickHouse/issues/60066): Fix optimize_uniq_to_count removing the column alias. [#60026](https://github.com/ClickHouse/ClickHouse/pull/60026) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#60638](https://github.com/ClickHouse/ClickHouse/issues/60638): Fixed a bug in parallel optimization for queries with `FINAL`, which could give an incorrect result in rare cases. [#60041](https://github.com/ClickHouse/ClickHouse/pull/60041) ([Maksim Kita](https://github.com/kitaisreal)).
+* Backported in [#60177](https://github.com/ClickHouse/ClickHouse/issues/60177): Fix cosineDistance crash with Nullable. [#60150](https://github.com/ClickHouse/ClickHouse/pull/60150) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#60279](https://github.com/ClickHouse/ClickHouse/issues/60279): Hide sensitive info for `S3Queue` table engine. [#60233](https://github.com/ClickHouse/ClickHouse/pull/60233) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Backported in [#61000](https://github.com/ClickHouse/ClickHouse/issues/61000): Reduce the number of read rows from `system.numbers`. Fixes [#59418](https://github.com/ClickHouse/ClickHouse/issues/59418). [#60546](https://github.com/ClickHouse/ClickHouse/pull/60546) ([JackyWoo](https://github.com/JackyWoo)).
+* Backported in [#60791](https://github.com/ClickHouse/ClickHouse/issues/60791): Fix buffer overflow that can happen if the attacker asks the HTTP server to decompress data with a composition of codecs and size triggering numeric overflow. Fix buffer overflow that can happen inside codec NONE on wrong input data. This was submitted by TIANGONG research team through our [Bug Bounty program](https://github.com/ClickHouse/ClickHouse/issues/38986). [#60731](https://github.com/ClickHouse/ClickHouse/pull/60731) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#60783](https://github.com/ClickHouse/ClickHouse/issues/60783): Functions for SQL/JSON were able to read uninitialized memory. This closes [#60017](https://github.com/ClickHouse/ClickHouse/issues/60017). Found by Fuzzer. [#60738](https://github.com/ClickHouse/ClickHouse/pull/60738) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#60803](https://github.com/ClickHouse/ClickHouse/issues/60803): Do not set aws custom metadata `x-amz-meta-*` headers on UploadPart & CompleteMultipartUpload calls. [#60748](https://github.com/ClickHouse/ClickHouse/pull/60748) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)). +* Backported in [#60820](https://github.com/ClickHouse/ClickHouse/issues/60820): Fix crash in arrayEnumerateRanked. [#60764](https://github.com/ClickHouse/ClickHouse/pull/60764) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#60841](https://github.com/ClickHouse/ClickHouse/issues/60841): Fix crash when using input() in INSERT SELECT JOIN. Closes [#60035](https://github.com/ClickHouse/ClickHouse/issues/60035). [#60765](https://github.com/ClickHouse/ClickHouse/pull/60765) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#60904](https://github.com/ClickHouse/ClickHouse/issues/60904): Avoid segfault if too many keys are skipped when reading from S3. [#60849](https://github.com/ClickHouse/ClickHouse/pull/60849) ([Antonio Andelic](https://github.com/antonio2368)). + +#### NO CL CATEGORY + +* Backported in [#60186](https://github.com/ClickHouse/ClickHouse/issues/60186):. [#60181](https://github.com/ClickHouse/ClickHouse/pull/60181) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#60333](https://github.com/ClickHouse/ClickHouse/issues/60333): CI: Fix job failures due to jepsen artifacts. [#59890](https://github.com/ClickHouse/ClickHouse/pull/59890) ([Max K.](https://github.com/maxknv)). +* Backported in [#60034](https://github.com/ClickHouse/ClickHouse/issues/60034): Fix mark release ready. [#59994](https://github.com/ClickHouse/ClickHouse/pull/59994) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#60326](https://github.com/ClickHouse/ClickHouse/issues/60326): Ability to detect undead ZooKeeper sessions. [#60044](https://github.com/ClickHouse/ClickHouse/pull/60044) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#60363](https://github.com/ClickHouse/ClickHouse/issues/60363): CI: hot fix for gh statuses. [#60201](https://github.com/ClickHouse/ClickHouse/pull/60201) ([Max K.](https://github.com/maxknv)). +* Backported in [#60648](https://github.com/ClickHouse/ClickHouse/issues/60648): Detect io_uring in tests. [#60373](https://github.com/ClickHouse/ClickHouse/pull/60373) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#60569](https://github.com/ClickHouse/ClickHouse/issues/60569): Remove broken test while we fix it. 
[#60547](https://github.com/ClickHouse/ClickHouse/pull/60547) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#60756](https://github.com/ClickHouse/ClickHouse/issues/60756): Update shellcheck. [#60553](https://github.com/ClickHouse/ClickHouse/pull/60553) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Backported in [#60584](https://github.com/ClickHouse/ClickHouse/issues/60584): CI: fix docker build job name. [#60554](https://github.com/ClickHouse/ClickHouse/pull/60554) ([Max K.](https://github.com/maxknv)).
+
diff --git a/docs/changelogs/v24.3.4.147-lts.md b/docs/changelogs/v24.3.4.147-lts.md
new file mode 100644
index 00000000000..7d77fb29977
--- /dev/null
+++ b/docs/changelogs/v24.3.4.147-lts.md
@@ -0,0 +1,100 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.3.4.147-lts (31a7bdc346d) FIXME as compared to v24.3.3.102-lts (7e7f3bdd9be)
+
+#### Improvement
+* Backported in [#63465](https://github.com/ClickHouse/ClickHouse/issues/63465): Make rabbitmq nack broken messages. Closes [#45350](https://github.com/ClickHouse/ClickHouse/issues/45350). [#60312](https://github.com/ClickHouse/ClickHouse/pull/60312) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Backported in [#64290](https://github.com/ClickHouse/ClickHouse/issues/64290): Fix logical-error when undoing quorum insert transaction. [#61953](https://github.com/ClickHouse/ClickHouse/pull/61953) ([Han Fei](https://github.com/hanfei1991)).
+
+#### Build/Testing/Packaging Improvement
+* Backported in [#63610](https://github.com/ClickHouse/ClickHouse/issues/63610): The Dockerfile is reviewed by the docker official library in https://github.com/docker-library/official-images/pull/15846. [#63400](https://github.com/ClickHouse/ClickHouse/pull/63400) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Backported in [#65128](https://github.com/ClickHouse/ClickHouse/issues/65128): Decrease the `unit-test` image a few times. [#65102](https://github.com/ClickHouse/ClickHouse/pull/65102) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Backported in [#64277](https://github.com/ClickHouse/ClickHouse/issues/64277): Fix queries with FINAL giving a wrong result when the table does not use adaptive granularity. [#62432](https://github.com/ClickHouse/ClickHouse/pull/62432) ([Duc Canh Le](https://github.com/canhld94)).
+* Backported in [#63716](https://github.com/ClickHouse/ClickHouse/issues/63716): Fix excessive memory usage for queries with nested lambdas. Fixes [#62036](https://github.com/ClickHouse/ClickHouse/issues/62036). [#62462](https://github.com/ClickHouse/ClickHouse/pull/62462) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#63247](https://github.com/ClickHouse/ClickHouse/issues/63247): Fix size checks when updating materialized nested columns (fixes [#62731](https://github.com/ClickHouse/ClickHouse/issues/62731)). [#62773](https://github.com/ClickHouse/ClickHouse/pull/62773) ([Eliot Hautefeuille](https://github.com/hileef)).
+* Backported in [#62984](https://github.com/ClickHouse/ClickHouse/issues/62984): Fix the `Unexpected return type` error for queries that read from `StorageBuffer` with `PREWHERE` when the source table has different types. Fixes [#62545](https://github.com/ClickHouse/ClickHouse/issues/62545). [#62916](https://github.com/ClickHouse/ClickHouse/pull/62916) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#63185](https://github.com/ClickHouse/ClickHouse/issues/63185): Sanity check: Clamp values instead of throwing. [#63119](https://github.com/ClickHouse/ClickHouse/pull/63119) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#63293](https://github.com/ClickHouse/ClickHouse/issues/63293): Fix crash with untuple and unresolved lambda. [#63131](https://github.com/ClickHouse/ClickHouse/pull/63131) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#63411](https://github.com/ClickHouse/ClickHouse/issues/63411): Fix a misbehavior when SQL security defaults don't load for old tables during server startup. [#63209](https://github.com/ClickHouse/ClickHouse/pull/63209) ([pufit](https://github.com/pufit)).
+* Backported in [#63616](https://github.com/ClickHouse/ClickHouse/issues/63616): Fix a bug which could potentially lead to rare LOGICAL_ERROR during SELECT query with message: `Unexpected return type from materialize. Expected type_XXX. Got type_YYY.` Introduced in [#59379](https://github.com/ClickHouse/ClickHouse/issues/59379). [#63353](https://github.com/ClickHouse/ClickHouse/pull/63353) ([alesapin](https://github.com/alesapin)).
+* Backported in [#63455](https://github.com/ClickHouse/ClickHouse/issues/63455): Fix `X-ClickHouse-Timezone` header returning wrong timezone when using `session_timezone` as query level setting. [#63377](https://github.com/ClickHouse/ClickHouse/pull/63377) ([Andrey Zvonov](https://github.com/zvonand)).
+* Backported in [#63603](https://github.com/ClickHouse/ClickHouse/issues/63603): Fix backup of projection part in case projection was removed from table metadata, but part still has projection. [#63426](https://github.com/ClickHouse/ClickHouse/pull/63426) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Backported in [#63508](https://github.com/ClickHouse/ClickHouse/issues/63508): Fix 'Every derived table must have its own alias' error for MYSQL dictionary source, close [#63341](https://github.com/ClickHouse/ClickHouse/issues/63341). [#63481](https://github.com/ClickHouse/ClickHouse/pull/63481) ([vdimir](https://github.com/vdimir)).
+* Backported in [#63595](https://github.com/ClickHouse/ClickHouse/issues/63595): Avoid segfault in `MergeTreePrefetchedReadPool` while fetching projection parts. [#63513](https://github.com/ClickHouse/ClickHouse/pull/63513) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#63748](https://github.com/ClickHouse/ClickHouse/issues/63748): Read only the necessary columns from VIEW (new analyzer). Closes [#62594](https://github.com/ClickHouse/ClickHouse/issues/62594). [#63688](https://github.com/ClickHouse/ClickHouse/pull/63688) ([Maksim Kita](https://github.com/kitaisreal)).
+* Backported in [#63770](https://github.com/ClickHouse/ClickHouse/issues/63770): Fix [#63539](https://github.com/ClickHouse/ClickHouse/issues/63539). Forbid WINDOW redefinition in new analyzer. [#63694](https://github.com/ClickHouse/ClickHouse/pull/63694) ([Dmitry Novik](https://github.com/novikd)).
+* Backported in [#64189](https://github.com/ClickHouse/ClickHouse/issues/64189): Fix `Not found column` and `CAST AS Map from array requires nested tuple of 2 elements` exceptions for distributed queries which use `Map(Nothing, Nothing)` type. Fixes [#63637](https://github.com/ClickHouse/ClickHouse/issues/63637). [#63753](https://github.com/ClickHouse/ClickHouse/pull/63753) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#63845](https://github.com/ClickHouse/ClickHouse/issues/63845): Fix possible `ILLEGAL_COLUMN` error in `partial_merge` join, close [#37928](https://github.com/ClickHouse/ClickHouse/issues/37928). [#63755](https://github.com/ClickHouse/ClickHouse/pull/63755) ([vdimir](https://github.com/vdimir)).
+* Backported in [#63906](https://github.com/ClickHouse/ClickHouse/issues/63906): `query_plan_remove_redundant_distinct` can break queries with WINDOW FUNCTIONS (when `allow_experimental_analyzer` is on). Fixes [#62820](https://github.com/ClickHouse/ClickHouse/issues/62820). [#63776](https://github.com/ClickHouse/ClickHouse/pull/63776) ([Igor Nikonov](https://github.com/devcrafter)).
+* Backported in [#63989](https://github.com/ClickHouse/ClickHouse/issues/63989): Fix incorrect select query result when parallel replicas were used to read from a Materialized View. [#63861](https://github.com/ClickHouse/ClickHouse/pull/63861) ([Nikita Taranov](https://github.com/nickitat)).
+* Backported in [#64031](https://github.com/ClickHouse/ClickHouse/issues/64031): Fix an error `Database name is empty` for remote queries with lambdas over the cluster with modified default database. Fixes [#63471](https://github.com/ClickHouse/ClickHouse/issues/63471). [#63864](https://github.com/ClickHouse/ClickHouse/pull/63864) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#64559](https://github.com/ClickHouse/ClickHouse/issues/64559): Fix SIGSEGV due to CPU/Real (`query_profiler_real_time_period_ns`/`query_profiler_cpu_time_period_ns`) profiler (has been an issue since 2022, that leads to periodic server crashes, especially if you were using distributed engine). [#63865](https://github.com/ClickHouse/ClickHouse/pull/63865) ([Azat Khuzhin](https://github.com/azat)).
+* Backported in [#64009](https://github.com/ClickHouse/ClickHouse/issues/64009): Fix analyzer - IN function with arbitrary deep sub-selects in materialized view to use insertion block. [#63930](https://github.com/ClickHouse/ClickHouse/pull/63930) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Backported in [#64236](https://github.com/ClickHouse/ClickHouse/issues/64236): Fix resolve of unqualified COLUMNS matcher. Preserve the input columns order and forbid usage of unknown identifiers. [#63962](https://github.com/ClickHouse/ClickHouse/pull/63962) ([Dmitry Novik](https://github.com/novikd)).
+* Backported in [#64106](https://github.com/ClickHouse/ClickHouse/issues/64106): Deserialize untrusted binary inputs in a safer way. [#64024](https://github.com/ClickHouse/ClickHouse/pull/64024) ([Robert Schulze](https://github.com/rschu1ze)).
+* Backported in [#64168](https://github.com/ClickHouse/ClickHouse/issues/64168): Add missing settings to recoverLostReplica. [#64040](https://github.com/ClickHouse/ClickHouse/pull/64040) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#64320](https://github.com/ClickHouse/ClickHouse/issues/64320): This fix will use a proper redefined context with the correct definer for each individual view in the query pipeline. Closes [#63777](https://github.com/ClickHouse/ClickHouse/issues/63777). [#64079](https://github.com/ClickHouse/ClickHouse/pull/64079) ([pufit](https://github.com/pufit)).
+* Backported in [#64380](https://github.com/ClickHouse/ClickHouse/issues/64380): Fix analyzer: "Not found column" error when using INTERPOLATE. [#64096](https://github.com/ClickHouse/ClickHouse/pull/64096) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Backported in [#64567](https://github.com/ClickHouse/ClickHouse/issues/64567): Fix creating backups to S3 buckets with different credentials from the disk containing the file. [#64153](https://github.com/ClickHouse/ClickHouse/pull/64153) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#64270](https://github.com/ClickHouse/ClickHouse/issues/64270): Prevent LOGICAL_ERROR on CREATE TABLE as MaterializedView. [#64174](https://github.com/ClickHouse/ClickHouse/pull/64174) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#64339](https://github.com/ClickHouse/ClickHouse/issues/64339): The query cache now considers two identical queries against different databases as different. The previous behavior could be used to bypass missing privileges to read from a table. [#64199](https://github.com/ClickHouse/ClickHouse/pull/64199) ([Robert Schulze](https://github.com/rschu1ze)). +* Backported in [#64259](https://github.com/ClickHouse/ClickHouse/issues/64259): Ignore `text_log` config when using Keeper. [#64218](https://github.com/ClickHouse/ClickHouse/pull/64218) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#64688](https://github.com/ClickHouse/ClickHouse/issues/64688): Fix Query Tree size validation. Closes [#63701](https://github.com/ClickHouse/ClickHouse/issues/63701). [#64377](https://github.com/ClickHouse/ClickHouse/pull/64377) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#64725](https://github.com/ClickHouse/ClickHouse/issues/64725): Fixed `CREATE TABLE AS` queries for tables with default expressions. [#64455](https://github.com/ClickHouse/ClickHouse/pull/64455) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#64621](https://github.com/ClickHouse/ClickHouse/issues/64621): Fix an error `Cannot find column` in distributed queries with constant CTE in the `GROUP BY` key. [#64519](https://github.com/ClickHouse/ClickHouse/pull/64519) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#64678](https://github.com/ClickHouse/ClickHouse/issues/64678): Fix [#64612](https://github.com/ClickHouse/ClickHouse/issues/64612). Do not rewrite aggregation if `-If` combinator is already used. [#64638](https://github.com/ClickHouse/ClickHouse/pull/64638) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#64831](https://github.com/ClickHouse/ClickHouse/issues/64831): Fix bug which could lead to non-working TTLs with expressions. Fixes [#63700](https://github.com/ClickHouse/ClickHouse/issues/63700). [#64694](https://github.com/ClickHouse/ClickHouse/pull/64694) ([alesapin](https://github.com/alesapin)). +* Backported in [#64940](https://github.com/ClickHouse/ClickHouse/issues/64940): Fix OrderByLimitByDuplicateEliminationVisitor across subqueries. [#64766](https://github.com/ClickHouse/ClickHouse/pull/64766) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#64869](https://github.com/ClickHouse/ClickHouse/issues/64869): Fixed possible incorrect memory tracking in several kinds of queries: queries that read any data from S3, queries via HTTP protocol, asynchronous inserts. [#64844](https://github.com/ClickHouse/ClickHouse/pull/64844) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#64980](https://github.com/ClickHouse/ClickHouse/issues/64980): Fix the `Block structure mismatch` error for queries reading with `PREWHERE` from the materialized view when the materialized view has columns of different types than the source table. 
Fixes [#64611](https://github.com/ClickHouse/ClickHouse/issues/64611). [#64855](https://github.com/ClickHouse/ClickHouse/pull/64855) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#64972](https://github.com/ClickHouse/ClickHouse/issues/64972): Fix rare crash when table has TTL with subquery + database replicated + parallel replicas + analyzer. It's really rare, but please don't use TTLs with subqueries. [#64858](https://github.com/ClickHouse/ClickHouse/pull/64858) ([alesapin](https://github.com/alesapin)). +* Backported in [#65070](https://github.com/ClickHouse/ClickHouse/issues/65070): Fix `ALTER MODIFY COMMENT` query that was broken for parameterized VIEWs in https://github.com/ClickHouse/ClickHouse/pull/54211. [#65031](https://github.com/ClickHouse/ClickHouse/pull/65031) ([Nikolay Degterinsky](https://github.com/evillique)). +* Backported in [#65175](https://github.com/ClickHouse/ClickHouse/issues/65175): Fix the `Unknown expression identifier` error for remote queries with `INTERPOLATE (alias)` (new analyzer). Fixes [#64636](https://github.com/ClickHouse/ClickHouse/issues/64636). [#65090](https://github.com/ClickHouse/ClickHouse/pull/65090) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + +#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) + +* Backported in [#64587](https://github.com/ClickHouse/ClickHouse/issues/64587): Disabled `enable_vertical_final` setting by default. This feature should not be used because it has a bug: [#64543](https://github.com/ClickHouse/ClickHouse/issues/64543). [#64544](https://github.com/ClickHouse/ClickHouse/pull/64544) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#64878](https://github.com/ClickHouse/ClickHouse/issues/64878): This PR fixes an error when a user in a specific situation can escalate their privileges on the default database without necessary grants. [#64769](https://github.com/ClickHouse/ClickHouse/pull/64769) ([pufit](https://github.com/pufit)). + +#### NO CL CATEGORY + +* Backported in [#63304](https://github.com/ClickHouse/ClickHouse/issues/63304):. [#63297](https://github.com/ClickHouse/ClickHouse/pull/63297) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#63708](https://github.com/ClickHouse/ClickHouse/issues/63708):. [#63415](https://github.com/ClickHouse/ClickHouse/pull/63415) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + +#### NO CL ENTRY + +* NO CL ENTRY: 'Revert "Backport [#64363](https://github.com/ClickHouse/ClickHouse/issues/64363) to 24.3: Split tests 03039_dynamic_all_merge_algorithms to avoid timeouts"'. [#64907](https://github.com/ClickHouse/ClickHouse/pull/64907) ([Raúl Marín](https://github.com/Algunenano)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#63751](https://github.com/ClickHouse/ClickHouse/issues/63751): group_by_use_nulls strikes back. [#62922](https://github.com/ClickHouse/ClickHouse/pull/62922) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#63558](https://github.com/ClickHouse/ClickHouse/issues/63558): Try fix segfault in `MergeTreeReadPoolBase::createTask`. [#63323](https://github.com/ClickHouse/ClickHouse/pull/63323) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#63336](https://github.com/ClickHouse/ClickHouse/issues/63336): The commit URL has a different pattern. [#63331](https://github.com/ClickHouse/ClickHouse/pull/63331) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+* Backported in [#63374](https://github.com/ClickHouse/ClickHouse/issues/63374): Add tags for the test 03000_traverse_shadow_system_data_paths.sql to make it stable. [#63366](https://github.com/ClickHouse/ClickHouse/pull/63366) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Backported in [#63625](https://github.com/ClickHouse/ClickHouse/issues/63625): Workaround for `oklch()` inside canvas bug for Firefox. [#63404](https://github.com/ClickHouse/ClickHouse/pull/63404) ([Sergei Trifonov](https://github.com/serxa)). +* Backported in [#63569](https://github.com/ClickHouse/ClickHouse/issues/63569): Add `jwcrypto` to integration tests runner. [#63551](https://github.com/ClickHouse/ClickHouse/pull/63551) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#63649](https://github.com/ClickHouse/ClickHouse/issues/63649): Fix `02362_part_log_merge_algorithm` flaky test. [#63635](https://github.com/ClickHouse/ClickHouse/pull/63635) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). +* Backported in [#63762](https://github.com/ClickHouse/ClickHouse/issues/63762): Cancel S3 reads properly when parallel reads are used. [#63687](https://github.com/ClickHouse/ClickHouse/pull/63687) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#63741](https://github.com/ClickHouse/ClickHouse/issues/63741): Userspace page cache: don't collect stats if cache is unused. [#63730](https://github.com/ClickHouse/ClickHouse/pull/63730) ([Michael Kolupaev](https://github.com/al13n321)). +* Backported in [#63826](https://github.com/ClickHouse/ClickHouse/issues/63826): Fix `test_odbc_interaction` for arm64 on Linux. [#63787](https://github.com/ClickHouse/ClickHouse/pull/63787) ([alesapin](https://github.com/alesapin)). +* Backported in [#63895](https://github.com/ClickHouse/ClickHouse/issues/63895): Fix `test_catboost_evaluate` for aarch64. [#63789](https://github.com/ClickHouse/ClickHouse/pull/63789) ([alesapin](https://github.com/alesapin)). +* Backported in [#63887](https://github.com/ClickHouse/ClickHouse/issues/63887): Fix `test_disk_types` for aarch64. [#63832](https://github.com/ClickHouse/ClickHouse/pull/63832) ([alesapin](https://github.com/alesapin)). +* Backported in [#63879](https://github.com/ClickHouse/ClickHouse/issues/63879): Fix `test_short_strings_aggregation` for arm. [#63836](https://github.com/ClickHouse/ClickHouse/pull/63836) ([alesapin](https://github.com/alesapin)). +* Backported in [#63916](https://github.com/ClickHouse/ClickHouse/issues/63916): Disable `test_non_default_compression/test.py::test_preconfigured_deflateqpl_codec` on arm. [#63839](https://github.com/ClickHouse/ClickHouse/pull/63839) ([alesapin](https://github.com/alesapin)). +* Backported in [#63969](https://github.com/ClickHouse/ClickHouse/issues/63969): Fix 02124_insert_deduplication_token_multiple_blocks. [#63950](https://github.com/ClickHouse/ClickHouse/pull/63950) ([Han Fei](https://github.com/hanfei1991)). +* Backported in [#64047](https://github.com/ClickHouse/ClickHouse/issues/64047): Do not create new release in release branch automatically. [#64039](https://github.com/ClickHouse/ClickHouse/pull/64039) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#64076](https://github.com/ClickHouse/ClickHouse/issues/64076): Files without shebang have mime 'text/plain' or 'inode/x-empty'. [#64062](https://github.com/ClickHouse/ClickHouse/pull/64062) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+* Backported in [#64142](https://github.com/ClickHouse/ClickHouse/issues/64142): Fix sanitizers. [#64090](https://github.com/ClickHouse/ClickHouse/pull/64090) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#64159](https://github.com/ClickHouse/ClickHouse/issues/64159): Add retries in `git submodule update`. [#64125](https://github.com/ClickHouse/ClickHouse/pull/64125) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#64473](https://github.com/ClickHouse/ClickHouse/issues/64473): Split tests 03039_dynamic_all_merge_algorithms to avoid timeouts. [#64363](https://github.com/ClickHouse/ClickHouse/pull/64363) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#65113](https://github.com/ClickHouse/ClickHouse/issues/65113): Adjust the `version_helper` and script to a new release scheme. [#64759](https://github.com/ClickHouse/ClickHouse/pull/64759) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#64999](https://github.com/ClickHouse/ClickHouse/issues/64999): Fix crash with DISTINCT and window functions. [#64767](https://github.com/ClickHouse/ClickHouse/pull/64767) ([Igor Nikonov](https://github.com/devcrafter)). + diff --git a/docs/changelogs/v24.4.2.141-stable.md b/docs/changelogs/v24.4.2.141-stable.md new file mode 100644 index 00000000000..656d0854392 --- /dev/null +++ b/docs/changelogs/v24.4.2.141-stable.md @@ -0,0 +1,101 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.4.2.141-stable (9e23d27bd11) FIXME as compared to v24.4.1.2088-stable (6d4b31322d1) + +#### Improvement +* Backported in [#63467](https://github.com/ClickHouse/ClickHouse/issues/63467): Make rabbitmq nack broken messages. Closes [#45350](https://github.com/ClickHouse/ClickHouse/issues/45350). [#60312](https://github.com/ClickHouse/ClickHouse/pull/60312) ([Kseniia Sumarokova](https://github.com/kssenii)). + +#### Build/Testing/Packaging Improvement +* Backported in [#63612](https://github.com/ClickHouse/ClickHouse/issues/63612): The Dockerfile is reviewed by the docker official library in https://github.com/docker-library/official-images/pull/15846. [#63400](https://github.com/ClickHouse/ClickHouse/pull/63400) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Backported in [#64279](https://github.com/ClickHouse/ClickHouse/issues/64279): Fix queries with FINAL giving wrong results when table does not use adaptive granularity. [#62432](https://github.com/ClickHouse/ClickHouse/pull/62432) ([Duc Canh Le](https://github.com/canhld94)). +* Backported in [#63295](https://github.com/ClickHouse/ClickHouse/issues/63295): Fix crash with untuple and unresolved lambda. [#63131](https://github.com/ClickHouse/ClickHouse/pull/63131) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#63978](https://github.com/ClickHouse/ClickHouse/issues/63978): Fix intersect parts when restart after drop range. [#63202](https://github.com/ClickHouse/ClickHouse/pull/63202) ([Han Fei](https://github.com/hanfei1991)). +* Backported in [#63413](https://github.com/ClickHouse/ClickHouse/issues/63413): Fix a misbehavior when SQL security defaults don't load for old tables during server startup. [#63209](https://github.com/ClickHouse/ClickHouse/pull/63209) ([pufit](https://github.com/pufit)). +* Backported in [#63388](https://github.com/ClickHouse/ClickHouse/issues/63388): JOIN filter push down filled join fix. 
Closes [#63228](https://github.com/ClickHouse/ClickHouse/issues/63228). [#63234](https://github.com/ClickHouse/ClickHouse/pull/63234) ([Maksim Kita](https://github.com/kitaisreal)). +* Backported in [#63618](https://github.com/ClickHouse/ClickHouse/issues/63618): Fix bug which could potentially lead to rare LOGICAL_ERROR during SELECT query with message: `Unexpected return type from materialize. Expected type_XXX. Got type_YYY.` Introduced in [#59379](https://github.com/ClickHouse/ClickHouse/issues/59379). [#63353](https://github.com/ClickHouse/ClickHouse/pull/63353) ([alesapin](https://github.com/alesapin)). +* Backported in [#63451](https://github.com/ClickHouse/ClickHouse/issues/63451): Fix `X-ClickHouse-Timezone` header returning wrong timezone when using `session_timezone` as query level setting. [#63377](https://github.com/ClickHouse/ClickHouse/pull/63377) ([Andrey Zvonov](https://github.com/zvonand)). +* Backported in [#63605](https://github.com/ClickHouse/ClickHouse/issues/63605): Fix backup of projection part in case projection was removed from table metadata, but part still has projection. [#63426](https://github.com/ClickHouse/ClickHouse/pull/63426) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#63510](https://github.com/ClickHouse/ClickHouse/issues/63510): Fix 'Every derived table must have its own alias' error for MySQL dictionary source, close [#63341](https://github.com/ClickHouse/ClickHouse/issues/63341). [#63481](https://github.com/ClickHouse/ClickHouse/pull/63481) ([vdimir](https://github.com/vdimir)). +* Backported in [#63592](https://github.com/ClickHouse/ClickHouse/issues/63592): Avoid segfault in `MergeTreePrefetchedReadPool` while fetching projection parts. [#63513](https://github.com/ClickHouse/ClickHouse/pull/63513) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#63750](https://github.com/ClickHouse/ClickHouse/issues/63750): Read only the necessary columns from VIEW (new analyzer). Closes [#62594](https://github.com/ClickHouse/ClickHouse/issues/62594). [#63688](https://github.com/ClickHouse/ClickHouse/pull/63688) ([Maksim Kita](https://github.com/kitaisreal)). +* Backported in [#63772](https://github.com/ClickHouse/ClickHouse/issues/63772): Fix [#63539](https://github.com/ClickHouse/ClickHouse/issues/63539). Forbid WINDOW redefinition in new analyzer. [#63694](https://github.com/ClickHouse/ClickHouse/pull/63694) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#63872](https://github.com/ClickHouse/ClickHouse/issues/63872): Flatten_nested is broken with replicated database. [#63695](https://github.com/ClickHouse/ClickHouse/pull/63695) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#63854](https://github.com/ClickHouse/ClickHouse/issues/63854): Fix `Not found column` and `CAST AS Map from array requires nested tuple of 2 elements` exceptions for distributed queries which use `Map(Nothing, Nothing)` type. Fixes [#63637](https://github.com/ClickHouse/ClickHouse/issues/63637). [#63753](https://github.com/ClickHouse/ClickHouse/pull/63753) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#63847](https://github.com/ClickHouse/ClickHouse/issues/63847): Fix possible `ILLEGAL_COLUMN` error in `partial_merge` join, close [#37928](https://github.com/ClickHouse/ClickHouse/issues/37928). [#63755](https://github.com/ClickHouse/ClickHouse/pull/63755) ([vdimir](https://github.com/vdimir)). 
+* Backported in [#63908](https://github.com/ClickHouse/ClickHouse/issues/63908): `query_plan_remove_redundant_distinct` can break queries with WINDOW FUNCTIONS (when `allow_experimental_analyzer` is on). Fixes [#62820](https://github.com/ClickHouse/ClickHouse/issues/62820). [#63776](https://github.com/ClickHouse/ClickHouse/pull/63776) ([Igor Nikonov](https://github.com/devcrafter)). +* Backported in [#63955](https://github.com/ClickHouse/ClickHouse/issues/63955): Fix possible crash with SYSTEM UNLOAD PRIMARY KEY. [#63778](https://github.com/ClickHouse/ClickHouse/pull/63778) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#63938](https://github.com/ClickHouse/ClickHouse/issues/63938): Allow JOIN filter push down to both streams if only single equivalent column is used in query. Closes [#63799](https://github.com/ClickHouse/ClickHouse/issues/63799). [#63819](https://github.com/ClickHouse/ClickHouse/pull/63819) ([Maksim Kita](https://github.com/kitaisreal)). +* Backported in [#63991](https://github.com/ClickHouse/ClickHouse/issues/63991): Fix incorrect select query result when parallel replicas were used to read from a Materialized View. [#63861](https://github.com/ClickHouse/ClickHouse/pull/63861) ([Nikita Taranov](https://github.com/nickitat)). +* Backported in [#64033](https://github.com/ClickHouse/ClickHouse/issues/64033): Fix an error `Database name is empty` for remote queries with lambdas over the cluster with modified default database. Fixes [#63471](https://github.com/ClickHouse/ClickHouse/issues/63471). [#63864](https://github.com/ClickHouse/ClickHouse/pull/63864) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#64561](https://github.com/ClickHouse/ClickHouse/issues/64561): Fix SIGSEGV due to CPU/Real (`query_profiler_real_time_period_ns`/`query_profiler_cpu_time_period_ns`) profiler (has been an issue since 2022, that leads to periodic server crashes, especially if you were using distributed engine). [#63865](https://github.com/ClickHouse/ClickHouse/pull/63865) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#64011](https://github.com/ClickHouse/ClickHouse/issues/64011): Fix analyzer - IN function with arbitrary deep sub-selects in materialized view to use insertion block. [#63930](https://github.com/ClickHouse/ClickHouse/pull/63930) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Backported in [#64238](https://github.com/ClickHouse/ClickHouse/issues/64238): Fix resolve of unqualified COLUMNS matcher. Preserve the input columns order and forbid usage of unknown identifiers. [#63962](https://github.com/ClickHouse/ClickHouse/pull/63962) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#64103](https://github.com/ClickHouse/ClickHouse/issues/64103): Deserialize untrusted binary inputs in a safer way. [#64024](https://github.com/ClickHouse/ClickHouse/pull/64024) ([Robert Schulze](https://github.com/rschu1ze)). +* Backported in [#64170](https://github.com/ClickHouse/ClickHouse/issues/64170): Add missing settings to recoverLostReplica. [#64040](https://github.com/ClickHouse/ClickHouse/pull/64040) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#64322](https://github.com/ClickHouse/ClickHouse/issues/64322): This fix will use a proper redefined context with the correct definer for each individual view in the query pipeline. Closes [#63777](https://github.com/ClickHouse/ClickHouse/issues/63777). 
[#64079](https://github.com/ClickHouse/ClickHouse/pull/64079) ([pufit](https://github.com/pufit)). +* Backported in [#64382](https://github.com/ClickHouse/ClickHouse/issues/64382): Fix analyzer: "Not found column" error is fixed when using INTERPOLATE. [#64096](https://github.com/ClickHouse/ClickHouse/pull/64096) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Backported in [#64568](https://github.com/ClickHouse/ClickHouse/issues/64568): Fix creating backups to S3 buckets with different credentials from the disk containing the file. [#64153](https://github.com/ClickHouse/ClickHouse/pull/64153) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#64272](https://github.com/ClickHouse/ClickHouse/issues/64272): Prevent LOGICAL_ERROR on CREATE TABLE as MaterializedView. [#64174](https://github.com/ClickHouse/ClickHouse/pull/64174) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#64330](https://github.com/ClickHouse/ClickHouse/issues/64330): The query cache now considers two identical queries against different databases as different. The previous behavior could be used to bypass missing privileges to read from a table. [#64199](https://github.com/ClickHouse/ClickHouse/pull/64199) ([Robert Schulze](https://github.com/rschu1ze)). +* Backported in [#64254](https://github.com/ClickHouse/ClickHouse/issues/64254): Ignore `text_log` config when using Keeper. [#64218](https://github.com/ClickHouse/ClickHouse/pull/64218) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#64690](https://github.com/ClickHouse/ClickHouse/issues/64690): Fix Query Tree size validation. Closes [#63701](https://github.com/ClickHouse/ClickHouse/issues/63701). [#64377](https://github.com/ClickHouse/ClickHouse/pull/64377) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#64409](https://github.com/ClickHouse/ClickHouse/issues/64409): Fix `Logical error: Bad cast` for `Buffer` table with `PREWHERE`. Fixes [#64172](https://github.com/ClickHouse/ClickHouse/issues/64172). [#64388](https://github.com/ClickHouse/ClickHouse/pull/64388) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#64727](https://github.com/ClickHouse/ClickHouse/issues/64727): Fixed `CREATE TABLE AS` queries for tables with default expressions. [#64455](https://github.com/ClickHouse/ClickHouse/pull/64455) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#64623](https://github.com/ClickHouse/ClickHouse/issues/64623): Fix an error `Cannot find column` in distributed queries with constant CTE in the `GROUP BY` key. [#64519](https://github.com/ClickHouse/ClickHouse/pull/64519) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#64680](https://github.com/ClickHouse/ClickHouse/issues/64680): Fix [#64612](https://github.com/ClickHouse/ClickHouse/issues/64612). Do not rewrite aggregation if `-If` combinator is already used. [#64638](https://github.com/ClickHouse/ClickHouse/pull/64638) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#64942](https://github.com/ClickHouse/ClickHouse/issues/64942): Fix OrderByLimitByDuplicateEliminationVisitor across subqueries. [#64766](https://github.com/ClickHouse/ClickHouse/pull/64766) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#64871](https://github.com/ClickHouse/ClickHouse/issues/64871): Fixed possible incorrect memory tracking in several kinds of queries: queries that read any data from S3, queries via HTTP protocol, asynchronous inserts. 
[#64844](https://github.com/ClickHouse/ClickHouse/pull/64844) ([Anton Popov](https://github.com/CurtizJ)). + +#### CI Fix or Improvement (changelog entry is not required) + +* Backported in [#63364](https://github.com/ClickHouse/ClickHouse/issues/63364): Implement cumulative A Sync status. [#61464](https://github.com/ClickHouse/ClickHouse/pull/61464) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#63338](https://github.com/ClickHouse/ClickHouse/issues/63338): Use `/commit/` to have the URLs in [reports](https://play.clickhouse.com/play?user=play#c2VsZWN0IGRpc3RpbmN0IGNvbW1pdF91cmwgZnJvbSBjaGVja3Mgd2hlcmUgY2hlY2tfc3RhcnRfdGltZSA+PSBub3coKSAtIGludGVydmFsIDEgbW9udGggYW5kIHB1bGxfcmVxdWVzdF9udW1iZXI9NjA1MzI=) like https://github.com/ClickHouse/ClickHouse/commit/44f8bc5308b53797bec8cccc3bd29fab8a00235d and not like https://github.com/ClickHouse/ClickHouse/commits/44f8bc5308b53797bec8cccc3bd29fab8a00235d. [#63331](https://github.com/ClickHouse/ClickHouse/pull/63331) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#63376](https://github.com/ClickHouse/ClickHouse/issues/63376):. [#63366](https://github.com/ClickHouse/ClickHouse/pull/63366) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Backported in [#63571](https://github.com/ClickHouse/ClickHouse/issues/63571):. [#63551](https://github.com/ClickHouse/ClickHouse/pull/63551) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#63651](https://github.com/ClickHouse/ClickHouse/issues/63651): Fix 02362_part_log_merge_algorithm flaky test. [#63635](https://github.com/ClickHouse/ClickHouse/pull/63635) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). +* Backported in [#63828](https://github.com/ClickHouse/ClickHouse/issues/63828): Fix test_odbc_interaction from aarch64 [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63787](https://github.com/ClickHouse/ClickHouse/pull/63787) ([alesapin](https://github.com/alesapin)). +* Backported in [#63897](https://github.com/ClickHouse/ClickHouse/issues/63897): Fix test `test_catboost_evaluate` for aarch64. [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63789](https://github.com/ClickHouse/ClickHouse/pull/63789) ([alesapin](https://github.com/alesapin)). +* Backported in [#63889](https://github.com/ClickHouse/ClickHouse/issues/63889): Remove HDFS from disks config for one integration test for arm. [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63832](https://github.com/ClickHouse/ClickHouse/pull/63832) ([alesapin](https://github.com/alesapin)). +* Backported in [#63881](https://github.com/ClickHouse/ClickHouse/issues/63881): Bump version for old image in test_short_strings_aggregation to make it work on arm. [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63836](https://github.com/ClickHouse/ClickHouse/pull/63836) ([alesapin](https://github.com/alesapin)). +* Backported in [#63919](https://github.com/ClickHouse/ClickHouse/issues/63919): Disable test `test_non_default_compression/test.py::test_preconfigured_deflateqpl_codec` on arm. [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63839](https://github.com/ClickHouse/ClickHouse/pull/63839) ([alesapin](https://github.com/alesapin)). +* Backported in [#63971](https://github.com/ClickHouse/ClickHouse/issues/63971): Fix 02124_insert_deduplication_token_multiple_blocks. [#63950](https://github.com/ClickHouse/ClickHouse/pull/63950) ([Han Fei](https://github.com/hanfei1991)). 
+* Backported in [#64049](https://github.com/ClickHouse/ClickHouse/issues/64049): Add `ClickHouseVersion.copy` method. Create a branch release in advance without spinning out the release to increase the stability. [#64039](https://github.com/ClickHouse/ClickHouse/pull/64039) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#64078](https://github.com/ClickHouse/ClickHouse/issues/64078): The mime type is not 100% reliable for Python and shell scripts without shebangs; add a check for file extension. [#64062](https://github.com/ClickHouse/ClickHouse/pull/64062) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#64161](https://github.com/ClickHouse/ClickHouse/issues/64161): Add retries in git submodule update. [#64125](https://github.com/ClickHouse/ClickHouse/pull/64125) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) + +* Backported in [#64589](https://github.com/ClickHouse/ClickHouse/issues/64589): Disabled `enable_vertical_final` setting by default. This feature should not be used because it has a bug: [#64543](https://github.com/ClickHouse/ClickHouse/issues/64543). [#64544](https://github.com/ClickHouse/ClickHouse/pull/64544) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#64880](https://github.com/ClickHouse/ClickHouse/issues/64880): This PR fixes an error when a user in a specific situation can escalate their privileges on the default database without necessary grants. [#64769](https://github.com/ClickHouse/ClickHouse/pull/64769) ([pufit](https://github.com/pufit)). + +#### NO CL CATEGORY + +* Backported in [#63306](https://github.com/ClickHouse/ClickHouse/issues/63306):. [#63297](https://github.com/ClickHouse/ClickHouse/pull/63297) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#63710](https://github.com/ClickHouse/ClickHouse/issues/63710):. [#63415](https://github.com/ClickHouse/ClickHouse/pull/63415) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + +#### NO CL ENTRY + +* NO CL ENTRY: 'Revert "Backport [#64363](https://github.com/ClickHouse/ClickHouse/issues/64363) to 24.4: Split tests 03039_dynamic_all_merge_algorithms to avoid timeouts"'. [#64905](https://github.com/ClickHouse/ClickHouse/pull/64905) ([Raúl Marín](https://github.com/Algunenano)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* group_by_use_nulls strikes back [#62922](https://github.com/ClickHouse/ClickHouse/pull/62922) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Add `FROM` keyword to `TRUNCATE ALL TABLES` [#63241](https://github.com/ClickHouse/ClickHouse/pull/63241) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* More checks for concurrently deleted files and dirs in system.remote_data_paths [#63274](https://github.com/ClickHouse/ClickHouse/pull/63274) ([Alexander Gololobov](https://github.com/davenger)). +* Try fix segfault in `MergeTreeReadPoolBase::createTask` [#63323](https://github.com/ClickHouse/ClickHouse/pull/63323) ([Antonio Andelic](https://github.com/antonio2368)). +* Skip inaccessible table dirs in system.remote_data_paths [#63330](https://github.com/ClickHouse/ClickHouse/pull/63330) ([Alexander Gololobov](https://github.com/davenger)). +* Workaround for `oklch()` inside canvas bug for Firefox [#63404](https://github.com/ClickHouse/ClickHouse/pull/63404) ([Sergei Trifonov](https://github.com/serxa)). 
+* Cancel S3 reads properly when parallel reads are used [#63687](https://github.com/ClickHouse/ClickHouse/pull/63687) ([Antonio Andelic](https://github.com/antonio2368)). +* Userspace page cache: don't collect stats if cache is unused [#63730](https://github.com/ClickHouse/ClickHouse/pull/63730) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix sanitizers [#64090](https://github.com/ClickHouse/ClickHouse/pull/64090) ([Azat Khuzhin](https://github.com/azat)). +* Split tests 03039_dynamic_all_merge_algorithms to avoid timeouts [#64363](https://github.com/ClickHouse/ClickHouse/pull/64363) ([Kruglov Pavel](https://github.com/Avogar)). +* CI: Critical bugfix category in PR template [#64480](https://github.com/ClickHouse/ClickHouse/pull/64480) ([Max K.](https://github.com/maxknv)). + diff --git a/docs/changelogs/v24.5.2.34-stable.md b/docs/changelogs/v24.5.2.34-stable.md new file mode 100644 index 00000000000..2db05a5f5dc --- /dev/null +++ b/docs/changelogs/v24.5.2.34-stable.md @@ -0,0 +1,38 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.5.2.34-stable (45589aeee49) FIXME as compared to v24.5.1.1763-stable (647c154a94d) + +#### Improvement +* Backported in [#65096](https://github.com/ClickHouse/ClickHouse/issues/65096): The setting `allow_experimental_join_condition` was accidentally marked as important which may prevent distributed queries in a mixed versions cluster from being executed successfully. [#65008](https://github.com/ClickHouse/ClickHouse/pull/65008) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). + +#### Build/Testing/Packaging Improvement +* Backported in [#65132](https://github.com/ClickHouse/ClickHouse/issues/65132): Decrease the `unit-test` image a few times. [#65102](https://github.com/ClickHouse/ClickHouse/pull/65102) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Backported in [#64729](https://github.com/ClickHouse/ClickHouse/issues/64729): Fixed `CREATE TABLE AS` queries for tables with default expressions. [#64455](https://github.com/ClickHouse/ClickHouse/pull/64455) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#65061](https://github.com/ClickHouse/ClickHouse/issues/65061): Fix the `Expression nodes list expected 1 projection names` and `Unknown expression or identifier` errors for queries with aliases to `GLOBAL IN`. Fixes [#64445](https://github.com/ClickHouse/ClickHouse/issues/64445). [#64517](https://github.com/ClickHouse/ClickHouse/pull/64517) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088): Fix removing the `WHERE` and `PREWHERE` expressions, which are always true (for the new analyzer). Fixes [#64575](https://github.com/ClickHouse/ClickHouse/issues/64575). [#64695](https://github.com/ClickHouse/ClickHouse/pull/64695) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#64944](https://github.com/ClickHouse/ClickHouse/issues/64944): Fix OrderByLimitByDuplicateEliminationVisitor across subqueries. [#64766](https://github.com/ClickHouse/ClickHouse/pull/64766) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#64873](https://github.com/ClickHouse/ClickHouse/issues/64873): Fixed possible incorrect memory tracking in several kinds of queries: queries that read any data from S3, queries via HTTP protocol, asynchronous inserts. 
[#64844](https://github.com/ClickHouse/ClickHouse/pull/64844) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#64984](https://github.com/ClickHouse/ClickHouse/issues/64984): Fix the `Block structure mismatch` error for queries reading with `PREWHERE` from the materialized view when the materialized view has columns of different types than the source table. Fixes [#64611](https://github.com/ClickHouse/ClickHouse/issues/64611). [#64855](https://github.com/ClickHouse/ClickHouse/pull/64855) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#64976](https://github.com/ClickHouse/ClickHouse/issues/64976): Fix rare crash when table has TTL with subquery + database replicated + parallel replicas + analyzer. It's really rare, but please don't use TTLs with subqueries. [#64858](https://github.com/ClickHouse/ClickHouse/pull/64858) ([alesapin](https://github.com/alesapin)). +* Backported in [#65074](https://github.com/ClickHouse/ClickHouse/issues/65074): Fix `ALTER MODIFY COMMENT` query that was broken for parameterized VIEWs in https://github.com/ClickHouse/ClickHouse/pull/54211. [#65031](https://github.com/ClickHouse/ClickHouse/pull/65031) ([Nikolay Degterinsky](https://github.com/evillique)). +* Backported in [#65179](https://github.com/ClickHouse/ClickHouse/issues/65179): Fix the `Unknown expression identifier` error for remote queries with `INTERPOLATE (alias)` (new analyzer). Fixes [#64636](https://github.com/ClickHouse/ClickHouse/issues/64636). [#65090](https://github.com/ClickHouse/ClickHouse/pull/65090) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#65163](https://github.com/ClickHouse/ClickHouse/issues/65163): Fix pushing arithmetic operations out of aggregation. In the new analyzer, optimization was applied only once. Part of [#62245](https://github.com/ClickHouse/ClickHouse/issues/62245). [#65104](https://github.com/ClickHouse/ClickHouse/pull/65104) ([Dmitry Novik](https://github.com/novikd)). + +#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) + +* Backported in [#64882](https://github.com/ClickHouse/ClickHouse/issues/64882): This PR fixes an error when a user in a specific situation can escalate their privileges on the default database without necessary grants. [#64769](https://github.com/ClickHouse/ClickHouse/pull/64769) ([pufit](https://github.com/pufit)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#65002](https://github.com/ClickHouse/ClickHouse/issues/65002): Be more graceful with existing tables with `inverted` indexes. [#64656](https://github.com/ClickHouse/ClickHouse/pull/64656) ([Robert Schulze](https://github.com/rschu1ze)). +* Backported in [#65115](https://github.com/ClickHouse/ClickHouse/issues/65115): Adjust the `version_helper` and script to a new release scheme. [#64759](https://github.com/ClickHouse/ClickHouse/pull/64759) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#64796](https://github.com/ClickHouse/ClickHouse/issues/64796): Fix crash with DISTINCT and window functions. [#64767](https://github.com/ClickHouse/ClickHouse/pull/64767) ([Igor Nikonov](https://github.com/devcrafter)). 
+ diff --git a/docs/changelogs/v24.5.3.5-stable.md b/docs/changelogs/v24.5.3.5-stable.md new file mode 100644 index 00000000000..4606e58d0a4 --- /dev/null +++ b/docs/changelogs/v24.5.3.5-stable.md @@ -0,0 +1,14 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.5.3.5-stable (e0eb66f8e17) FIXME as compared to v24.5.2.34-stable (45589aeee49) + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#65227](https://github.com/ClickHouse/ClickHouse/issues/65227): Capture weak_ptr of ContextAccess for safety. [#65051](https://github.com/ClickHouse/ClickHouse/pull/65051) ([Alexander Gololobov](https://github.com/davenger)). +* Backported in [#65219](https://github.com/ClickHouse/ClickHouse/issues/65219): Fix false positives leaky memory warnings in OpenSSL. [#65125](https://github.com/ClickHouse/ClickHouse/pull/65125) ([Robert Schulze](https://github.com/rschu1ze)). + diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index bbc7dac0a2a..8dff6f0ed1d 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -229,6 +229,10 @@ For production builds, clang is used, but we also test make gcc builds. For deve ## Sanitizers {#sanitizers} +:::note +If the process (ClickHouse server or client) crashes at startup when running it locally, you might need to disable address space layout randomization: `sudo sysctl kernel.randomize_va_space=0` +::: + ### Address sanitizer We run functional, integration, stress and unit tests under ASan on per-commit basis. diff --git a/docs/en/engines/table-engines/integrations/azureBlobStorage.md b/docs/en/engines/table-engines/integrations/azureBlobStorage.md index 0843ff1ac47..dfc27d6b8cf 100644 --- a/docs/en/engines/table-engines/integrations/azureBlobStorage.md +++ b/docs/en/engines/table-engines/integrations/azureBlobStorage.md @@ -54,6 +54,7 @@ SELECT * FROM test_table; - `_path` — Path to the file. Type: `LowCardinalty(String)`. - `_file` — Name of the file. Type: `LowCardinalty(String)`. - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. +- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. ## See also diff --git a/docs/en/engines/table-engines/integrations/hdfs.md b/docs/en/engines/table-engines/integrations/hdfs.md index 2749fa7e479..c9df713231a 100644 --- a/docs/en/engines/table-engines/integrations/hdfs.md +++ b/docs/en/engines/table-engines/integrations/hdfs.md @@ -235,6 +235,7 @@ libhdfs3 support HDFS namenode HA. - `_path` — Path to the file. Type: `LowCardinalty(String)`. - `_file` — Name of the file. Type: `LowCardinalty(String)`. - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. +- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. 
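+For example, the virtual columns above can be selected like ordinary columns. A minimal sketch, assuming the `hdfs_engine_table` from the usage example earlier on this page exists:
+
+``` sql
+-- List the files backing the table, with their sizes and last modified times.
+SELECT _path, _file, _size, _time
+FROM hdfs_engine_table;
+```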
## Storage Settings {#storage-settings} diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md index cb1da1c8e68..93f4a187656 100644 --- a/docs/en/engines/table-engines/integrations/s3.md +++ b/docs/en/engines/table-engines/integrations/s3.md @@ -53,14 +53,14 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da This example uses the [docker compose recipe](https://github.com/ClickHouse/examples/tree/5fdc6ff72f4e5137e23ea075c88d3f44b0202490/docker-compose-recipes/recipes/ch-and-minio-S3), which integrates ClickHouse and MinIO. You should be able to reproduce the same queries using S3 by replacing the endpoint and authentication values. -Notice that the S3 endpoint in the `ENGINE` configuration uses the parameter token `{_partition_id}` as part of the S3 object (filename), and that the SELECT queries select against those resulting object names (e.g., `test_3.csv`). +Notice that the S3 endpoint in the `ENGINE` configuration uses the parameter token `{_partition_id}` as part of the S3 object (filename), and that the SELECT queries select against those resulting object names (e.g., `test_3.csv`). :::note As shown in the example, querying from S3 tables that are partitioned is not directly supported at this time, but can be accomplished by querying the individual partitions using the S3 table function. -The primary use-case for writing +The primary use-case for writing partitioned data in S3 is to enable transferring that data into another ClickHouse system (for example, moving from on-prem systems to ClickHouse Cloud). Because ClickHouse datasets are often very large, and network @@ -78,9 +78,9 @@ CREATE TABLE p ) ENGINE = S3( # highlight-next-line - 'http://minio:10000/clickhouse//test_{_partition_id}.csv', - 'minioadmin', - 'minioadminpassword', + 'http://minio:10000/clickhouse//test_{_partition_id}.csv', + 'minioadmin', + 'minioadminpassword', 'CSV') PARTITION BY column3 ``` @@ -145,6 +145,7 @@ Code: 48. DB::Exception: Received from localhost:9000. DB::Exception: Reading fr - `_path` — Path to the file. Type: `LowCardinalty(String)`. - `_file` — Name of the file. Type: `LowCardinalty(String)`. - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. +- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. For more information about virtual columns see [here](../../../engines/table-engines/index.md#table_engines-virtual_columns). diff --git a/docs/en/engines/table-engines/integrations/s3queue.md b/docs/en/engines/table-engines/integrations/s3queue.md index aa7fa512480..0958680dc56 100644 --- a/docs/en/engines/table-engines/integrations/s3queue.md +++ b/docs/en/engines/table-engines/integrations/s3queue.md @@ -75,7 +75,7 @@ Possible values: - unordered — With unordered mode, the set of all already processed files is tracked with persistent nodes in ZooKeeper. - ordered — With ordered mode, only the max name of the successfully consumed file, and the names of files that will be retried after unsuccessful loading attempt are being stored in ZooKeeper. -Default value: `unordered`. +Default value: `unordered` in versions before 24.6. Starting with 24.6 there is no default value; the setting must be specified manually. For tables created on earlier versions the default value will remain `Unordered` for compatibility. 
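+Because the setting no longer has a default, new table definitions have to set it explicitly. A sketch, in which the table name and bucket URL are placeholders for your own setup:
+
+``` sql
+CREATE TABLE s3queue_engine_table (name String, value UInt32)
+    ENGINE = S3Queue('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/*', 'CSV')
+    SETTINGS mode = 'unordered';
+```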
### after_processing {#after_processing} @@ -181,6 +181,10 @@ For 'Ordered' mode. Defines a maximum boundary for reschedule interval for a bac Default value: `30000`. +### s3queue_buckets {#buckets} + +For 'Ordered' mode. Available since `24.6`. If there are several replicas of an S3Queue table, each working with the same metadata directory in Keeper, the value of `s3queue_buckets` needs to be at least equal to the number of replicas. If the `s3queue_processing_threads` setting is used as well, it makes sense to increase the value of the `s3queue_buckets` setting even further, as it defines the actual parallelism of `S3Queue` processing. + ## S3-related Settings {#s3-settings} Engine supports all s3 related settings. For more information about S3 settings see [here](../../../engines/table-engines/integrations/s3.md). @@ -267,7 +271,7 @@ For introspection use `system.s3queue` stateless table and `system.s3queue_log` `exception` String ) ENGINE = SystemS3Queue -COMMENT 'SYSTEM TABLE is built on the fly.' │ +COMMENT 'Contains in-memory state of S3Queue metadata and currently processed rows per file.' │ └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 803b753fe0a..f0c4e1b0e34 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -6,35 +6,26 @@ sidebar_label: MergeTree # MergeTree -The `MergeTree` engine and other engines of this family (`*MergeTree`) are the most commonly used and most robust ClickHouse table engines. +The `MergeTree` engine and other engines of the `MergeTree` family (e.g. `ReplacingMergeTree`, `AggregatingMergeTree`) are the most commonly used and most robust table engines in ClickHouse. -Engines in the `MergeTree` family are designed for inserting a very large amount of data into a table. The data is quickly written to the table part by part, then rules are applied for merging the parts in the background. This method is much more efficient than continually rewriting the data in storage during insert. +`MergeTree`-family table engines are designed for high data ingest rates and huge data volumes. +Insert operations create table parts which are merged by a background process with other table parts. -Main features: +Main features of `MergeTree`-family table engines: -- Stores data sorted by primary key. +- The table's primary key determines the sort order within each table part (clustered index). The primary key does not reference individual rows but blocks of 8192 rows called granules. This makes primary keys of huge data sets small enough to remain loaded in main memory, while still providing fast access to on-disk data. - This allows you to create a small sparse index that helps find data faster. +- Tables can be partitioned using an arbitrary partition expression. Partition pruning ensures partitions are omitted from reading when the query allows it. -- Partitions can be used if the [partitioning key](/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md) is specified. +- Data can be replicated across multiple cluster nodes for high availability, failover, and zero downtime upgrades. 
See [Data replication](/docs/en/engines/table-engines/mergetree-family/replication.md). - ClickHouse supports certain operations with partitions that are more efficient than general operations on the same data with the same result. ClickHouse also automatically cuts off the partition data where the partitioning key is specified in the query. +- `MergeTree` table engines support various statistics kinds and sampling methods to help query optimization. -- Data replication support. - - The family of `ReplicatedMergeTree` tables provides data replication. For more information, see [Data replication](/docs/en/engines/table-engines/mergetree-family/replication.md). - -- Data sampling support. - - If necessary, you can set the data sampling method in the table. - -:::info -The [Merge](/docs/en/engines/table-engines/special/merge.md/#merge) engine does not belong to the `*MergeTree` family. +:::note +Despite a similar name, the [Merge](/docs/en/engines/table-engines/special/merge.md/#merge) engine is different from `*MergeTree` engines. ::: -If you need to update rows frequently, we recommend using the [`ReplacingMergeTree`](/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md) table engine. Using `ALTER TABLE my_table UPDATE` to update rows triggers a mutation, which causes parts to be re-written and uses IO/resources. With `ReplacingMergeTree`, you can simply insert the updated rows and the old rows will be replaced according to the table sorting key. - -## Creating a Table {#table_engine-mergetree-creating-a-table} +## Creating Tables {#table_engine-mergetree-creating-a-table} ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] @@ -59,23 +50,24 @@ ORDER BY expr [SETTINGS name = value, ...] ``` -For a description of parameters, see the [CREATE query description](/docs/en/sql-reference/statements/create/table.md). +For a detailed description of the parameters, see the [CREATE TABLE](/docs/en/sql-reference/statements/create/table.md) statement. ### Query Clauses {#mergetree-query-clauses} #### ENGINE -`ENGINE` — Name and parameters of the engine. `ENGINE = MergeTree()`. The `MergeTree` engine does not have parameters. +`ENGINE` — Name and parameters of the engine. `ENGINE = MergeTree()`. The `MergeTree` engine has no parameters. #### ORDER_BY `ORDER BY` — The sorting key. -A tuple of column names or arbitrary expressions. Example: `ORDER BY (CounterID + 1, EventDate)`. +A tuple of column names or arbitrary expressions. Example: `ORDER BY (CounterID + 1, EventDate)`. -ClickHouse uses the sorting key as a primary key if the primary key is not defined explicitly by the `PRIMARY KEY` clause. +If no primary key is defined (i.e. `PRIMARY KEY` was not specified), ClickHouse uses the sorting key as primary key. -Use the `ORDER BY tuple()` syntax, if you do not need sorting, or set `create_table_empty_primary_key_by_default` to `true` to use the `ORDER BY tuple()` syntax by default. See [Selecting the Primary Key](#selecting-the-primary-key). +If no sorting is required, you can use the syntax `ORDER BY tuple()`. +Alternatively, if the setting `create_table_empty_primary_key_by_default` is enabled, `ORDER BY tuple()` is implicitly added to `CREATE TABLE` statements. See [Selecting a Primary Key](#selecting-a-primary-key). #### PARTITION BY @@ -87,100 +79,32 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da `PRIMARY KEY` — The primary key if it [differs from the sorting key](#choosing-a-primary-key-that-differs-from-the-sorting-key). Optional. 
-By default the primary key is the same as the sorting key (which is specified by the `ORDER BY` clause). Thus in most cases it is unnecessary to specify a separate `PRIMARY KEY` clause. +Specifying a sorting key (using the `ORDER BY` clause) implicitly specifies a primary key. +It is usually not necessary to specify the primary key in addition to the sorting key. #### SAMPLE BY -`SAMPLE BY` — An expression for sampling. Optional. +`SAMPLE BY` — A sampling expression. Optional. -If a sampling expression is used, the primary key must contain it. The result of a sampling expression must be an unsigned integer. Example: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`. +If specified, it must be contained in the primary key. +The sampling expression must result in an unsigned integer. + +Example: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`. #### TTL -`TTL` — A list of rules specifying storage duration of rows and defining logic of automatic parts movement [between disks and volumes](#table_engine-mergetree-multiple-volumes). Optional. +`TTL` — A list of rules that specify the storage duration of rows and the logic of automatic parts movement [between disks and volumes](#table_engine-mergetree-multiple-volumes). Optional. -Expression must have one `Date` or `DateTime` column as a result. Example: -``` -TTL date + INTERVAL 1 DAY -``` +Expression must result in a `Date` or `DateTime`, e.g. `TTL date + INTERVAL 1 DAY`. Type of the rule `DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'|GROUP BY` specifies an action to be done with the part if the expression is satisfied (reaches current time): removal of expired rows, moving a part (if expression is satisfied for all rows in a part) to specified disk (`TO DISK 'xxx'`) or to volume (`TO VOLUME 'xxx'`), or aggregating values in expired rows. Default type of the rule is removal (`DELETE`). List of multiple rules can be specified, but there should be no more than one `DELETE` rule. + For more details, see [TTL for columns and tables](#table_engine-mergetree-ttl). -### SETTINGS -Additional parameters that control the behavior of the `MergeTree` (optional): +#### SETTINGS -#### index_granularity - -`index_granularity` — Maximum number of data rows between the marks of an index. Default value: 8192. See [Data Storage](#mergetree-data-storage). - -#### index_granularity_bytes - -`index_granularity_bytes` — Maximum size of data granules in bytes. Default value: 10Mb. To restrict the granule size only by number of rows, set to 0 (not recommended). See [Data Storage](#mergetree-data-storage). - -#### min_index_granularity_bytes - -`min_index_granularity_bytes` — Min allowed size of data granules in bytes. Default value: 1024b. To provide a safeguard against accidentally creating tables with very low index_granularity_bytes. See [Data Storage](#mergetree-data-storage). - -#### enable_mixed_granularity_parts - -`enable_mixed_granularity_parts` — Enables or disables transitioning to control the granule size with the `index_granularity_bytes` setting. Before version 19.11, there was only the `index_granularity` setting for restricting granule size. The `index_granularity_bytes` setting improves ClickHouse performance when selecting data from tables with big rows (tens and hundreds of megabytes). If you have tables with big rows, you can enable this setting for the tables to improve the efficiency of `SELECT` queries. 
- -#### use_minimalistic_part_header_in_zookeeper - -`use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, then ZooKeeper stores less data. For more information, see the [setting description](/docs/en/operations/server-configuration-parameters/settings.md/#server-settings-use_minimalistic_part_header_in_zookeeper) in “Server configuration parameters”. - -#### min_merge_bytes_to_use_direct_io - -`min_merge_bytes_to_use_direct_io` — The minimum data volume for merge operation that is required for using direct I/O access to the storage disk. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If the volume exceeds `min_merge_bytes_to_use_direct_io` bytes, ClickHouse reads and writes the data to the storage disk using the direct I/O interface (`O_DIRECT` option). If `min_merge_bytes_to_use_direct_io = 0`, then direct I/O is disabled. Default value: `10 * 1024 * 1024 * 1024` bytes. - -#### merge_with_ttl_timeout - -`merge_with_ttl_timeout` — Minimum delay in seconds before repeating a merge with delete TTL. Default value: `14400` seconds (4 hours). -#### merge_with_recompression_ttl_timeout - -`merge_with_recompression_ttl_timeout` — Minimum delay in seconds before repeating a merge with recompression TTL. Default value: `14400` seconds (4 hours). - -#### try_fetch_recompressed_part_timeout - -`try_fetch_recompressed_part_timeout` — Timeout (in seconds) before starting merge with recompression. During this time ClickHouse tries to fetch recompressed part from replica which assigned this merge with recompression. Default value: `7200` seconds (2 hours). - -#### write_final_mark - -`write_final_mark` — Enables or disables writing the final index mark at the end of data part (after the last byte). Default value: 1. Don’t turn it off. - -#### merge_max_block_size - -`merge_max_block_size` — Maximum number of rows in block for merge operations. Default value: 8192. - -#### storage_policy - -`storage_policy` — Storage policy. See [Using Multiple Block Devices for Data Storage](#table_engine-mergetree-multiple-volumes). - -#### min_bytes_for_wide_part - -`min_bytes_for_wide_part`, `min_rows_for_wide_part` — Minimum number of bytes/rows in a data part that can be stored in `Wide` format. You can set one, both or none of these settings. See [Data Storage](#mergetree-data-storage). - -#### max_parts_in_total - -`max_parts_in_total` — Maximum number of parts in all partitions. - -#### max_compress_block_size - -`max_compress_block_size` — Maximum size of blocks of uncompressed data before compressing for writing to a table. You can also specify this setting in the global settings (see [max_compress_block_size](/docs/en/operations/settings/settings.md/#max-compress-block-size) setting). The value specified when table is created overrides the global value for this setting. - -#### min_compress_block_size - -`min_compress_block_size` — Minimum size of blocks of uncompressed data required for compression when writing the next mark. You can also specify this setting in the global settings (see [min_compress_block_size](/docs/en/operations/settings/settings.md/#min-compress-block-size) setting). The value specified when table is created overrides the global value for this setting. - -#### max_partitions_to_read - -`max_partitions_to_read` — Limits the maximum number of partitions that can be accessed in one query. 
You can also specify setting [max_partitions_to_read](/docs/en/operations/settings/merge-tree-settings.md/#max-partitions-to-read) in the global setting. - -#### allow_experimental_optimized_row_order - -`allow_experimental_optimized_row_order` - Experimental. Enables the optimization of the row order during inserts to improve the compressability of the data for compression codecs (e.g. LZ4). Analyzes and reorders the data, and thus increases the CPU overhead of inserts. +See [MergeTree Settings](../../../operations/settings/merge-tree-settings.md). **Example of Sections Setting** @@ -270,7 +194,7 @@ ClickHouse does not require a unique primary key. You can insert multiple rows w You can use `Nullable`-typed expressions in the `PRIMARY KEY` and `ORDER BY` clauses but it is strongly discouraged. To allow this feature, turn on the [allow_nullable_key](/docs/en/operations/settings/settings.md/#allow-nullable-key) setting. The [NULLS_LAST](/docs/en/sql-reference/statements/select/order-by.md/#sorting-of-special-values) principle applies for `NULL` values in the `ORDER BY` clause. -### Selecting the Primary Key {#selecting-the-primary-key} +### Selecting a Primary Key {#selecting-a-primary-key} The number of columns in the primary key is not explicitly limited. Depending on the data structure, you can include more or fewer columns in the primary key. This may: diff --git a/docs/en/engines/table-engines/special/file.md b/docs/en/engines/table-engines/special/file.md index 0d422f64762..957b18b5305 100644 --- a/docs/en/engines/table-engines/special/file.md +++ b/docs/en/engines/table-engines/special/file.md @@ -102,6 +102,7 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da - `_path` — Path to the file. Type: `LowCardinalty(String)`. - `_file` — Name of the file. Type: `LowCardinalty(String)`. - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. +- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. ## Settings {#settings} diff --git a/docs/en/engines/table-engines/special/url.md b/docs/en/engines/table-engines/special/url.md index f6183a779ae..c906830d0e9 100644 --- a/docs/en/engines/table-engines/special/url.md +++ b/docs/en/engines/table-engines/special/url.md @@ -108,6 +108,7 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da - `_path` — Path to the `URL`. Type: `LowCardinalty(String)`. - `_file` — Resource name of the `URL`. Type: `LowCardinalty(String)`. - `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. +- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. ## Storage Settings {#storage-settings} diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 66d5bd2e574..ffdd7e2ca25 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -480,7 +480,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe - [input_format_csv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_csv_detect_header) - automatically detect header with names and types in CSV format. Default value - `true`. - [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. 
Default value - `false`. - [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`. -- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/# input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`. +- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`. - [input_format_csv_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_variable_number_of_columns) - allow variable number of columns in CSV format, ignore extra columns and use default values on missing columns. Default value - `false`. - [input_format_csv_use_default_on_bad_values](/docs/en/operations/settings/settings-formats.md/#input_format_csv_use_default_on_bad_values) - Allow to set default value to column when CSV field deserialization failed on bad value. Default value - `false`. - [input_format_csv_try_infer_numbers_from_strings](/docs/en/operations/settings/settings-formats.md/#input_format_csv_try_infer_numbers_from_strings) - Try to infer numbers from string fields while schema inference. Default value - `false`. @@ -1490,6 +1490,8 @@ Differs from [PrettySpaceNoEscapes](#prettyspacenoescapes) in that up to 10,000 - [output_format_pretty_color](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_color) - use ANSI escape sequences to paint colors in Pretty formats. Default value - `true`. - [output_format_pretty_grid_charset](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_grid_charset) - Charset for printing grid borders. Available charsets: ASCII, UTF-8. Default value - `UTF-8`. - [output_format_pretty_row_numbers](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_row_numbers) - Add row numbers before each row for pretty output format. Default value - `true`. +- [output_format_pretty_display_footer_column_names](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_display_footer_column_names) - Display column names in the footer if table contains many rows. Default value - `true`. +- [output_format_pretty_display_footer_column_names_min_rows](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_display_footer_column_names_min_rows) - Sets the minimum number of rows for which a footer will be displayed if [output_format_pretty_display_footer_column_names](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_display_footer_column_names) is enabled. Default value - 50. ## RowBinary {#rowbinary} @@ -2165,6 +2167,8 @@ To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/t - [output_format_parquet_fixed_string_as_fixed_byte_array](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_fixed_string_as_fixed_byte_array) - use Parquet FIXED_LENGTH_BYTE_ARRAY type instead of Binary/String for FixedString columns. Default value - `true`. - [output_format_parquet_version](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_version) - The version of Parquet format used in output format. Default value - `2.latest`. 
- [output_format_parquet_compression_method](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_compression_method) - compression method used in output Parquet format. Default value - `lz4`. +- [input_format_parquet_max_block_size](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_max_block_size) - Max block row size for parquet reader. Default value - `65409`. +- [input_format_parquet_prefer_block_bytes](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_prefer_block_bytes) - Average block bytes output by parquet reader. Default value - `16744704`. ## ParquetMetadata {data-format-parquet-metadata} diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md index eb1a3ba1dbc..f5b6326fa96 100644 --- a/docs/en/interfaces/http.md +++ b/docs/en/interfaces/http.md @@ -508,7 +508,7 @@ Now `rule` can configure `method`, `headers`, `url`, `handler`: - `headers` are responsible for matching the header part of the HTTP request. It is compatible with RE2’s regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the header portion of the HTTP request. -- `handler` contains the main processing part. Now `handler` can configure `type`, `status`, `content_type`, `response_content`, `query`, `query_param_name`. +- `handler` contains the main processing part. Now `handler` can configure `type`, `status`, `content_type`, `http_response_headers`, `response_content`, `query`, `query_param_name`. `type` currently supports three types: [predefined_query_handler](#predefined_query_handler), [dynamic_query_handler](#dynamic_query_handler), [static](#static). - `query` — use with `predefined_query_handler` type, executes query when the handler is called. @@ -519,6 +519,8 @@ Now `rule` can configure `method`, `headers`, `url`, `handler`: - `content_type` — use with any type, response [content-type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type). + - `http_response_headers` — use with any type, response headers map. Could be used to set content type as well. + - `response_content` — use with `static` type, response content sent to client, when using the prefix ‘file://’ or ‘config://’, find the content from the file or configuration sends to client. Next are the configuration methods for different `type`. @@ -616,6 +618,33 @@ Return a message. static 402 text/html; charset=UTF-8 + + en + 43 + + Say Hi! + + + + +``` + +`http_response_headers` could be used to set content type instead of `content_type`. + +``` xml + + + GET + xxx + /hi + + static + 402 + + text/html; charset=UTF-8 + en + 43 + Say Hi! @@ -696,6 +725,9 @@ Find the content from the file send to client. static text/html; charset=UTF-8 + + 737060cd8c284d8af7ad3082f209582d + file:///absolute_path_file.html @@ -706,6 +738,9 @@ Find the content from the file send to client. static text/html; charset=UTF-8 + + 737060cd8c284d8af7ad3082f209582d + file://./relative_path_file.html diff --git a/docs/en/operations/allocation-profiling.md b/docs/en/operations/allocation-profiling.md index 64b4106a7e1..574e1ae2ff3 100644 --- a/docs/en/operations/allocation-profiling.md +++ b/docs/en/operations/allocation-profiling.md @@ -59,10 +59,10 @@ For that, we need to use `jemalloc`'s tool called [jeprof](https://github.com/je If that’s the case, we recommend installing an [alternative implementation](https://github.com/gimli-rs/addr2line) of the tool. 
 ```
-git clone https://github.com/gimli-rs/addr2line
+git clone https://github.com/gimli-rs/addr2line.git --depth=1 --branch=0.23.0
 cd addr2line
-cargo b --examples -r
-cp ./target/release/examples/addr2line path/to/current/addr2line
+cargo build --features bin --release
+cp ./target/release/addr2line path/to/current/addr2line
 ```
 :::
diff --git a/docs/en/operations/analyzer.md b/docs/en/operations/analyzer.md
new file mode 100644
index 00000000000..298c6dacd06
--- /dev/null
+++ b/docs/en/operations/analyzer.md
@@ -0,0 +1,194 @@
+---
+slug: /en/operations/analyzer
+sidebar_label: Analyzer
+title: Analyzer
+description: Details about ClickHouse's query analyzer
+keywords: [analyzer]
+---
+
+# Analyzer
+
+
+
+## Known incompatibilities
+
+In ClickHouse version `24.3`, the new query analyzer was enabled by default.
+While it fixes a large number of bugs and introduces new optimizations, it also introduces some breaking changes in ClickHouse behaviour. Please read the following changes to determine how to rewrite your queries for the new analyzer.
+
+### Invalid queries are no longer optimized
+
+The previous query planning infrastructure applied AST-level optimizations before the query validation step.
+Optimizations could rewrite the initial query so it becomes valid and can be executed.
+
+In the new analyzer, query validation takes place before the optimization step.
+This means that invalid queries that were possible to execute before are now unsupported.
+In such cases, the query must be fixed manually.
+
+**Example 1:**
+
+```sql
+SELECT number
+FROM numbers(1)
+GROUP BY toString(number)
+```
+
+This query uses column `number` in the projection list when only `toString(number)` is available after the aggregation.
+In the old analyzer, `GROUP BY toString(number)` was optimized into `GROUP BY number`, making the query valid.
+
+**Example 2:**
+
+```sql
+SELECT
+    number % 2 AS n,
+    sum(number)
+FROM numbers(10)
+GROUP BY n
+HAVING number > 5
+```
+
+The same problem occurs in this query: column `number` is used after aggregation with another key.
+The previous query analyzer fixed this query by moving the `number > 5` filter from the `HAVING` clause to the `WHERE` clause.
+
+To fix the query, you should move all conditions that apply to non-aggregated columns to the `WHERE` section to conform to standard SQL syntax:
+```sql
+SELECT
+    number % 2 AS n,
+    sum(number)
+FROM numbers(10)
+WHERE number > 5
+GROUP BY n
+```
+
+### CREATE VIEW with invalid query
+
+The new analyzer always performs type-checking.
+Previously, it was possible to create a `VIEW` with an invalid `SELECT` query. It would then fail during the first `SELECT` or `INSERT` (in the case of `MATERIALIZED VIEW`).
+
+Now, it's not possible to create such `VIEW`s anymore.
+
+**Example:**
+
+```sql
+CREATE TABLE source (data String) ENGINE=MergeTree ORDER BY tuple();
+
+CREATE VIEW some_view
+AS SELECT JSONExtract(data, 'test', 'DateTime64(3)')
+FROM source;
+```
+
+### Known incompatibilities of the `JOIN` clause
+
+#### Join using column from projection
+
+An alias from the `SELECT` list cannot be used as a `JOIN USING` key by default.
+
+A new setting, `analyzer_compatibility_join_using_top_level_identifier`, when enabled, alters the behavior of `JOIN USING` to prefer to resolve identifiers based on expressions from the projection list of the `SELECT` query, rather than using the columns from the left table directly.
+
+**Example:**
+
+```sql
+SELECT a + 1 AS b, t2.s
+FROM Values('a UInt64, b UInt64', (1, 1)) AS t1
+JOIN Values('b UInt64, s String', (1, 'one'), (2, 'two')) t2
+USING (b);
+```
+
+With `analyzer_compatibility_join_using_top_level_identifier` set to `true`, the join condition is interpreted as `t1.a + 1 = t2.b`, matching the behavior of earlier versions. So, the result will be `2, 'two'`.
+When the setting is `false`, the join condition defaults to `t1.b = t2.b`, and the query will return `2, 'one'`.
+If `b` is not present in `t1`, the query will fail with an error.
+
+#### Changes in behavior with `JOIN USING` and `ALIAS`/`MATERIALIZED` columns
+
+In the new analyzer, using `*` in a `JOIN USING` query that involves `ALIAS` or `MATERIALIZED` columns will include those columns in the result set by default.
+
+**Example:**
+
+```sql
+CREATE TABLE t1 (id UInt64, payload ALIAS sipHash64(id)) ENGINE = MergeTree ORDER BY id;
+INSERT INTO t1 VALUES (1), (2);
+
+CREATE TABLE t2 (id UInt64, payload ALIAS sipHash64(id)) ENGINE = MergeTree ORDER BY id;
+INSERT INTO t2 VALUES (2), (3);
+
+SELECT * FROM t1
+FULL JOIN t2 USING (payload);
+```
+
+In the new analyzer, the result of this query will include the `payload` column along with `id` from both tables. In contrast, the previous analyzer would only include these `ALIAS` columns if specific settings (`asterisk_include_alias_columns` or `asterisk_include_materialized_columns`) were enabled, and the columns might appear in a different order.
+
+To ensure consistent and expected results, especially when migrating old queries to the new analyzer, it is advisable to specify columns explicitly in the `SELECT` clause rather than using `*`.
+
+#### Handling of Type Modifiers for columns in `USING` Clause
+
+In the new version of the analyzer, the rules for determining the common supertype for columns specified in the `USING` clause have been standardized to produce more predictable outcomes, especially when dealing with type modifiers like `LowCardinality` and `Nullable`.
+
+- `LowCardinality(T)` and `T`: When a column of type `LowCardinality(T)` is joined with a column of type `T`, the resulting common supertype will be `T`, effectively discarding the `LowCardinality` modifier.
+
+- `Nullable(T)` and `T`: When a column of type `Nullable(T)` is joined with a column of type `T`, the resulting common supertype will be `Nullable(T)`, ensuring that the nullable property is preserved.
+
+**Example:**
+
+```sql
+SELECT id, toTypeName(id) FROM Values('id LowCardinality(String)', ('a')) AS t1
+FULL OUTER JOIN Values('id String', ('b')) AS t2
+USING (id);
+```
+
+In this query, the common supertype for `id` is determined as `String`, discarding the `LowCardinality` modifier from `t1`.
+
+### Projection column names changes
+
+During projection names computation, aliases are not substituted.
+
+```sql
+SELECT
+    1 + 1 AS x,
+    x + 1
+SETTINGS allow_experimental_analyzer = 0
+FORMAT PrettyCompact
+
+   ┌─x─┬─plus(plus(1, 1), 1)─┐
+1. │ 2 │                   3 │
+   └───┴─────────────────────┘
+
+SELECT
+    1 + 1 AS x,
+    x + 1
+SETTINGS allow_experimental_analyzer = 1
+FORMAT PrettyCompact
+
+   ┌─x─┬─plus(x, 1)─┐
+1. │ 2 │          3 │
+   └───┴────────────┘
+```
+
+### Incompatible function arguments types
+
+In the new analyzer, type inference happens during initial query analysis.
+This change means that type checks are done before short-circuit evaluation; thus, `if` function arguments must always have a common supertype.
+
+**Example:**
+
+The following query fails with `There is no supertype for types Array(UInt8), String because some of them are Array and some of them are not`:
+
+```sql
+SELECT toTypeName(if(0, [2, 3, 4], 'String'))
+```
+
+### Heterogeneous clusters
+
+The new analyzer significantly changed the communication protocol between servers in the cluster. Thus, it's impossible to run distributed queries on servers with different `allow_experimental_analyzer` setting values.
+
+### Mutations are interpreted by the previous analyzer
+
+Mutations still use the old analyzer.
+This means some new ClickHouse SQL features can't be used in mutations. For example, the `QUALIFY` clause.
+Status can be checked [here](https://github.com/ClickHouse/ClickHouse/issues/61563).
+
+### Unsupported features
+
+The list of features the new analyzer currently does not support:
+
+- Annoy index.
+- Hypothesis index. Work in progress [here](https://github.com/ClickHouse/ClickHouse/pull/48381).
+- Window view is not supported. There are no plans to support it in the future.
diff --git a/docs/en/operations/named-collections.md b/docs/en/operations/named-collections.md
index c9d94dd95ee..91438cfb675 100644
--- a/docs/en/operations/named-collections.md
+++ b/docs/en/operations/named-collections.md
@@ -67,6 +67,23 @@ To manage named collections with DDL a user must have the `named_control_collect
 In the above example the `password_sha256_hex` value is the hexadecimal representation of the SHA256 hash of the password. This configuration for the user `default` has the attribute `replace=true` as in the default configuration has a plain text `password` set, and it is not possible to have both plain text and sha256 hex passwords set for a user.
 :::
 
+### Storage for named collections
+
+Named collections can be stored either on local disk or in ZooKeeper/Keeper. By default, local storage is used.
+
+To configure named collection storage in Keeper, add a `type` (equal to either `keeper` or `zookeeper`) and a `path` (the path in Keeper where named collections will be stored) to the `named_collections_storage` section of the configuration file:
+```
+<clickhouse>
+    <named_collections_storage>
+        <type>zookeeper</type>
+        <path>/named_collections_path/</path>
+        <update_timeout_ms>1000</update_timeout_ms>
+    </named_collections_storage>
+</clickhouse>
+```
+
+The optional configuration parameter `update_timeout_ms` defaults to `5000`.
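+
+With Keeper storage configured, collections created through DDL are persisted under the configured `path` instead of on local disk. As a sketch (the collection name and keys are illustrative):
+
+```sql
+CREATE NAMED COLLECTION remote_host_collection AS
+host = 'remote-host',
+port = '9000';
+```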
+
 
 ## Storing named collections in configuration files
 
 ### XML example
@@ -443,3 +460,59 @@ SELECT dictGet('dict', 'b', 1);
 │ a │
 └─────────────────────────┘
 ```
+
+## Named collections for accessing Kafka
+
+For a description of the parameters, see [Kafka](../engines/table-engines/integrations/kafka.md).
+
+### DDL example
+
+```sql
+CREATE NAMED COLLECTION my_kafka_cluster AS
+kafka_broker_list = 'localhost:9092',
+kafka_topic_list = 'kafka_topic',
+kafka_group_name = 'consumer_group',
+kafka_format = 'JSONEachRow',
+kafka_max_block_size = '1048576';
+```
+
+### XML example
+
+```xml
+<clickhouse>
+    <named_collections>
+        <my_kafka_cluster>
+            <kafka_broker_list>localhost:9092</kafka_broker_list>
+            <kafka_topic_list>kafka_topic</kafka_topic_list>
+            <kafka_group_name>consumer_group</kafka_group_name>
+            <kafka_format>JSONEachRow</kafka_format>
+            <kafka_max_block_size>1048576</kafka_max_block_size>
+        </my_kafka_cluster>
+    </named_collections>
+</clickhouse>
+```
+
+### Example of using named collections with a Kafka table
+
+Both of the following examples use the same named collection `my_kafka_cluster`:
+
+```sql
+CREATE TABLE queue
+(
+    timestamp UInt64,
+    level String,
+    message String
+)
+ENGINE = Kafka(my_kafka_cluster);
+
+CREATE TABLE queue
+(
+    timestamp UInt64,
+    level String,
+    message String
+)
+ENGINE = Kafka(my_kafka_cluster)
+SETTINGS kafka_num_consumers = 4,
+         kafka_thread_per_consumer = 1;
+```
diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md
index ee3ec5126a2..ce95d475c28 100644
--- a/docs/en/operations/server-configuration-parameters/settings.md
+++ b/docs/en/operations/server-configuration-parameters/settings.md
@@ -2924,6 +2924,8 @@ Define proxy servers for HTTP and HTTPS requests, currently supported by S3 stor
 
 There are three ways to define proxy servers: environment variables, proxy lists, and remote proxy resolvers.
 
+Bypassing proxy servers for specific hosts is also supported with the use of `no_proxy`.
+
 ### Environment variables
 
 The `http_proxy` and `https_proxy` environment variables allow you to specify a
@@ -3033,6 +3035,29 @@ This also allows a mix of resolver types can be used.
 
 By default, tunneling (i.e, `HTTP CONNECT`) is used to make `HTTPS` requests over `HTTP` proxy. This setting can be used to disable it.
 
+### no_proxy
+By default, all requests will go through the proxy. In order to disable it for specific hosts, the `no_proxy` variable must be set.
+It can be set inside the `<proxy>` clause for list and remote resolvers and as an environment variable for environment resolver.
+It supports IP addresses, domains, subdomains and `'*'` wildcard for full bypass. Leading dots are stripped just like curl does.
+
+Example:
+
+The below configuration bypasses proxy requests to `clickhouse.cloud` and all of its subdomains (e.g, `auth.clickhouse.cloud`).
+The same applies to GitLab, even though it has a leading dot. Both `gitlab.com` and `about.gitlab.com` would bypass the proxy.
+
+``` xml
+<proxy>
+    <no_proxy>clickhouse.cloud,.gitlab.com</no_proxy>
+    <http>
+        <uri>http://proxy1</uri>
+        <uri>http://proxy2:3128</uri>
+    </http>
+    <https>
+        <uri>http://proxy1:3128</uri>
+    </https>
+</proxy>
+```
+
 ## max_materialized_views_count_for_table {#max_materialized_views_count_for_table}
 
 A limit on the number of materialized views attached to a table.
@@ -3059,3 +3084,21 @@ This setting is only necessary for the migration period and will become obsolete
 Type: Bool
 
 Default: 1
+
+## merge_workload {#merge_workload}
+
+Used to regulate how resources are utilized and shared between merges and other workloads. Specified value is used as `workload` setting value for all background merges. Can be overridden by a merge tree setting.
+
+Default value: "default"
+
+**See Also**
+- [Workload Scheduling](/docs/en/operations/workload-scheduling.md)
+
+## mutation_workload {#mutation_workload}
+
+Used to regulate how resources are utilized and shared between mutations and other workloads. Specified value is used as `workload` setting value for all background mutations. Can be overridden by a merge tree setting.
+
+Default value: "default"
+
+**See Also**
+- [Workload Scheduling](/docs/en/operations/workload-scheduling.md)
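+
+For example, a table can opt out of the server-wide value by setting the merge tree setting of the same name (a sketch; the workload name is illustrative):
+
+```sql
+CREATE TABLE t
+(
+    key UInt64
+)
+ENGINE = MergeTree
+ORDER BY key
+SETTINGS merge_workload = 'production_merges';
+```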
diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md
index c3f303dcd38..9879ee35612 100644
--- a/docs/en/operations/settings/merge-tree-settings.md
+++ b/docs/en/operations/settings/merge-tree-settings.md
@@ -3,9 +3,126 @@ slug: /en/operations/settings/merge-tree-settings
 title: "MergeTree tables settings"
 ---
 
-The values of `merge_tree` settings (for all MergeTree tables) can be viewed in the table `system.merge_tree_settings`, they can be overridden in `config.xml` in the `merge_tree` section, or set in the `SETTINGS` section of each table.
+System table `system.merge_tree_settings` shows the globally set MergeTree settings.
 
-These are example overrides for `max_suspicious_broken_parts`:
+MergeTree settings can be set in the `merge_tree` section of the server config file, or specified for each `MergeTree` table individually in
+the `SETTINGS` clause of the `CREATE TABLE` statement.
+
+Example for customizing setting `max_suspicious_broken_parts`:
+
+Configure the default for all `MergeTree` tables in the server configuration file:
+
+``` text
+<merge_tree>
+    <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
+</merge_tree>
+```
+
+Set for a particular table:
+
+``` sql
+CREATE TABLE tab
+(
+    `A` Int64
+)
+ENGINE = MergeTree
+ORDER BY tuple()
+SETTINGS max_suspicious_broken_parts = 500;
+```
+
+Change the settings for a particular table using `ALTER TABLE ... MODIFY SETTING`:
+
+```sql
+ALTER TABLE tab MODIFY SETTING max_suspicious_broken_parts = 100;
+
+-- reset to global default (value from system.merge_tree_settings)
+ALTER TABLE tab RESET SETTING max_suspicious_broken_parts;
+```
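+
+To see which MergeTree settings were changed from their defaults, a query along these lines can be used:
+
+```sql
+-- `changed` is 1 for settings that differ from the built-in default
+SELECT name, value
+FROM system.merge_tree_settings
+WHERE changed;
+```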
+
+## index_granularity
+
+Maximum number of data rows between the marks of an index.
+
+Default value: 8192.
+
+## index_granularity_bytes
+
+Maximum size of data granules in bytes.
+
+Default value: 10Mb.
+
+To restrict the granule size only by number of rows, set to 0 (not recommended).
+
+## min_index_granularity_bytes
+
+Min allowed size of data granules in bytes.
+
+Default value: 1024b.
+
+This provides a safeguard against accidentally creating tables with a very low `index_granularity_bytes` value.
+
+## enable_mixed_granularity_parts
+
+Enables or disables transitioning to control the granule size with the `index_granularity_bytes` setting. Before version 19.11, there was only the `index_granularity` setting for restricting granule size. The `index_granularity_bytes` setting improves ClickHouse performance when selecting data from tables with big rows (tens and hundreds of megabytes). If you have tables with big rows, you can enable this setting for the tables to improve the efficiency of `SELECT` queries.
+
+## use_minimalistic_part_header_in_zookeeper
+
+Storage method of the data parts headers in ZooKeeper. If enabled, ZooKeeper stores less data. For details, see [here](../server-configuration-parameters/settings.md/#server-settings-use_minimalistic_part_header_in_zookeeper).
+
+## min_merge_bytes_to_use_direct_io
+
+The minimum data volume for merge operation that is required for using direct I/O access to the storage disk.
+When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged.
+If the volume exceeds `min_merge_bytes_to_use_direct_io` bytes, ClickHouse reads and writes the data to the storage disk using the direct I/O interface (`O_DIRECT` option).
+If `min_merge_bytes_to_use_direct_io = 0`, then direct I/O is disabled.
+
+Default value: `10 * 1024 * 1024 * 1024` bytes.
+
+## merge_with_ttl_timeout
+
+Minimum delay in seconds before repeating a merge with delete TTL.
+
+Default value: `14400` seconds (4 hours).
+
+## merge_with_recompression_ttl_timeout
+
+Minimum delay in seconds before repeating a merge with recompression TTL.
+
+Default value: `14400` seconds (4 hours).
+
+## write_final_mark
+
+Enables or disables writing the final index mark at the end of data part (after the last byte).
+
+Default value: 1.
+
+Do not change this setting.
+
+## storage_policy
+
+Storage policy.
+
+## min_bytes_for_wide_part
+
+Minimum number of bytes/rows in a data part that can be stored in `Wide` format.
+You can set one, both or none of these settings.
+
+## max_compress_block_size
+
+Maximum size of blocks of uncompressed data before compressing for writing to a table.
+You can also specify this setting in the global settings (see [max_compress_block_size](/docs/en/operations/settings/settings.md/#max-compress-block-size) setting).
+The value specified when table is created overrides the global value for this setting.
+
+## min_compress_block_size
+
+Minimum size of blocks of uncompressed data required for compression when writing the next mark.
+You can also specify this setting in the global settings (see [min_compress_block_size](/docs/en/operations/settings/settings.md/#min-compress-block-size) setting).
+The value specified when table is created overrides the global value for this setting.
+
+## max_partitions_to_read
+
+Limits the maximum number of partitions that can be accessed in one query.
+You can also specify the setting [max_partitions_to_read](/docs/en/operations/settings/merge-tree-settings.md/#max-partitions-to-read) globally.
 
 ## max_suspicious_broken_parts
 
@@ -17,37 +134,6 @@ Possible values:
 
 Default value: 100.
 
-Override example in `config.xml`:
-
-``` text
-<merge_tree>
-    <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
-</merge_tree>
-```
-
-An example to set in `SETTINGS` for a particular table:
-
-``` sql
-CREATE TABLE foo
-(
-    `A` Int64
-)
-ENGINE = MergeTree
-ORDER BY tuple()
-SETTINGS max_suspicious_broken_parts = 500;
-```
-
-An example of changing the settings for a specific table with the `ALTER TABLE ... MODIFY SETTING` command:
-
-``` sql
-ALTER TABLE foo
-    MODIFY SETTING max_suspicious_broken_parts = 100;
-
--- reset to default (use value from system.merge_tree_settings)
-ALTER TABLE foo
-    RESET SETTING max_suspicious_broken_parts;
-```
-
 ## parts_to_throw_insert {#parts-to-throw-insert}
 
 If the number of active parts in a single partition exceeds the `parts_to_throw_insert` value, `INSERT` is interrupted with the `Too many parts (N). Merges are processing significantly slower than inserts` exception.
@@ -301,6 +387,8 @@ Default value: 10800
 
 ## try_fetch_recompressed_part_timeout
 
+Timeout (in seconds) before starting merge with recompression. During this time ClickHouse tries to fetch recompressed part from replica which assigned this merge with recompression.
+
 Recompression works slow in most cases, so we don't start merge with recompression until this timeout and trying to fetch recompressed part from replica which assigned this merge with recompression.
 
 Possible values:
@@ -886,10 +974,30 @@ Default value: false
 
 - [exclude_deleted_rows_for_part_size_in_merge](#exclude_deleted_rows_for_part_size_in_merge) setting
 
-### allow_experimental_optimized_row_order
+## merge_workload
+
+Used to regulate how resources are utilized and shared between merges and other workloads. Specified value is used as `workload` setting value for background merges of this table. If not specified (empty string), then server setting `merge_workload` is used instead.
+
+Default value: an empty string
+
+**See Also**
+- [Workload Scheduling](/docs/en/operations/workload-scheduling.md)
+
+## mutation_workload
+
+Used to regulate how resources are utilized and shared between mutations and other workloads. Specified value is used as `workload` setting value for background mutations of this table. If not specified (empty string), then server setting `mutation_workload` is used instead.
+
+Default value: an empty string
+
+**See Also**
+- [Workload Scheduling](/docs/en/operations/workload-scheduling.md)
+
+### optimize_row_order
 
 Controls if the row order should be optimized during inserts to improve the compressability of the newly inserted table part.
+Only has an effect for ordinary MergeTree-engine tables. Does nothing for specialized MergeTree engine tables (e.g. CollapsingMergeTree).
+
 MergeTree tables are (optionally) compressed using [compression codecs](../../sql-reference/statements/create/table.md#column_compression_codec). Generic compression codecs such as LZ4 and ZSTD achieve maximum compression rates if the data exposes patterns. Long runs of the same value typically compress very well.
diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md
index 1a27b350652..670c9c6cbf1 100644
--- a/docs/en/operations/settings/settings-formats.md
+++ b/docs/en/operations/settings/settings-formats.md
@@ -1417,6 +1417,17 @@ Compression method used in output Parquet format. Supported codecs: `snappy`, `l
 
 Default value: `lz4`.
 
+### input_format_parquet_max_block_size {#input_format_parquet_max_block_size}
+Max block row size for the parquet reader. By controlling the number of rows in each block, you can control the memory usage,
+and for operators that cache blocks, you can improve the accuracy of the operator's memory control.
+
+Default value: `65409`.
+
+### input_format_parquet_prefer_block_bytes {#input_format_parquet_prefer_block_bytes}
+Average block bytes output by the parquet reader. Lowering this value when reading highly compressed parquet data relieves memory pressure.
+
+Default value: `65409 * 256 = 16744704`.
+
 ## Hive format settings {#hive-format-settings}
 
 ### input_format_hive_text_fields_delimiter {#input_format_hive_text_fields_delimiter}
@@ -1695,6 +1706,43 @@ Result:
 └────────────┘
 ```
 
+## output_format_pretty_display_footer_column_names
+
+Display column names in the footer if there are many table rows.
+
+Possible values:
+
+- 0 — No column names are displayed in the footer.
+- 1 — Column names are displayed in the footer if row count is greater than or equal to the threshold value set by [output_format_pretty_display_footer_column_names_min_rows](#output_format_pretty_display_footer_column_names_min_rows) (50 by default).
+
+Default value: `1`.
+
+**Example**
+
+Query:
+
+```sql
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 1000);
+```
+
+Result:
+
+```response
+       ┌─number─┬─toTypeName(number)─┐
+    1. │      0 │ UInt64             │
+    2. │      1 │ UInt64             │
+    3. │      2 │ UInt64             │
+    ...
+  999. │    998 │ UInt64             │
+ 1000. │    999 │ UInt64             │
+       └─number─┴─toTypeName(number)─┘
+```
+
+## output_format_pretty_display_footer_column_names_min_rows
+
+Sets the minimum number of rows for which a footer with column names will be displayed if setting [output_format_pretty_display_footer_column_names](#output_format_pretty_display_footer_column_names) is enabled.
+
+Default value: `50`.
+
 ## Template format settings {#template-format-settings}
 
 ### format_template_resultset {#format_template_resultset}
diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index b3e9da816ab..79d0ca4f151 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -1590,6 +1590,22 @@ Possible values:
 
 Default value: `default`.
 
+## parallel_replicas_custom_key_range_lower {#parallel_replicas_custom_key_range_lower}
+
+Allows the filter type `range` to split the work evenly between replicas based on the custom range `[parallel_replicas_custom_key_range_lower, INT_MAX]`.
+
+When used in conjunction with [parallel_replicas_custom_key_range_upper](#parallel_replicas_custom_key_range_upper), it lets the filter evenly split the work over replicas for the range `[parallel_replicas_custom_key_range_lower, parallel_replicas_custom_key_range_upper]`.
+
+Note: This setting will not cause any additional data to be filtered during query processing, rather it changes the points at which the range filter breaks up the range `[0, INT_MAX]` for parallel processing.
+
+## parallel_replicas_custom_key_range_upper {#parallel_replicas_custom_key_range_upper}
+
+Allows the filter type `range` to split the work evenly between replicas based on the custom range `[0, parallel_replicas_custom_key_range_upper]`. A value of 0 disables the upper bound, setting it to the max value of the custom key expression.
+
+When used in conjunction with [parallel_replicas_custom_key_range_lower](#parallel_replicas_custom_key_range_lower), it lets the filter evenly split the work over replicas for the range `[parallel_replicas_custom_key_range_lower, parallel_replicas_custom_key_range_upper]`.
+
+Note: This setting will not cause any additional data to be filtered during query processing, rather it changes the points at which the range filter breaks up the range `[0, INT_MAX]` for parallel processing.
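+
+For example, a sketch of how these settings might be combined with a custom key (the table, key, and bounds are illustrative):
+
+```sql
+SELECT count()
+FROM example_table
+SETTINGS max_parallel_replicas = 3,
+    parallel_replicas_custom_key = 'UserID',
+    parallel_replicas_custom_key_filter_type = 'range',
+    parallel_replicas_custom_key_range_lower = 0,
+    parallel_replicas_custom_key_range_upper = 100000;
+```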
+
 ## allow_experimental_parallel_reading_from_replicas
 
 Enables or disables sending SELECT queries to all replicas of a table (up to `max_parallel_replicas`). Reading is parallelized and coordinated dynamically. It will work for any kind of MergeTree table.
@@ -3172,7 +3188,7 @@ Default value: `0`.
 
 ## lightweight_deletes_sync {#lightweight_deletes_sync}
 
-The same as 'mutation_sync', but controls only execution of lightweight deletes. 
+The same as 'mutation_sync', but controls only execution of lightweight deletes.
 
 Possible values:
 
@@ -3862,6 +3878,10 @@ Possible values:
 
 Default value: 30.
 
+:::note
+It's applicable only to the default profile. A server reboot is required for the changes to take effect.
+:::
+
 ## http_receive_timeout {#http_receive_timeout}
 
 HTTP receive timeout (in seconds).
@@ -4616,16 +4636,6 @@ Read more about [memory overcommit](memory-overcommit.md).
 
 Default value: `1GiB`.
 
-## max_untracked_memory {#max_untracked_memory}
-Small allocations and deallocations are grouped in thread local variable and tracked or profiled only when amount (in absolute value) becomes larger than specified value. If the value is higher than 'memory_profiler_step' it will be effectively lowered to 'memory_profiler_step'.
-
-Default value: `4MiB`.
-
-## min_untracked_memory {#min_untracked_memory}
-Lower bound for untracked memory limit which is applied to threads with low memory consumption. Untracked memory limit equals thread memory usage divided by 16 and clamped between `min_untracked_memory` and `max_untracked_memory` for every thread. It guarantees that total untracked memory does not exceed 10% of current memory footprint even with a lot of small threads. To disable dynamic limit for untracked memory set value `4MiB`.
-
-Default value: `4KiB`.
-
 ## Schema Inference settings
 
 See [schema inference](../../interfaces/schema-inference.md#schema-inference-modes) documentation for more details.
diff --git a/docs/en/operations/system-tables/asynchronous_metrics.md b/docs/en/operations/system-tables/asynchronous_metrics.md
index 81725b97e41..762d187917c 100644
--- a/docs/en/operations/system-tables/asynchronous_metrics.md
+++ b/docs/en/operations/system-tables/asynchronous_metrics.md
@@ -639,6 +639,10 @@ An internal metric of the low-level memory allocator (jemalloc). See https://jem
 
 An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html
 
+### jemalloc.prof.active
+
+An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html
+
 **See Also**
 
 - [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
diff --git a/docs/en/operations/utilities/odbc-bridge.md b/docs/en/operations/utilities/odbc-bridge.md
index abb8860880e..eb849c6b6ae 100644
--- a/docs/en/operations/utilities/odbc-bridge.md
+++ b/docs/en/operations/utilities/odbc-bridge.md
@@ -18,7 +18,7 @@ This tool works via HTTP, not via pipes, shared memory, or TCP because:
 However it can be used as standalone tool from command line with the following parameters in POST-request URL:
 - `connection_string` -- ODBC connection string.
-- `columns` -- columns in ClickHouse NamesAndTypesList format, name in backticks,
+- `sample_block` -- columns description in ClickHouse NamesAndTypesList format, name in backticks,
   type as string. Name and type are space separated, rows separated with newline.
 - `max_block_size` -- optional parameter, sets maximum size of single block.
diff --git a/docs/en/operations/workload-scheduling.md b/docs/en/operations/workload-scheduling.md
index 24149099892..08629492ec6 100644
--- a/docs/en/operations/workload-scheduling.md
+++ b/docs/en/operations/workload-scheduling.md
@@ -47,6 +47,8 @@ Example:
 
 Queries can be marked with setting `workload` to distinguish different workloads. If `workload` is not set, than value "default" is used. Note that you are able to specify the other value using settings profiles. Setting constraints can be used to make `workload` constant if you want all queries from the user to be marked with fixed value of `workload` setting.
 
+It is possible to assign a `workload` setting for background activities. Merges and mutations use the `merge_workload` and `mutation_workload` server settings, respectively. These values can also be overridden for specific tables using the `merge_workload` and `mutation_workload` merge tree settings.
+
 Let's consider an example of a system with two different workloads: "production" and "development".
```sql @@ -151,6 +153,9 @@ Example: ``` - ## See also - [system.scheduler](/docs/en/operations/system-tables/scheduler.md) + - [merge_workload](/docs/en/operations/settings/merge-tree-settings.md#merge_workload) merge tree setting + - [merge_workload](/docs/en/operations/server-configuration-parameters/settings.md#merge_workload) global server setting + - [mutation_workload](/docs/en/operations/settings/merge-tree-settings.md#mutation_workload) merge tree setting + - [mutation_workload](/docs/en/operations/server-configuration-parameters/settings.md#mutation_workload) global server setting diff --git a/docs/en/sql-reference/aggregate-functions/combinators.md b/docs/en/sql-reference/aggregate-functions/combinators.md index 8ccc5e292b5..e30aa66b3b3 100644 --- a/docs/en/sql-reference/aggregate-functions/combinators.md +++ b/docs/en/sql-reference/aggregate-functions/combinators.md @@ -106,8 +106,8 @@ To work with these states, use: - [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) table engine. - [finalizeAggregation](../../sql-reference/functions/other-functions.md#function-finalizeaggregation) function. - [runningAccumulate](../../sql-reference/functions/other-functions.md#runningaccumulate) function. -- [-Merge](#aggregate_functions_combinators-merge) combinator. -- [-MergeState](#aggregate_functions_combinators-mergestate) combinator. +- [-Merge](#-merge) combinator. +- [-MergeState](#-mergestate) combinator. ## -Merge diff --git a/docs/en/sql-reference/aggregate-functions/parametric-functions.md b/docs/en/sql-reference/aggregate-functions/parametric-functions.md index 1dc89b8dcf9..093d88f939f 100644 --- a/docs/en/sql-reference/aggregate-functions/parametric-functions.md +++ b/docs/en/sql-reference/aggregate-functions/parametric-functions.md @@ -82,10 +82,12 @@ FROM In this case, you should remember that you do not know the histogram bin borders. -## sequenceMatch(pattern)(timestamp, cond1, cond2, ...) +## sequenceMatch Checks whether the sequence contains an event chain that matches the pattern. +**Syntax** + ``` sql sequenceMatch(pattern)(timestamp, cond1, cond2, ...) ``` @@ -102,7 +104,7 @@ Events that occur at the same second may lay in the sequence in an undefined ord **Parameters** -- `pattern` — Pattern string. See [Pattern syntax](#sequence-function-pattern-syntax). +- `pattern` — Pattern string. See [Pattern syntax](#sequencematch). **Returned values** @@ -170,9 +172,9 @@ SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM **See Also** -- [sequenceCount](#function-sequencecount) +- [sequenceCount](#sequencecount) -## sequenceCount(pattern)(time, cond1, cond2, ...) +## sequenceCount Counts the number of event chains that matched the pattern. The function searches event chains that do not overlap. It starts to search for the next chain after the current chain is matched. @@ -180,6 +182,8 @@ Counts the number of event chains that matched the pattern. The function searche Events that occur at the same second may lay in the sequence in an undefined order affecting the result. ::: +**Syntax** + ``` sql sequenceCount(pattern)(timestamp, cond1, cond2, ...) ``` @@ -192,7 +196,7 @@ sequenceCount(pattern)(timestamp, cond1, cond2, ...) **Parameters** -- `pattern` — Pattern string. See [Pattern syntax](#sequence-function-pattern-syntax). +- `pattern` — Pattern string. See [Pattern syntax](#sequencematch). 
**Returned values** @@ -229,7 +233,7 @@ SELECT sequenceCount('(?1).*(?2)')(time, number = 1, number = 2) FROM t **See Also** -- [sequenceMatch](#function-sequencematch) +- [sequenceMatch](#sequencematch) ## windowFunnel diff --git a/docs/en/sql-reference/aggregate-functions/reference/flame_graph.md b/docs/en/sql-reference/aggregate-functions/reference/flame_graph.md new file mode 100644 index 00000000000..ae17153085c --- /dev/null +++ b/docs/en/sql-reference/aggregate-functions/reference/flame_graph.md @@ -0,0 +1,95 @@ +--- +slug: /en/sql-reference/aggregate-functions/reference/flamegraph +sidebar_position: 110 +--- + +# flameGraph + +Aggregate function which builds a [flamegraph](https://www.brendangregg.com/flamegraphs.html) using the list of stacktraces. Outputs an array of strings which can be used by [flamegraph.pl utility](https://github.com/brendangregg/FlameGraph) to render an SVG of the flamegraph. + +## Syntax + +```sql +flameGraph(traces, [size], [ptr]) +``` + +## Parameters + +- `traces` — a stacktrace. [Array](../../data-types/array.md)([UInt64](../../data-types/int-uint.md)). +- `size` — an allocation size for memory profiling. (optional - default `1`). [UInt64](../../data-types/int-uint.md). +- `ptr` — an allocation address. (optional - default `0`). [UInt64](../../data-types/int-uint.md). + +:::note +In the case where `ptr != 0`, a flameGraph will map allocations (size > 0) and deallocations (size < 0) with the same size and ptr. +Only allocations which were not freed are shown. Non mapped deallocations are ignored. +::: + +## Returned value + +- An array of strings for use with [flamegraph.pl utility](https://github.com/brendangregg/FlameGraph). [Array](../../data-types/array.md)([String](../../data-types/string.md)). + +## Examples + +### Building a flamegraph based on a CPU query profiler + +```sql +SET query_profiler_cpu_time_period_ns=10000000; +SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10; +``` + +```text +clickhouse client --allow_introspection_functions=1 -q "select arrayJoin(flameGraph(arrayReverse(trace))) from system.trace_log where trace_type = 'CPU' and query_id = 'xxx'" | ~/dev/FlameGraph/flamegraph.pl > flame_cpu.svg +``` + +### Building a flamegraph based on a memory query profiler, showing all allocations + +```sql +SET memory_profiler_sample_probability=1, max_untracked_memory=1; +SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10; +``` + +```text +clickhouse client --allow_introspection_functions=1 -q "select arrayJoin(flameGraph(trace, size)) from system.trace_log where trace_type = 'MemorySample' and query_id = 'xxx'" | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem.svg +``` + +### Building a flamegraph based on a memory query profiler, showing allocations which were not deallocated in query context + +```sql +SET memory_profiler_sample_probability=1, max_untracked_memory=1, use_uncompressed_cache=1, merge_tree_max_rows_to_use_cache=100000000000, merge_tree_max_bytes_to_use_cache=1000000000000; +SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10; +``` + +```text +clickhouse client --allow_introspection_functions=1 -q "SELECT arrayJoin(flameGraph(trace, size, ptr)) FROM system.trace_log WHERE trace_type = 'MemorySample' AND query_id = 'xxx'" | ~/dev/FlameGraph/flamegraph.pl 
--countname=bytes --color=mem > flame_mem_untracked.svg
+```
+
+### Building a flamegraph based on a memory query profiler, showing active allocations at a fixed point in time
+
+```sql
+SET memory_profiler_sample_probability=1, max_untracked_memory=1;
+SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
+```
+
+- 1 - Memory usage per second
+
+```sql
+SELECT event_time, m, formatReadableSize(max(s) as m) FROM (SELECT event_time, sum(size) OVER (ORDER BY event_time) AS s FROM system.trace_log WHERE query_id = 'xxx' AND trace_type = 'MemorySample') GROUP BY event_time ORDER BY event_time;
+```
+
+- 2 - Find a time point with maximal memory usage
+
+```sql
+SELECT argMax(event_time, s), max(s) FROM (SELECT event_time, sum(size) OVER (ORDER BY event_time) AS s FROM system.trace_log WHERE query_id = 'xxx' AND trace_type = 'MemorySample');
+```
+
+- 3 - Fix active allocations at a fixed point in time
+
+```text
+clickhouse client --allow_introspection_functions=1 -q "SELECT arrayJoin(flameGraph(trace, size, ptr)) FROM (SELECT * FROM system.trace_log WHERE trace_type = 'MemorySample' AND query_id = 'xxx' AND event_time <= 'yyy' ORDER BY event_time)" | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem_time_point_pos.svg
+```
+
+- 4 - Find deallocations at a fixed point in time
+
+```text
+clickhouse client --allow_introspection_functions=1 -q "SELECT arrayJoin(flameGraph(trace, -size, ptr)) FROM (SELECT * FROM system.trace_log WHERE trace_type = 'MemorySample' AND query_id = 'xxx' AND event_time > 'yyy' ORDER BY event_time desc)" | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem_time_point_neg.svg
+```
diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupconcat.md b/docs/en/sql-reference/aggregate-functions/reference/groupconcat.md
new file mode 100644
index 00000000000..072252de8c9
--- /dev/null
+++ b/docs/en/sql-reference/aggregate-functions/reference/groupconcat.md
@@ -0,0 +1,90 @@
+---
+slug: /en/sql-reference/aggregate-functions/reference/groupconcat
+sidebar_position: 363
+sidebar_label: groupConcat
+title: groupConcat
+---
+
+Calculates a concatenated string from a group of strings, optionally separated by a delimiter, and optionally limited by a maximum number of elements.
+
+**Syntax**
+
+``` sql
+groupConcat(expression [, delimiter] [, limit]);
+```
+
+**Arguments**
+
+- `expression` — The expression or column name that outputs strings to be concatenated.
+- `delimiter` — A [string](../../../sql-reference/data-types/string.md) that will be used to separate concatenated values. This parameter is optional and defaults to an empty string if not specified.
+- `limit` — A positive [integer](../../../sql-reference/data-types/int-uint.md) specifying the maximum number of elements to concatenate. If more elements are present, excess elements are ignored. This parameter is optional.
+
+:::note
+If delimiter is specified without limit, it must be the first parameter following the expression. If both delimiter and limit are specified, delimiter must precede limit.
+:::
+
+**Returned value**
+
+- Returns a [string](../../../sql-reference/data-types/string.md) consisting of the concatenated values of the column or expression. If the group is empty or contains only null elements, the result is a nullable string with a null value.
+
+**Examples**
+
+Input table:
+
+``` text
+┌─id─┬─Name─┐
+│  1 │ John │
+│  2 │ Jane │
+│  3 │ Bob  │
+└────┴──────┘
+```
+
+1. Basic usage without a delimiter:
+
+Query:
+
+``` sql
+SELECT groupConcat(Name) FROM Employees;
+```
+
+Result:
+
+``` text
+JohnJaneBob
+```
+
+This concatenates all names into one continuous string without any separator.
+
+2. Using comma as a delimiter:
+
+Query:
+
+``` sql
+SELECT groupConcat(Name, ', ') FROM Employees;
+```
+
+Result:
+
+``` text
+John, Jane, Bob
+```
+
+This output shows the names separated by a comma followed by a space.
+
+3. Limiting the number of concatenated elements:
+
+Query:
+
+``` sql
+SELECT groupConcat(Name, ', ', 2) FROM Employees;
+```
+
+Result:
+
+``` text
+John, Jane
+```
+
+This query limits the output to the first two names, even though there are more names in the table.
diff --git a/docs/en/sql-reference/aggregate-functions/reference/index.md b/docs/en/sql-reference/aggregate-functions/reference/index.md
index a56b1c97681..e3725b6a430 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/index.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/index.md
@@ -58,6 +58,7 @@ ClickHouse-specific aggregate functions:
 - [topKWeighted](../reference/topkweighted.md)
 - [deltaSum](../reference/deltasum.md)
 - [deltaSumTimestamp](../reference/deltasumtimestamp.md)
+- [flameGraph](../reference/flame_graph.md)
 - [groupArray](../reference/grouparray.md)
 - [groupArrayLast](../reference/grouparraylast.md)
 - [groupUniqArray](../reference/groupuniqarray.md)
diff --git a/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md b/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md
index ddac82a0977..7ab9e1d3256 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md
@@ -3,7 +3,7 @@ slug: /en/sql-reference/aggregate-functions/reference/stochasticlinearregression
 sidebar_position: 221
 ---
 
-# stochasticLinearRegression
+# stochasticLinearRegression {#agg_functions_stochasticlinearregression_parameters}
 
 This function implements stochastic linear regression. It supports custom parameters for learning rate, L2 regularization coefficient, mini-batch size, and has a few methods for updating weights ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (used by default), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), and [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)).
 
@@ -72,5 +72,5 @@ The query will return a column of predicted values.
Note that first argument of **See Also** -- [stochasticLogisticRegression](../../../sql-reference/aggregate-functions/reference/stochasticlogisticregression.md#agg_functions-stochasticlogisticregression) +- [stochasticLogisticRegression](../../../sql-reference/aggregate-functions/reference/stochasticlogisticregression.md#stochasticlogisticregression) - [Difference between linear and logistic regressions](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression) diff --git a/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md b/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md index 0a040689681..4bf5529ddcb 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md +++ b/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md @@ -11,7 +11,7 @@ This function implements stochastic logistic regression. It can be used for bina Parameters are exactly the same as in stochasticLinearRegression: `learning rate`, `l2 regularization coefficient`, `mini-batch size`, `method for updating weights`. -For more information see [parameters](#agg_functions-stochasticlinearregression-parameters). +For more information see [parameters](../reference/stochasticlinearregression.md/#parameters). ``` text stochasticLogisticRegression(1.0, 1.0, 10, 'SGD') diff --git a/docs/en/sql-reference/aggregate-functions/reference/varpop.md b/docs/en/sql-reference/aggregate-functions/reference/varpop.md index fcabeb4c6a8..4e010248f6e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/varpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/varpop.md @@ -27,7 +27,7 @@ Returns an integer of type `Float64`. **Implementation details** -This function uses a numerically unstable algorithm. If you need numerical stability in calculations, use the slower but more stable [`varPopStable` function](#varPopStable). +This function uses a numerically unstable algorithm. If you need numerical stability in calculations, use the slower but more stable [`varPopStable`](#varpopstable) function. **Example** @@ -76,7 +76,7 @@ Returns an integer of type `Float64`. **Implementation details** -Unlike [`varPop()`](#varPop), this function uses a stable, numerically accurate algorithm to calculate the population variance to avoid issues like catastrophic cancellation or loss of precision. This function also handles `NaN` and `Inf` values correctly, excluding them from calculations. +Unlike [`varPop`](#varpop), this function uses a stable, numerically accurate algorithm to calculate the population variance to avoid issues like catastrophic cancellation or loss of precision. This function also handles `NaN` and `Inf` values correctly, excluding them from calculations. **Example** diff --git a/docs/en/sql-reference/aggregate-functions/reference/varsamp.md b/docs/en/sql-reference/aggregate-functions/reference/varsamp.md index be669a16ae8..bd1cfa5742a 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/varsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/varsamp.md @@ -40,7 +40,7 @@ Where: The function assumes that the input data set represents a sample from a larger population. If you want to calculate the variance of the entire population (when you have the complete data set), you should use the [`varPop()` function](./varpop#varpop) instead. -This function uses a numerically unstable algorithm. 
If you need numerical stability in calculations, use the slower but more stable [`varSampStable` function](#varSampStable).
+This function uses a numerically unstable algorithm. If you need numerical stability in calculations, use the slower but more stable [`varSampStable`](#varsampstable) function.

**Example**

@@ -82,11 +82,11 @@ varSampStable(expr)

**Returned value**

-The `varSampStable()` function returns a Float64 value representing the sample variance of the input data set.
+The `varSampStable` function returns a Float64 value representing the sample variance of the input data set.

**Implementation details**

-The `varSampStable()` function calculates the sample variance using the same formula as the [`varSamp()`](#varSamp function):
+The `varSampStable` function calculates the sample variance using the same formula as the [`varSamp`](#varsamp) function:

```plaintext
∑(x - mean(x))^2 / (n - 1)
@@ -97,9 +97,9 @@ Where:
- `mean(x)` is the arithmetic mean of the data set.
- `n` is the number of data points in the data set.

-The difference between `varSampStable()` and `varSamp()` is that `varSampStable()` is designed to provide a more deterministic and stable result when dealing with floating-point arithmetic. It uses an algorithm that minimizes the accumulation of rounding errors, which can be particularly important when dealing with large data sets or data with a wide range of values.
+The difference between `varSampStable` and `varSamp` is that `varSampStable` is designed to provide a more deterministic and stable result when dealing with floating-point arithmetic. It uses an algorithm that minimizes the accumulation of rounding errors, which can be particularly important when dealing with large data sets or data with a wide range of values.

-Like `varSamp()`, the `varSampStable()` function assumes that the input data set represents a sample from a larger population. If you want to calculate the variance of the entire population (when you have the complete data set), you should use the [`varPopStable()` function](./varpop#varpopstable) instead.
+Like `varSamp`, the `varSampStable` function assumes that the input data set represents a sample from a larger population. If you want to calculate the variance of the entire population (when you have the complete data set), you should use the [`varPopStable`](./varpop#varpopstable) function instead.

**Example**

@@ -125,4 +125,4 @@ Response:
0.865
```

-This query calculates the sample variance of the `value` column in the `example_table` using the `varSampStable()` function. The result shows that the sample variance of the values `[10.5, 12.3, 9.8, 11.2, 10.7]` is approximately 0.865, which may differ slightly from the result of `varSamp()` due to the more precise handling of floating-point arithmetic.
+This query calculates the sample variance of the `value` column in the `example_table` using the `varSampStable` function. The result shows that the sample variance of the values `[10.5, 12.3, 9.8, 11.2, 10.7]` is approximately 0.865, which may differ slightly from the result of `varSamp` due to the more precise handling of floating-point arithmetic.
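+
+To see why the stable variant exists, consider data that sits far from zero. The following is a minimal, hypothetical sketch (the table name and exact figures are illustrative, not taken from a reference run): the true sample variance here is about 0.25, which `varSampStable` preserves, while the naive algorithm used by `varSamp` may lose precision at this magnitude.
+
+```sql
+-- One million Float64 values equal to 1e9 or 1e9 + 1.
+CREATE TABLE wide_range (x Float64) ENGINE = Memory;
+
+INSERT INTO wide_range
+SELECT 1e9 + (number % 2)
+FROM numbers(1000000);
+
+-- varSampStable(x) should stay close to 0.25; varSamp(x) may not.
+SELECT varSamp(x), varSampStable(x) FROM wide_range;
+```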
diff --git a/docs/en/sql-reference/data-types/float.md b/docs/en/sql-reference/data-types/float.md
index 23131d5b4fe..3c789076c1e 100644
--- a/docs/en/sql-reference/data-types/float.md
+++ b/docs/en/sql-reference/data-types/float.md
@@ -7,33 +7,43 @@ sidebar_label: Float32, Float64

# Float32, Float64

:::note
-If you need accurate calculations, in particular if you work with financial or business data requiring a high precision you should consider using Decimal instead. Floats might lead to inaccurate results as illustrated below:
+If you need accurate calculations, in particular if you work with financial or business data requiring high precision, you should consider using [Decimal](../data-types/decimal.md) instead.

-```
+[Floating Point Numbers](https://en.wikipedia.org/wiki/IEEE_754) might lead to inaccurate results as illustrated below:
+
+```sql
CREATE TABLE IF NOT EXISTS float_vs_decimal
(
   my_float Float64,
   my_decimal Decimal64(3)
-)Engine=MergeTree ORDER BY tuple()
-
-INSERT INTO float_vs_decimal SELECT round(randCanonical(), 3) AS res, res FROM system.numbers LIMIT 1000000; # Generate 1 000 000 random number with 2 decimal places and store them as a float and as a decimal
+)
+Engine=MergeTree
+ORDER BY tuple();
+
+-- Generate 1 000 000 random numbers with 3 decimal places and store them as a float and as a decimal
+INSERT INTO float_vs_decimal SELECT round(randCanonical(), 3) AS res, res FROM system.numbers LIMIT 1000000;
+```
+```
SELECT sum(my_float), sum(my_decimal) FROM float_vs_decimal;
-> 500279.56300000014 500279.563
+
+┌──────sum(my_float)─┬─sum(my_decimal)─┐
+│ 499693.60500000004 │      499693.605 │
+└────────────────────┴─────────────────┘

SELECT sumKahan(my_float), sumKahan(my_decimal) FROM float_vs_decimal;
-> 500279.563 500279.563
+
+┌─sumKahan(my_float)─┬─sumKahan(my_decimal)─┐
+│         499693.605 │           499693.605 │
+└────────────────────┴──────────────────────┘
```
:::

-[Floating point numbers](https://en.wikipedia.org/wiki/IEEE_754).
-
-Types are equivalent to types of C:
+The equivalent types in ClickHouse and in C are given below:

- `Float32` — `float`.
- `Float64` — `double`.

-Aliases:
+Float types in ClickHouse have the following aliases:

- `Float32` — `FLOAT`, `REAL`, `SINGLE`.
- `Float64` — `DOUBLE`, `DOUBLE PRECISION`.
diff --git a/docs/en/sql-reference/data-types/geo.md b/docs/en/sql-reference/data-types/geo.md
index 7e3c32b3451..7ffc7447d96 100644
--- a/docs/en/sql-reference/data-types/geo.md
+++ b/docs/en/sql-reference/data-types/geo.md
@@ -33,7 +33,7 @@ Result:

## Ring

-`Ring` is a simple polygon without holes stored as an array of points: [Array](array.md)([Point](#point-data-type)).
+`Ring` is a simple polygon without holes stored as an array of points: [Array](array.md)([Point](#point)).

**Example**

@@ -54,7 +54,7 @@ Result:

## Polygon

-`Polygon` is a polygon with holes stored as an array of rings: [Array](array.md)([Ring](#ring-data-type)). First element of outer array is the outer shape of polygon and all the following elements are holes.
+`Polygon` is a polygon with holes stored as an array of rings: [Array](array.md)([Ring](#ring)). First element of outer array is the outer shape of polygon and all the following elements are holes.

**Example**

@@ -76,7 +76,7 @@ Result:

## MultiPolygon

-`MultiPolygon` consists of multiple polygons and is stored as an array of polygons: [Array](array.md)([Polygon](#polygon-data-type)).
+`MultiPolygon` consists of multiple polygons and is stored as an array of polygons: [Array](array.md)([Polygon](#polygon)).
**Example** diff --git a/docs/en/sql-reference/dictionaries/index.md b/docs/en/sql-reference/dictionaries/index.md index 080de94f8b7..4c7421d57c0 100644 --- a/docs/en/sql-reference/dictionaries/index.md +++ b/docs/en/sql-reference/dictionaries/index.md @@ -16,7 +16,7 @@ ClickHouse supports special functions for working with dictionaries that can be ClickHouse supports: - Dictionaries with a [set of functions](../../sql-reference/functions/ext-dict-functions.md). -- [Embedded dictionaries](#embedded_dictionaries) with a specific [set of functions](../../sql-reference/functions/ym-dict-functions.md). +- [Embedded dictionaries](#embedded-dictionaries) with a specific [set of functions](../../sql-reference/functions/ym-dict-functions.md). :::tip Tutorial @@ -82,7 +82,7 @@ You can [configure](#configuring-a-dictionary) any number of dictionaries in the You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../../sql-reference/functions/other-functions.md) function). This functionality is not related to dictionaries. ::: -## Configuring a Dictionary {#configuring-a-dictionary} +## Configuring a Dictionary @@ -123,7 +123,7 @@ LAYOUT(...) -- Memory layout configuration LIFETIME(...) -- Lifetime of dictionary in memory ``` -## Storing Dictionaries in Memory {#storing-dictionaries-in-memory} +## Storing Dictionaries in Memory There are a variety of ways to store dictionaries in memory. @@ -415,7 +415,7 @@ or LAYOUT(COMPLEX_KEY_HASHED_ARRAY([SHARDS 1])) ``` -### range_hashed {#range_hashed} +### range_hashed The dictionary is stored in memory in the form of a hash table with an ordered array of ranges and their corresponding values. @@ -679,7 +679,7 @@ When searching for a dictionary, the cache is searched first. For each block of If keys are not found in dictionary, then update cache task is created and added into update queue. Update queue properties can be controlled with settings `max_update_queue_size`, `update_queue_push_timeout_milliseconds`, `query_wait_timeout_milliseconds`, `max_threads_for_updates`. -For cache dictionaries, the expiration [lifetime](#dictionary-updates) of data in the cache can be set. If more time than `lifetime` has passed since loading the data in a cell, the cell’s value is not used and key becomes expired. The key is re-requested the next time it needs to be used. This behaviour can be configured with setting `allow_read_expired_keys`. +For cache dictionaries, the expiration [lifetime](#refreshing-dictionary-data-using-lifetime) of data in the cache can be set. If more time than `lifetime` has passed since loading the data in a cell, the cell’s value is not used and key becomes expired. The key is re-requested the next time it needs to be used. This behaviour can be configured with setting `allow_read_expired_keys`. This is the least effective of all the ways to store dictionaries. The speed of the cache depends strongly on correct settings and the usage scenario. A cache type dictionary performs well only when the hit rates are high enough (recommended 99% and higher). You can view the average hit rate in the [system.dictionaries](../../operations/system-tables/dictionaries.md) table. @@ -899,7 +899,7 @@ Other types are not supported yet. The function returns the attribute for the pr Data must completely fit into RAM. -## Refreshing dictionary data using LIFETIME {#lifetime} +## Refreshing dictionary data using LIFETIME ClickHouse periodically updates dictionaries based on the `LIFETIME` tag (defined in seconds). 
`LIFETIME` is the update interval for fully downloaded dictionaries and the invalidation interval for cached dictionaries. @@ -1031,7 +1031,7 @@ SOURCE(CLICKHOUSE(... update_field 'added_time' update_lag 15)) ... ``` -## Dictionary Sources {#dictionary-sources} +## Dictionary Sources @@ -1065,7 +1065,7 @@ SOURCE(SOURCE_TYPE(param1 val1 ... paramN valN)) -- Source configuration The source is configured in the `source` section. -For source types [Local file](#local_file), [Executable file](#executable), [HTTP(s)](#https), [ClickHouse](#clickhouse) +For source types [Local file](#local-file), [Executable file](#executable-file), [HTTP(s)](#https), [ClickHouse](#clickhouse) optional settings are available: ``` xml @@ -1089,10 +1089,10 @@ SETTINGS(format_csv_allow_single_quotes = 0) Types of sources (`source_type`): -- [Local file](#local_file) -- [Executable File](#executable) -- [Executable Pool](#executable_pool) -- [HTTP(S)](#http) +- [Local file](#local-file) +- [Executable File](#executable-file) +- [Executable Pool](#executable-pool) +- [HTTP(S)](#https) - DBMS - [ODBC](#odbc) - [MySQL](#mysql) @@ -1102,7 +1102,7 @@ Types of sources (`source_type`): - [Cassandra](#cassandra) - [PostgreSQL](#postgresql) -### Local File {#local_file} +### Local File Example of settings: @@ -1132,9 +1132,9 @@ When a dictionary with source `FILE` is created via DDL command (`CREATE DICTION - [Dictionary function](../../sql-reference/table-functions/dictionary.md#dictionary-function) -### Executable File {#executable} +### Executable File -Working with executable files depends on [how the dictionary is stored in memory](#storig-dictionaries-in-memory). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request to the executable file’s STDIN. Otherwise, ClickHouse starts the executable file and treats its output as dictionary data. +Working with executable files depends on [how the dictionary is stored in memory](#storing-dictionaries-in-memory). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request to the executable file’s STDIN. Otherwise, ClickHouse starts the executable file and treats its output as dictionary data. Example of settings: @@ -1161,7 +1161,7 @@ Setting fields: That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled; otherwise, the DB user would be able to execute arbitrary binaries on the ClickHouse node. -### Executable Pool {#executable_pool} +### Executable Pool Executable pool allows loading data from pool of processes. This source does not work with dictionary layouts that need to load all data from source. Executable pool works if the dictionary [is stored](#ways-to-store-dictionaries-in-memory) using `cache`, `complex_key_cache`, `ssd_cache`, `complex_key_ssd_cache`, `direct`, or `complex_key_direct` layouts. @@ -1196,9 +1196,9 @@ Setting fields: That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled, otherwise, the DB user would be able to execute arbitrary binary on ClickHouse node. -### HTTP(S) {#https} +### HTTP(S) -Working with an HTTP(S) server depends on [how the dictionary is stored in memory](#storig-dictionaries-in-memory). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request via the `POST` method. 
+Working with an HTTP(S) server depends on [how the dictionary is stored in memory](#storing-dictionaries-in-memory). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request via the `POST` method. Example of settings: @@ -1285,7 +1285,7 @@ Setting fields: - `db` – Name of the database. Omit it if the database name is set in the `` parameters. - `table` – Name of the table and schema if exists. - `connection_string` – Connection string. -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](#dictionary-updates). +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Refreshing dictionary data using LIFETIME](#refreshing-dictionary-data-using-lifetime). - `query` – The custom query. Optional parameter. :::note @@ -1575,7 +1575,7 @@ Setting fields: - `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` clause in MySQL, for example, `id > 10 AND id < 20`. Optional parameter. -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](#dictionary-updates). +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Refreshing dictionary data using LIFETIME](#refreshing-dictionary-data-using-lifetime). - `fail_on_connection_loss` – The configuration parameter that controls behavior of the server on connection loss. If `true`, an exception is thrown immediately if the connection between client and server was lost. If `false`, the ClickHouse server retries to execute the query three times before throwing an exception. Note that retrying leads to increased response times. Default value: `false`. @@ -1672,7 +1672,7 @@ Setting fields: - `db` – Name of the database. - `table` – Name of the table. - `where` – The selection criteria. May be omitted. -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](#dictionary-updates). +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Refreshing dictionary data using LIFETIME](#refreshing-dictionary-data-using-lifetime). - `secure` - Use ssl for connection. - `query` – The custom query. Optional parameter. @@ -1849,7 +1849,7 @@ Setting fields: - `db` – Name of the database. - `table` – Name of the table. - `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` clause in PostgreSQL. For example, `id > 10 AND id < 20`. Optional parameter. -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](#dictionary-updates). +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Refreshing dictionary data using LIFETIME](#refreshing-dictionary-data-using-lifetime). - `query` – The custom query. Optional parameter. :::note @@ -1873,7 +1873,7 @@ LAYOUT(FLAT()) LIFETIME(0); ``` -## Dictionary Key and Fields {#dictionary-key-and-fields} +## Dictionary Key and Fields @@ -1963,7 +1963,7 @@ PRIMARY KEY Id ### Composite Key -The key can be a `tuple` from any types of fields. The [layout](#storig-dictionaries-in-memory) in this case must be `complex_key_hashed` or `complex_key_cache`. 
+The key can be a `tuple` from any types of fields. The [layout](#storing-dictionaries-in-memory) in this case must be `complex_key_hashed` or `complex_key_cache`. :::tip A composite key can consist of a single element. This makes it possible to use a string as the key, for instance. @@ -2030,17 +2030,17 @@ CREATE DICTIONARY somename ( Configuration fields: -| Tag | Description | Required | -|------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| -| `name` | Column name. | Yes | -| `type` | ClickHouse data type: [UInt8](../../sql-reference/data-types/int-uint.md), [UInt16](../../sql-reference/data-types/int-uint.md), [UInt32](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md), [Int8](../../sql-reference/data-types/int-uint.md), [Int16](../../sql-reference/data-types/int-uint.md), [Int32](../../sql-reference/data-types/int-uint.md), [Int64](../../sql-reference/data-types/int-uint.md), [Float32](../../sql-reference/data-types/float.md), [Float64](../../sql-reference/data-types/float.md), [UUID](../../sql-reference/data-types/uuid.md), [Decimal32](../../sql-reference/data-types/decimal.md), [Decimal64](../../sql-reference/data-types/decimal.md), [Decimal128](../../sql-reference/data-types/decimal.md), [Decimal256](../../sql-reference/data-types/decimal.md),[Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md), [DateTime64](../../sql-reference/data-types/datetime64.md), [String](../../sql-reference/data-types/string.md), [Array](../../sql-reference/data-types/array.md).
ClickHouse tries to cast value from dictionary to the specified data type. For example, for MySQL, the field might be `TEXT`, `VARCHAR`, or `BLOB` in the MySQL source table, but it can be uploaded as `String` in ClickHouse.
[Nullable](../../sql-reference/data-types/nullable.md) is currently supported for [Flat](#flat), [Hashed](#hashed), [ComplexKeyHashed](#complex_key_hashed), [Direct](#direct), [ComplexKeyDirect](#complex_key_direct), [RangeHashed](#range_hashed), Polygon, [Cache](#cache), [ComplexKeyCache](#complex_key_cache), [SSDCache](#ssd_cache), [SSDComplexKeyCache](#complex_key_ssd_cache) dictionaries. In [IPTrie](#ip_trie) dictionaries `Nullable` types are not supported. | Yes | -| `null_value` | Default value for a non-existing element.
In the example, it is an empty string. [NULL](../syntax.md#null) value can be used only for the `Nullable` types (see the previous line with types description). | Yes | -| `expression` | [Expression](../../sql-reference/syntax.md#expressions) that ClickHouse executes on the value.
The expression can be a column name in the remote SQL database. Thus, you can use it to create an alias for the remote column.

Default value: no expression. | No | -| `hierarchical` | If `true`, the attribute contains the value of a parent key for the current key. See [Hierarchical Dictionaries](#hierarchical-dictionaries).

Default value: `false`. | No | -| `injective` | Flag that shows whether the `id -> attribute` image is [injective](https://en.wikipedia.org/wiki/Injective_function).
If `true`, ClickHouse can automatically place after the `GROUP BY` clause the requests to dictionaries with injection. Usually it significantly reduces the amount of such requests.

Default value: `false`. | No | -| `is_object_id` | Flag that shows whether the query is executed for a MongoDB document by `ObjectID`.

Default value: `false`. +| Tag | Description | Required | +|------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| +| `name` | Column name. | Yes | +| `type` | ClickHouse data type: [UInt8](../../sql-reference/data-types/int-uint.md), [UInt16](../../sql-reference/data-types/int-uint.md), [UInt32](../../sql-reference/data-types/int-uint.md), [UInt64](../../sql-reference/data-types/int-uint.md), [Int8](../../sql-reference/data-types/int-uint.md), [Int16](../../sql-reference/data-types/int-uint.md), [Int32](../../sql-reference/data-types/int-uint.md), [Int64](../../sql-reference/data-types/int-uint.md), [Float32](../../sql-reference/data-types/float.md), [Float64](../../sql-reference/data-types/float.md), [UUID](../../sql-reference/data-types/uuid.md), [Decimal32](../../sql-reference/data-types/decimal.md), [Decimal64](../../sql-reference/data-types/decimal.md), [Decimal128](../../sql-reference/data-types/decimal.md), [Decimal256](../../sql-reference/data-types/decimal.md),[Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md), [DateTime64](../../sql-reference/data-types/datetime64.md), [String](../../sql-reference/data-types/string.md), [Array](../../sql-reference/data-types/array.md).
ClickHouse tries to cast value from dictionary to the specified data type. For example, for MySQL, the field might be `TEXT`, `VARCHAR`, or `BLOB` in the MySQL source table, but it can be uploaded as `String` in ClickHouse.
[Nullable](../../sql-reference/data-types/nullable.md) is currently supported for [Flat](#flat), [Hashed](#hashed), [ComplexKeyHashed](#complex_key_hashed), [Direct](#direct), [ComplexKeyDirect](#complex_key_direct), [RangeHashed](#range_hashed), Polygon, [Cache](#cache), [ComplexKeyCache](#complex_key_cache), [SSDCache](#ssd_cache), [SSDComplexKeyCache](#complex_key_ssd_cache) dictionaries. In [IPTrie](#ip_trie) dictionaries `Nullable` types are not supported. | Yes | +| `null_value` | Default value for a non-existing element.
In the example, it is an empty string. [NULL](../syntax.md#null) value can be used only for the `Nullable` types (see the previous line with types description). | Yes | +| `expression` | [Expression](../../sql-reference/syntax.md#expressions) that ClickHouse executes on the value.
The expression can be a column name in the remote SQL database. Thus, you can use it to create an alias for the remote column.

Default value: no expression. | No | +| `hierarchical` | If `true`, the attribute contains the value of a parent key for the current key. See [Hierarchical Dictionaries](#hierarchical-dictionaries).

Default value: `false`. | No | +| `injective` | Flag that shows whether the `id -> attribute` image is [injective](https://en.wikipedia.org/wiki/Injective_function).
If `true`, ClickHouse can automatically place after the `GROUP BY` clause the requests to dictionaries with injection. Usually it significantly reduces the amount of such requests.

Default value: `false`. | No | +| `is_object_id` | Flag that shows whether the query is executed for a MongoDB document by `ObjectID`.

Default value: `false`. | No |

-## Hierarchical Dictionaries {#hierarchical-dictionaries}
+## Hierarchical Dictionaries

ClickHouse supports hierarchical dictionaries with a [numeric key](#numeric-key).

@@ -2165,7 +2165,7 @@ Points can be specified as an array or a tuple of their coordinates. In the curr

The user can upload their own data in all formats supported by ClickHouse.

-There are 3 types of [in-memory storage](#storig-dictionaries-in-memory) available:
+There are 3 types of [in-memory storage](#storing-dictionaries-in-memory) available:

- `POLYGON_SIMPLE`. This is a naive implementation, where a linear pass through all polygons is made for each query, and membership is checked for each one without using additional indexes.

@@ -2435,7 +2435,7 @@ LIFETIME(0)
LAYOUT(regexp_tree);
```

-## Embedded Dictionaries {#embedded-dictionaries}
+## Embedded Dictionaries

diff --git a/docs/en/sql-reference/functions/array-functions.md b/docs/en/sql-reference/functions/array-functions.md
index 7b52fbff714..d87ca4a0fe7 100644
--- a/docs/en/sql-reference/functions/array-functions.md
+++ b/docs/en/sql-reference/functions/array-functions.md
@@ -1261,7 +1261,7 @@ SELECT arraySort((x) -> -x, [1, 2, 3]) as res;
└─────────┘
```

-For each element of the source array, the lambda function returns the sorting key, that is, \[1 –\> -1, 2 –\> -2, 3 –\> -3\]. Since the `arraySort` function sorts the keys in ascending order, the result is \[3, 2, 1\]. Thus, the `(x) –> -x` lambda function sets the [descending order](#reverse-sort) in a sorting.
+For each element of the source array, the lambda function returns the sorting key, that is, \[1 –\> -1, 2 –\> -2, 3 –\> -3\]. Since the `arraySort` function sorts the keys in ascending order, the result is \[3, 2, 1\]. Thus, the `(x) –> -x` lambda function sets the [descending order](#arrayreversesort) in a sorting.

The lambda function can accept multiple arguments. In this case, you need to pass the `arraySort` function several arrays of identical length that the arguments of lambda function will correspond to. The resulting array will consist of elements from the first input array; elements from the next input array(s) specify the sorting keys. For example:

@@ -1307,10 +1307,15 @@ To improve sorting efficiency, the [Schwartzian transform](https://en.wikipedia.

Same as `arraySort` with additional `limit` argument allowing partial sorting. Returns an array of the same size as the original array where elements in range `[1..limit]` are sorted in ascending order. Remaining elements `(limit..N]` shall contain elements in unspecified order.

-## arrayReverseSort(\[func,\] arr, ...) {#reverse-sort}
+## arrayReverseSort

Sorts the elements of the `arr` array in descending order. If the `func` function is specified, `arr` is sorted according to the result of the `func` function applied to the elements of the array, and then the sorted array is reversed. If `func` accepts multiple arguments, the `arrayReverseSort` function is passed several arrays that the arguments of `func` will correspond to. Detailed examples are shown at the end of `arrayReverseSort` description.

+**Syntax**
+
+```sql
+arrayReverseSort([func,] arr, ...)
+```
Example of integer values sorting:

``` sql
@@ -1907,10 +1912,16 @@ FROM numbers(1,10);

- [arrayReduce](#arrayreduce)

-## arrayReverse(arr)
+## arrayReverse

Returns an array of the same size as the original array containing the elements in reverse order.
+**Syntax** + +```sql +arrayReverse(arr) +``` + Example: ``` sql diff --git a/docs/en/sql-reference/functions/bitmap-functions.md b/docs/en/sql-reference/functions/bitmap-functions.md index a5c8a663b71..d30c0f4dde4 100644 --- a/docs/en/sql-reference/functions/bitmap-functions.md +++ b/docs/en/sql-reference/functions/bitmap-functions.md @@ -74,7 +74,7 @@ bitmapSubsetInRange(bitmap, range_start, range_end) **Arguments** -- `bitmap` – [Bitmap object](#bitmap_functions-bitmapbuild). +- `bitmap` – [Bitmap object](#bitmapbuild). - `range_start` – Start of the range (inclusive). [UInt32](../data-types/int-uint.md). - `range_end` – End of the range (exclusive). [UInt32](../data-types/int-uint.md). @@ -104,7 +104,7 @@ bitmapSubsetLimit(bitmap, range_start, cardinality_limit) **Arguments** -- `bitmap` – [Bitmap object](#bitmap_functions-bitmapbuild). +- `bitmap` – [Bitmap object](#bitmapbuild). - `range_start` – Start of the range (inclusive). [UInt32](../data-types/int-uint.md). - `cardinality_limit` – Maximum cardinality of the subset. [UInt32](../data-types/int-uint.md). @@ -134,7 +134,7 @@ subBitmap(bitmap, offset, cardinality_limit) **Arguments** -- `bitmap` – The bitmap. [Bitmap object](#bitmap_functions-bitmapbuild). +- `bitmap` – The bitmap. [Bitmap object](#bitmapbuild). - `offset` – The position of the first element of the subset. [UInt32](../data-types/int-uint.md). - `cardinality_limit` – The maximum number of elements in the subset. [UInt32](../data-types/int-uint.md). @@ -162,7 +162,7 @@ bitmapContains(bitmap, needle) **Arguments** -- `bitmap` – [Bitmap object](#bitmap_functions-bitmapbuild). +- `bitmap` – [Bitmap object](#bitmapbuild). - `needle` – Searched bit value. [UInt32](../data-types/int-uint.md). **Returned values** @@ -188,7 +188,7 @@ Result: Checks whether two bitmaps intersect. -If `bitmap2` contains exactly one element, consider using [bitmapContains](#bitmap_functions-bitmapcontains) instead as it works more efficiently. +If `bitmap2` contains exactly one element, consider using [bitmapContains](#bitmapcontains) instead as it works more efficiently. **Syntax** diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index c2ac78fa542..d410c34f835 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -83,7 +83,7 @@ Result: ``` ## makeDate32 -Like [makeDate](#makeDate) but produces a [Date32](../data-types/date32.md). +Like [makeDate](#makedate) but produces a [Date32](../data-types/date32.md). ## makeDateTime @@ -214,7 +214,7 @@ Result: **See also** -- [serverTimeZone](#serverTimeZone) +- [serverTimeZone](#servertimezone) ## serverTimeZone @@ -249,7 +249,7 @@ Result: **See also** -- [timeZone](#timeZone) +- [timeZone](#timezone) ## toTimeZone @@ -305,7 +305,7 @@ int32samoa: 1546300800 **See Also** -- [formatDateTime](#formatDateTime) - supports non-constant timezone. +- [formatDateTime](#formatdatetime) - supports non-constant timezone. - [toString](type-conversion-functions.md#tostring) - supports non-constant timezone. 
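+
+For example, the timezone passed to `formatDateTime` can come from a column rather than a constant. A minimal sketch (the chosen timezone names are illustrative; the output depends on the server's timezone database):
+
+```sql
+-- Render the same instant (the Unix epoch) once per timezone taken from a column.
+SELECT
+    tz,
+    formatDateTime(toDateTime(0, 'UTC'), '%Y-%m-%d %H:%i:%S', tz) AS formatted
+FROM (SELECT arrayJoin(['UTC', 'Asia/Istanbul', 'Pacific/Apia']) AS tz);
+```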
## timeZoneOf @@ -1006,7 +1006,7 @@ toStartOfWeek(t[, mode[, timezone]]) **Arguments** - `t` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md) -- `mode` - determines the first day of the week as described in the [toWeek()](date-time-functions#toweek) function +- `mode` - determines the first day of the week as described in the [toWeek()](#toweek) function - `timezone` - Optional parameter, it behaves like any other conversion function **Returned value** @@ -1049,7 +1049,7 @@ toLastDayOfWeek(t[, mode[, timezone]]) **Arguments** - `t` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md) -- `mode` - determines the last day of the week as described in the [toWeek()](date-time-functions#toweek) function +- `mode` - determines the last day of the week as described in the [toWeek](#toweek) function - `timezone` - Optional parameter, it behaves like any other conversion function **Returned value** @@ -1730,7 +1730,7 @@ Result: **See Also** -- [fromDaysSinceYearZero](#fromDaysSinceYearZero) +- [fromDaysSinceYearZero](#fromdayssinceyearzero) ## fromDaysSinceYearZero @@ -1770,11 +1770,11 @@ Result: **See Also** -- [toDaysSinceYearZero](#toDaysSinceYearZero) +- [toDaysSinceYearZero](#todayssinceyearzero) ## fromDaysSinceYearZero32 -Like [fromDaysSinceYearZero](#fromDaysSinceYearZero) but returns a [Date32](../data-types/date32.md). +Like [fromDaysSinceYearZero](#fromdayssinceyearzero) but returns a [Date32](../data-types/date32.md). ## age @@ -1993,7 +1993,7 @@ Result: **See Also** -- [toStartOfInterval](#tostartofintervaldate_or_date_with_time-interval-x-unit--time_zone) +- [toStartOfInterval](#tostartofinterval) ## date\_add @@ -2066,7 +2066,7 @@ Result: **See Also** -- [addDate](#addDate) +- [addDate](#adddate) ## date\_sub @@ -2140,7 +2140,7 @@ Result: **See Also** -- [subDate](#subDate) +- [subDate](#subdate) ## timestamp\_add @@ -2321,7 +2321,7 @@ Alias: `SUBDATE` - [date_sub](#date_sub) -## now {#now} +## now Returns the current date and time at the moment of query analysis. The function is a constant expression. @@ -3630,7 +3630,7 @@ Result: └───────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -## formatDateTime {#formatDateTime} +## formatDateTime Formats a Time according to the given Format string. Format is a constant expression, so you cannot have multiple formats for a single result column. @@ -3755,10 +3755,9 @@ LIMIT 10 **See Also** -- [formatDateTimeInJodaSyntax](##formatDateTimeInJodaSyntax) +- [formatDateTimeInJodaSyntax](#formatdatetimeinjodasyntax) - -## formatDateTimeInJodaSyntax {#formatDateTimeInJodaSyntax} +## formatDateTimeInJodaSyntax Similar to formatDateTime, except that it formats datetime in Joda style instead of MySQL style. Refer to https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html. @@ -3923,11 +3922,11 @@ Result: **See Also** -- [fromUnixTimestampInJodaSyntax](##fromUnixTimestampInJodaSyntax) +- [fromUnixTimestampInJodaSyntax](#fromunixtimestampinjodasyntax) ## fromUnixTimestampInJodaSyntax -Same as [fromUnixTimestamp](#fromUnixTimestamp) but when called in the second way (two or three arguments), the formatting is performed using [Joda style](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html) instead of MySQL style. 
+Same as [fromUnixTimestamp](#fromunixtimestamp) but when called in the second way (two or three arguments), the formatting is performed using [Joda style](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html) instead of MySQL style. **Example:** @@ -4142,7 +4141,7 @@ Result: Returns the current date and time at the moment of query analysis. The function is a constant expression. :::note -This function gives the same result that `now('UTC')` would. It was added only for MySQL support and [`now`](#now-now) is the preferred usage. +This function gives the same result that `now('UTC')` would. It was added only for MySQL support and [`now`](#now) is the preferred usage. ::: **Syntax** diff --git a/docs/en/sql-reference/functions/ext-dict-functions.md b/docs/en/sql-reference/functions/ext-dict-functions.md index 82c21ce40c8..093ee690d47 100644 --- a/docs/en/sql-reference/functions/ext-dict-functions.md +++ b/docs/en/sql-reference/functions/ext-dict-functions.md @@ -12,7 +12,7 @@ For dictionaries created with [DDL queries](../../sql-reference/statements/creat For information on connecting and configuring dictionaries, see [Dictionaries](../../sql-reference/dictionaries/index.md). -## dictGet, dictGetOrDefault, dictGetOrNull {#dictGet} +## dictGet, dictGetOrDefault, dictGetOrNull Retrieves values from a dictionary. diff --git a/docs/en/sql-reference/functions/geo/geohash.md b/docs/en/sql-reference/functions/geo/geohash.md index 8abc8006e5d..b6ac7a74092 100644 --- a/docs/en/sql-reference/functions/geo/geohash.md +++ b/docs/en/sql-reference/functions/geo/geohash.md @@ -4,6 +4,8 @@ sidebar_label: Geohash title: "Functions for Working with Geohash" --- +## Geohash + [Geohash](https://en.wikipedia.org/wiki/Geohash) is the geocode system, which subdivides Earth’s surface into buckets of grid shape and encodes each cell into a short string of letters and digits. It is a hierarchical data structure, so the longer is the geohash string, the more precise is the geographic location. If you need to manually convert geographic coordinates to geohash strings, you can use [geohash.org](http://geohash.org/). diff --git a/docs/en/sql-reference/functions/geo/h3.md b/docs/en/sql-reference/functions/geo/h3.md index bcdd457964a..5fbc2adf2fa 100644 --- a/docs/en/sql-reference/functions/geo/h3.md +++ b/docs/en/sql-reference/functions/geo/h3.md @@ -4,6 +4,8 @@ sidebar_label: H3 Indexes title: "Functions for Working with H3 Indexes" --- +## H3 Index + [H3](https://eng.uber.com/h3/) is a geographical indexing system where Earth’s surface divided into a grid of even hexagonal cells. This system is hierarchical, i. e. each hexagon on the top level ("parent") can be split into seven even but smaller ones ("children"), and so on. The level of the hierarchy is called `resolution` and can receive a value from `0` till `15`, where `0` is the `base` level with the largest and coarsest cells. @@ -16,7 +18,7 @@ The full description of the H3 system is available at [the Uber Engineering site ## h3IsValid -Verifies whether the number is a valid [H3](#h3index) index. +Verifies whether the number is a valid [H3](#h3-index) index. **Syntax** @@ -51,7 +53,7 @@ Result: ## h3GetResolution -Defines the resolution of the given [H3](#h3index) index. +Defines the resolution of the given [H3](#h3-index) index. **Syntax** @@ -86,7 +88,7 @@ Result: ## h3EdgeAngle -Calculates the average length of the [H3](#h3index) hexagon edge in grades. +Calculates the average length of the [H3](#h3-index) hexagon edge in grades. 
**Syntax**

@@ -100,7 +102,7 @@ h3EdgeAngle(resolution)

**Returned values**

-- The average length of the [H3](#h3index) hexagon edge in grades. [Float64](../../data-types/float.md).
+- The average length of the [H3](#h3-index) hexagon edge in grades. [Float64](../../data-types/float.md).

**Example**

@@ -120,7 +122,7 @@ Result:

## h3EdgeLengthM

-Calculates the average length of the [H3](#h3index) hexagon edge in meters.
+Calculates the average length of the [H3](#h3-index) hexagon edge in meters.

**Syntax**

@@ -134,7 +136,7 @@ h3EdgeLengthM(resolution)

**Returned values**

-- The average length of the [H3](#h3index) hexagon edge in meters. [Float64](../../data-types/float.md).
+- The average length of the [H3](#h3-index) hexagon edge in meters. [Float64](../../data-types/float.md).

**Example**

@@ -154,7 +156,7 @@ Result:

## h3EdgeLengthKm

-Calculates the average length of the [H3](#h3index) hexagon edge in kilometers.
+Calculates the average length of the [H3](#h3-index) hexagon edge in kilometers.

**Syntax**

@@ -168,7 +170,7 @@ h3EdgeLengthKm(resolution)

**Returned values**

-- The average length of the [H3](#h3index) hexagon edge in kilometers. [Float64](../../data-types/float.md).
+- The average length of the [H3](#h3-index) hexagon edge in kilometers. [Float64](../../data-types/float.md).

**Example**

@@ -188,7 +190,7 @@ Result:

## geoToH3

-Returns [H3](#h3index) point index `(lon, lat)` with specified resolution.
+Returns [H3](#h3-index) point index `(lon, lat)` with specified resolution.

**Syntax**

@@ -225,7 +227,7 @@ Result:

## h3ToGeo

-Returns the centroid longitude and latitude corresponding to the provided [H3](#h3index) index.
+Returns the centroid longitude and latitude corresponding to the provided [H3](#h3-index) index.

**Syntax**

@@ -294,7 +296,7 @@ Result:

## h3kRing

- Lists all the [H3](#h3index) hexagons in the raduis of `k` from the given hexagon in random order.
+ Lists all the [H3](#h3-index) hexagons in the radius of `k` from the given hexagon in random order.

**Syntax**

@@ -335,7 +337,7 @@ Result:

## h3GetBaseCell

-Returns the base cell number of the [H3](#h3index) index.
+Returns the base cell number of the [H3](#h3-index) index.

**Syntax**

@@ -437,7 +439,7 @@ Result:

## h3IndexesAreNeighbors

-Returns whether or not the provided [H3](#h3index) indexes are neighbors.
+Returns whether or not the provided [H3](#h3-index) indexes are neighbors.

**Syntax**

@@ -473,7 +475,7 @@ Result:

## h3ToChildren

-Returns an array of child indexes for the given [H3](#h3index) index.
+Returns an array of child indexes for the given [H3](#h3-index) index.

**Syntax**

@@ -508,7 +510,7 @@ Result:

## h3ToParent

-Returns the parent (coarser) index containing the given [H3](#h3index) index.
+Returns the parent (coarser) index containing the given [H3](#h3-index) index.

**Syntax**

@@ -609,7 +611,7 @@ Result:

## h3GetResolution

-Returns the resolution of the [H3](#h3index) index.
+Returns the resolution of the [H3](#h3-index) index.

**Syntax**

@@ -643,7 +645,7 @@ Result:

## h3IsResClassIII

-Returns whether [H3](#h3index) index has a resolution with Class III orientation.
+Returns whether [H3](#h3-index) index has a resolution with Class III orientation.

**Syntax**

@@ -678,7 +680,7 @@ Result:

## h3IsPentagon

-Returns whether this [H3](#h3index) index represents a pentagonal cell.
+Returns whether this [H3](#h3-index) index represents a pentagonal cell.

**Syntax**

@@ -713,7 +715,7 @@ Result:

## h3GetFaces

-Returns icosahedron faces intersected by a given [H3](#h3index) index.
+Returns icosahedron faces intersected by a given [H3](#h3-index) index. **Syntax** @@ -815,7 +817,7 @@ Result: ## h3ToCenterChild -Returns the center child (finer) [H3](#h3index) index contained by given [H3](#h3index) at the given resolution. +Returns the center child (finer) [H3](#h3-index) index contained by given [H3](#h3-index) at the given resolution. **Syntax** @@ -830,7 +832,7 @@ h3ToCenterChild(index, resolution) **Returned values** -- [H3](#h3index) index of the center child contained by given [H3](#h3index) at the given resolution. [UInt64](../../data-types/int-uint.md). +- [H3](#h3-index) index of the center child contained by given [H3](#h3-index) at the given resolution. [UInt64](../../data-types/int-uint.md). **Example** diff --git a/docs/en/sql-reference/functions/geo/polygon.md b/docs/en/sql-reference/functions/geo/polygon.md index c2572779ada..25a7a1fac8e 100644 --- a/docs/en/sql-reference/functions/geo/polygon.md +++ b/docs/en/sql-reference/functions/geo/polygon.md @@ -142,6 +142,34 @@ SELECT readWKTPoint('POINT (1.2 3.4)'); (1.2,3.4) ``` +## readWKTLineString + +Parses a Well-Known Text (WKT) representation of a LineString geometry and returns it in the internal ClickHouse format. + +### Syntax + +```sql +readWKTLineString(wkt_string) +``` + +### Arguments + +- `wkt_string`: The input WKT string representing a LineString geometry. + +### Returned value + +The function returns a ClickHouse internal representation of the linestring geometry. + +### Example + +```sql +SELECT readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)'); +``` + +```response +[(1,1),(2,2),(3,3),(1,1)] +``` + ## readWKTRing Parses a Well-Known Text (WKT) representation of a Polygon geometry and returns a ring (closed linestring) in the internal ClickHouse format. @@ -163,7 +191,7 @@ The function returns a ClickHouse internal representation of the ring (closed li ### Example ```sql -SELECT readWKTRing('LINESTRING (1 1, 2 2, 3 3, 1 1)'); +SELECT readWKTRing('POLYGON ((1 1, 2 2, 3 3, 1 1))'); ``` ```response diff --git a/docs/en/sql-reference/functions/geo/s2.md b/docs/en/sql-reference/functions/geo/s2.md index 3165b21318b..e022ce870b0 100644 --- a/docs/en/sql-reference/functions/geo/s2.md +++ b/docs/en/sql-reference/functions/geo/s2.md @@ -5,6 +5,8 @@ sidebar_label: S2 Geometry # Functions for Working with S2 Index +## S2Index + [S2](https://s2geometry.io/) is a geographical indexing system where all geographical data is represented on a three-dimensional sphere (similar to a globe). In the S2 library points are represented as the S2 Index - a specific number which encodes internally a point on the surface of a unit sphere, unlike traditional (latitude, longitude) pairs. To get the S2 point index for a given point specified in the format (latitude, longitude) use the [geoToS2](#geotos2) function. Also, you can use the [s2ToGeo](#s2togeo) function for getting geographical coordinates corresponding to the specified S2 point index. diff --git a/docs/en/sql-reference/functions/hash-functions.md b/docs/en/sql-reference/functions/hash-functions.md index 506114038f7..e431ed75465 100644 --- a/docs/en/sql-reference/functions/hash-functions.md +++ b/docs/en/sql-reference/functions/hash-functions.md @@ -45,13 +45,13 @@ SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00') Calculates the MD4 from a string and returns the resulting set of bytes as FixedString(16). -## MD5 {#md5} +## MD5 Calculates the MD5 from a string and returns the resulting set of bytes as FixedString(16). 
If you do not need MD5 in particular, but you need a decent cryptographic 128-bit hash, use the ‘sipHash128’ function instead.
If you want to get the same result as output by the md5sum utility, use lower(hex(MD5(s))).

-## sipHash64 {#siphash64}
+## sipHash64

Produces a 64-bit [SipHash](https://en.wikipedia.org/wiki/SipHash) hash value.

diff --git a/docs/en/sql-reference/functions/ip-address-functions.md b/docs/en/sql-reference/functions/ip-address-functions.md
index 5b6a3aef2c8..11a7749b33d 100644
--- a/docs/en/sql-reference/functions/ip-address-functions.md
+++ b/docs/en/sql-reference/functions/ip-address-functions.md
@@ -295,7 +295,7 @@ Same as `toIPv6`, but if the IPv6 address has an invalid format, it returns null
## toIPv6

Converts a string form of IPv6 address to [IPv6](../data-types/ipv6.md) type. If the IPv6 address has an invalid format, returns an empty value.
-Similar to [IPv6StringToNum](#ipv6stringtonums) function, which converts IPv6 address to binary format.
+Similar to [IPv6StringToNum](#ipv6stringtonum) function, which converts IPv6 address to binary format.

If the input string contains a valid IPv4 address, then the IPv6 equivalent of the IPv4 address is returned.

diff --git a/docs/en/sql-reference/functions/json-functions.md b/docs/en/sql-reference/functions/json-functions.md
index 5d73c9a83b3..7bff6a6cba5 100644
--- a/docs/en/sql-reference/functions/json-functions.md
+++ b/docs/en/sql-reference/functions/json-functions.md
@@ -5,10 +5,10 @@ sidebar_label: JSON
---

There are two sets of functions to parse JSON:
-   - [`simpleJSON*` (`visitParam*`)](#simplejson--visitparam-functions) which is made for parsing a limited subset of JSON extremely fast.
+   - [`simpleJSON*` (`visitParam*`)](#simplejson-visitparam-functions) which is made for parsing a limited subset of JSON extremely fast.
   - [`JSONExtract*`](#jsonextract-functions) which is made for parsing ordinary JSON.

-## simpleJSON / visitParam functions
+## simpleJSON (visitParam) functions

ClickHouse has special functions for working with simplified JSON. All these JSON functions are based on strong assumptions about what the JSON can be. They try to do as little as possible to get the job done as quickly as possible.

diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md
index 31df9e5627d..e22dd5d827c 100644
--- a/docs/en/sql-reference/functions/other-functions.md
+++ b/docs/en/sql-reference/functions/other-functions.md
@@ -212,7 +212,7 @@ toTypeName(x)

## blockSize {#blockSize}

-In ClickHouse, queries are processed in blocks (chunks).
+In ClickHouse, queries are processed in [blocks](../../development/architecture.md#block-block) (chunks).
This function returns the size (row count) of the block the function is called on.

**Syntax**

@@ -221,6 +221,33 @@ This function returns the size (row count) of the block the function is called o
blockSize()
```

+**Example**
+
+Query:
+
+```sql
+DROP TABLE IF EXISTS test;
+CREATE TABLE test (n UInt8) ENGINE = Memory;
+
+INSERT INTO test
+SELECT * FROM system.numbers LIMIT 5;
+
+SELECT blockSize()
+FROM test;
+```
+
+Result:
+
+```response
+   ┌─blockSize()─┐
+1. │           5 │
+2. │           5 │
+3. │           5 │
+4. │           5 │
+5. │           5 │
+   └─────────────┘
+```
+
## byteSize

Returns an estimation of uncompressed byte size of its arguments in memory.

@@ -735,7 +762,7 @@ LIMIT 10

Given a size (number of bytes), this function returns a readable, rounded size with suffix (KB, MB, etc.) as string.
-The opposite operations of this function are [parseReadableSize](#parseReadableSize), [parseReadableSizeOrZero](#parseReadableSizeOrZero), and [parseReadableSizeOrNull](#parseReadableSizeOrNull).
+The opposite operations of this function are [parseReadableSize](#parsereadablesize), [parseReadableSizeOrZero](#parsereadablesizeorzero), and [parseReadableSizeOrNull](#parsereadablesizeornull).

**Syntax**

@@ -768,7 +795,7 @@ Result:

Given a size (number of bytes), this function returns a readable, rounded size with suffix (KiB, MiB, etc.) as string.

-The opposite operations of this function are [parseReadableSize](#parseReadableSize), [parseReadableSizeOrZero](#parseReadableSizeOrZero), and [parseReadableSizeOrNull](#parseReadableSizeOrNull).
+The opposite operations of this function are [parseReadableSize](#parsereadablesize), [parseReadableSizeOrZero](#parsereadablesizeorzero), and [parseReadableSizeOrNull](#parsereadablesizeornull).

**Syntax**

@@ -899,7 +926,7 @@ SELECT

Given a string containing a byte size and `B`, `KiB`, `KB`, `MiB`, `MB`, etc. as a unit (i.e. [ISO/IEC 80000-13](https://en.wikipedia.org/wiki/ISO/IEC_80000) or decimal byte unit), this function returns the corresponding number of bytes. If the function is unable to parse the input value, it throws an exception.

-The inverse operations of this function are [formatReadableSize](#formatReadableSize) and [formatReadableDecimalSize](#formatReadableDecimalSize).
+The inverse operations of this function are [formatReadableSize](#formatreadablesize) and [formatReadableDecimalSize](#formatreadabledecimalsize).

**Syntax**

@@ -937,7 +964,7 @@ SELECT

Given a string containing a byte size and `B`, `KiB`, `KB`, `MiB`, `MB`, etc. as a unit (i.e. [ISO/IEC 80000-13](https://en.wikipedia.org/wiki/ISO/IEC_80000) or decimal byte unit), this function returns the corresponding number of bytes. If the function is unable to parse the input value, it returns `NULL`.

-The inverse operations of this function are [formatReadableSize](#formatReadableSize) and [formatReadableDecimalSize](#formatReadableDecimalSize).
+The inverse operations of this function are [formatReadableSize](#formatreadablesize) and [formatReadableDecimalSize](#formatreadabledecimalsize).

**Syntax**

@@ -975,7 +1002,7 @@ SELECT

Given a string containing a byte size and `B`, `KiB`, `KB`, `MiB`, `MB`, etc. as a unit (i.e. [ISO/IEC 80000-13](https://en.wikipedia.org/wiki/ISO/IEC_80000) or decimal byte unit), this function returns the corresponding number of bytes. If the function is unable to parse the input value, it returns `0`.

-The inverse operations of this function are [formatReadableSize](#formatReadableSize) and [formatReadableDecimalSize](#formatReadableDecimalSize).
+The inverse operations of this function are [formatReadableSize](#formatreadablesize) and [formatReadableDecimalSize](#formatreadabledecimalsize).

**Syntax**

@@ -2684,7 +2711,7 @@ countDigits(x)

- Number of digits. [UInt8](../data-types/int-uint.md#uint-ranges).

:::note
-For `Decimal` values takes into account their scales: calculates result over underlying integer type which is `(value * scale)`. For example: `countDigits(42) = 2`, `countDigits(42.000) = 5`, `countDigits(0.04200) = 4`. I.e. you may check decimal overflow for `Decimal64` with `countDecimal(x) > 18`. It's a slow variant of [isDecimalOverflow](#is-decimal-overflow).
+For `Decimal` values this function takes into account their scales: calculates result over underlying integer type which is `(value * scale)`.
For example: `countDigits(42) = 2`, `countDigits(42.000) = 5`, `countDigits(0.04200) = 4`. I.e. you may check decimal overflow for `Decimal64` with `countDigits(x) > 18`. It's a slow variant of [isDecimalOverflow](#isdecimaloverflow).
:::

**Example**

@@ -2776,7 +2803,7 @@ currentProfiles()

## enabledProfiles

-Returns settings profiles, assigned to the current user both explicitly and implicitly. Explicitly assigned profiles are the same as returned by the [currentProfiles](#current-profiles) function. Implicitly assigned profiles include parent profiles of other assigned profiles, profiles assigned via granted roles, profiles assigned via their own settings, and the main default profile (see the `default_profile` section in the main server configuration file).
+Returns settings profiles assigned to the current user both explicitly and implicitly. Explicitly assigned profiles are the same as returned by the [currentProfiles](#currentprofiles) function. Implicitly assigned profiles include parent profiles of other assigned profiles, profiles assigned via granted roles, profiles assigned via their own settings, and the main default profile (see the `default_profile` section in the main server configuration file).

**Syntax**

@@ -2889,11 +2916,11 @@ Result:
└───────────────────────────┘
```

-## queryID {#queryID}
+## queryID

Returns the ID of the current query. Other parameters of a query can be extracted from the [system.query_log](../../operations/system-tables/query_log.md) table via `query_id`.

-In contrast to [initialQueryID](#initial-query-id) function, `queryID` can return different results on different shards (see the example).
+In contrast to [initialQueryID](#initialqueryid) function, `queryID` can return different results on different shards (see the example).

**Syntax**

@@ -2927,7 +2954,7 @@ Result:

Returns the ID of the initial current query. Other parameters of a query can be extracted from the [system.query_log](../../operations/system-tables/query_log.md) table via `initial_query_id`.

-In contrast to [queryID](#query-id) function, `initialQueryID` returns the same results on different shards (see example).
+In contrast to [queryID](#queryid) function, `initialQueryID` returns the same results on different shards (see example).

**Syntax**

@@ -3014,7 +3041,7 @@ shardCount()

**See Also**

-- [shardNum()](#shard-num) function example also contains `shardCount()` function call.
+- [shardNum()](#shardnum) function example also contains `shardCount()` function call.

## getOSKernelVersion

@@ -3688,3 +3715,108 @@ Result:
```response
{'version':'1','serial_number':'2D9071D64530052D48308473922C7ADAFA85D6C5','signature_algo':'sha256WithRSAEncryption','issuer':'/CN=marsnet.local CA','not_before':'May 7 17:01:21 2024 GMT','not_after':'May 7 17:01:21 2025 GMT','subject':'/CN=chnode1','pkey_algo':'rsaEncryption'}
```
+
+## lowCardinalityIndices
+
+Returns the position of a value in the dictionary of a [LowCardinality](../data-types/lowcardinality.md) column. Positions start at 1. Since LowCardinality columns have per-part dictionaries, this function may return different positions for the same value in different parts.
+
+**Syntax**
+
+```sql
+lowCardinalityIndices(col)
+```
+
+**Arguments**
+
+- `col` — a low cardinality column. [LowCardinality](../data-types/lowcardinality.md).
+
+**Returned value**
+
+- The position of the value in the dictionary of the current part. [UInt64](../data-types/int-uint.md).
+
+**Example**
+
+Query:
+
+```sql
+DROP TABLE IF EXISTS test;
+CREATE TABLE test (s LowCardinality(String)) ENGINE = Memory;
+
+-- create two parts:
+
+INSERT INTO test VALUES ('ab'), ('cd'), ('ab'), ('ab'), ('df');
+INSERT INTO test VALUES ('ef'), ('cd'), ('ab'), ('cd'), ('ef');
+
+SELECT s, lowCardinalityIndices(s) FROM test;
+```
+
+Result:
+
+```response
+   ┌─s──┬─lowCardinalityIndices(s)─┐
+1. │ ab │                        1 │
+2. │ cd │                        2 │
+3. │ ab │                        1 │
+4. │ ab │                        1 │
+5. │ df │                        3 │
+   └────┴──────────────────────────┘
+   ┌─s──┬─lowCardinalityIndices(s)─┐
+ 6. │ ef │                        1 │
+ 7. │ cd │                        2 │
+ 8. │ ab │                        3 │
+ 9. │ cd │                        2 │
+10. │ ef │                        1 │
+   └────┴──────────────────────────┘
+```
+
+## lowCardinalityKeys
+
+Returns the dictionary values of a [LowCardinality](../data-types/lowcardinality.md) column. If the block is smaller or larger than the dictionary size, the result will be truncated or extended with default values. Since LowCardinality columns have per-part dictionaries, this function may return different dictionary values in different parts.
+
+**Syntax**
+
+```sql
+lowCardinalityKeys(col)
+```
+
+**Arguments**
+
+- `col` — a low cardinality column. [LowCardinality](../data-types/lowcardinality.md).
+
+**Returned value**
+
+- The dictionary keys of the current part, with the same base type as `col` (in the example below, [String](../data-types/string.md)).
+
+**Example**
+
+Query:
+
+```sql
+DROP TABLE IF EXISTS test;
+CREATE TABLE test (s LowCardinality(String)) ENGINE = Memory;
+
+-- create two parts:
+
+INSERT INTO test VALUES ('ab'), ('cd'), ('ab'), ('ab'), ('df');
+INSERT INTO test VALUES ('ef'), ('cd'), ('ab'), ('cd'), ('ef');
+
+SELECT s, lowCardinalityKeys(s) FROM test;
+```
+
+Result:
+
+```response
+   ┌─s──┬─lowCardinalityKeys(s)─┐
+1. │ ef │                       │
+2. │ cd │ ef                    │
+3. │ ab │ cd                    │
+4. │ cd │ ab                    │
+5. │ ef │                       │
+   └────┴───────────────────────┘
+   ┌─s──┬─lowCardinalityKeys(s)─┐
+ 6. │ ab │                       │
+ 7. │ cd │ ab                    │
+ 8. │ ab │ cd                    │
+ 9. │ ab │ df                    │
+10. │ df │                       │
+   └────┴───────────────────────┘
+```
diff --git a/docs/en/sql-reference/functions/rounding-functions.md b/docs/en/sql-reference/functions/rounding-functions.md
index d18185c5013..e2f471d47eb 100644
--- a/docs/en/sql-reference/functions/rounding-functions.md
+++ b/docs/en/sql-reference/functions/rounding-functions.md
@@ -6,49 +6,90 @@ sidebar_label: Rounding

 # Rounding Functions

-## floor(x\[, N\])
+## floor

-Returns the largest round number that is less than or equal to `x`. A round number is a multiple of 1/10N, or the nearest number of the appropriate data type if 1 / 10N isn’t exact.
-‘N’ is an integer constant, optional parameter. By default it is zero, which means to round to an integer.
-‘N’ may be negative.
+Returns the largest rounded number less than or equal to `x`.
+A rounded number is a multiple of 1 / 10^N, or the nearest number of the appropriate data type if 1 / 10^N isn’t exact.

-Examples: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.`
+Integer arguments may be rounded with a negative `N` argument; with non-negative `N`, the function returns `x`, i.e. does nothing.

-`x` is any numeric type. The result is a number of the same type.
-For integer arguments, it makes sense to round with a negative `N` value (for non-negative `N`, the function does not do anything).
-If rounding causes overflow (for example, floor(-128, -1)), an implementation-specific result is returned.
+If rounding causes an overflow (for example, `floor(-128, -1)`), the result is undefined.

-## ceil(x\[, N\]), ceiling(x\[, N\])
+**Syntax**

-Returns the smallest round number that is greater than or equal to `x`.
In every other way, it is the same as the `floor` function (see above).
+``` sql
+floor(x[, N])
+```

-## trunc(x\[, N\]), truncate(x\[, N\])
+**Parameters**

-Returns the round number with largest absolute value that has an absolute value less than or equal to `x`‘s. In every other way, it is the same as the ’floor’ function (see above).
+- `x` - The value to round. [Float*](../data-types/float.md), [Decimal*](../data-types/decimal.md), or [(U)Int*](../data-types/int-uint.md).
+- `N` — The number of decimal places to round to. [(U)Int*](../data-types/int-uint.md). Defaults to zero, which means rounding to an integer. Can be negative.
+
+**Returned value**
+
+A rounded number of the same type as `x`.
+
+**Examples**
+
+Query:
+
+```sql
+SELECT floor(123.45, 1) AS rounded
+```
+
+Result:
+
+```
+┌─rounded─┐
+│   123.4 │
+└─────────┘
+```
+
+Query:
+
+```sql
+SELECT floor(123.45, -1) AS rounded
+```
+
+Result:
+
+```
+┌─rounded─┐
+│     120 │
+└─────────┘
+```
+
+## ceiling
+
+Like `floor` but returns the smallest rounded number greater than or equal to `x`.
+
+**Syntax**
+
+``` sql
+ceiling(x[, N])
+```
+
+Alias: `ceil`.
+
+## truncate
+
+Like `floor` but returns the rounded number with the largest absolute value that is less than or equal to the absolute value of `x`.

 **Syntax**

 ```sql
-trunc(input, precision)
+truncate(x[, N])
 ```

-Alias: `truncate`.
-
-**Parameters**
-
-- `input`: A numeric type ([Float](../data-types/float.md), [Decimal](../data-types/decimal.md) or [Integer](../data-types/int-uint.md)).
-- `precision`: An [Integer](../data-types/int-uint.md) type.
-
-**Returned value**
-
-- A data type of `input`.
+Alias: `trunc`.

 **Example**

 Query:

 ```sql
-SELECT trunc(123.499, 1) as res;
+SELECT truncate(123.499, 1) as res;
 ```

 ```response
@@ -57,37 +98,40 @@ SELECT trunc(123.499, 1) as res;
 └───────┘
 ```

-## round(x\[, N\])
+## round

 Rounds a value to a specified number of decimal places.

-The function returns the nearest number of the specified order. In case when given number has equal distance to surrounding numbers, the function uses banker’s rounding for float number types and rounds away from zero for the other number types (Decimal).
+The function returns the nearest number of the specified order.
+If the input value has equal distance to two neighboring numbers, the function uses banker’s rounding for [Float*](../data-types/float.md) inputs and rounds away from zero for the other number types ([Decimal*](../data-types/decimal.md)).
+
+**Syntax**

 ``` sql
-round(expression [, decimal_places])
+round(x[, N])
 ```

 **Arguments**

-- `expression` — A number to be rounded. Can be any [expression](../../sql-reference/syntax.md#syntax-expressions) returning the numeric [data type](../data-types/index.md#data_types).
-- `decimal-places` — An integer value.
- - If `decimal-places > 0` then the function rounds the value to the right of the decimal point.
- - If `decimal-places < 0` then the function rounds the value to the left of the decimal point.
- - If `decimal-places = 0` then the function rounds the value to integer. In this case the argument can be omitted.
+- `x` — A number to round. [Float*](../data-types/float.md), [Decimal*](../data-types/decimal.md), or [(U)Int*](../data-types/int-uint.md).
+- `N` — The number of decimal places to round to. Integer. Defaults to `0`.
+ - If `N > 0`, the function rounds to the right of the decimal point.
+ - If `N < 0`, the function rounds to the left of the decimal point.
+ - If `N = 0`, the function rounds to the nearest integer.

**Returned value:**

-The rounded number of the same type as the input number.
+A rounded number of the same type as `x`.

**Examples**

-Example of usage with Float:
+Example with `Float` inputs:

-``` sql
+```sql
 SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3;
 ```

-``` text
+```
 ┌───x─┬─round(divide(number, 2))─┐
 │   0 │                        0 │
 │ 0.5 │                        0 │
@@ -95,13 +139,13 @@ SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3;
 └─────┴──────────────────────────┘
 ```

-Example of usage with Decimal:
+Example with `Decimal` inputs:

-``` sql
+```sql
 SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3;
 ```

-``` text
+```
 ┌───x─┬─round(CAST(divide(number, 2), 'Decimal(10, 4)'))─┐
 │   0 │                                                0 │
 │ 0.5 │                                                1 │
@@ -109,14 +153,14 @@ SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIM
 └─────┴──────────────────────────────────────────────────┘
 ```

-If you want to keep the trailing zeros, you need to enable `output_format_decimal_trailing_zeros`
+To retain trailing zeros, enable the setting `output_format_decimal_trailing_zeros`:

-``` sql
+```sql
 SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3 settings output_format_decimal_trailing_zeros=1;
 ```

-``` text
+```
 ┌──────x─┬─round(CAST(divide(number, 2), 'Decimal(10, 4)'))─┐
 │ 0.0000 │                                           0.0000 │
 │ 0.5000 │                                           1.0000 │
@@ -151,9 +195,15 @@ round(3.65, 1) = 3.6

 Rounds a number to a specified decimal position.

-- If the rounding number is halfway between two numbers, the function uses banker’s rounding. Banker's rounding is a method of rounding fractional numbers. When the rounding number is halfway between two numbers, it's rounded to the nearest even digit at the specified decimal position. For example: 3.5 rounds up to 4, 2.5 rounds down to 2. It's the default rounding method for floating point numbers defined in [IEEE 754](https://en.wikipedia.org/wiki/IEEE_754#Roundings_to_nearest). The [round](#rounding_functions-round) function performs the same rounding for floating point numbers. The `roundBankers` function also rounds integers the same way, for example, `roundBankers(45, -1) = 40`.
+If the rounding number is halfway between two numbers, the function uses banker’s rounding.
+Banker's rounding is a method of rounding fractional numbers.
+When the rounding number is halfway between two numbers, it's rounded to the nearest even digit at the specified decimal position.
+For example: 3.5 rounds up to 4, 2.5 rounds down to 2.
+It's the default rounding method for floating point numbers defined in [IEEE 754](https://en.wikipedia.org/wiki/IEEE_754#Roundings_to_nearest).
+The [round](#round) function performs the same rounding for floating point numbers.
+The `roundBankers` function also rounds integers the same way, for example, `roundBankers(45, -1) = 40`.

-- In other cases, the function rounds numbers to the nearest integer.
+In other cases, the function rounds numbers to the nearest integer.

 Using banker’s rounding, you can reduce the effect that rounding numbers has on the results of summing or subtracting these numbers.

@@ -166,16 +216,20 @@ For example, sum numbers 1.5, 2.5, 3.5, 4.5 with different rounding:

 **Syntax**

 ``` sql
-roundBankers(expression [, decimal_places])
+roundBankers(x [, N])
 ```

 **Arguments**

-- `expression` — A number to be rounded. Can be any [expression](../../sql-reference/syntax.md#syntax-expressions) returning the numeric [data type](../data-types/index.md#data_types).
-- `decimal-places` — Decimal places. An integer number.
- - `decimal-places > 0` — The function rounds the number to the given position right of the decimal point. Example: `roundBankers(3.55, 1) = 3.6`.
- - `decimal-places < 0` — The function rounds the number to the given position left of the decimal point. Example: `roundBankers(24.55, -1) = 20`.
- - `decimal-places = 0` — The function rounds the number to an integer. In this case the argument can be omitted. Example: `roundBankers(2.5) = 2`.
+
+- `x` — A number to round. [Float*](../data-types/float.md), [Decimal*](../data-types/decimal.md), or [(U)Int*](../data-types/int-uint.md).
+- `N` — The number of decimal places to round to. Integer. Defaults to `0`.
+ - If `N > 0`, the function rounds to the right of the decimal point. Example: `roundBankers(3.55, 1) = 3.6`.
+ - If `N < 0`, the function rounds to the left of the decimal point. Example: `roundBankers(24.55, -1) = 20`.
+ - If `N = 0`, the function rounds to the nearest integer. In this case the argument can be omitted. Example: `roundBankers(2.5) = 2`.

**Returned value**

A value rounded by the banker’s rounding method.

@@ -185,13 +239,13 @@ A value rounded by the banker’s rounding method.

 Query:

-``` sql
+```sql
 SELECT number / 2 AS x, roundBankers(x, 0) AS b FROM system.numbers LIMIT 10
 ```

 Result:

-``` text
+```
 ┌───x─┬─b─┐
 │   0 │ 0 │
 │ 0.5 │ 0 │
@@ -208,7 +262,7 @@ Result:

 Examples of Banker’s rounding:

-``` text
+```
 roundBankers(0.4) = 0
 roundBankers(-3.5) = -4
 roundBankers(4.5) = 4
@@ -220,7 +274,7 @@ roundBankers(10.755, 2) = 10.76

 **See Also**

-- [round](#rounding_functions-round)
+- [round](#round)

 ## roundToExp2

@@ -264,7 +318,7 @@ Result:

 ## roundDuration

-Accepts a number. If the number is less than one, it returns `0`. Otherwise, it rounds the number down to numbers from the set of commonly used durations: `1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000`. 
+Accepts a number. If the number is less than one, it returns `0`. Otherwise, it rounds the number down to numbers from the set of commonly used durations: `1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000`.

 **Syntax**

diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md
index 342ca2b9f03..c535b82d710 100644
--- a/docs/en/sql-reference/functions/string-functions.md
+++ b/docs/en/sql-reference/functions/string-functions.md
@@ -1136,16 +1136,136 @@ SELECT tryBase58Decode('3dc8KtHrwM') as res, tryBase58Decode('invalid') as res_i

 ## base64Encode

-Encodes a String or FixedString as base64.
+Encodes a String or FixedString as base64, according to [RFC 4648](https://datatracker.ietf.org/doc/html/rfc4648#section-4).

 Alias: `TO_BASE64`.

+**Syntax**
+
+```sql
+base64Encode(plaintext)
+```
+
+**Arguments**
+
+- `plaintext` — [String](../data-types/string.md) column or constant.
+
+**Returned value**
+
+- A string containing the encoded value of the argument.
+
+**Example**
+
+``` sql
+SELECT base64Encode('clickhouse');
+```
+
+Result:
+
+```result
+┌─base64Encode('clickhouse')─┐
+│ Y2xpY2tob3VzZQ==           │
+└────────────────────────────┘
+```
+
+## base64UrlEncode
+
+Encodes a URL (String or FixedString) as base64 with URL-specific modifications, according to [RFC 4648](https://datatracker.ietf.org/doc/html/rfc4648#section-5).
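+
+For a quick side-by-side of the two alphabets, a minimal sketch (outputs omitted; in the URL-safe variant, `-` and `_` take the place of `+` and `/`):
+
+```sql
+-- '>>>?' produces '+' and '/' characters in standard base64,
+-- so the two functions return visibly different encodings:
+SELECT base64Encode('>>>?') AS standard, base64UrlEncode('>>>?') AS url_safe;
+```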
+
+**Syntax**
+
+```sql
+base64UrlEncode(url)
+```
+
+**Arguments**
+
+- `url` — [String](../data-types/string.md) column or constant.
+
+**Returned value**
+
+- A string containing the encoded value of the argument.
+
+**Example**
+
+``` sql
+SELECT base64UrlEncode('http://clickhouse.com');
+```
+
+Result:
+
+```result
+┌─base64UrlEncode('http://clickhouse.com')─┐
+│ aHR0cDovL2NsaWNraG91c2UuY29t             │
+└──────────────────────────────────────────┘
+```
+
 ## base64Decode

-Decodes a base64-encoded String or FixedString. Throws an exception in case of error.
+Accepts a String and decodes it from base64, according to [RFC 4648](https://datatracker.ietf.org/doc/html/rfc4648#section-4). Throws an exception in case of an error.

 Alias: `FROM_BASE64`.

+**Syntax**
+
+```sql
+base64Decode(encoded)
+```
+
+**Arguments**
+
+- `encoded` — [String](../data-types/string.md) column or constant. If the string is not a valid Base64-encoded value, an exception is thrown.
+
+**Returned value**
+
+- A string containing the decoded value of the argument.
+
+**Example**
+
+``` sql
+SELECT base64Decode('Y2xpY2tob3VzZQ==');
+```
+
+Result:
+
+```result
+┌─base64Decode('Y2xpY2tob3VzZQ==')─┐
+│ clickhouse                       │
+└──────────────────────────────────┘
+```
+
+## base64UrlDecode
+
+Accepts a base64-encoded URL and decodes it from base64 with URL-specific modifications, according to [RFC 4648](https://datatracker.ietf.org/doc/html/rfc4648#section-5). Throws an exception in case of an error.
+
+**Syntax**
+
+```sql
+base64UrlDecode(encodedUrl)
+```
+
+**Arguments**
+
+- `encodedUrl` — [String](../data-types/string.md) column or constant. If the string is not a valid Base64-encoded value with URL-specific modifications, an exception is thrown.
+
+**Returned value**
+
+- A string containing the decoded value of the argument.
+
+**Example**
+
+``` sql
+SELECT base64UrlDecode('aHR0cDovL2NsaWNraG91c2UuY29t');
+```
+
+Result:
+
+```result
+┌─base64UrlDecode('aHR0cDovL2NsaWNraG91c2UuY29t')─┐
+│ http://clickhouse.com                           │
+└─────────────────────────────────────────────────┘
+```
+
 ## tryBase64Decode

 Like `base64Decode` but returns an empty string in case of error.

@@ -1156,9 +1276,13 @@ Like `base64Decode` but returns an empty string in case of error.
 tryBase64Decode(encoded)
 ```

-**Parameters**
+**Arguments**

-- `encoded`: [String](../data-types/string.md) column or constant. If the string is not a valid Base58-encoded value, returns an empty string in case of error.
+- `encoded`: [String](../data-types/string.md) column or constant. If the string is not a valid Base64-encoded value, returns an empty string.
+
+**Returned value**
+
+- A string containing the decoded value of the argument.

 **Examples**

 Query:

```sql
SELECT tryBase64Decode('RW5jb2RlZA==') as res, tryBase64Decode('invalid') as res_invalid;
```

```response
 ┌─res─────┬─res_invalid─┐
 │ Encoded │             │
 └─────────┴─────────────┘
```
+
+## tryBase64UrlDecode
+
+Like `base64UrlDecode` but returns an empty string in case of error.
+
+**Syntax**
+
+```sql
+tryBase64UrlDecode(encodedUrl)
+```
+
+**Parameters**
+
+- `encodedUrl`: [String](../data-types/string.md) column or constant. If the string is not a valid Base64-encoded value with URL-specific modifications, returns an empty string.
+
+**Returned value**
+
+- A string containing the decoded value of the argument.
+
+**Examples**
+
+Query:
+
+```sql
+SELECT tryBase64UrlDecode('aHR0cDovL2NsaWNraG91c2UuY29t') as res, tryBase64UrlDecode('aHR0cHM6Ly9jbGlja') as res_invalid;
+```
+
+```response
+┌─res───────────────────┬─res_invalid─┐
+│ http://clickhouse.com │             │
+└───────────────────────┴─────────────┘
+```

 ## endsWith {#endswith}

@@ -1994,7 +2150,7 @@ Result:

 ## stringJaccardIndexUTF8

-Like [stringJaccardIndex](#stringJaccardIndex) but for UTF8-encoded strings.
+Like [stringJaccardIndex](#stringjaccardindex) but for UTF8-encoded strings.

 ## editDistance

diff --git a/docs/en/sql-reference/functions/string-search-functions.md b/docs/en/sql-reference/functions/string-search-functions.md
index d261cff3580..b7ba1d4feb7 100644
--- a/docs/en/sql-reference/functions/string-search-functions.md
+++ b/docs/en/sql-reference/functions/string-search-functions.md
@@ -262,7 +262,7 @@ Result:

 ## multiSearchAllPositionsUTF8

-Like [multiSearchAllPositions](#multiSearchAllPositions) but assumes `haystack` and the `needle` substrings are UTF-8 encoded strings.
+Like [multiSearchAllPositions](#multisearchallpositions) but assumes `haystack` and the `needle` substrings are UTF-8 encoded strings.

 **Syntax**

@@ -336,7 +336,7 @@ Result:

 Like [`position`](#position) but returns the leftmost offset in a `haystack` string which matches any of multiple `needle` strings.

-Functions [`multiSearchFirstPositionCaseInsensitive`](#multiSearchFirstPositionCaseInsensitive), [`multiSearchFirstPositionUTF8`](#multiSearchFirstPositionUTF8) and [`multiSearchFirstPositionCaseInsensitiveUTF8`](#multiSearchFirstPositionCaseInsensitiveUTF8) provide case-insensitive and/or UTF-8 variants of this function.
+Functions [`multiSearchFirstPositionCaseInsensitive`](#multisearchfirstpositioncaseinsensitive), [`multiSearchFirstPositionUTF8`](#multisearchfirstpositionutf8) and [`multiSearchFirstPositionCaseInsensitiveUTF8`](#multisearchfirstpositioncaseinsensitiveutf8) provide case-insensitive and/or UTF-8 variants of this function.

 **Syntax**

@@ -370,7 +370,7 @@ Result:

 ## multiSearchFirstPositionCaseInsensitive

-Like [`multiSearchFirstPosition`](#multiSearchFirstPosition) but ignores case.
+Like [`multiSearchFirstPosition`](#multisearchfirstposition) but ignores case.

 **Syntax**

@@ -404,7 +404,7 @@ Result:

 ## multiSearchFirstPositionUTF8

-Like [`multiSearchFirstPosition`](#multiSearchFirstPosition) but assumes `haystack` and `needle` to be UTF-8 strings.
+Like [`multiSearchFirstPosition`](#multisearchfirstposition) but assumes `haystack` and `needle` to be UTF-8 strings.

 **Syntax**

@@ -440,7 +440,7 @@ Result:

 ## multiSearchFirstPositionCaseInsensitiveUTF8

-Like [`multiSearchFirstPosition`](#multiSearchFirstPosition) but assumes `haystack` and `needle` to be UTF-8 strings and ignores case.
+Like [`multiSearchFirstPosition`](#multisearchfirstposition) but assumes `haystack` and `needle` to be UTF-8 strings and ignores case.

 **Syntax**

@@ -478,7 +478,7 @@ Result:

 Returns the index `i` (starting from 1) of the leftmost found needlei in the string `haystack` and 0 otherwise.

-Functions [`multiSearchFirstIndexCaseInsensitive`](#multiSearchFirstIndexCaseInsensitive), [`multiSearchFirstIndexUTF8`](#multiSearchFirstIndexUTF8) and [`multiSearchFirstIndexCaseInsensitiveUTF8`](#multiSearchFirstIndexCaseInsensitiveUTF8) provide case-insensitive and/or UTF-8 variants of this function.
+Functions [`multiSearchFirstIndexCaseInsensitive`](#multisearchfirstindexcaseinsensitive), [`multiSearchFirstIndexUTF8`](#multisearchfirstindexutf8) and [`multiSearchFirstIndexCaseInsensitiveUTF8`](#multisearchfirstindexcaseinsensitiveutf8) provide case-insensitive and/or UTF-8 variants of this function. **Syntax** @@ -615,7 +615,7 @@ Result: Returns 1, if at least one string needlei matches the string `haystack` and 0 otherwise. -Functions [`multiSearchAnyCaseInsensitive`](#multiSearchAnyCaseInsensitive), [`multiSearchAnyUTF8`](#multiSearchAnyUTF8) and []`multiSearchAnyCaseInsensitiveUTF8`](#multiSearchAnyCaseInsensitiveUTF8) provide case-insensitive and/or UTF-8 variants of this function. +Functions [`multiSearchAnyCaseInsensitive`](#multisearchanycaseinsensitive), [`multiSearchAnyUTF8`](#multisearchanyutf8) and [`multiSearchAnyCaseInsensitiveUTF8`](#multisearchanycaseinsensitiveutf8) provide case-insensitive and/or UTF-8 variants of this function. **Syntax** @@ -719,7 +719,7 @@ Result: ## multiSearchAnyCaseInsensitiveUTF8 -Like [multiSearchAnyUTF8](#multiSearchAnyUTF8) but ignores case. +Like [multiSearchAnyUTF8](#multisearchanyutf8) but ignores case. *Syntax** @@ -880,7 +880,7 @@ extractAll(haystack, pattern) Matches all groups of the `haystack` string using the `pattern` regular expression. Returns an array of arrays, where the first array includes all fragments matching the first group, the second array - matching the second group, etc. -This function is slower than [extractAllGroupsVertical](#extractallgroups-vertical). +This function is slower than [extractAllGroupsVertical](#extractallgroupsvertical). **Syntax** @@ -952,7 +952,7 @@ Result: └────────────────────────────────────────────────────────────────────────────────────────┘ ``` -## like {#like} +## like Returns whether string `haystack` matches the LIKE expression `pattern`. @@ -1215,7 +1215,7 @@ Result: ## ngramSearchCaseInsensitive -Provides a case-insensitive variant of [ngramSearch](#ngramSearch). +Provides a case-insensitive variant of [ngramSearch](#ngramsearch). **Syntax** @@ -1630,7 +1630,7 @@ Result: ## hasSubsequenceCaseInsensitive -Like [hasSubsequence](#hasSubsequence) but searches case-insensitively. +Like [hasSubsequence](#hassubsequence) but searches case-insensitively. **Syntax** @@ -1665,7 +1665,7 @@ Result: ## hasSubsequenceUTF8 -Like [hasSubsequence](#hasSubsequence) but assumes `haystack` and `needle` are UTF-8 encoded strings. +Like [hasSubsequence](#hassubsequence) but assumes `haystack` and `needle` are UTF-8 encoded strings. **Syntax** @@ -1700,7 +1700,7 @@ Result: ## hasSubsequenceCaseInsensitiveUTF8 -Like [hasSubsequenceUTF8](#hasSubsequenceUTF8) but searches case-insensitively. +Like [hasSubsequenceUTF8](#hassubsequenceutf8) but searches case-insensitively. **Syntax** diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 2ec51d43c59..61e84ca72d1 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -10,7 +10,7 @@ sidebar_label: Type Conversion ClickHouse generally uses the [same behavior as C++ programs](https://en.cppreference.com/w/cpp/language/implicit_conversion). -`to` functions and [cast](#castx-t) behave differently in some cases, for example in case of [LowCardinality](../data-types/lowcardinality.md): [cast](#castx-t) removes [LowCardinality](../data-types/lowcardinality.md) trait `to` functions don't. 
The same with [Nullable](../data-types/nullable.md), this behaviour is not compatible with SQL standard, and it can be changed using [cast_keep_nullable](../../operations/settings/settings.md/#cast_keep_nullable) setting. +`to` functions and [cast](#cast) behave differently in some cases, for example in case of [LowCardinality](../data-types/lowcardinality.md): [cast](#cast) removes [LowCardinality](../data-types/lowcardinality.md) trait `to` functions don't. The same with [Nullable](../data-types/nullable.md), this behaviour is not compatible with SQL standard, and it can be changed using [cast_keep_nullable](../../operations/settings/settings.md/#cast_keep_nullable) setting. :::note Be aware of potential data loss if values of a datatype are converted to a smaller datatype (for example from `Int64` to `Int32`) or between @@ -70,7 +70,7 @@ Integer value in the `Int8`, `Int16`, `Int32`, `Int64`, `Int128` or `Int256` dat Functions use [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning they truncate fractional digits of numbers. -The behavior of functions for the [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments is undefined. Remember about [numeric conversions issues](#numeric-conversion-issues), when using the functions. +The behavior of functions for the [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments is undefined. Remember about [numeric conversions issues](#common-issues-with-data-conversion), when using the functions. **Example** @@ -169,7 +169,7 @@ Converts an input value to the [UInt](../data-types/int-uint.md) data type. This Functions use [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning they truncate fractional digits of numbers. -The behavior of functions for negative arguments and for the [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments is undefined. If you pass a string with a negative number, for example `'-32'`, ClickHouse raises an exception. Remember about [numeric conversions issues](#numeric-conversion-issues), when using the functions. +The behavior of functions for negative arguments and for the [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments is undefined. If you pass a string with a negative number, for example `'-32'`, ClickHouse raises an exception. Remember about [numeric conversions issues](#common-issues-with-data-conversion), when using the functions. **Example** @@ -996,7 +996,7 @@ Result: ## reinterpretAsUInt8 -Performs byte reinterpretation by treating the input value as a value of type UInt8. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. +Performs byte reinterpretation by treating the input value as a value of type UInt8. Unlike [`CAST`](#cast), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. **Syntax** @@ -1034,7 +1034,7 @@ Result: ## reinterpretAsUInt16 -Performs byte reinterpretation by treating the input value as a value of type UInt16. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. +Performs byte reinterpretation by treating the input value as a value of type UInt16. 
Unlike [`CAST`](#cast), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. **Syntax** @@ -1072,7 +1072,7 @@ Result: ## reinterpretAsUInt32 -Performs byte reinterpretation by treating the input value as a value of type UInt32. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. +Performs byte reinterpretation by treating the input value as a value of type UInt32. Unlike [`CAST`](#cast), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. **Syntax** @@ -1110,7 +1110,7 @@ Result: ## reinterpretAsUInt64 -Performs byte reinterpretation by treating the input value as a value of type UInt64. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. +Performs byte reinterpretation by treating the input value as a value of type UInt64. Unlike [`CAST`](#cast), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. **Syntax** @@ -1148,7 +1148,7 @@ Result: ## reinterpretAsUInt128 -Performs byte reinterpretation by treating the input value as a value of type UInt128. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. +Performs byte reinterpretation by treating the input value as a value of type UInt128. Unlike [`CAST`](#cast), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. **Syntax** @@ -1186,7 +1186,7 @@ Result: ## reinterpretAsUInt256 -Performs byte reinterpretation by treating the input value as a value of type UInt256. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. +Performs byte reinterpretation by treating the input value as a value of type UInt256. Unlike [`CAST`](#cast), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. **Syntax** @@ -1224,7 +1224,7 @@ Result: ## reinterpretAsInt8 -Performs byte reinterpretation by treating the input value as a value of type Int8. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. +Performs byte reinterpretation by treating the input value as a value of type Int8. Unlike [`CAST`](#cast), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. **Syntax** @@ -1262,7 +1262,7 @@ Result: ## reinterpretAsInt16 -Performs byte reinterpretation by treating the input value as a value of type Int16. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. +Performs byte reinterpretation by treating the input value as a value of type Int16. 
Unlike [`CAST`](#cast), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. **Syntax** @@ -1300,7 +1300,7 @@ Result: ## reinterpretAsInt32 -Performs byte reinterpretation by treating the input value as a value of type Int32. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. +Performs byte reinterpretation by treating the input value as a value of type Int32. Unlike [`CAST`](#cast), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. **Syntax** @@ -1338,7 +1338,7 @@ Result: ## reinterpretAsInt64 -Performs byte reinterpretation by treating the input value as a value of type Int64. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. +Performs byte reinterpretation by treating the input value as a value of type Int64. Unlike [`CAST`](#cast), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. **Syntax** @@ -1376,7 +1376,7 @@ Result: ## reinterpretAsInt128 -Performs byte reinterpretation by treating the input value as a value of type Int128. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. +Performs byte reinterpretation by treating the input value as a value of type Int128. Unlike [`CAST`](#cast), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. **Syntax** @@ -1414,7 +1414,7 @@ Result: ## reinterpretAsInt256 -Performs byte reinterpretation by treating the input value as a value of type Int256. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. +Performs byte reinterpretation by treating the input value as a value of type Int256. Unlike [`CAST`](#cast), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. **Syntax** @@ -1452,7 +1452,7 @@ Result: ## reinterpretAsFloat32 -Performs byte reinterpretation by treating the input value as a value of type Float32. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. +Performs byte reinterpretation by treating the input value as a value of type Float32. Unlike [`CAST`](#cast), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. **Syntax** @@ -1486,7 +1486,7 @@ Result: ## reinterpretAsFloat64 -Performs byte reinterpretation by treating the input value as a value of type Float64. Unlike [`CAST`](#castx-t), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. +Performs byte reinterpretation by treating the input value as a value of type Float64. 
Unlike [`CAST`](#cast), the function does not attempt to preserve the original value - if the target type is not able to represent the input type, the output is meaningless. **Syntax** @@ -1730,7 +1730,7 @@ Result: └─────────────────────┘ ``` -## reinterpret(x, T) +## reinterpret Uses the same source in-memory bytes sequence for `x` value and reinterprets it to destination type. @@ -1766,9 +1766,9 @@ Result: └─────────────┴──────────────┴───────────────┘ ``` -## CAST(x, T) +## CAST -Converts an input value to the specified data type. Unlike the [reinterpret](#type_conversion_function-reinterpret) function, `CAST` tries to present the same value using the new data type. If the conversion can not be done then an exception is raised. +Converts an input value to the specified data type. Unlike the [reinterpret](#reinterpret) function, `CAST` tries to present the same value using the new data type. If the conversion can not be done then an exception is raised. Several syntax variants are supported. **Syntax** @@ -1875,7 +1875,7 @@ Result: Converts `x` to the `T` data type. -The difference from [cast(x, T)](#type_conversion_function-cast) is that `accurateCast` does not allow overflow of numeric types during cast if type value `x` does not fit the bounds of type `T`. For example, `accurateCast(-1, 'UInt8')` throws an exception. +The difference from [cast](#cast) is that `accurateCast` does not allow overflow of numeric types during cast if type value `x` does not fit the bounds of type `T`. For example, `accurateCast(-1, 'UInt8')` throws an exception. **Example** @@ -2061,7 +2061,7 @@ Result: └───────────────────────────┴──────────────────────────────┘ ``` -## parseDateTime {#type_conversion_functions-parseDateTime} +## parseDateTime Converts a [String](../data-types/string.md) to [DateTime](../data-types/datetime.md) according to a [MySQL format string](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format). @@ -2102,15 +2102,15 @@ Alias: `TO_TIMESTAMP`. ## parseDateTimeOrZero -Same as for [parseDateTime](#type_conversion_functions-parseDateTime) except that it returns zero date when it encounters a date format that cannot be processed. +Same as for [parseDateTime](#parsedatetime) except that it returns zero date when it encounters a date format that cannot be processed. ## parseDateTimeOrNull -Same as for [parseDateTime](#type_conversion_functions-parseDateTime) except that it returns `NULL` when it encounters a date format that cannot be processed. +Same as for [parseDateTime](#parsedatetime) except that it returns `NULL` when it encounters a date format that cannot be processed. Alias: `str_to_date`. -## parseDateTimeInJodaSyntax {#type_conversion_functions-parseDateTimeInJodaSyntax} +## parseDateTimeInJodaSyntax Similar to [parseDateTime](#parsedatetime), except that the format string is in [Joda](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html) instead of MySQL syntax. @@ -2151,11 +2151,11 @@ SELECT parseDateTimeInJodaSyntax('2023-02-24 14:53:31', 'yyyy-MM-dd HH:mm:ss', ' ## parseDateTimeInJodaSyntaxOrZero -Same as for [parseDateTimeInJodaSyntax](#type_conversion_functions-parseDateTimeInJodaSyntax) except that it returns zero date when it encounters a date format that cannot be processed. +Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that it returns zero date when it encounters a date format that cannot be processed. 
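+
+For illustration, a minimal sketch of the fallback behaviour (the shown values are assumed, not taken from a real run):
+
+```sql
+SELECT
+    parseDateTimeInJodaSyntaxOrZero('2023-02-24', 'yyyy-MM-dd') AS ok,
+    parseDateTimeInJodaSyntaxOrZero('not-a-date', 'yyyy-MM-dd') AS fallback;
+
+-- ok:       2023-02-24 00:00:00
+-- fallback: 1970-01-01 00:00:00 (the zero date)
+```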
## parseDateTimeInJodaSyntaxOrNull -Same as for [parseDateTimeInJodaSyntax](#type_conversion_functions-parseDateTimeInJodaSyntax) except that it returns `NULL` when it encounters a date format that cannot be processed. +Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that it returns `NULL` when it encounters a date format that cannot be processed. ## parseDateTimeBestEffort ## parseDateTime32BestEffort @@ -2313,11 +2313,11 @@ Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it r ## parseDateTimeBestEffortUSOrNull -Same as [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS) function except that it returns `NULL` when it encounters a date format that cannot be processed. +Same as [parseDateTimeBestEffortUS](#parsedatetimebesteffortus) function except that it returns `NULL` when it encounters a date format that cannot be processed. ## parseDateTimeBestEffortUSOrZero -Same as [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS) function except that it returns zero date (`1970-01-01`) or zero date with time (`1970-01-01 00:00:00`) when it encounters a date format that cannot be processed. +Same as [parseDateTimeBestEffortUS](#parsedatetimebesteffortus) function except that it returns zero date (`1970-01-01`) or zero date with time (`1970-01-01 00:00:00`) when it encounters a date format that cannot be processed. ## parseDateTime64BestEffort @@ -2389,7 +2389,7 @@ Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort), except that Converts input parameter to the [LowCardinality](../data-types/lowcardinality.md) version of same data type. -To convert data from the `LowCardinality` data type use the [CAST](#type_conversion_function-cast) function. For example, `CAST(x as String)`. +To convert data from the `LowCardinality` data type use the [CAST](#cast) function. For example, `CAST(x as String)`. **Syntax** diff --git a/docs/en/sql-reference/functions/uuid-functions.md b/docs/en/sql-reference/functions/uuid-functions.md index 0323ae728a9..5f15907d029 100644 --- a/docs/en/sql-reference/functions/uuid-functions.md +++ b/docs/en/sql-reference/functions/uuid-functions.md @@ -150,7 +150,7 @@ The function also works for [Arrays](array-functions.md#function-empty) and [Str **Example** -To generate the UUID value, ClickHouse provides the [generateUUIDv4](#uuid-function-generate) function. +To generate the UUID value, ClickHouse provides the [generateUUIDv4](#generateuuidv4) function. Query: @@ -190,7 +190,7 @@ The function also works for [Arrays](array-functions.md#function-notempty) or [S **Example** -To generate the UUID value, ClickHouse provides the [generateUUIDv4](#uuid-function-generate) function. +To generate the UUID value, ClickHouse provides the [generateUUIDv4](#generateuuidv4) function. Query: diff --git a/docs/en/sql-reference/operators/in.md b/docs/en/sql-reference/operators/in.md index 0257d21b30f..ed75b1802d8 100644 --- a/docs/en/sql-reference/operators/in.md +++ b/docs/en/sql-reference/operators/in.md @@ -235,7 +235,7 @@ If `some_predicate` is not selective enough, it will return a large amount of da ### Distributed Subqueries and max_parallel_replicas -When [max_parallel_replicas](#settings-max_parallel_replicas) is greater than 1, distributed queries are further transformed. +When [max_parallel_replicas](#distributed-subqueries-and-max_parallel_replicas) is greater than 1, distributed queries are further transformed. 
For example, the following: @@ -255,7 +255,7 @@ where `M` is between `1` and `3` depending on which replica the local query is e These settings affect every MergeTree-family table in the query and have the same effect as applying `SAMPLE 1/3 OFFSET (M-1)/3` on each table. -Therefore adding the [max_parallel_replicas](#settings-max_parallel_replicas) setting will only produce correct results if both tables have the same replication scheme and are sampled by UserID or a subkey of it. In particular, if `local_table_2` does not have a sampling key, incorrect results will be produced. The same rule applies to `JOIN`. +Therefore adding the [max_parallel_replicas](#distributed-subqueries-and-max_parallel_replicas) setting will only produce correct results if both tables have the same replication scheme and are sampled by UserID or a subkey of it. In particular, if `local_table_2` does not have a sampling key, incorrect results will be produced. The same rule applies to `JOIN`. One workaround if `local_table_2` does not meet the requirements, is to use `GLOBAL IN` or `GLOBAL JOIN`. diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index a23710b12bd..aa6f132e08e 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -108,7 +108,7 @@ ALTER TABLE visits RENAME COLUMN webBrowser TO browser CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name ``` -Resets all data in a column for a specified partition. Read more about setting the partition name in the section [How to set the partition expression](partition.md/#how-to-set-partition-expression). +Resets all data in a column for a specified partition. Read more about setting the partition name in the section [How to set the partition expression](../alter/partition.md/#how-to-set-partition-expression). If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist. @@ -173,7 +173,7 @@ ALTER TABLE visits MODIFY COLUMN browser Array(String) Changing the column type is the only complex action – it changes the contents of files with data. For large tables, this may take a long time. -The query also can change the order of the columns using `FIRST | AFTER` clause, see [ADD COLUMN](#alter_add-column) description, but column type is mandatory in this case. +The query also can change the order of the columns using `FIRST | AFTER` clause, see [ADD COLUMN](#add-column) description, but column type is mandatory in this case. Example: diff --git a/docs/en/sql-reference/statements/alter/partition.md b/docs/en/sql-reference/statements/alter/partition.md index 0ed1e523669..778816f8934 100644 --- a/docs/en/sql-reference/statements/alter/partition.md +++ b/docs/en/sql-reference/statements/alter/partition.md @@ -31,7 +31,7 @@ The following operations with [partitions](/docs/en/engines/table-engines/merget ALTER TABLE table_name [ON CLUSTER cluster] DETACH PARTITION|PART partition_expr ``` -Moves all data for the specified partition to the `detached` directory. The server forgets about the detached data partition as if it does not exist. The server will not know about this data until you make the [ATTACH](#alter_attach-partition) query. +Moves all data for the specified partition to the `detached` directory. The server forgets about the detached data partition as if it does not exist. The server will not know about this data until you make the [ATTACH](#attach-partitionpart) query. 
Example: @@ -252,7 +252,7 @@ Downloads a partition from another server. This query only works for the replica The query does the following: 1. Downloads the partition|part from the specified shard. In ‘path-in-zookeeper’ you must specify a path to the shard in ZooKeeper. -2. Then the query puts the downloaded data to the `detached` directory of the `table_name` table. Use the [ATTACH PARTITION\|PART](#alter_attach-partition) query to add the data to the table. +2. Then the query puts the downloaded data to the `detached` directory of the `table_name` table. Use the [ATTACH PARTITION\|PART](#attach-partitionpart) query to add the data to the table. For example: @@ -353,7 +353,7 @@ You can specify the partition expression in `ALTER ... PARTITION` queries in dif - Using the keyword `ALL`. It can be used only with DROP/DETACH/ATTACH. For example, `ALTER TABLE visits ATTACH PARTITION ALL`. - As a tuple of expressions or constants that matches (in types) the table partitioning keys tuple. In the case of a single element partitioning key, the expression should be wrapped in the `tuple (...)` function. For example, `ALTER TABLE visits DETACH PARTITION tuple(toYYYYMM(toDate('2019-01-25')))`. - Using the partition ID. Partition ID is a string identifier of the partition (human-readable, if possible) that is used as the names of partitions in the file system and in ZooKeeper. The partition ID must be specified in the `PARTITION ID` clause, in a single quotes. For example, `ALTER TABLE visits DETACH PARTITION ID '201901'`. -- In the [ALTER ATTACH PART](#alter_attach-partition) and [DROP DETACHED PART](#alter_drop-detached) query, to specify the name of a part, use string literal with a value from the `name` column of the [system.detached_parts](/docs/en/operations/system-tables/detached_parts.md/#system_tables-detached_parts) table. For example, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. +- In the [ALTER ATTACH PART](#attach-partitionpart) and [DROP DETACHED PART](#drop-detached-partitionpart) query, to specify the name of a part, use string literal with a value from the `name` column of the [system.detached_parts](/docs/en/operations/system-tables/detached_parts.md/#system_tables-detached_parts) table. For example, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. Usage of quotes when specifying the partition depends on the type of partition expression. For example, for the `String` type, you have to specify its name in quotes (`'`). For the `Date` and `Int*` types no quotes are needed. diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index 628fe1d2875..0253bc647e6 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -17,8 +17,8 @@ By default, tables are created only on the current server. Distributed DDL queri ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( - name1 [type1] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|EPHEMERAL|ALIAS expr1] [compression_codec] [TTL expr1] [COMMENT 'comment for column'], - name2 [type2] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|EPHEMERAL|ALIAS expr2] [compression_codec] [TTL expr2] [COMMENT 'comment for column'], + name1 [type1] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|EPHEMERAL|ALIAS expr1] [COMMENT 'comment for column'] [compression_codec] [TTL expr1], + name2 [type2] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|EPHEMERAL|ALIAS expr2] [COMMENT 'comment for column'] [compression_codec] [TTL expr2], ... 
) ENGINE = engine COMMENT 'comment for table' diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md index 1bdf22b35b0..1fabb6d8cc7 100644 --- a/docs/en/sql-reference/statements/create/view.md +++ b/docs/en/sql-reference/statements/create/view.md @@ -6,7 +6,7 @@ sidebar_label: VIEW # CREATE VIEW -Creates a new view. Views can be [normal](#normal-view), [materialized](#materialized-view), [live](#live-view-experimental), and [window](#window-view-experimental) (live view and window view are experimental features). +Creates a new view. Views can be [normal](#normal-view), [materialized](#materialized-view), [live](#live-view-deprecated), and [window](#window-view-experimental) (live view and window view are experimental features). ## Normal View diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md index 2850ce71781..43fa344a16d 100644 --- a/docs/en/sql-reference/statements/grant.md +++ b/docs/en/sql-reference/statements/grant.md @@ -33,7 +33,7 @@ GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_US - `role` — ClickHouse user role. - `user` — ClickHouse user account. -The `WITH ADMIN OPTION` clause grants [ADMIN OPTION](#admin-option-privilege) privilege to `user` or `role`. +The `WITH ADMIN OPTION` clause grants [ADMIN OPTION](#admin-option) privilege to `user` or `role`. The `WITH REPLACE OPTION` clause replace old roles by new role for the `user` or `role`, if is not specified it appends roles. ## Grant Current Grants Syntax @@ -201,7 +201,7 @@ Hierarchy of privileges: - `HDFS` - `S3` - [dictGet](#dictget) -- [displaySecretsInShowAndSelect](#display-secrets) +- [displaySecretsInShowAndSelect](#displaysecretsinshowandselect) - [NAMED COLLECTION ADMIN](#named-collection-admin) - `CREATE NAMED COLLECTION` - `DROP NAMED COLLECTION` @@ -498,7 +498,7 @@ Privilege level: `DICTIONARY`. - `GRANT dictGet ON mydictionary TO john` -### displaySecretsInShowAndSelect {#display-secrets} +### displaySecretsInShowAndSelect Allows a user to view secrets in `SHOW` and `SELECT` queries if both [`display_secrets_in_show_and_select` server setting](../../operations/server-configuration-parameters/settings#display_secrets_in_show_and_select) diff --git a/docs/en/sql-reference/statements/select/sample.md b/docs/en/sql-reference/statements/select/sample.md index 137f86cc8b9..78e05b19bd1 100644 --- a/docs/en/sql-reference/statements/select/sample.md +++ b/docs/en/sql-reference/statements/select/sample.md @@ -27,14 +27,14 @@ The features of data sampling are listed below: For the `SAMPLE` clause the following syntax is supported: -| SAMPLE Clause Syntax | Description | -|----------------------|------------------------------| -| `SAMPLE k` | Here `k` is the number from 0 to 1. The query is executed on `k` fraction of data. For example, `SAMPLE 0.1` runs the query on 10% of data. [Read more](#select-sample-k) | -| `SAMPLE n` | Here `n` is a sufficiently large integer. The query is executed on a sample of at least `n` rows (but not significantly more than this). For example, `SAMPLE 10000000` runs the query on a minimum of 10,000,000 rows. [Read more](#select-sample-n) | -| `SAMPLE k OFFSET m` | Here `k` and `m` are the numbers from 0 to 1. The query is executed on a sample of `k` fraction of the data. The data used for the sample is offset by `m` fraction. 
[Read more](#select-sample-offset) | +| SAMPLE Clause Syntax | Description | +|----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `SAMPLE k` | Here `k` is the number from 0 to 1. The query is executed on `k` fraction of data. For example, `SAMPLE 0.1` runs the query on 10% of data. [Read more](#sample-k) | +| `SAMPLE n` | Here `n` is a sufficiently large integer. The query is executed on a sample of at least `n` rows (but not significantly more than this). For example, `SAMPLE 10000000` runs the query on a minimum of 10,000,000 rows. [Read more](#sample-n) | +| `SAMPLE k OFFSET m` | Here `k` and `m` are the numbers from 0 to 1. The query is executed on a sample of `k` fraction of the data. The data used for the sample is offset by `m` fraction. [Read more](#sample-k-offset-m) | -## SAMPLE K {#select-sample-k} +## SAMPLE K Here `k` is the number from 0 to 1 (both fractional and decimal notations are supported). For example, `SAMPLE 1/2` or `SAMPLE 0.5`. @@ -54,7 +54,7 @@ ORDER BY PageViews DESC LIMIT 1000 In this example, the query is executed on a sample from 0.1 (10%) of data. Values of aggregate functions are not corrected automatically, so to get an approximate result, the value `count()` is manually multiplied by 10. -## SAMPLE N {#select-sample-n} +## SAMPLE N Here `n` is a sufficiently large integer. For example, `SAMPLE 10000000`. @@ -90,7 +90,7 @@ FROM visits SAMPLE 10000000 ``` -## SAMPLE K OFFSET M {#select-sample-offset} +## SAMPLE K OFFSET M Here `k` and `m` are numbers from 0 to 1. Examples are shown below. diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index 7efbff1b42b..e6d3439d2b9 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -174,7 +174,7 @@ Aborts ClickHouse process (like `kill -9 {$ pid_clickhouse-server}`) ## Managing Distributed Tables -ClickHouse can manage [distributed](../../engines/table-engines/special/distributed.md) tables. When a user inserts data into these tables, ClickHouse first creates a queue of the data that should be sent to cluster nodes, then asynchronously sends it. You can manage queue processing with the [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), and [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) queries. You can also synchronously insert distributed data with the [distributed_foreground_insert](../../operations/settings/settings.md#distributed_foreground_insert) setting. +ClickHouse can manage [distributed](../../engines/table-engines/special/distributed.md) tables. When a user inserts data into these tables, ClickHouse first creates a queue of the data that should be sent to cluster nodes, then asynchronously sends it. You can manage queue processing with the [STOP DISTRIBUTED SENDS](#stop-distributed-sends), [FLUSH DISTRIBUTED](#flush-distributed), and [START DISTRIBUTED SENDS](#start-distributed-sends) queries. You can also synchronously insert distributed data with the [distributed_foreground_insert](../../operations/settings/settings.md#distributed_foreground_insert) setting. 
### STOP DISTRIBUTED SENDS diff --git a/docs/en/sql-reference/syntax.md b/docs/en/sql-reference/syntax.md index fc0286e76ad..6a4afb63db8 100644 --- a/docs/en/sql-reference/syntax.md +++ b/docs/en/sql-reference/syntax.md @@ -54,11 +54,11 @@ Identifiers are: - Cluster, database, table, partition, and column names. - Functions. - Data types. -- [Expression aliases](#expression_aliases). +- [Expression aliases](#expression-aliases). Identifiers can be quoted or non-quoted. The latter is preferred. -Non-quoted identifiers must match the regex `^[a-zA-Z_][0-9a-zA-Z_]*$` and can not be equal to [keywords](#syntax-keywords). Examples: `x`, `_1`, `X_y__Z123_`. +Non-quoted identifiers must match the regex `^[a-zA-Z_][0-9a-zA-Z_]*$` and can not be equal to [keywords](#keywords). Examples: `x`, `_1`, `X_y__Z123_`. If you want to use identifiers the same as keywords or you want to use other symbols in identifiers, quote it using double quotes or backticks, for example, `"id"`, `` `id` ``. diff --git a/docs/en/sql-reference/table-functions/azureBlobStorage.md b/docs/en/sql-reference/table-functions/azureBlobStorage.md index 1510489ce83..f59fedeb3a2 100644 --- a/docs/en/sql-reference/table-functions/azureBlobStorage.md +++ b/docs/en/sql-reference/table-functions/azureBlobStorage.md @@ -72,6 +72,7 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam - `_path` — Path to the file. Type: `LowCardinalty(String)`. - `_file` — Name of the file. Type: `LowCardinalty(String)`. - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. +- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. **See Also** diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index f66178afbb2..3a3162dad9a 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -18,7 +18,7 @@ file([path_to_archive ::] path [,format] [,structure] [,compression]) **Parameters** -- `path` — The relative path to the file from [user_files_path](/docs/en/operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Supports in read-only mode the following [globs](#globs_in_path): `*`, `?`, `{abc,def}` (with `'abc'` and `'def'` being strings) and `{N..M}` (with `N` and `M` being numbers). +- `path` — The relative path to the file from [user_files_path](/docs/en/operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Supports in read-only mode the following [globs](#globs-in-path): `*`, `?`, `{abc,def}` (with `'abc'` and `'def'` being strings) and `{N..M}` (with `N` and `M` being numbers). - `path_to_archive` - The relative path to a zip/tar/7z archive. Supports the same globs as `path`. - `format` — The [format](/docs/en/interfaces/formats.md#formats) of the file. - `structure` — Structure of the table. Format: `'column1_name column1_type, column2_name column2_type, ...'`. @@ -128,7 +128,7 @@ Reading data from `table.csv`, located in `archive1.zip` or/and `archive2.zip`: SELECT * FROM file('user_files/archives/archive{1..2}.zip :: table.csv'); ``` -## Globs in path {#globs_in_path} +## Globs in path Paths may use globbing. Files must match the whole path pattern, not only the suffix or prefix. 
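+
+For example, a sketch reading every file that matches `data_1.csv` through `data_9.csv` (file names and schema are assumptions for illustration):
+
+```sql
+SELECT * FROM file('data_{1..9}.csv', 'CSV', 'id UInt32, name String');
+```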
@@ -196,6 +196,7 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3 - `_path` — Path to the file. Type: `LowCardinalty(String)`. - `_file` — Name of the file. Type: `LowCardinalty(String)`. - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. +- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. ## Settings {#settings} diff --git a/docs/en/sql-reference/table-functions/fileCluster.md b/docs/en/sql-reference/table-functions/fileCluster.md index 4677d2883a7..62b00fadd62 100644 --- a/docs/en/sql-reference/table-functions/fileCluster.md +++ b/docs/en/sql-reference/table-functions/fileCluster.md @@ -22,7 +22,7 @@ fileCluster(cluster_name, path[, format, structure, compression_method]) **Arguments** - `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers. -- `path` — The relative path to the file from [user_files_path](/docs/en/operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Path to file also supports [globs](#globs_in_path). +- `path` — The relative path to the file from [user_files_path](/docs/en/operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Path to file also supports [globs](#globs-in-path). - `format` — [Format](../../interfaces/formats.md#formats) of the files. Type: [String](../../sql-reference/data-types/string.md). - `structure` — Table structure in `'UserID UInt64, Name String'` format. Determines column names and types. Type: [String](../../sql-reference/data-types/string.md). - `compression_method` — Compression method. Supported compression types are `gz`, `br`, `xz`, `zst`, `lz4`, and `bz2`. @@ -74,7 +74,7 @@ SELECT * FROM fileCluster('my_cluster', 'file{1,2}.csv', 'CSV', 'i UInt32, s Str ``` -## Globs in Path {#globs_in_path} +## Globs in Path All patterns supported by [File](../../sql-reference/table-functions/file.md#globs-in-path) table function are supported by FileCluster. diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index d65615e7588..28cba5ccc6a 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -97,6 +97,7 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin - `_path` — Path to the file. Type: `LowCardinalty(String)`. - `_file` — Name of the file. Type: `LowCardinalty(String)`. - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. +- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. ## Storage Settings {#storage-settings} diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index cbef80371a3..1a7e2b8d66a 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -272,6 +272,7 @@ FROM s3( - `_path` — Path to the file. Type: `LowCardinalty(String)`. - `_file` — Name of the file. Type: `LowCardinalty(String)`. - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. +- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. 
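The `_time` virtual column added across these table functions can be read alongside `_path`, `_file`, and `_size`. A hedged sketch mirroring the `test_read_subcolumn_time` tests added later in this diff (the `started_cluster` fixture, instance name, and bucket are borrowed from those tests and are assumptions here):

```python
def test_virtual_columns_sketch(started_cluster):
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]
    url = f"http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/t.tsv"

    instance.query(f"insert into function s3('{url}', auto, 'a UInt32') select 42")
    # Each virtual column is Nullable and returns NULL when the backend cannot report it.
    res = instance.query(f"select _path, _file, _size, _time from s3('{url}', auto, 'a UInt32')")
    assert res.count("\t") == 3  # four columns per row
```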
 ## Storage Settings {#storage-settings}
 
diff --git a/docs/en/sql-reference/table-functions/url.md b/docs/en/sql-reference/table-functions/url.md
index 4dc6e435b50..3bb7aff53a7 100644
--- a/docs/en/sql-reference/table-functions/url.md
+++ b/docs/en/sql-reference/table-functions/url.md
@@ -53,6 +53,7 @@ Character `|` inside patterns is used to specify failover addresses. They are it
 - `_path` — Path to the `URL`. Type: `LowCardinalty(String)`.
 - `_file` — Resource name of the `URL`. Type: `LowCardinalty(String)`.
 - `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
+- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
 
 ## Storage Settings {#storage-settings}
 
diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md
index 5f11f1b430b..d9da51892f9 100644
--- a/docs/ru/interfaces/http.md
+++ b/docs/ru/interfaces/http.md
@@ -414,6 +414,8 @@ $ curl -v 'http://localhost:8123/predefined_query'
 - `content_type` — используется со всеми типами, возвращает [content-type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type).
 
+- `http_response_headers` — используется со всеми типами, чтобы добавить кастомные хедеры в ответ. Может использоваться в том числе для задания хедера `Content-Type` вместо `content_type`.
+
 - `response_content` — используется с типом`static`, содержимое ответа, отправленное клиенту, при использовании префикса ‘file://’ or ‘config://’, находит содержимое из файла или конфигурации, отправленного клиенту.
 
 Далее приведены методы настройки для различных типов.
 
@@ -509,6 +511,33 @@ <max_final_threads>2</max_final_threads>
             <type>static</type>
             <status>402</status>
             <content_type>text/html; charset=UTF-8</content_type>
+            <http_response_headers>
+                <Content-Language>en</Content-Language>
+                <X-My-Custom-Header>43</X-My-Custom-Header>
+            </http_response_headers>
             <response_content>Say Hi!</response_content>
         </handler>
     </rule>
 ```
+
+`http_response_headers` также может использоваться для определения `Content-Type` вместо `content_type`.
+
+``` xml
+<http_handlers>
+    <rule>
+        <methods>GET</methods>
+        <headers><XXX>xxx</XXX></headers>
+        <url>/hi</url>
+        <handler>
+            <type>static</type>
+            <status>402</status>
+            <http_response_headers>
+                <Content-Type>text/html; charset=UTF-8</Content-Type>
+                <Content-Language>en</Content-Language>
+                <X-My-Custom-Header>43</X-My-Custom-Header>
+            </http_response_headers>
+            <response_content>Say Hi!</response_content>
+        </handler>
+    </rule>
+</http_handlers>
+```
@@ -589,6 +618,9 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
             <type>static</type>
             <content_type>text/html; charset=UTF-8</content_type>
+            <http_response_headers>
+                <ETag>737060cd8c284d8af7ad3082f209582d</ETag>
+            </http_response_headers>
             <response_content>file:///absolute_path_file.html</response_content>
@@ -599,6 +631,9 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
             <type>static</type>
             <content_type>text/html; charset=UTF-8</content_type>
+            <http_response_headers>
+                <ETag>737060cd8c284d8af7ad3082f209582d</ETag>
+            </http_response_headers>
             <response_content>file://./relative_path_file.html</response_content>
diff --git a/docs/ru/sql-reference/functions/rounding-functions.md b/docs/ru/sql-reference/functions/rounding-functions.md
index cc939f69afc..f1dd57505ea 100644
--- a/docs/ru/sql-reference/functions/rounding-functions.md
+++ b/docs/ru/sql-reference/functions/rounding-functions.md
@@ -10,7 +10,7 @@ sidebar_label: "Функции округления"
 
 Возвращает наибольшее круглое число, которое меньше или равно, чем x. Круглым называется число, кратное 1 / 10N или ближайшее к нему число соответствующего типа данных, если 1 / 10N не представимо точно.
 
-N - целочисленная константа, не обязательный параметр. По умолчанию - ноль, что означает - округлять до целого числа.
+N - целочисленный аргумент, не обязательный параметр. По умолчанию - ноль, что означает - округлять до целого числа.
 N может быть отрицательным.
 
 Примеры: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.`
diff --git a/docs/ru/sql-reference/functions/string-functions.md b/docs/ru/sql-reference/functions/string-functions.md
index fc258f7b4cf..fa76e84f130 100644
--- a/docs/ru/sql-reference/functions/string-functions.md
+++ b/docs/ru/sql-reference/functions/string-functions.md
@@ -538,16 +538,28 @@ SELECT base58Decode('3dc8KtHrwM');
 
 Синоним: `TO_BASE64`.
 
+## base64UrlEncode(s)
+
+Производит кодирование URL (String или FixedString) в base64-представление в соответствии с [RFC 4648](https://tools.ietf.org/html/rfc4648).
+
 ## base64Decode(s) {#base64decode}
 
 Декодирует base64-представление s в исходную строку. При невозможности декодирования выбрасывает исключение
 
 Синоним: `FROM_BASE64`.
 
+## base64UrlDecode(s)
+
+Декодирует base64-представление URL в исходную строку в соответствии с [RFC 4648](https://tools.ietf.org/html/rfc4648). При невозможности декодирования выбрасывает исключение
+
 ## tryBase64Decode(s) {#trybase64decode}
 
 Функционал аналогичен base64Decode, но при невозможности декодирования возвращает пустую строку.
 
+## tryBase64UrlDecode(s)
+
+Функционал аналогичен base64UrlDecode, но при невозможности декодирования возвращает пустую строку.
+
 ## endsWith(s, suffix) {#endswith}
 
 Возвращает 1, если строка завершается указанным суффиксом, и 0 в противном случае.
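The string-functions section above adds URL-safe Base64 variants, which per RFC 4648 use `-` and `_` in place of `+` and `/`. A hedged round-trip sketch in the same test idiom (the exact padding behavior and the sample inputs are assumptions; the empty-string fallback of the `try*` variant is stated in the docs above):

```python
def base64url_roundtrip(node):
    url = "https://clickhouse.com/docs?page=1&lang=en"
    encoded = node.query(f"SELECT base64UrlEncode('{url}')").strip()
    # Decoding the encoded value should reproduce the original string.
    assert node.query(f"SELECT base64UrlDecode('{encoded}')").strip() == url
    # tryBase64UrlDecode returns an empty string instead of throwing on bad input.
    assert node.query("SELECT tryBase64UrlDecode('invalid@@input')").strip() == ""
```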
diff --git a/docs/zh/guides/improving-query-performance/skipping-indexes.md b/docs/zh/guides/improving-query-performance/skipping-indexes.md index f9f43e46927..8eb88d859f2 100644 --- a/docs/zh/guides/improving-query-performance/skipping-indexes.md +++ b/docs/zh/guides/improving-query-performance/skipping-indexes.md @@ -123,7 +123,7 @@ Bloom filter是一种数据结构,它允许对集合成员进行高效的是 有三种基于Bloom过滤器的数据跳数索引类型: -* 基本的**bloom_filter**接受一个可选参数,该参数表示在0到1之间允许的“假阳性”率(如果未指定,则使用.025)。 +* 基本的**bloom_filter**接受一个可选参数,该参数表示在0到1之间允许的“假阳性”率(如果未指定,则使用0.025)。 * 更专业的**tokenbf_v1**。需要三个参数,用来优化布隆过滤器:(1)过滤器的大小字节(大过滤器有更少的假阳性,有更高的存储成本),(2)哈希函数的个数(更多的散列函数可以减少假阳性)。(3)布隆过滤器哈希函数的种子。有关这些参数如何影响布隆过滤器功能的更多细节,请参阅 [这里](https://hur.st/bloomfilter/) 。此索引仅适用于String、FixedString和Map类型的数据。输入表达式被分割为由非字母数字字符分隔的字符序列。例如,列值`This is a candidate for a "full text" search`将被分割为`This` `is` `a` `candidate` `for` `full` `text` `search`。它用于LIKE、EQUALS、in、hasToken()和类似的长字符串中单词和其他值的搜索。例如,一种可能的用途是在非结构的应用程序日志行列中搜索少量的类名或行号。 diff --git a/programs/keeper/CMakeLists.txt b/programs/keeper/CMakeLists.txt index 52aa601b1a2..079951be55e 100644 --- a/programs/keeper/CMakeLists.txt +++ b/programs/keeper/CMakeLists.txt @@ -154,8 +154,6 @@ if (BUILD_STANDALONE_KEEPER) ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/ThreadPoolRemoteFSReader.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/ThreadPoolReader.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Storages/StorageS3Settings.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Daemon/BaseDaemon.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Daemon/SentryWriter.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Daemon/GraphiteWriter.cpp diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index dba5c2b7d2a..0d3c1f10894 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -361,9 +361,10 @@ try } GlobalThreadPool::initialize( - config().getUInt("max_thread_pool_size", 100), - config().getUInt("max_thread_pool_free_size", 1000), - config().getUInt("thread_pool_queue_size", 10000) + /// We need to have sufficient amount of threads for connections + nuraft workers + keeper workers, 1000 is an estimation + std::min(1000U, config().getUInt("max_thread_pool_size", 1000)), + config().getUInt("max_thread_pool_free_size", 100), + config().getUInt("thread_pool_queue_size", 1000) ); /// Wait for all threads to avoid possible use-after-free (for example logging objects can be already destroyed). SCOPE_EXIT({ diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 8fcb9d87a93..7747831534a 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -48,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -70,7 +71,6 @@ #include #include #include -#include #include #include #include @@ -773,7 +773,27 @@ try LOG_INFO(log, "Available CPU instruction sets: {}", cpu_info); #endif - bool will_have_trace_collector = hasPHDRCache() && config().has("trace_log"); + bool has_trace_collector = false; + /// Disable it if we collect test coverage information, because it will work extremely slow. +#if !WITH_COVERAGE + /// Profilers cannot work reliably with any other libunwind or without PHDR cache. + has_trace_collector = hasPHDRCache() && config().has("trace_log"); +#endif + + /// Describe multiple reasons when query profiler cannot work. 
+ +#if WITH_COVERAGE + LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they work extremely slow with test coverage."); +#endif + +#if defined(SANITIZER) + LOG_INFO(log, "Query Profiler disabled because they cannot work under sanitizers" + " when two different stack unwinding methods will interfere with each other."); +#endif + + if (!hasPHDRCache()) + LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they require PHDR cache to be created" + " (otherwise the function 'dl_iterate_phdr' is not lock free and not async-signal safe)."); // Initialize global thread pool. Do it before we fetch configs from zookeeper // nodes (`from_zk`), because ZooKeeper interface uses the pool. We will @@ -782,8 +802,27 @@ try server_settings.max_thread_pool_size, server_settings.max_thread_pool_free_size, server_settings.thread_pool_queue_size, - will_have_trace_collector ? server_settings.global_profiler_real_time_period_ns : 0, - will_have_trace_collector ? server_settings.global_profiler_cpu_time_period_ns : 0); + has_trace_collector ? server_settings.global_profiler_real_time_period_ns : 0, + has_trace_collector ? server_settings.global_profiler_cpu_time_period_ns : 0); + + if (has_trace_collector) + { + global_context->createTraceCollector(); + + /// Set up server-wide memory profiler (for total memory tracker). + if (server_settings.total_memory_profiler_step) + total_memory_tracker.setProfilerStep(server_settings.total_memory_profiler_step); + + if (server_settings.total_memory_tracker_sample_probability > 0.0) + total_memory_tracker.setSampleProbability(server_settings.total_memory_tracker_sample_probability); + + if (server_settings.total_memory_profiler_sample_min_allocation_size) + total_memory_tracker.setSampleMinAllocationSize(server_settings.total_memory_profiler_sample_min_allocation_size); + + if (server_settings.total_memory_profiler_sample_max_allocation_size) + total_memory_tracker.setSampleMaxAllocationSize(server_settings.total_memory_profiler_sample_max_allocation_size); + } + /// Wait for all threads to avoid possible use-after-free (for example logging objects can be already destroyed). SCOPE_EXIT({ Stopwatch watch; @@ -944,6 +983,18 @@ try } } + std::string path_str = getCanonicalPath(config().getString("path", DBMS_DEFAULT_PATH)); + fs::path path = path_str; + + /// Check that the process user id matches the owner of the data. 
+ assertProcessUserMatchesDataOwner(path_str, [&](const std::string & message){ global_context->addWarningMessage(message); }); + + global_context->setPath(path_str); + + StatusFile status{path / "status", StatusFile::write_full_info}; + + ServerUUID::load(path / "uuid", log); + zkutil::validateZooKeeperConfig(config()); bool has_zookeeper = zkutil::hasZooKeeperConfig(config()); @@ -955,7 +1006,7 @@ try ConfigProcessor config_processor(config_path); loaded_config = config_processor.loadConfigWithZooKeeperIncludes( main_config_zk_node_cache, main_config_zk_changed_event, /* fallback_to_preprocessed = */ true); - config_processor.savePreprocessedConfig(loaded_config, config().getString("path", DBMS_DEFAULT_PATH)); + config_processor.savePreprocessedConfig(loaded_config, path_str); config().removeConfiguration(old_configuration.get()); config().add(loaded_config.configuration.duplicate(), PRIO_DEFAULT, false); global_context->setConfig(loaded_config.configuration); @@ -1089,19 +1140,6 @@ try global_context->setRemoteHostFilter(config()); global_context->setHTTPHeaderFilter(config()); - std::string path_str = getCanonicalPath(config().getString("path", DBMS_DEFAULT_PATH)); - fs::path path = path_str; - std::string default_database = server_settings.default_database.toString(); - - /// Check that the process user id matches the owner of the data. - assertProcessUserMatchesDataOwner(path_str, [&](const std::string & message){ global_context->addWarningMessage(message); }); - - global_context->setPath(path_str); - - StatusFile status{path / "status", StatusFile::write_full_info}; - - ServerUUID::load(path / "uuid", log); - /// Try to increase limit on number of open files. { rlimit rlim; @@ -1339,7 +1377,7 @@ try CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_max_size_in_bytes, compiled_expression_cache_max_elements); #endif - NamedCollectionUtils::loadIfNot(); + NamedCollectionFactory::instance().loadIfNot(); /// Initialize main config reloader. std::string include_from_path = config().getString("include_from", "/etc/metrika.xml"); @@ -1571,6 +1609,10 @@ try 0, // We don't need any threads one all the parts will be deleted new_server_settings.max_parts_cleaning_thread_pool_size); + + global_context->setMergeWorkload(new_server_settings.merge_workload); + global_context->setMutationWorkload(new_server_settings.mutation_workload); + if (config->has("resources")) { global_context->getResourceManager()->updateConfiguration(*config); @@ -1608,7 +1650,7 @@ try #if USE_SSL CertificateReloader::instance().tryLoad(*config); #endif - NamedCollectionUtils::reloadFromConfig(*config); + NamedCollectionFactory::instance().reloadFromConfig(*config); FileCacheFactory::instance().updateSettingsFromConfig(*config); @@ -1632,6 +1674,10 @@ try if (global_context->isServerCompletelyStarted()) CannotAllocateThreadFaultInjector::setFaultProbability(new_server_settings.cannot_allocate_thread_fault_injection_probability); +#if USE_GWP_ASAN + GWPAsan::setForceSampleProbability(new_server_settings.gwp_asan_force_sample_probability); +#endif + ProfileEvents::increment(ProfileEvents::MainConfigLoads); /// Must be the last. @@ -1889,6 +1935,7 @@ try /// Set current database name before loading tables and databases because /// system logs may copy global context. 
+ std::string default_database = server_settings.default_database.toString(); global_context->setCurrentDatabaseNameInGlobalContext(default_database); LOG_INFO(log, "Loading metadata from {}", path_str); @@ -1950,52 +1997,9 @@ try LOG_DEBUG(log, "Loaded metadata."); - /// Init trace collector only after trace_log system table was created - /// Disable it if we collect test coverage information, because it will work extremely slow. -#if !WITH_COVERAGE - /// Profilers cannot work reliably with any other libunwind or without PHDR cache. - if (hasPHDRCache()) - { + if (has_trace_collector) global_context->initializeTraceCollector(); - /// Set up server-wide memory profiler (for total memory tracker). - if (server_settings.total_memory_profiler_step) - { - total_memory_tracker.setProfilerStep(server_settings.total_memory_profiler_step); - } - - if (server_settings.total_memory_tracker_sample_probability > 0.0) - { - total_memory_tracker.setSampleProbability(server_settings.total_memory_tracker_sample_probability); - } - - if (server_settings.total_memory_profiler_sample_min_allocation_size) - { - total_memory_tracker.setSampleMinAllocationSize(server_settings.total_memory_profiler_sample_min_allocation_size); - } - - if (server_settings.total_memory_profiler_sample_max_allocation_size) - { - total_memory_tracker.setSampleMaxAllocationSize(server_settings.total_memory_profiler_sample_max_allocation_size); - } - } -#endif - - /// Describe multiple reasons when query profiler cannot work. - -#if WITH_COVERAGE - LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they work extremely slow with test coverage."); -#endif - -#if defined(SANITIZER) - LOG_INFO(log, "Query Profiler disabled because they cannot work under sanitizers" - " when two different stack unwinding methods will interfere with each other."); -#endif - - if (!hasPHDRCache()) - LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they require PHDR cache to be created" - " (otherwise the function 'dl_iterate_phdr' is not lock free and not async-signal safe)."); - #if defined(OS_LINUX) auto tasks_stats_provider = TasksStatsCounters::findBestAvailableProvider(); if (tasks_stats_provider == TasksStatsCounters::MetricsProvider::None) @@ -2124,6 +2128,10 @@ try CannotAllocateThreadFaultInjector::setFaultProbability(server_settings.cannot_allocate_thread_fault_injection_probability); +#if USE_GWP_ASAN + GWPAsan::setForceSampleProbability(server_settings.gwp_asan_force_sample_probability); +#endif + try { global_context->startClusterDiscovery(); diff --git a/programs/server/config.xml b/programs/server/config.xml index 4b3248d9d1c..a0cb5b14007 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -371,7 +371,7 @@ - + true @@ -1396,6 +1396,14 @@ + + + + + + http://resolver:8080/hostname + http + 80 + 10 + + + + diff --git a/tests/integration/test_s3_table_function_with_http_proxy/test.py b/tests/integration/test_s3_table_function_with_http_proxy/test.py index 76ad2109efc..2ec73ecbef6 100644 --- a/tests/integration/test_s3_table_function_with_http_proxy/test.py +++ b/tests/integration/test_s3_table_function_with_http_proxy/test.py @@ -19,6 +19,14 @@ def cluster(): with_minio=True, ) + cluster.add_instance( + "remote_proxy_node_no_proxy", + main_configs=[ + "configs/config.d/proxy_remote_no_proxy.xml", + ], + with_minio=True, + ) + cluster.add_instance( "proxy_list_node", main_configs=[ @@ -27,6 +35,14 @@ def cluster(): with_minio=True, ) + cluster.add_instance( + 
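+        # The *_no_proxy instances below mirror the existing proxy instances but
+        # additionally set no_proxy, so requests to minio1 must bypass the configured
+        # proxies (verified by proxy_util.simple_test_assert_no_proxy in the tests).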
"proxy_list_node_no_proxy", + main_configs=[ + "configs/config.d/proxy_list_no_proxy.xml", + ], + with_minio=True, + ) + cluster.add_instance( "env_node", with_minio=True, @@ -36,6 +52,16 @@ def cluster(): instance_env_variables=True, ) + cluster.add_instance( + "env_node_no_proxy", + with_minio=True, + env_variables={ + "http_proxy": "http://proxy1", + "no_proxy": "not_important_host,, minio1 ,", + }, + instance_env_variables=True, + ) + logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -48,6 +74,24 @@ def cluster(): cluster.shutdown() +def test_s3_with_http_proxy_list_no_proxy(cluster): + proxy_util.simple_test_assert_no_proxy( + cluster, ["proxy1", "proxy2"], "http", "proxy_list_node_no_proxy" + ) + + +def test_s3_with_http_remote_proxy_no_proxy(cluster): + proxy_util.simple_test_assert_no_proxy( + cluster, ["proxy1"], "http", "remote_proxy_node_no_proxy" + ) + + +def test_s3_with_http_env_no_proxy(cluster): + proxy_util.simple_test_assert_no_proxy( + cluster, ["proxy1"], "http", "env_node_no_proxy" + ) + + def test_s3_with_http_proxy_list(cluster): proxy_util.simple_test(cluster, ["proxy1", "proxy2"], "http", "proxy_list_node") diff --git a/tests/integration/test_s3_table_function_with_https_proxy/configs/config.d/proxy_list_no_proxy.xml b/tests/integration/test_s3_table_function_with_https_proxy/configs/config.d/proxy_list_no_proxy.xml new file mode 100644 index 00000000000..0a03986f839 --- /dev/null +++ b/tests/integration/test_s3_table_function_with_https_proxy/configs/config.d/proxy_list_no_proxy.xml @@ -0,0 +1,13 @@ + + + not_important_host,, minio1 , + + http://proxy1 + http://proxy2 + + + https://proxy1 + https://proxy2 + + + diff --git a/tests/integration/test_s3_table_function_with_https_proxy/configs/config.d/proxy_remote_no_proxy.xml b/tests/integration/test_s3_table_function_with_https_proxy/configs/config.d/proxy_remote_no_proxy.xml new file mode 100644 index 00000000000..943f2b36a34 --- /dev/null +++ b/tests/integration/test_s3_table_function_with_https_proxy/configs/config.d/proxy_remote_no_proxy.xml @@ -0,0 +1,18 @@ + + + not_important_host,, minio1 , + + + + http://resolver:8080/hostname + https + 443 + 10 + + + + diff --git a/tests/integration/test_s3_table_function_with_https_proxy/test.py b/tests/integration/test_s3_table_function_with_https_proxy/test.py index 8b40b232742..54452dda401 100644 --- a/tests/integration/test_s3_table_function_with_https_proxy/test.py +++ b/tests/integration/test_s3_table_function_with_https_proxy/test.py @@ -23,6 +23,15 @@ def cluster(): minio_certs_dir="minio_certs", ) + cluster.add_instance( + "remote_proxy_node_no_proxy", + main_configs=[ + "configs/config.d/proxy_remote_no_proxy.xml", + "configs/config.d/ssl.xml", + ], + with_minio=True, + ) + cluster.add_instance( "proxy_list_node", main_configs=[ @@ -32,6 +41,15 @@ def cluster(): with_minio=True, ) + cluster.add_instance( + "proxy_list_node_no_proxy", + main_configs=[ + "configs/config.d/proxy_list_no_proxy.xml", + "configs/config.d/ssl.xml", + ], + with_minio=True, + ) + cluster.add_instance( "env_node", main_configs=[ @@ -44,6 +62,19 @@ def cluster(): instance_env_variables=True, ) + cluster.add_instance( + "env_node_no_proxy", + main_configs=[ + "configs/config.d/ssl.xml", + ], + with_minio=True, + env_variables={ + "https_proxy": "https://proxy1", + "no_proxy": "not_important_host,, minio1 ,", + }, + instance_env_variables=True, + ) + logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -56,6 +87,24 @@ def 
cluster(): cluster.shutdown() +def test_s3_with_https_proxy_list_no_proxy(cluster): + proxy_util.simple_test_assert_no_proxy( + cluster, ["proxy1", "proxy2"], "https", "proxy_list_node_no_proxy" + ) + + +def test_s3_with_https_env_no_proxy(cluster): + proxy_util.simple_test_assert_no_proxy( + cluster, ["proxy1"], "https", "env_node_no_proxy" + ) + + +def test_s3_with_https_remote_no_proxy(cluster): + proxy_util.simple_test_assert_no_proxy( + cluster, ["proxy1"], "https", "remote_proxy_node_no_proxy" + ) + + def test_s3_with_https_proxy_list(cluster): proxy_util.simple_test(cluster, ["proxy1", "proxy2"], "https", "proxy_list_node") diff --git a/tests/integration/test_scheduler/configs/resources.xml b/tests/integration/test_scheduler/configs/resources.xml new file mode 100644 index 00000000000..197bf660500 --- /dev/null +++ b/tests/integration/test_scheduler/configs/resources.xml @@ -0,0 +1,3 @@ + + + diff --git a/tests/integration/test_scheduler/configs/resources.xml.default b/tests/integration/test_scheduler/configs/resources.xml.default new file mode 100644 index 00000000000..3b003a17557 --- /dev/null +++ b/tests/integration/test_scheduler/configs/resources.xml.default @@ -0,0 +1,76 @@ + + + + inflight_limit1000000 + priority + fifo0 + fair1 + fifo9 + fifo1 + fair90 + fifo + fifo + fifo9 + fifo9 + fifo9 + fifo9 + + + inflight_limit1000000 + priority + fifo0 + fair1 + fifo9 + fifo1 + fair90 + fifo + fifo + fifo9 + fifo9 + fifo9 + fifo9 + + + + + /prio/admin + /prio/admin + + + /prio/fair/prod + /prio/fair/prod + + + /prio/fair/dev + /prio/fair/dev + + + /prio/fair/dev + /prio/fair/dev + + + /prio/fair/sys/merges + /prio/fair/sys/merges + + + /prio/fair/sys/mutations + /prio/fair/sys/mutations + + + /prio/fair/prod_merges + /prio/fair/prod_merges + + + /prio/fair/prod_mutations + /prio/fair/prod_mutations + + + /prio/fair/dev_merges + /prio/fair/dev_merges + + + /prio/fair/dev_mutations + /prio/fair/dev_mutations + + + diff --git a/tests/integration/test_scheduler/configs/scheduler.xml b/tests/integration/test_scheduler/configs/scheduler.xml deleted file mode 100644 index 523ba1a5a98..00000000000 --- a/tests/integration/test_scheduler/configs/scheduler.xml +++ /dev/null @@ -1,62 +0,0 @@ - - - - - s3 - http://minio1:9001/root/data/ - minio - minio123 - 33554432 - 10 - 10 - network_read - network_write - - - - - -
- s3 -
-
-
-
-
- - - inflight_limit1000000 - priority - fifo0 - fair1 - fifo9 - fifo1 - - - inflight_limit1000000 - priority - fifo0 - fair1 - fifo9 - fifo1 - - - - - /prio/admin - /prio/admin - - - /prio/fair/prod - /prio/fair/prod - - - /prio/fair/dev - /prio/fair/dev - - - /prio/fair/dev - /prio/fair/dev - - -
diff --git a/tests/integration/test_scheduler/configs/storage_configuration.xml b/tests/integration/test_scheduler/configs/storage_configuration.xml new file mode 100644 index 00000000000..823a00a05de --- /dev/null +++ b/tests/integration/test_scheduler/configs/storage_configuration.xml @@ -0,0 +1,26 @@ + + + + + s3 + http://minio1:9001/root/data/ + minio + minio123 + 33554432 + 10 + 10 + network_read + network_write + + + + + +
+ s3 +
+
+
+
+
+
diff --git a/tests/integration/test_scheduler/configs/workloads.xml b/tests/integration/test_scheduler/configs/workloads.xml new file mode 100644 index 00000000000..197bf660500 --- /dev/null +++ b/tests/integration/test_scheduler/configs/workloads.xml @@ -0,0 +1,3 @@ + + + diff --git a/tests/integration/test_scheduler/configs/workloads.xml.default b/tests/integration/test_scheduler/configs/workloads.xml.default new file mode 100644 index 00000000000..f010993335d --- /dev/null +++ b/tests/integration/test_scheduler/configs/workloads.xml.default @@ -0,0 +1,4 @@ + + sys_merges + sys_mutations + diff --git a/tests/integration/test_scheduler/test.py b/tests/integration/test_scheduler/test.py index 8e37bd8d403..cde75c244e8 100644 --- a/tests/integration/test_scheduler/test.py +++ b/tests/integration/test_scheduler/test.py @@ -14,7 +14,13 @@ cluster = ClickHouseCluster(__file__) node = cluster.add_instance( "node", stay_alive=True, - main_configs=["configs/scheduler.xml"], + main_configs=[ + "configs/storage_configuration.xml", + "configs/resources.xml", + "configs/resources.xml.default", + "configs/workloads.xml", + "configs/workloads.xml.default", + ], with_minio=True, ) @@ -28,6 +34,41 @@ def start_cluster(): cluster.shutdown() +@pytest.fixture(scope="function", autouse=True) +def set_default_configs(): + node.exec_in_container( + [ + "bash", + "-c", + "cp /etc/clickhouse-server/config.d/resources.xml.default /etc/clickhouse-server/config.d/resources.xml", + ] + ) + node.exec_in_container( + [ + "bash", + "-c", + "cp /etc/clickhouse-server/config.d/workloads.xml.default /etc/clickhouse-server/config.d/workloads.xml", + ] + ) + node.query("system reload config") + yield + + +def update_workloads_config(**settings): + xml = "" + for name in settings: + xml += f"<{name}>{settings[name]}" + print(xml) + node.exec_in_container( + [ + "bash", + "-c", + f"echo '{xml}' > /etc/clickhouse-server/config.d/workloads.xml", + ] + ) + node.query("system reload config") + + def test_s3_disk(): node.query( f""" @@ -111,3 +152,302 @@ def test_s3_disk(): ) == "1\n" ) + + +def test_merge_workload(): + node.query( + f""" + drop table if exists data; + create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3'; + """ + ) + + reads_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/sys/merges'" + ).strip() + ) + writes_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/sys/merges'" + ).strip() + ) + + node.query(f"insert into data select * from numbers(1e4)") + node.query(f"insert into data select * from numbers(2e4)") + node.query(f"insert into data select * from numbers(3e4)") + node.query(f"optimize table data final") + + reads_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/sys/merges'" + ).strip() + ) + writes_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/sys/merges'" + ).strip() + ) + + assert reads_before < reads_after + assert writes_before < writes_after + + +def test_merge_workload_override(): + node.query( + f""" + drop table if exists prod_data; + drop table if exists dev_data; + create table prod_data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, 
storage_policy='s3', merge_workload='prod_merges'; + create table dev_data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3', merge_workload='dev_merges'; + """ + ) + + prod_reads_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/prod_merges'" + ).strip() + ) + prod_writes_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/prod_merges'" + ).strip() + ) + dev_reads_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/dev_merges'" + ).strip() + ) + dev_writes_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/dev_merges'" + ).strip() + ) + + node.query(f"insert into prod_data select * from numbers(1e4)") + node.query(f"insert into prod_data select * from numbers(2e4)") + node.query(f"insert into prod_data select * from numbers(3e4)") + node.query(f"insert into dev_data select * from numbers(1e4)") + node.query(f"insert into dev_data select * from numbers(2e4)") + node.query(f"insert into dev_data select * from numbers(3e4)") + node.query(f"optimize table prod_data final") + node.query(f"optimize table dev_data final") + + prod_reads_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/prod_merges'" + ).strip() + ) + prod_writes_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/prod_merges'" + ).strip() + ) + dev_reads_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/dev_merges'" + ).strip() + ) + dev_writes_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/dev_merges'" + ).strip() + ) + + assert prod_reads_before < prod_reads_after + assert prod_writes_before < prod_writes_after + assert dev_reads_before < dev_reads_after + assert dev_writes_before < dev_writes_after + + +def test_mutate_workload(): + node.query( + f""" + drop table if exists data; + create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3'; + """ + ) + + node.query(f"insert into data select * from numbers(1e4)") + node.query(f"optimize table data final") + + reads_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/sys/mutations'" + ).strip() + ) + writes_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/sys/mutations'" + ).strip() + ) + + node.query(f"alter table data update key = 1 where key = 42") + node.query(f"optimize table data final") + + reads_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/sys/mutations'" + ).strip() + ) + writes_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/sys/mutations'" + ).strip() + ) + + assert reads_before < reads_after + assert writes_before < writes_after + + +def test_mutation_workload_override(): + 
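+    # Same shape as test_merge_workload_override above, but for the per-table
+    # `mutation_workload` setting: reads and writes issued by mutations on each
+    # table should be accounted under that table's own /prio/fair/*_mutations path.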
node.query( + f""" + drop table if exists prod_data; + drop table if exists dev_data; + create table prod_data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3', mutation_workload='prod_mutations'; + create table dev_data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3', mutation_workload='dev_mutations'; + """ + ) + + node.query(f"insert into prod_data select * from numbers(1e4)") + node.query(f"optimize table prod_data final") + node.query(f"insert into dev_data select * from numbers(1e4)") + node.query(f"optimize table dev_data final") + + prod_reads_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/prod_mutations'" + ).strip() + ) + prod_writes_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/prod_mutations'" + ).strip() + ) + dev_reads_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/dev_mutations'" + ).strip() + ) + dev_writes_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/dev_mutations'" + ).strip() + ) + + node.query(f"alter table prod_data update key = 1 where key = 42") + node.query(f"optimize table prod_data final") + node.query(f"alter table dev_data update key = 1 where key = 42") + node.query(f"optimize table dev_data final") + + prod_reads_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/prod_mutations'" + ).strip() + ) + prod_writes_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/prod_mutations'" + ).strip() + ) + dev_reads_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/dev_mutations'" + ).strip() + ) + dev_writes_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/dev_mutations'" + ).strip() + ) + + assert prod_reads_before < prod_reads_after + assert prod_writes_before < prod_writes_after + assert dev_reads_before < dev_reads_after + assert dev_writes_before < dev_writes_after + + +def test_merge_workload_change(): + node.query( + f""" + drop table if exists data; + create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3'; + """ + ) + + for env in ["prod", "dev"]: + update_workloads_config(merge_workload=f"{env}_merges") + + reads_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/{env}_merges'" + ).strip() + ) + writes_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/{env}_merges'" + ).strip() + ) + + node.query(f"insert into data select * from numbers(1e4)") + node.query(f"insert into data select * from numbers(2e4)") + node.query(f"insert into data select * from numbers(3e4)") + node.query(f"optimize table data final") + + reads_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and 
path='/prio/fair/{env}_merges'" + ).strip() + ) + writes_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/{env}_merges'" + ).strip() + ) + + assert reads_before < reads_after + assert writes_before < writes_after + + +def test_mutation_workload_change(): + node.query( + f""" + drop table if exists data; + create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, storage_policy='s3'; + """ + ) + + for env in ["prod", "dev"]: + update_workloads_config(mutation_workload=f"{env}_mutations") + + node.query(f"insert into data select * from numbers(1e4)") + node.query(f"optimize table data final") + + reads_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/{env}_mutations'" + ).strip() + ) + writes_before = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/{env}_mutations'" + ).strip() + ) + + node.query(f"alter table data update key = 1 where key = 42") + node.query(f"optimize table data final") + + reads_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_read' and path='/prio/fair/{env}_mutations'" + ).strip() + ) + writes_after = int( + node.query( + f"select dequeued_requests from system.scheduler where resource='network_write' and path='/prio/fair/{env}_mutations'" + ).strip() + ) + + assert reads_before < reads_after + assert writes_before < writes_after diff --git a/tests/integration/test_settings_constraints_distributed/test.py b/tests/integration/test_settings_constraints_distributed/test.py index d29b66b43bb..fbebbac276e 100644 --- a/tests/integration/test_settings_constraints_distributed/test.py +++ b/tests/integration/test_settings_constraints_distributed/test.py @@ -136,10 +136,7 @@ def test_select_clamps_settings(): ) assert ( - distributed.query( - query, - settings={"max_memory_usage": 1, "min_untracked_memory": 4 * 1024 * 1024}, - ) + distributed.query(query, settings={"max_memory_usage": 1}) == "node1\tmax_memory_usage\t11111111\n" "node1\treadonly\t0\n" "node2\tmax_memory_usage\t0\n" diff --git a/tests/integration/test_storage_azure_blob_storage/test.py b/tests/integration/test_storage_azure_blob_storage/test.py index f836c58ce30..d986c1f9746 100644 --- a/tests/integration/test_storage_azure_blob_storage/test.py +++ b/tests/integration/test_storage_azure_blob_storage/test.py @@ -790,6 +790,25 @@ def test_read_subcolumns(cluster): assert res == "42\tcont/test_subcolumns.jsonl\t(42,42)\ttest_subcolumns.jsonl\t42\n" +def test_read_subcolumn_time(cluster): + node = cluster.instances["node"] + storage_account_url = cluster.env_variables["AZURITE_STORAGE_ACCOUNT_URL"] + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_subcolumn_time.tsv', " + f"'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto'," + f" 'a UInt32') select (42)", + ) + + res = node.query( + f"select a, dateDiff('minute', _time, now()) < 59 from azureBlobStorage('{storage_account_url}', 'cont', 'test_subcolumn_time.tsv'," + f" 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto'," + f" 'a UInt32')" + ) + + assert res == "42\t1\n" + + def 
test_read_from_not_existing_container(cluster): node = cluster.instances["node"] query = ( diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 44c0223e677..47d8f44c0b7 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -999,6 +999,20 @@ def test_read_subcolumns(started_cluster): assert res == "42\ttest_subcolumns.jsonl\t(42,42)\ttest_subcolumns.jsonl\t42\n" +def test_read_subcolumn_time(started_cluster): + node = started_cluster.instances["node1"] + + node.query( + f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumn_time.tsv', auto, 'a UInt32') select (42)" + ) + + res = node.query( + f"select a, dateDiff('minute', _time, now()) < 59 from hdfs('hdfs://hdfs1:9000/test_subcolumn_time.tsv', auto, 'a UInt32')" + ) + + assert res == "42\t1\n" + + def test_union_schema_inference_mode(started_cluster): node = started_cluster.instances["node1"] diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index 09b27fff1e8..b2ebd12ce00 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -1133,6 +1133,7 @@ def test_seekable_formats(started_cluster): exec_query_with_retry( instance, f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(1000000) settings s3_truncate_on_insert=1", + timeout=100, ) result = instance.query(f"SELECT count() FROM {table_function}") @@ -1142,6 +1143,7 @@ def test_seekable_formats(started_cluster): exec_query_with_retry( instance, f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(1500000) settings s3_truncate_on_insert=1", + timeout=100, ) result = instance.query( @@ -1169,6 +1171,7 @@ def test_seekable_formats_url(started_cluster): exec_query_with_retry( instance, f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(1500000) settings s3_truncate_on_insert=1", + timeout=100, ) result = instance.query(f"SELECT count() FROM {table_function}") @@ -1178,6 +1181,7 @@ def test_seekable_formats_url(started_cluster): exec_query_with_retry( instance, f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(1500000) settings s3_truncate_on_insert=1", + timeout=100, ) table_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_parquet', 'Parquet', 'a Int32, b String')" @@ -2149,6 +2153,21 @@ def test_read_subcolumns(started_cluster): ) +def test_read_subcolumn_time(started_cluster): + bucket = started_cluster.minio_bucket + instance = started_cluster.instances["dummy"] + + instance.query( + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_subcolumn_time.tsv', auto, 'a UInt32') select (42)" + ) + + res = instance.query( + f"select a, dateDiff('minute', _time, now()) < 59 from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_subcolumn_time.tsv', auto, 'a UInt32')" + ) + + assert res == "42\t1\n" + + def test_filtering_by_file_or_path(started_cluster): bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] diff --git a/tests/performance/least_greatest.xml b/tests/performance/least_greatest.xml new file mode 100644 index 00000000000..522bcd9004d --- /dev/null +++ b/tests/performance/least_greatest.xml @@ -0,0 +1,10 @@ + + CREATE TABLE test (id Int32, x1 
Nullable(Int32), x2 Nullable(Float32)) ENGINE = MergeTree() ORDER BY id + + INSERT INTO test SELECT number, number+1, number + 2 FROM numbers(1000000) + + SELECT COUNT(1) FROM test WHERE least(x1, x2) > 1 + SELECT COUNT(1) FROM test WHERE GREATEST(x1, x2) > 1 + + DROP TABLE IF EXISTS test + \ No newline at end of file diff --git a/tests/queries/0_stateless/00098_k_union_all.sql b/tests/queries/0_stateless/00098_k_union_all.sql index 059d27075d7..280ad5ca2e6 100644 --- a/tests/queries/0_stateless/00098_k_union_all.sql +++ b/tests/queries/0_stateless/00098_k_union_all.sql @@ -1,4 +1,5 @@ SET output_format_pretty_color=1; +SET output_format_pretty_display_footer_column_names=0; SELECT 1 FORMAT PrettySpace; SELECT 1 UNION ALL SELECT 1 FORMAT PrettySpace; SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 FORMAT PrettySpace; diff --git a/tests/queries/0_stateless/00300_csv.reference b/tests/queries/0_stateless/00300_csv.reference index e7966a9e8d9..42cd22078c4 100644 --- a/tests/queries/0_stateless/00300_csv.reference +++ b/tests/queries/0_stateless/00300_csv.reference @@ -1,11 +1,11 @@ -"Hello, ""World""",123,"[1,2,3]","(456,['abc','def'])","Newline +"Hello, ""World""",123,"[1,2,3]",456,"['abc','def']","Newline here" "x","y","z","a","b" -"Hello, ""World""",123,"[1,2,3]","(456,['abc','def'])","Newline +"Hello, ""World""",123,"[1,2,3]",456,"['abc','def']","Newline here" "x","y","z","a","b" "String","UInt8","Array(UInt8)","Tuple(UInt16, Array(String))","String" -"Hello, ""World""",123,"[1,2,3]","(456,['abc','def'])","Newline +"Hello, ""World""",123,"[1,2,3]",456,"['abc','def']","Newline here" 0,"0","[]","2000-01-01","2000-01-01 00:00:00" 1,"1","[0]","2000-01-02","2000-01-01 00:00:01" diff --git a/tests/queries/0_stateless/00309_formats.reference b/tests/queries/0_stateless/00309_formats.reference index 5c0ae4d2c3b..e637ee0363a 100644 Binary files a/tests/queries/0_stateless/00309_formats.reference and b/tests/queries/0_stateless/00309_formats.reference differ diff --git a/tests/queries/0_stateless/00405_output_format_pretty_color.sql b/tests/queries/0_stateless/00405_output_format_pretty_color.sql index bc2d0c3adbf..de83567dd5d 100644 --- a/tests/queries/0_stateless/00405_output_format_pretty_color.sql +++ b/tests/queries/0_stateless/00405_output_format_pretty_color.sql @@ -1,3 +1,4 @@ +SET output_format_pretty_display_footer_column_names=0; SET output_format_pretty_color = 0; SHOW SETTING output_format_pretty_color; diff --git a/tests/queries/0_stateless/00405_pretty_formats.sql b/tests/queries/0_stateless/00405_pretty_formats.sql index 00bb09a1c30..4715f4fc110 100644 --- a/tests/queries/0_stateless/00405_pretty_formats.sql +++ b/tests/queries/0_stateless/00405_pretty_formats.sql @@ -1,3 +1,4 @@ +SET output_format_pretty_display_footer_column_names=0; SET output_format_pretty_color = 1; SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT Pretty; diff --git a/tests/queries/0_stateless/00476_pretty_formats_and_widths.sql b/tests/queries/0_stateless/00476_pretty_formats_and_widths.sql index 4bace207fb5..59809841726 100644 --- a/tests/queries/0_stateless/00476_pretty_formats_and_widths.sql +++ b/tests/queries/0_stateless/00476_pretty_formats_and_widths.sql @@ -1,4 +1,4 @@ -SET output_format_pretty_color=1, output_format_pretty_highlight_digit_groups=0; +SET output_format_pretty_color=1, output_format_pretty_highlight_digit_groups=0, 
output_format_pretty_display_footer_column_names=0; SELECT toUInt64(round(exp10(number))) AS x, toString(x) AS s FROM system.numbers LIMIT 10 FORMAT Pretty; SELECT toUInt64(round(exp10(number))) AS x, toString(x) AS s FROM system.numbers LIMIT 10 FORMAT PrettyCompact; SELECT toUInt64(round(exp10(number))) AS x, toString(x) AS s FROM system.numbers LIMIT 10 FORMAT PrettySpace; diff --git a/tests/queries/0_stateless/00569_parse_date_time_best_effort.sql b/tests/queries/0_stateless/00569_parse_date_time_best_effort.sql index 511addb4e4d..ca423c1922a 100644 --- a/tests/queries/0_stateless/00569_parse_date_time_best_effort.sql +++ b/tests/queries/0_stateless/00569_parse_date_time_best_effort.sql @@ -1,3 +1,4 @@ +SET output_format_pretty_display_footer_column_names=0; SELECT s, parseDateTimeBestEffortOrNull(s, 'UTC') AS a, diff --git a/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh b/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh index 7ec4d99f028..f2b4cae8bb0 100755 --- a/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh +++ b/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh @@ -11,9 +11,11 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE single_col_partition_key(x UInt32) EN ${CLICKHOUSE_CLIENT} --query="INSERT INTO single_col_partition_key VALUES (1), (2), (3), (4), (11), (12), (20)" -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM single_col_partition_key WHERE x < 3 FORMAT XML" | grep -F rows_read | sed 's/^[ \t]*//g' -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM single_col_partition_key WHERE x >= 11 FORMAT XML" | grep -F rows_read | sed 's/^[ \t]*//g' -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM single_col_partition_key WHERE x = 20 FORMAT XML" | grep -F rows_read | sed 's/^[ \t]*//g' +DISABLE_COUNT_OPTIMIZATION="SETTINGS optimize_trivial_count_query = 0, optimize_use_implicit_projections = 0" + +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM single_col_partition_key WHERE x < 3 FORMAT XML $DISABLE_COUNT_OPTIMIZATION" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM single_col_partition_key WHERE x >= 11 FORMAT XML $DISABLE_COUNT_OPTIMIZATION" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM single_col_partition_key WHERE x = 20 FORMAT XML $DISABLE_COUNT_OPTIMIZATION" | grep -F rows_read | sed 's/^[ \t]*//g' ${CLICKHOUSE_CLIENT} --query="DROP TABLE single_col_partition_key" @@ -31,14 +33,14 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO composite_partition_key VALUES \ ${CLICKHOUSE_CLIENT} --query="INSERT INTO composite_partition_key VALUES \ (301, 20, 3), (302, 21, 3), (303, 22, 3)" -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a > 400 FORMAT XML SETTINGS optimize_trivial_count_query = 0" | grep -F rows_read | sed 's/^[ \t]*//g' -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE b = 11 FORMAT XML SETTINGS optimize_trivial_count_query = 0" | grep -F rows_read | sed 's/^[ \t]*//g' -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE c = 4 FORMAT XML SETTINGS optimize_trivial_count_query = 0" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a > 400 FORMAT XML $DISABLE_COUNT_OPTIMIZATION" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE b = 11 FORMAT XML $DISABLE_COUNT_OPTIMIZATION" | grep -F rows_read | 
sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE c = 4 FORMAT XML $DISABLE_COUNT_OPTIMIZATION" | grep -F rows_read | sed 's/^[ \t]*//g' -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a < 200 AND c = 2 FORMAT XML SETTINGS optimize_trivial_count_query = 0" | grep -F rows_read | sed 's/^[ \t]*//g' -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a = 301 AND b < 20 FORMAT XML SETTINGS optimize_trivial_count_query = 0" | grep -F rows_read | sed 's/^[ \t]*//g' -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE b >= 12 AND c = 2 FORMAT XML SETTINGS optimize_trivial_count_query = 0" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a < 200 AND c = 2 FORMAT XML $DISABLE_COUNT_OPTIMIZATION" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a = 301 AND b < 20 FORMAT XML $DISABLE_COUNT_OPTIMIZATION" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE b >= 12 AND c = 2 FORMAT XML $DISABLE_COUNT_OPTIMIZATION" | grep -F rows_read | sed 's/^[ \t]*//g' -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a = 301 AND b = 21 AND c = 3 FORMAT XML SETTINGS optimize_trivial_count_query = 0" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a = 301 AND b = 21 AND c = 3 FORMAT XML $DISABLE_COUNT_OPTIMIZATION" | grep -F rows_read | sed 's/^[ \t]*//g' ${CLICKHOUSE_CLIENT} --query="DROP TABLE composite_partition_key" diff --git a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh index e10b2f86145..7766d7720e1 100755 --- a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh +++ b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh @@ -45,6 +45,7 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO enum_test_table VALUES ('hello'), ('wo ${CLICKHOUSE_CLIENT} --query="INSERT INTO date_test_table VALUES (1), (2), (2), (256), (257), (257);" CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=debug/g') +CLICKHOUSE_CLIENT="${CLICKHOUSE_CLIENT} --optimize_use_implicit_projections 0" ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM string_test_table WHERE toUInt64(val) == 0;" 2>&1 |grep -q "3 marks to read from 1 ranges" && echo "no monotonic int case: String -> UInt64" ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM fixed_string_test_table WHERE toUInt64(val) == 0;" 2>&1 |grep -q "3 marks to read from 1 ranges" && echo "no monotonic int case: FixedString -> UInt64" diff --git a/tests/queries/0_stateless/00695_pretty_max_column_pad_width.sql b/tests/queries/0_stateless/00695_pretty_max_column_pad_width.sql index 9a421361b71..24dd9336a88 100644 --- a/tests/queries/0_stateless/00695_pretty_max_column_pad_width.sql +++ b/tests/queries/0_stateless/00695_pretty_max_column_pad_width.sql @@ -1,2 +1,3 @@ +SET output_format_pretty_display_footer_column_names=0; SET output_format_pretty_max_column_pad_width = 250; SELECT range(number) FROM system.numbers LIMIT 100 FORMAT PrettyCompactNoEscapes; diff --git a/tests/queries/0_stateless/00732_base64_functions.sql 
b/tests/queries/0_stateless/00732_base64_functions.sql index 3c60bf939fe..b4be8db4ede 100644 --- a/tests/queries/0_stateless/00732_base64_functions.sql +++ b/tests/queries/0_stateless/00732_base64_functions.sql @@ -1,6 +1,5 @@ -- Tags: no-fasttest - -SET send_logs_level = 'fatal'; +-- no-fasttest because aklomp-base64 library is required SELECT base64Encode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } SELECT base64Decode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.sql b/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.sql index 1e3b24e60c0..7c2b75cdd70 100644 --- a/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.sql +++ b/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.sql @@ -1,3 +1,4 @@ +SET output_format_pretty_display_footer_column_names=0; SELECT s, parseDateTimeBestEffortOrNull(s, 'UTC') AS a, diff --git a/tests/queries/0_stateless/01016_input_null_as_default.sh b/tests/queries/0_stateless/01016_input_null_as_default.sh index 8d6a9a07435..24d93b2703c 100755 --- a/tests/queries/0_stateless/01016_input_null_as_default.sh +++ b/tests/queries/0_stateless/01016_input_null_as_default.sh @@ -11,8 +11,8 @@ $CLICKHOUSE_CLIENT --query="CREATE TABLE default_by_other_column (a Float32 DEFA echo 'CSV' echo '\N, 1, \N, "2019-07-22", "[10, 20, 30]", \N -1, world, 3, "2019-07-23", \N, "('\''tuple'\'', 3.14)" -2, \N, 123, \N, "[]", "('\''test'\'', 2.71828)" +1, world, 3, "2019-07-23", \N, tuple, 3.14 +2, \N, 123, \N, "[]", test, 2.71828 3, \N, \N, \N, \N, \N' | $CLICKHOUSE_CLIENT --input_format_null_as_default=1 --query="INSERT INTO null_as_default FORMAT CSV"; $CLICKHOUSE_CLIENT --query="SELECT * FROM null_as_default ORDER BY i"; $CLICKHOUSE_CLIENT --query="TRUNCATE TABLE null_as_default"; diff --git a/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql b/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql index de84846c1d7..c13a0859183 100644 --- a/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql +++ b/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql @@ -7,8 +7,7 @@ -- sizeof(HLL) is (2^K * 6 / 8) -- hence max_memory_usage for 100 rows = (96<<10)*100 = 9830400 -SET use_uncompressed_cache = 0; -SET min_untracked_memory = '4Mi'; +SET use_uncompressed_cache = 0; -- HashTable for UInt32 (used until (1<<13) elements), hence 8192 elements SELECT 'UInt32'; diff --git a/tests/queries/0_stateless/01019_alter_materialized_view_consistent.sh b/tests/queries/0_stateless/01019_alter_materialized_view_consistent.sh index 3a2eac1f38f..26c2e54e637 100755 --- a/tests/queries/0_stateless/01019_alter_materialized_view_consistent.sh +++ b/tests/queries/0_stateless/01019_alter_materialized_view_consistent.sh @@ -54,10 +54,10 @@ function alter_thread() { for i in {0..5}; do ALTER[$i]="ALTER TABLE mv MODIFY QUERY SELECT v == 1 as test, v as case FROM src_a;" done - # Insert 3 ALTERs to src_b, one in the first half of the array and two in arbitrary positions. - ALTER[$RANDOM % 3]="ALTER TABLE mv MODIFY QUERY SELECT v == 2 as test, v as case FROM src_b;" - ALTER[$RANDOM % 6]="ALTER TABLE mv MODIFY QUERY SELECT v == 2 as test, v as case FROM src_b;" - ALTER[$RANDOM % 6]="ALTER TABLE mv MODIFY QUERY SELECT v == 2 as test, v as case FROM src_b;" + # Insert 3 ALTERs to src_b randomly in each third of array. 
+ ALTER[$RANDOM % 2]="ALTER TABLE mv MODIFY QUERY SELECT v == 2 as test, v as case FROM src_b;" + ALTER[$RANDOM % 2 + 2]="ALTER TABLE mv MODIFY QUERY SELECT v == 2 as test, v as case FROM src_b;" + ALTER[$RANDOM % 2 + 4]="ALTER TABLE mv MODIFY QUERY SELECT v == 2 as test, v as case FROM src_b;" i=0 while true; do diff --git a/tests/queries/0_stateless/01246_buffer_flush.sql b/tests/queries/0_stateless/01246_buffer_flush.sql index 36bcaae383f..66f93371c29 100644 --- a/tests/queries/0_stateless/01246_buffer_flush.sql +++ b/tests/queries/0_stateless/01246_buffer_flush.sql @@ -9,14 +9,14 @@ create table data_01256 as system.numbers Engine=Memory(); select 'min'; create table buffer_01256 as system.numbers Engine=Buffer(currentDatabase(), data_01256, 1, - 2, 100, /* time */ + 5, 100, /* time */ 4, 100, /* rows */ 1, 1e6 /* bytes */ ); insert into buffer_01256 select * from system.numbers limit 5; select count() from data_01256; --- sleep 2 (min time) + 1 (round up) + bias (1) = 4 -select sleepEachRow(2) from numbers(2) FORMAT Null; +-- It is enough to ensure that the buffer will be flushed earlier than 2*min_time (10 sec) +select sleepEachRow(9) FORMAT Null SETTINGS function_sleep_max_microseconds_per_block=10e6; select count() from data_01256; drop table buffer_01256; diff --git a/tests/queries/0_stateless/01293_pretty_max_value_width.sql b/tests/queries/0_stateless/01293_pretty_max_value_width.sql index a8e0f19f58e..f1dc0cd1912 100644 --- a/tests/queries/0_stateless/01293_pretty_max_value_width.sql +++ b/tests/queries/0_stateless/01293_pretty_max_value_width.sql @@ -1,4 +1,5 @@ SET output_format_pretty_color = 1, output_format_pretty_max_value_width_apply_for_single_value = 1, output_format_pretty_row_numbers = 0; +SET output_format_pretty_display_footer_column_names=0; SELECT 'привет' AS x, 'мир' AS y FORMAT Pretty; SET output_format_pretty_max_value_width = 5; diff --git a/tests/queries/0_stateless/01351_parse_date_time_best_effort_us.sql b/tests/queries/0_stateless/01351_parse_date_time_best_effort_us.sql index ca3166142f0..f83d5d96384 100644 --- a/tests/queries/0_stateless/01351_parse_date_time_best_effort_us.sql +++ b/tests/queries/0_stateless/01351_parse_date_time_best_effort_us.sql @@ -1,3 +1,4 @@ +SET output_format_pretty_display_footer_column_names=0; SELECT 'parseDateTimeBestEffortUS'; SELECT diff --git a/tests/queries/0_stateless/01509_output_format_pretty_row_numbers.sql b/tests/queries/0_stateless/01509_output_format_pretty_row_numbers.sql index 0a09aef7fb2..04d02b8d389 100644 --- a/tests/queries/0_stateless/01509_output_format_pretty_row_numbers.sql +++ b/tests/queries/0_stateless/01509_output_format_pretty_row_numbers.sql @@ -1,5 +1,6 @@ SET output_format_pretty_color=1; SET output_format_pretty_row_numbers=0; +SET output_format_pretty_display_footer_column_names=0; SELECT * FROM numbers(10) FORMAT Pretty; SELECT * FROM numbers(10) FORMAT PrettyCompact; SELECT * FROM numbers(10) FORMAT PrettyCompactMonoBlock; diff --git a/tests/queries/0_stateless/01553_settings_early_apply.sql b/tests/queries/0_stateless/01553_settings_early_apply.sql index 4c168bdb3a5..821e09f6651 100644 --- a/tests/queries/0_stateless/01553_settings_early_apply.sql +++ b/tests/queries/0_stateless/01553_settings_early_apply.sql @@ -1,3 +1,4 @@ +set output_format_pretty_display_footer_column_names=0; set output_format_write_statistics=0; select * from numbers(100) settings max_result_rows = 1; -- { serverError TOO_MANY_ROWS_OR_BYTES } diff --git
a/tests/queries/0_stateless/01676_dictget_in_default_expression.sql b/tests/queries/0_stateless/01676_dictget_in_default_expression.sql index 54e46a2b718..db23ae1919c 100644 --- a/tests/queries/0_stateless/01676_dictget_in_default_expression.sql +++ b/tests/queries/0_stateless/01676_dictget_in_default_expression.sql @@ -22,7 +22,8 @@ DETACH DATABASE test_01676; ATTACH DATABASE test_01676; SELECT 'status_after_detach_and_attach:'; -SELECT status FROM system.dictionaries WHERE database='test_01676' AND name='dict'; +-- It may not be loaded yet, or may not even have finished attaching, in the case of asynchronous table loading. +SELECT COALESCE((SELECT status FROM system.dictionaries WHERE database='test_01676' AND name='dict')::Nullable(String), 'NOT_LOADED'); INSERT INTO test_01676.table (x) VALUES (toInt64(4)); SELECT * FROM test_01676.table ORDER BY x; diff --git a/tests/queries/0_stateless/01710_projection_pk_trivial_count.reference b/tests/queries/0_stateless/01710_projection_pk_trivial_count.reference new file mode 100644 index 00000000000..43316772467 --- /dev/null +++ b/tests/queries/0_stateless/01710_projection_pk_trivial_count.reference @@ -0,0 +1,3 @@ + ReadFromMergeTree (default.x) + ReadFromPreparedSource (Optimized trivial count) +5 diff --git a/tests/queries/0_stateless/01710_projection_pk_trivial_count.sql b/tests/queries/0_stateless/01710_projection_pk_trivial_count.sql new file mode 100644 index 00000000000..ce9eadf06b2 --- /dev/null +++ b/tests/queries/0_stateless/01710_projection_pk_trivial_count.sql @@ -0,0 +1,8 @@ +drop table if exists x; + +create table x (i int) engine MergeTree order by i settings index_granularity = 3; +insert into x select * from numbers(10); +select * from (explain select count() from x where (i >= 3 and i <= 6) or i = 7) where explain like '%ReadFromPreparedSource%' or explain like '%ReadFromMergeTree%'; +select count() from x where (i >= 3 and i <= 6) or i = 7; + +drop table x; diff --git a/tests/queries/0_stateless/01822_union_and_constans_error.reference b/tests/queries/0_stateless/01822_union_and_constans_error.reference index d00491fd7e5..e69de29bb2d 100644 --- a/tests/queries/0_stateless/01822_union_and_constans_error.reference +++ b/tests/queries/0_stateless/01822_union_and_constans_error.reference @@ -1 +0,0 @@ -1 diff --git a/tests/queries/0_stateless/01822_union_and_constans_error.sql b/tests/queries/0_stateless/01822_union_and_constans_error.sql index 38b7df700cd..9017e8769eb 100644 --- a/tests/queries/0_stateless/01822_union_and_constans_error.sql +++ b/tests/queries/0_stateless/01822_union_and_constans_error.sql @@ -15,6 +15,6 @@ SELECT isNull(t0.c0) OR COUNT('\n?pVa') FROM t0 GROUP BY t0.c0 HAVING isNull(isNull(t0.c0)) -SETTINGS aggregate_functions_null_for_empty = 1, enable_optimize_predicate_expression = 0; +SETTINGS aggregate_functions_null_for_empty = 1, enable_optimize_predicate_expression = 0 format Null; drop table if exists t0; diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index d625feb94d3..d15c1545135 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -4,7 +4,7 @@ CREATE TABLE system.aggregate_function_combinators ( `is_internal` UInt8 ) ENGINE = SystemAggregateFunctionCombinators -COMMENT 'SYSTEM TABLE is built on the fly.'
+COMMENT 'Contains a list of all available aggregate function combinators, which could be applied to aggregate functions and change the way they work.' CREATE TABLE system.asynchronous_inserts ( `query` String, @@ -17,7 +17,7 @@ CREATE TABLE system.asynchronous_inserts `entries.bytes` Array(UInt64) ) ENGINE = SystemAsynchronousInserts -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information about pending asynchronous inserts in queue in server\'s memory.' CREATE TABLE system.asynchronous_metrics ( `metric` String, @@ -25,14 +25,14 @@ CREATE TABLE system.asynchronous_metrics `description` String ) ENGINE = SystemAsynchronousMetrics -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains metrics that are calculated periodically in the background. For example, the amount of RAM in use.' CREATE TABLE system.build_options ( `name` String, `value` String ) ENGINE = SystemBuildOptions -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all build flags, compiler options and commit hash for used build.' CREATE TABLE system.clusters ( `cluster` String, @@ -55,14 +55,14 @@ CREATE TABLE system.clusters `name` String ALIAS cluster ) ENGINE = SystemClusters -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information about clusters defined in the configuration file or generated by a Replicated database.' CREATE TABLE system.collations ( `name` String, `language` Nullable(String) ) ENGINE = SystemTableCollations -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all available collations for alphabetical comparison of strings.' CREATE TABLE system.columns ( `database` String, @@ -88,13 +88,13 @@ CREATE TABLE system.columns `datetime_precision` Nullable(UInt64) ) ENGINE = SystemColumns -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Lists all columns from all tables of the current server.' CREATE TABLE system.contributors ( `name` String ) ENGINE = SystemContributors -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all ClickHouse contributors <3' CREATE TABLE system.current_roles ( `role_name` String, @@ -102,7 +102,7 @@ CREATE TABLE system.current_roles `is_default` UInt8 ) ENGINE = SystemCurrentRoles -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains active roles of a current user. SET ROLE changes the contents of this table.' CREATE TABLE system.data_skipping_indices ( `database` String, @@ -117,7 +117,7 @@ CREATE TABLE system.data_skipping_indices `marks` UInt64 ) ENGINE = SystemDataSkippingIndices -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains all the information about all the data skipping indices in tables, similar to system.columns.' CREATE TABLE system.data_type_families ( `name` String, @@ -125,7 +125,7 @@ CREATE TABLE system.data_type_families `alias_to` String ) ENGINE = SystemTableDataTypeFamilies -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all available native data types along with all the aliases used for compatibility with other DBMS.' CREATE TABLE system.databases ( `name` String, @@ -138,7 +138,7 @@ CREATE TABLE system.databases `database` String ALIAS name ) ENGINE = SystemDatabases -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Lists all databases of the current server.' CREATE TABLE system.detached_parts ( `database` String, @@ -155,7 +155,7 @@ CREATE TABLE system.detached_parts `level` Nullable(UInt32) ) ENGINE = SystemDetachedParts -COMMENT 'SYSTEM TABLE is built on the fly.' 
+COMMENT 'Contains a list of all parts which are being found in /detached directory along with a reason why it was detached. ClickHouse server doesn\'t use such parts anyhow.' CREATE TABLE system.dictionaries ( `database` String, @@ -185,7 +185,7 @@ CREATE TABLE system.dictionaries `comment` String ) ENGINE = SystemDictionaries -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information about dictionaries.' CREATE TABLE system.disks ( `name` String, @@ -205,7 +205,7 @@ CREATE TABLE system.disks `cache_path` String ) ENGINE = SystemDisks -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information about disks defined in the server configuration.' CREATE TABLE system.distributed_ddl_queue ( `entry` String, @@ -225,7 +225,7 @@ CREATE TABLE system.distributed_ddl_queue `query_duration_ms` Nullable(UInt64) ) ENGINE = SystemDDLWorkerQueue -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information about distributed DDL queries (ON CLUSTER clause) that were executed on a cluster.' CREATE TABLE system.distribution_queue ( `database` String, @@ -241,7 +241,7 @@ CREATE TABLE system.distribution_queue `last_exception_time` DateTime ) ENGINE = SystemDistributionQueue -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information about local files that are in the queue to be sent to the shards. These local files contain new parts that are created by inserting new data into the Distributed table in asynchronous mode.' CREATE TABLE system.enabled_roles ( `role_name` String, @@ -250,7 +250,7 @@ CREATE TABLE system.enabled_roles `is_default` UInt8 ) ENGINE = SystemEnabledRoles -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains all active roles at the moment, including current role of the current user and granted roles for current role.' CREATE TABLE system.errors ( `name` String, @@ -262,7 +262,7 @@ CREATE TABLE system.errors `remote` UInt8 ) ENGINE = SystemErrors -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all errors which have ever happened including the error code, last time and message with unsymbolized stacktrace.' CREATE TABLE system.events ( `event` String, @@ -271,7 +271,7 @@ CREATE TABLE system.events `name` String ALIAS event ) ENGINE = SystemEvents -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains profiling events and their current value.' CREATE TABLE system.formats ( `name` String, @@ -281,7 +281,7 @@ CREATE TABLE system.formats `supports_parallel_formatting` UInt8 ) ENGINE = SystemFormats -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all the formats along with flags whether a format is suitable for input/output or whether it supports parallelization.' CREATE TABLE system.functions ( `name` String, @@ -299,7 +299,7 @@ CREATE TABLE system.functions `categories` String ) ENGINE = SystemFunctions -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all available ordinary and aggregate functions with their descriptions.' CREATE TABLE system.graphite_retentions ( `config_name` String, @@ -314,7 +314,7 @@ CREATE TABLE system.graphite_retentions `Tables.table` Array(String) ) ENGINE = SystemGraphite -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information about parameters graphite_rollup which are used in tables with *GraphiteMergeTree engines.' 
CREATE TABLE system.licenses ( `library_name` String, @@ -323,14 +323,14 @@ CREATE TABLE system.licenses `license_text` String ) ENGINE = SystemLicenses -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains licenses of third-party libraries that are located in the contrib directory of ClickHouse sources.' CREATE TABLE system.macros ( `macro` String, `substitution` String ) ENGINE = SystemMacros -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all macros defined in server configuration.' CREATE TABLE system.merge_tree_settings ( `name` String, @@ -344,7 +344,7 @@ CREATE TABLE system.merge_tree_settings `is_obsolete` UInt8 ) ENGINE = SystemMergeTreeSettings -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all MergeTree engine specific settings, their current and default values along with descriptions. You may change any of them in SETTINGS section in CREATE query.' CREATE TABLE system.merges ( `database` String, @@ -373,7 +373,7 @@ CREATE TABLE system.merges `merge_algorithm` String ) ENGINE = SystemMerges -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of merges currently executing merges of MergeTree tables and their progress. Each merge operation is represented by a single row.' CREATE TABLE system.metrics ( `metric` String, @@ -382,7 +382,7 @@ CREATE TABLE system.metrics `name` String ALIAS metric ) ENGINE = SystemMetrics -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains metrics which can be calculated instantly, or have a current value. For example, the number of simultaneously processed queries or the current replica delay. This table is always up to date.' CREATE TABLE system.moves ( `database` String, @@ -395,7 +395,7 @@ CREATE TABLE system.moves `thread_id` UInt64 ) ENGINE = SystemMoves -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information about in-progress data part moves of MergeTree tables. Each data part movement is represented by a single row.' CREATE TABLE system.mutations ( `database` String, @@ -414,25 +414,25 @@ CREATE TABLE system.mutations `latest_fail_reason` String ) ENGINE = SystemMutations -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of mutations and their progress. Each mutation command is represented by a single row.' CREATE TABLE system.numbers ( `number` UInt64 ) ENGINE = SystemNumbers -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Generates all natural numbers, starting from 0 (to 2^64 - 1, and then again) in sorted order.' CREATE TABLE system.numbers_mt ( `number` UInt64 ) ENGINE = SystemNumbers -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Multithreaded version of `system.numbers`. Numbers order is not guaranteed.' CREATE TABLE system.one ( `dummy` UInt8 ) ENGINE = SystemOne -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'This table contains a single row with a single dummy UInt8 column containing the value 0. Used when the table is not specified explicitly, for example in queries like `SELECT 1`.' CREATE TABLE system.part_moves_between_shards ( `database` String, @@ -451,7 +451,7 @@ CREATE TABLE system.part_moves_between_shards `last_exception` String ) ENGINE = SystemShardMoves -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information about parts which are currently in a process of moving between shards and their progress.' 
CREATE TABLE system.parts ( `partition` String, @@ -522,7 +522,7 @@ CREATE TABLE system.parts `part_name` String ALIAS name ) ENGINE = SystemParts -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of currently existing (both active and inactive) parts of all *-MergeTree tables. Each part is represented by a single row.' CREATE TABLE system.parts_columns ( `partition` String, @@ -582,7 +582,7 @@ CREATE TABLE system.parts_columns `part_name` String ALIAS name ) ENGINE = SystemPartsColumns -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of columns of all currently existing parts of all MergeTree tables. Each column is represented by a single row.' CREATE TABLE system.processes ( `is_initial_query` UInt8, @@ -630,7 +630,7 @@ CREATE TABLE system.processes `Settings.Values` Array(String) ALIAS mapValues(Settings) ) ENGINE = SystemProcesses -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of currently executing processes (queries) with their progress.' CREATE TABLE system.projection_parts ( `partition` String, @@ -698,7 +698,7 @@ CREATE TABLE system.projection_parts `part_name` String ALIAS name ) ENGINE = SystemProjectionParts -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of currently existing projection parts (a copy of some part containing aggregated data or just sorted in different order) created for all the projections for all tables within a cluster.' CREATE TABLE system.projection_parts_columns ( `partition` String, @@ -754,7 +754,7 @@ CREATE TABLE system.projection_parts_columns `part_name` String ALIAS name ) ENGINE = SystemProjectionPartsColumns -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of columns of all currently existing projection parts of all MergeTree tables. Each column is represented by a single row.' CREATE TABLE system.quota_limits ( `quota_name` String, @@ -773,7 +773,7 @@ CREATE TABLE system.quota_limits `max_failed_sequential_authentications` Nullable(UInt64) ) ENGINE = SystemQuotaLimits -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information about maximums for all intervals of all quotas. Any number of rows or zero can correspond to specific quota.' CREATE TABLE system.quota_usage ( `quota_name` String, @@ -805,7 +805,7 @@ CREATE TABLE system.quota_usage `max_failed_sequential_authentications` Nullable(UInt64) ) ENGINE = SystemQuotaUsage -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains quota usage by the current user: how much is used and how much is left.' CREATE TABLE system.quotas ( `name` String, @@ -818,7 +818,7 @@ CREATE TABLE system.quotas `apply_to_except` Array(String) ) ENGINE = SystemQuotas -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information about quotas.' CREATE TABLE system.quotas_usage ( `quota_name` String, @@ -851,7 +851,7 @@ CREATE TABLE system.quotas_usage `max_failed_sequential_authentications` Nullable(UInt64) ) ENGINE = SystemQuotasUsage -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains quota usage by all users.' CREATE TABLE system.replicas ( `database` String, @@ -891,7 +891,7 @@ CREATE TABLE system.replicas `replica_is_active` Map(String, UInt8) ) ENGINE = SystemReplicas -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information and status of all table replicas on current server. Each replica is represented by a single row.' 
CREATE TABLE system.replicated_fetches ( `database` String, @@ -912,7 +912,7 @@ CREATE TABLE system.replicated_fetches `thread_id` UInt64 ) ENGINE = SystemReplicatedFetches -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information about currently running background fetches.' CREATE TABLE system.replicated_merge_tree_settings ( `name` String, @@ -926,7 +926,7 @@ CREATE TABLE system.replicated_merge_tree_settings `is_obsolete` UInt8 ) ENGINE = SystemReplicatedMergeTreeSettings -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all ReplicatedMergeTree engine specific settings, their current and default values along with descriptions. You may change any of them in SETTINGS section in CREATE query. ' CREATE TABLE system.replication_queue ( `database` String, @@ -952,7 +952,7 @@ CREATE TABLE system.replication_queue `merge_type` String ) ENGINE = SystemReplicationQueue -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information about tasks from replication queues stored in ClickHouse Keeper, or ZooKeeper, for each table replica.' CREATE TABLE system.role_grants ( `user_name` Nullable(String), @@ -963,7 +963,7 @@ CREATE TABLE system.role_grants `with_admin_option` UInt8 ) ENGINE = SystemRoleGrants -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains the role grants for users and roles. To add entries to this table, use `GRANT role TO user`. Using this table you may find out which roles are assigned to which users or which roles a user has.' CREATE TABLE system.roles ( `name` String, @@ -971,7 +971,7 @@ CREATE TABLE system.roles `storage` String ) ENGINE = SystemRoles -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all roles created at the server.' CREATE TABLE system.row_policies ( `name` String, @@ -987,7 +987,7 @@ CREATE TABLE system.row_policies `apply_to_except` Array(String) ) ENGINE = SystemRowPolicies -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains filters for one particular table, as well as a list of roles and/or users which should use this row policy.' CREATE TABLE system.settings ( `name` String, @@ -1003,7 +1003,7 @@ CREATE TABLE system.settings `is_obsolete` UInt8 ) ENGINE = SystemSettings -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all user-level settings (which can be modified in a scope of query or session), their current and default values along with descriptions.' CREATE TABLE system.settings_profile_elements ( `profile_name` Nullable(String), @@ -1018,7 +1018,7 @@ CREATE TABLE system.settings_profile_elements `inherit_profile` Nullable(String) ) ENGINE = SystemSettingsProfileElements -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Describes the content of each settings profile configured on the server. Including settings constraints, roles and users for which the settings are applied, and parent settings profiles.' CREATE TABLE system.settings_profiles ( `name` String, @@ -1030,7 +1030,7 @@ CREATE TABLE system.settings_profiles `apply_to_except` Array(String) ) ENGINE = SystemSettingsProfiles -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains properties of configured setting profiles.' CREATE TABLE system.stack_trace ( `thread_name` String, @@ -1039,7 +1039,7 @@ CREATE TABLE system.stack_trace `trace` Array(UInt64) ) ENGINE = SystemStackTrace -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Allows to obtain an unsymbolized stacktrace from all the threads of the server process.' 
CREATE TABLE system.storage_policies ( `policy_name` String, @@ -1054,7 +1054,7 @@ CREATE TABLE system.storage_policies `load_balancing` Enum8('ROUND_ROBIN' = 0, 'LEAST_USED' = 1) ) ENGINE = SystemStoragePolicies -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains information about storage policies and volumes defined in the server configuration.' CREATE TABLE system.table_engines ( `name` String, @@ -1068,7 +1068,7 @@ CREATE TABLE system.table_engines `supports_parallel_insert` UInt8 ) ENGINE = SystemTableEngines -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all available table engines along with information whether a particular table engine supports some specific features (e.g. settings, skipping indices, projections, replication, TTL, deduplication, parallel insert, etc.)' CREATE TABLE system.table_functions ( `name` String, @@ -1076,7 +1076,7 @@ CREATE TABLE system.table_functions `allow_readonly` UInt8 ) ENGINE = SystemTableFunctions -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all available table functions with their descriptions.' CREATE TABLE system.tables ( `database` String, @@ -1115,13 +1115,13 @@ CREATE TABLE system.tables `table` String ALIAS name ) ENGINE = SystemTables -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Lists all tables of the current server.' CREATE TABLE system.time_zones ( `time_zone` String ) ENGINE = SystemTimeZones -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of time zones that are supported by the ClickHouse server. This list of timezones might vary depending on the version of ClickHouse.' CREATE TABLE system.user_directories ( `name` String, @@ -1130,7 +1130,7 @@ CREATE TABLE system.user_directories `precedence` UInt64 ) ENGINE = SystemUserDirectories -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains the information about configured user directories - directories on the file system from which ClickHouse server is allowed to read user provided data.' CREATE TABLE system.users ( `name` String, @@ -1151,22 +1151,22 @@ CREATE TABLE system.users `default_database` String ) ENGINE = SystemUsers -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains a list of all users profiles either configured at the server through a configuration file or created via SQL.' CREATE TABLE system.warnings ( `message` String ) ENGINE = SystemWarnings -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Contains warnings about server configuration to be displayed by clickhouse-client right after it connects to the server.' CREATE TABLE system.zeros ( `zero` UInt8 ) ENGINE = SystemZeros -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Produces unlimited number of non-materialized zeros.' CREATE TABLE system.zeros_mt ( `zero` UInt8 ) ENGINE = SystemZeros -COMMENT 'SYSTEM TABLE is built on the fly.' +COMMENT 'Multithreaded version of system.zeros.' 
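-- A minimal side sketch, assuming the description strings above are also exposed via the `comment` column of system.tables (which is what SHOW CREATE renders here): listing system tables that still lack a description.
SELECT name FROM system.tables WHERE database = 'system' AND comment = '' ORDER BY name;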
diff --git a/tests/queries/0_stateless/02118_show_create_table_rocksdb.reference b/tests/queries/0_stateless/02118_show_create_table_rocksdb.reference index 848abb332bb..e1e1ab3f441 100644 --- a/tests/queries/0_stateless/02118_show_create_table_rocksdb.reference +++ b/tests/queries/0_stateless/02118_show_create_table_rocksdb.reference @@ -1 +1 @@ -CREATE TABLE system.rocksdb\n(\n `database` String,\n `table` String,\n `name` String,\n `value` UInt64\n)\nENGINE = SystemRocksDB\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.rocksdb\n(\n `database` String,\n `table` String,\n `name` String,\n `value` UInt64\n)\nENGINE = SystemRocksDB\nCOMMENT \'Contains a list of metrics exposed from embedded RocksDB.\' diff --git a/tests/queries/0_stateless/02156_storage_merge_prewhere.reference b/tests/queries/0_stateless/02156_storage_merge_prewhere.reference index 86a36a9392c..8a18c609ede 100644 --- a/tests/queries/0_stateless/02156_storage_merge_prewhere.reference +++ b/tests/queries/0_stateless/02156_storage_merge_prewhere.reference @@ -1,6 +1,3 @@ - Prewhere info - Prewhere filter - Prewhere filter column: and(notEmpty(v), equals(k, 3)) (removed) Prewhere info Prewhere filter Prewhere filter column: and(notEmpty(v), equals(k, 3)) (removed) @@ -8,8 +5,15 @@ Prewhere filter Prewhere filter column: and(notEmpty(v), equals(k, 3)) (removed) 2 - Filter column: and(equals(k, 3), notEmpty(v)) (removed) + Filter column: and(equals(k, 3), notEmpty(v)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(notEmpty(v), equals(k, 3)) (removed) 2 - Filter column: and(equals(k, 3), notEmpty(v)) (removed) - Filter column: and(equals(k, 3), notEmpty(v)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(notEmpty(v), equals(k, 3)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(notEmpty(v), equals(k, 3)) (removed) 2 diff --git a/tests/queries/0_stateless/02156_storage_merge_prewhere_not_ready_set_bug.reference b/tests/queries/0_stateless/02156_storage_merge_prewhere_not_ready_set_bug.reference new file mode 100644 index 00000000000..20c58c33770 --- /dev/null +++ b/tests/queries/0_stateless/02156_storage_merge_prewhere_not_ready_set_bug.reference @@ -0,0 +1 @@ +59900 1000 1396 diff --git a/tests/queries/0_stateless/02156_storage_merge_prewhere_not_ready_set_bug.sql b/tests/queries/0_stateless/02156_storage_merge_prewhere_not_ready_set_bug.sql new file mode 100644 index 00000000000..fc18c97cb6e --- /dev/null +++ b/tests/queries/0_stateless/02156_storage_merge_prewhere_not_ready_set_bug.sql @@ -0,0 +1,7 @@ +create table merge_kek_1 (x UInt32, y UInt32) engine = MergeTree order by x; +create table merge_kek_2 (x UInt32, y UInt32) engine = MergeTree order by x; + +insert into merge_kek_1 select number, number from numbers(100); +insert into merge_kek_2 select number + 500, number + 500 from numbers(1e6); + +select sum(x), min(x + x), max(x + x) from merge(currentDatabase(), '^merge_kek_.$') where x > 200 and y in (select 500 + number * 2 from numbers(100)) settings max_threads=2; diff --git a/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.reference b/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.reference index 8ad0a566c62..1c60e40942c 100644 --- a/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.reference +++ b/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.reference @@ -97,8 +97,8 @@ c1 Array(Nullable(Bool)) [] [NULL] [false] -c1 
Tuple(Nullable(Int64), Nullable(Int64), Nullable(Int64)) -(1,2,3) +c1 Nullable(String) +(1, 2, 3) c1 Nullable(String) 123.123 c1 Array(Tuple(Nullable(Int64), Nullable(Int64), Nullable(Int64))) diff --git a/tests/queries/0_stateless/02340_parts_refcnt_mergetree.sh b/tests/queries/0_stateless/02340_parts_refcnt_mergetree.sh index caa600298ce..b100f96befa 100755 --- a/tests/queries/0_stateless/02340_parts_refcnt_mergetree.sh +++ b/tests/queries/0_stateless/02340_parts_refcnt_mergetree.sh @@ -24,6 +24,7 @@ function check_refcnt_for_table() local log_file log_file=$(mktemp "$CUR_DIR/clickhouse-tests.XXXXXX.log") local args=( + --allow_repeated_settings --format Null --max_threads 1 --max_block_size 1 diff --git a/tests/queries/0_stateless/02366_kql_create_table.sql b/tests/queries/0_stateless/02366_kql_create_table.sql index b266679b06a..75a81c5dbd3 100644 --- a/tests/queries/0_stateless/02366_kql_create_table.sql +++ b/tests/queries/0_stateless/02366_kql_create_table.sql @@ -1,8 +1,8 @@ DROP TABLE IF EXISTS Customers; CREATE TABLE Customers -( +( FirstName Nullable(String), - LastName String, + LastName String, Occupation String, Education String, Age Nullable(UInt8) @@ -10,20 +10,20 @@ CREATE TABLE Customers INSERT INTO Customers VALUES ('Theodore','Diaz','Skilled Manual','Bachelors',28),('Stephanie','Cox','Management abcd defg','Bachelors',33),('Peter','Nara','Skilled Manual','Graduate Degree',26),('Latoya','Shen','Professional','Graduate Degree',25),('Apple','','Skilled Manual','Bachelors',28),(NULL,'why','Professional','Partial College',38); Select '-- test create table --' ; -Select * from kql(Customers|project FirstName) limit 1;; +Select * from kql($$Customers|project FirstName$$) limit 1;; DROP TABLE IF EXISTS kql_table1; -CREATE TABLE kql_table1 ENGINE = Memory AS select *, now() as new_column From kql(Customers | project LastName | filter LastName=='Diaz'); +CREATE TABLE kql_table1 ENGINE = Memory AS select *, now() as new_column From kql($$Customers | project LastName | filter LastName=='Diaz'$$); select LastName from kql_table1 limit 1; DROP TABLE IF EXISTS kql_table2; CREATE TABLE kql_table2 -( +( FirstName Nullable(String), - LastName String, + LastName String, Age Nullable(UInt8) ) ENGINE = Memory; -INSERT INTO kql_table2 select * from kql(Customers|project FirstName,LastName,Age | filter FirstName=='Theodore'); +INSERT INTO kql_table2 select * from kql($$Customers|project FirstName,LastName,Age | filter FirstName=='Theodore'$$); select * from kql_table2 limit 1; --- select * from kql(Customers | where FirstName !in ("test", "test2")); +-- select * from kql($$Customers | where FirstName !in ("test", "test2")$$); DROP TABLE IF EXISTS Customers; DROP TABLE IF EXISTS kql_table1; -DROP TABLE IF EXISTS kql_table2; \ No newline at end of file +DROP TABLE IF EXISTS kql_table2; diff --git a/tests/queries/0_stateless/02375_pretty_formats.sql.j2 b/tests/queries/0_stateless/02375_pretty_formats.sql.j2 index 55462ea6b61..36a4479260c 100644 --- a/tests/queries/0_stateless/02375_pretty_formats.sql.j2 +++ b/tests/queries/0_stateless/02375_pretty_formats.sql.j2 @@ -3,6 +3,6 @@ 'PrettySpaceNoEscapesMonoBlock'] -%} select '{{ format }}'; -select number as x, number + 1 as y from numbers(4) settings max_block_size=2, output_format_pretty_color=1 format {{ format }}; +select number as x, number + 1 as y from numbers(4) settings max_block_size=2, output_format_pretty_color=1, output_format_pretty_display_footer_column_names=0 format {{ format }}; {% endfor -%} diff --git 
a/tests/queries/0_stateless/02381_parseDateTime64BestEffortUS.sql b/tests/queries/0_stateless/02381_parseDateTime64BestEffortUS.sql index 21dc7b1a990..510ed4b126f 100644 --- a/tests/queries/0_stateless/02381_parseDateTime64BestEffortUS.sql +++ b/tests/queries/0_stateless/02381_parseDateTime64BestEffortUS.sql @@ -1,3 +1,4 @@ +SET output_format_pretty_display_footer_column_names=0; SELECT 'parseDateTime64BestEffortUS'; SELECT diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.sql b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.sql index cabcd230eb6..e9deb778075 100644 --- a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.sql +++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.sql @@ -3,7 +3,6 @@ SELECT name FROM system.functions WHERE NOT is_aggregate AND origin = 'System' AND alias_to = '' AND length(description) < 10 AND name NOT IN ( 'aes_decrypt_mysql', 'aes_encrypt_mysql', 'decrypt', 'encrypt', - 'base64Decode', 'base64Encode', 'tryBase64Decode', 'convertCharset', 'detectLanguage', 'detectLanguageMixed', 'geoToH3', diff --git a/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.reference b/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.reference index 649b037fafa..11c178ac0d0 100644 --- a/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.reference +++ b/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.reference @@ -10,11 +10,15 @@ QUERY id: 0 JOIN TREE TABLE id: 3, alias: __table1, table_name: default.t_logical_expressions_optimizer_low_cardinality WHERE - FUNCTION id: 4, function_name: in, function_type: ordinary, result_type: UInt8 + FUNCTION id: 4, function_name: or, function_type: ordinary, result_type: UInt8 ARGUMENTS LIST id: 5, nodes: 2 - COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 - CONSTANT id: 6, constant_value: Tuple_(\'x\', \'y\'), constant_value_type: Tuple(String, String) + FUNCTION id: 6, function_name: in, function_type: ordinary, result_type: LowCardinality(UInt8) + ARGUMENTS + LIST id: 7, nodes: 2 + COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 + CONSTANT id: 8, constant_value: Tuple_(\'x\', \'y\'), constant_value_type: Tuple(String, String) + CONSTANT id: 9, constant_value: UInt64_0, constant_value_type: UInt8 SETTINGS allow_experimental_analyzer=1 SELECT a FROM t_logical_expressions_optimizer_low_cardinality @@ -28,11 +32,15 @@ QUERY id: 0 JOIN TREE TABLE id: 3, alias: __table1, table_name: default.t_logical_expressions_optimizer_low_cardinality WHERE - FUNCTION id: 4, function_name: in, function_type: ordinary, result_type: UInt8 + FUNCTION id: 4, function_name: or, function_type: ordinary, result_type: UInt8 ARGUMENTS LIST id: 5, nodes: 2 - COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 - CONSTANT id: 6, constant_value: Tuple_(\'x\', \'y\'), constant_value_type: Tuple(String, String) + FUNCTION id: 6, function_name: in, function_type: ordinary, result_type: LowCardinality(UInt8) + ARGUMENTS + LIST id: 7, nodes: 2 + COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 + CONSTANT id: 8, constant_value: Tuple_(\'x\', \'y\'), constant_value_type: Tuple(String, String) + CONSTANT id: 9, constant_value: UInt64_0, constant_value_type: UInt8 SETTINGS allow_experimental_analyzer=1 SELECT a FROM 
t_logical_expressions_optimizer_low_cardinality @@ -46,11 +54,15 @@ QUERY id: 0 JOIN TREE TABLE id: 3, alias: __table1, table_name: default.t_logical_expressions_optimizer_low_cardinality WHERE - FUNCTION id: 4, function_name: notIn, function_type: ordinary, result_type: UInt8 + FUNCTION id: 4, function_name: _CAST, function_type: ordinary, result_type: UInt8 ARGUMENTS LIST id: 5, nodes: 2 - COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 - CONSTANT id: 6, constant_value: Tuple_(\'x\', \'y\'), constant_value_type: Tuple(String, String) + FUNCTION id: 6, function_name: notIn, function_type: ordinary, result_type: LowCardinality(UInt8) + ARGUMENTS + LIST id: 7, nodes: 2 + COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 + CONSTANT id: 8, constant_value: Tuple_(\'x\', \'y\'), constant_value_type: Tuple(String, String) + CONSTANT id: 9, constant_value: \'UInt8\', constant_value_type: String SETTINGS allow_experimental_analyzer=1 SELECT a FROM t_logical_expressions_optimizer_low_cardinality @@ -64,11 +76,15 @@ QUERY id: 0 JOIN TREE TABLE id: 3, alias: __table1, table_name: default.t_logical_expressions_optimizer_low_cardinality WHERE - FUNCTION id: 4, function_name: notIn, function_type: ordinary, result_type: UInt8 + FUNCTION id: 4, function_name: _CAST, function_type: ordinary, result_type: UInt8 ARGUMENTS LIST id: 5, nodes: 2 - COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 - CONSTANT id: 6, constant_value: Tuple_(\'x\', \'y\'), constant_value_type: Tuple(String, String) + FUNCTION id: 6, function_name: notIn, function_type: ordinary, result_type: LowCardinality(UInt8) + ARGUMENTS + LIST id: 7, nodes: 2 + COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 + CONSTANT id: 8, constant_value: Tuple_(\'x\', \'y\'), constant_value_type: Tuple(String, String) + CONSTANT id: 9, constant_value: \'UInt8\', constant_value_type: String SETTINGS allow_experimental_analyzer=1 SELECT a FROM t_logical_expressions_optimizer_low_cardinality diff --git a/tests/queries/0_stateless/02488_zero_copy_detached_parts_drop_table.sh b/tests/queries/0_stateless/02488_zero_copy_detached_parts_drop_table.sh index b01f16e1cad..60cec5caea3 100755 --- a/tests/queries/0_stateless/02488_zero_copy_detached_parts_drop_table.sh +++ b/tests/queries/0_stateless/02488_zero_copy_detached_parts_drop_table.sh @@ -19,7 +19,7 @@ $CLICKHOUSE_CLIENT -q "select throwIf(substring('$path', 1, 1) != '/', 'Path is rm -f $path/count.txt $CLICKHOUSE_CLIENT -q "detach table rmt2 sync" -$CLICKHOUSE_CLIENT --send_logs_level='fatal' -q "attach table rmt2" +$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level='fatal' -q "attach table rmt2" $CLICKHOUSE_CLIENT -q "select reason, name from system.detached_parts where database='$CLICKHOUSE_DATABASE' and table='rmt2'" diff --git a/tests/queries/0_stateless/02494_query_cache_nested_query_bug.sh b/tests/queries/0_stateless/02494_query_cache_nested_query_bug.sh index 6bc3d03ac66..24803ed7420 100755 --- a/tests/queries/0_stateless/02494_query_cache_nested_query_bug.sh +++ b/tests/queries/0_stateless/02494_query_cache_nested_query_bug.sh @@ -20,12 +20,12 @@ SETTINGS_ANALYZER="SETTINGS use_query_cache=1, max_threads=1, allow_experimental # Verify that the first query does two aggregations and the second query zero aggregations. Since query cache is currently not integrated # with EXPLAIN PLAN, we need to check the logs. 
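# Note on the flag introduced below: $CLICKHOUSE_CLIENT is already invoked with a --send_logs_level option by the test harness (see the 00653 hunk above), so passing --send_logs_level=trace again is presumably only accepted together with --allow_repeated_settings.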
-${CLICKHOUSE_CLIENT} --send_logs_level=trace --query "SELECT count(a) / (SELECT sum(a) FROM tab) FROM tab $SETTINGS_NO_ANALYZER" 2>&1 | grep "Aggregated. " | wc -l -${CLICKHOUSE_CLIENT} --send_logs_level=trace --query "SELECT count(a) / (SELECT sum(a) FROM tab) FROM tab $SETTINGS_NO_ANALYZER" 2>&1 | grep "Aggregated. " | wc -l +${CLICKHOUSE_CLIENT} --allow_repeated_settings --send_logs_level=trace --query "SELECT count(a) / (SELECT sum(a) FROM tab) FROM tab $SETTINGS_NO_ANALYZER" 2>&1 | grep "Aggregated. " | wc -l +${CLICKHOUSE_CLIENT} --allow_repeated_settings --send_logs_level=trace --query "SELECT count(a) / (SELECT sum(a) FROM tab) FROM tab $SETTINGS_NO_ANALYZER" 2>&1 | grep "Aggregated. " | wc -l ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP QUERY CACHE" -${CLICKHOUSE_CLIENT} --send_logs_level=trace --query "SELECT count(a) / (SELECT sum(a) FROM tab) FROM tab $SETTINGS_ANALYZER" 2>&1 | grep "Aggregated. " | wc -l -${CLICKHOUSE_CLIENT} --send_logs_level=trace --query "SELECT count(a) / (SELECT sum(a) FROM tab) FROM tab $SETTINGS_ANALYZER" 2>&1 | grep "Aggregated. " | wc -l +${CLICKHOUSE_CLIENT} --allow_repeated_settings --send_logs_level=trace --query "SELECT count(a) / (SELECT sum(a) FROM tab) FROM tab $SETTINGS_ANALYZER" 2>&1 | grep "Aggregated. " | wc -l +${CLICKHOUSE_CLIENT} --allow_repeated_settings --send_logs_level=trace --query "SELECT count(a) / (SELECT sum(a) FROM tab) FROM tab $SETTINGS_ANALYZER" 2>&1 | grep "Aggregated. " | wc -l ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP QUERY CACHE" diff --git a/tests/queries/0_stateless/02530_dictionaries_update_field.reference b/tests/queries/0_stateless/02530_dictionaries_update_field.reference index 88c910e0313..4d5a7447a49 100644 --- a/tests/queries/0_stateless/02530_dictionaries_update_field.reference +++ b/tests/queries/0_stateless/02530_dictionaries_update_field.reference @@ -10,7 +10,7 @@ SELECT key, value FROM dict_flat ORDER BY key ASC; 2 Second INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now()); INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now()); -SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; +SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null; SELECT key, value FROM dict_flat ORDER BY key ASC; 1 First 2 SecondUpdated @@ -27,7 +27,7 @@ SELECT key, value FROM dict_flat_custom ORDER BY key ASC; 2 Second INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now()); INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now()); -SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; +SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null; SELECT key, value FROM dict_flat_custom ORDER BY key ASC; 1 First 2 SecondUpdated @@ -44,7 +44,7 @@ SELECT key, value FROM dict_hashed ORDER BY key ASC; 2 Second INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now()); INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now()); -SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; +SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null; SELECT key, value FROM dict_hashed ORDER BY key ASC; 1 First 2 SecondUpdated @@ -61,7 +61,7 @@ SELECT key, value FROM 
dict_hashed_custom ORDER BY key ASC; 2 Second INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now()); INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now()); -SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; +SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null; SELECT key, value FROM dict_hashed_custom ORDER BY key ASC; 1 First 2 SecondUpdated @@ -78,7 +78,7 @@ SELECT key, value FROM dict_complex_key_hashed ORDER BY key ASC; 2 Second INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now()); INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now()); -SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; +SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null; SELECT key, value FROM dict_complex_key_hashed ORDER BY key ASC; 1 First 2 SecondUpdated @@ -95,7 +95,7 @@ SELECT key, value FROM dict_complex_key_hashed_custom ORDER BY key ASC; 2 Second INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now()); INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now()); -SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; +SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null; SELECT key, value FROM dict_complex_key_hashed_custom ORDER BY key ASC; 1 First 2 SecondUpdated diff --git a/tests/queries/0_stateless/02530_dictionaries_update_field.sh b/tests/queries/0_stateless/02530_dictionaries_update_field.sh index 35881bdf896..9ced78a1196 100755 --- a/tests/queries/0_stateless/02530_dictionaries_update_field.sh +++ b/tests/queries/0_stateless/02530_dictionaries_update_field.sh @@ -60,7 +60,7 @@ for layout in "${layouts[@]}"; do INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now()); INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now()); - SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; + SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null; SELECT key, value FROM $dictionary_name ORDER BY key ASC; -- { echoOff } diff --git a/tests/queries/0_stateless/02535_max_parallel_replicas_custom_key.sh b/tests/queries/0_stateless/02535_max_parallel_replicas_custom_key.sh index 9850406eb3a..dccb680be42 100755 --- a/tests/queries/0_stateless/02535_max_parallel_replicas_custom_key.sh +++ b/tests/queries/0_stateless/02535_max_parallel_replicas_custom_key.sh @@ -30,7 +30,7 @@ run_with_custom_key "SELECT * FROM cluster(test_cluster_one_shard_three_replicas $CLICKHOUSE_CLIENT --query="DROP TABLE 02535_custom_key" -$CLICKHOUSE_CLIENT --query="CREATE TABLE 02535_custom_key (x String, y Int32) ENGINE = MergeTree ORDER BY cityHash64(x)" +$CLICKHOUSE_CLIENT --query="CREATE TABLE 02535_custom_key (x String, y UInt32) ENGINE = MergeTree ORDER BY cityHash64(x)" $CLICKHOUSE_CLIENT --query="INSERT INTO 02535_custom_key SELECT toString(number), number % 3 FROM numbers(1000)" function run_count_with_custom_key { @@ -41,6 +41,6 @@ run_count_with_custom_key "y" run_count_with_custom_key "cityHash64(y)" run_count_with_custom_key "cityHash64(y) + 1" -$CLICKHOUSE_CLIENT --query="SELECT 
count() FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), 02535_custom_key) as t1 JOIN 02535_custom_key USING y" --parallel_replicas_custom_key="y" --send_logs_level="trace" 2>&1 | grep -Fac "JOINs are not supported with" +$CLICKHOUSE_CLIENT --query="SELECT count() FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), 02535_custom_key) as t1 JOIN 02535_custom_key USING y" --allow_repeated_settings --parallel_replicas_custom_key="y" --send_logs_level="trace" 2>&1 | grep -Fac "JOINs are not supported with" $CLICKHOUSE_CLIENT --query="DROP TABLE 02535_custom_key" diff --git a/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql b/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql index ff8b9c71e92..741d0177971 100644 --- a/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql +++ b/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql @@ -1,4 +1,4 @@ --- Tags: long, no-debug, no-tsan, no-asan, no-ubsan, no-msan, no-parallel +-- Tags: long, no-debug, no-tsan, no-asan, no-ubsan, no-msan, no-parallel, no-sanitize-coverage -- no-parallel because the sets use a lot of memory, which may interfere with other tests diff --git a/tests/queries/0_stateless/02798_explain_settings_not_applied_bug.sql b/tests/queries/0_stateless/02798_explain_settings_not_applied_bug.sql index 5b9976714ea..b0c08134816 100644 --- a/tests/queries/0_stateless/02798_explain_settings_not_applied_bug.sql +++ b/tests/queries/0_stateless/02798_explain_settings_not_applied_bug.sql @@ -1,3 +1,4 @@ +SET output_format_pretty_display_footer_column_names=0; SET output_format_pretty_color=1; SET read_in_order_two_level_merge_threshold=1000000; diff --git a/tests/queries/0_stateless/02892_orc_filter_pushdown.reference b/tests/queries/0_stateless/02892_orc_filter_pushdown.reference index e6c2e9b2b57..903d42bf492 100644 --- a/tests/queries/0_stateless/02892_orc_filter_pushdown.reference +++ b/tests/queries/0_stateless/02892_orc_filter_pushdown.reference @@ -206,7 +206,7 @@ select count(), sum(number) from file('02892.orc', ORC, 'number UInt64, negative select count(), min(negative_or_null), max(negative_or_null) from file('02892.orc', ORC, 'number UInt64, negative_or_null Int64') where (negative_or_null < -500); 596 -1099 -501 select count(), sum(number) from file('02892.orc', ORC, 'number UInt64, negative_or_null Int64') where indexHint(negative_or_null is null); -1000 499500 +0 0 select count(), min(negative_or_null), max(negative_or_null) from file('02892.orc', ORC, 'number UInt64, negative_or_null Int64') where (negative_or_null is null); 0 0 0 select count(), sum(number) from file('02892.orc', ORC, 'number UInt64, negative_or_null Int64') where indexHint(negative_or_null in (0, -1, -10, -100, -1000)); diff --git a/tests/queries/0_stateless/02893_vertical_final_array_join.reference b/tests/queries/0_stateless/02893_vertical_final_bugs.reference similarity index 99% rename from tests/queries/0_stateless/02893_vertical_final_array_join.reference rename to tests/queries/0_stateless/02893_vertical_final_bugs.reference index 27b54a2e42e..ab23116aa5f 100644 --- a/tests/queries/0_stateless/02893_vertical_final_array_join.reference +++ b/tests/queries/0_stateless/02893_vertical_final_bugs.reference @@ -1,3 +1,4 @@ +1 2 b 1 -- { echo ON } SELECT arrayJoin([(k1, v), (k2, v)]) AS row, row.1 as k FROM t FINAL WHERE k1 != 3 AND k = 1 ORDER BY row 
SETTINGS enable_vertical_final = 0; (1,4) 1 diff --git a/tests/queries/0_stateless/02893_vertical_final_array_join.sql b/tests/queries/0_stateless/02893_vertical_final_bugs.sql similarity index 65% rename from tests/queries/0_stateless/02893_vertical_final_array_join.sql rename to tests/queries/0_stateless/02893_vertical_final_bugs.sql index cc2e37fdc6e..e82ab674c58 100644 --- a/tests/queries/0_stateless/02893_vertical_final_array_join.sql +++ b/tests/queries/0_stateless/02893_vertical_final_bugs.sql @@ -1,3 +1,15 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/64543 +DROP TABLE IF EXISTS foo; +DROP TABLE IF EXISTS bar; +CREATE TABLE foo (id UInt64, seq UInt64) ENGINE = Memory; +CREATE TABLE bar (id UInt64, seq UInt64, name String) ENGINE = ReplacingMergeTree ORDER BY id; +INSERT INTO foo VALUES (1, 1); +INSERT INTO bar VALUES (1, 1, 'a') (2, 2, 'b'); +INSERT INTO bar VALUES (1, 2, 'b') (2, 3, 'c'); +SELECT * FROM bar INNER JOIN foo USING id WHERE bar.seq > foo.seq SETTINGS final = 1; + +-- The same problem can possibly happen with array join +DROP TABLE IF EXISTS t; CREATE TABLE t (k1 UInt64, k2 UInt64, v UInt64) ENGINE = ReplacingMergeTree() ORDER BY (k1, k2); SET optimize_on_insert = 0; INSERT INTO t VALUES (1, 2, 3) (1, 2, 4) (2, 3, 4), (2, 3, 5); diff --git a/tests/queries/0_stateless/02896_cyclic_aliases_crash.reference b/tests/queries/0_stateless/02896_cyclic_aliases_crash.reference index caf11f5c15a..e537236478d 100644 --- a/tests/queries/0_stateless/02896_cyclic_aliases_crash.reference +++ b/tests/queries/0_stateless/02896_cyclic_aliases_crash.reference @@ -1,2 +1,3 @@ 1 2 3 1 5 +300 diff --git a/tests/queries/0_stateless/02896_cyclic_aliases_crash.sql b/tests/queries/0_stateless/02896_cyclic_aliases_crash.sql index 5fb628eeb67..5440872e052 100644 --- a/tests/queries/0_stateless/02896_cyclic_aliases_crash.sql +++ b/tests/queries/0_stateless/02896_cyclic_aliases_crash.sql @@ -30,3 +30,7 @@ WHERE (time_stamp_utc >= toDateTime('2024-04-25 00:00:00')) AND (time_stamp_utc GROUP BY time_stamp_utc ORDER BY Impressions DESC LIMIT 1000; + +drop table test_table; +create table test_table engine MergeTree order by sum as select 100 as sum union all select 200 as sum; +select sum as sum from (select sum(sum) as sum from test_table); diff --git a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh index 144831a2cdc..a247c99a818 100755 --- a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh +++ b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, zookeeper, no-parallel, no-fasttest +# Tags: long, zookeeper, no-parallel, no-fasttest, no-asan CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.reference b/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.reference index 786a6b3bf25..7278018f1d6 100644 --- a/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.reference +++ b/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.reference @@ -7,6 +7,9 @@ Expression ((Projection + Before ORDER BY)) Aggregating Expression (Before GROUP BY) ReadFromMerge - ReadFromMergeTree (default.mt1) - ReadFromMergeTree (default.mt2) - ReadFromStorage (TinyLog) + Expression + ReadFromMergeTree (default.mt1) + Expression + ReadFromMergeTree (default.mt2) + Expression + ReadFromStorage
(TinyLog) diff --git a/tests/queries/0_stateless/02922_deduplication_with_zero_copy.sh b/tests/queries/0_stateless/02922_deduplication_with_zero_copy.sh index bb013dccb65..dad4e6747e0 100755 --- a/tests/queries/0_stateless/02922_deduplication_with_zero_copy.sh +++ b/tests/queries/0_stateless/02922_deduplication_with_zero_copy.sh @@ -58,9 +58,9 @@ function filter_temporary_locks() function insert_duplicates() { - $CLICKHOUSE_CLIENT -q "insert into r1 values(1);" --send_logs_level="error" & + $CLICKHOUSE_CLIENT -q "insert into r1 values(1);" --allow_repeated_settings --send_logs_level="error" & - $CLICKHOUSE_CLIENT -q "insert into r2 values(1);" --send_logs_level="error" + $CLICKHOUSE_CLIENT -q "insert into r2 values(1);" --allow_repeated_settings --send_logs_level="error" wait @@ -137,8 +137,8 @@ function list_keeper_nodes() { list_keeper_nodes "${table_shared_id}" -$CLICKHOUSE_CLIENT -nm -q "drop table r1;" --send_logs_level="error" & -$CLICKHOUSE_CLIENT -nm -q "drop table r2;" --send_logs_level="error" & +$CLICKHOUSE_CLIENT -nm -q "drop table r1;" --allow_repeated_settings --send_logs_level="error" & +$CLICKHOUSE_CLIENT -nm -q "drop table r2;" --allow_repeated_settings --send_logs_level="error" & wait list_keeper_nodes "${table_shared_id}" diff --git a/tests/queries/0_stateless/02922_server_exit_code.sh b/tests/queries/0_stateless/02922_server_exit_code.sh index 60049902410..ded0dc4763f 100755 --- a/tests/queries/0_stateless/02922_server_exit_code.sh +++ b/tests/queries/0_stateless/02922_server_exit_code.sh @@ -7,6 +7,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # We will check that the server's exit code corresponds to the exception code if it was terminated after exception. # In this example, we provide an invalid path to the server's config, ignore its logs and check the exit code. -# The exception code is 400 = CANNOT_STAT, so the exit code will be 400 % 256. +# The exception code is 76 = CANNOT_OPEN_FILE, so the exit code will be 76 % 256. -${CLICKHOUSE_SERVER_BINARY} -- --path /dev/null 2>/dev/null; [[ "$?" == "$((400 % 256))" ]] && echo 'Ok' || echo 'Fail' +${CLICKHOUSE_SERVER_BINARY} -- --path /dev/null 2>/dev/null; [[ "$?" == "$((76 % 256))" ]] && echo 'Ok' || echo 'Fail' diff --git a/tests/queries/0_stateless/02941_variant_type_1.sh b/tests/queries/0_stateless/02941_variant_type_1.sh index 22ca909a26e..723de45eaad 100755 --- a/tests/queries/0_stateless/02941_variant_type_1.sh +++ b/tests/queries/0_stateless/02941_variant_type_1.sh @@ -7,7 +7,7 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspicious_variant_types=1 --index_granularity_bytes=10485760 --index_granularity=8192" +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspicious_variant_types=1" function test1_insert() { @@ -115,11 +115,11 @@ run 0 $CH_CLIENT -q "drop table test;" echo "MergeTree compact" -$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;" +$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000, index_granularity_bytes=10485760, index_granularity=8192;" run 1 $CH_CLIENT -q "drop table test;" echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" +$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, index_granularity_bytes=10485760, index_granularity=8192;" run 1 $CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/02941_variant_type_2.sh b/tests/queries/0_stateless/02941_variant_type_2.sh index 91ba0285bd8..f43cd2bb0d6 100755 --- a/tests/queries/0_stateless/02941_variant_type_2.sh +++ b/tests/queries/0_stateless/02941_variant_type_2.sh @@ -7,7 +7,7 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspicious_variant_types=1 --index_granularity_bytes=10485760 --index_granularity=8192" +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspicious_variant_types=1" function test4_insert() { @@ -61,11 +61,11 @@ run 0 $CH_CLIENT -q "drop table test;" echo "MergeTree compact" -$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;" +$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000, index_granularity_bytes=10485760, index_granularity=8192;" run 1 $CH_CLIENT -q "drop table test;" echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" +$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, index_granularity_bytes=10485760, index_granularity=8192;" run 1 $CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/02941_variant_type_3.sh b/tests/queries/0_stateless/02941_variant_type_3.sh index 8a039a02d6d..f4b2b304f56 100755 --- a/tests/queries/0_stateless/02941_variant_type_3.sh +++ b/tests/queries/0_stateless/02941_variant_type_3.sh @@ -7,7 +7,7 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspicious_variant_types=1 --index_granularity_bytes=10485760 --index_granularity=8192 " +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspicious_variant_types=1" function test5_insert() { @@ -63,11 +63,11 @@ run 0 $CH_CLIENT -q "drop table test;" echo "MergeTree compact" -$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;" +$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000, index_granularity_bytes=10485760, index_granularity=8192;" run 1 $CH_CLIENT -q "drop table test;" echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" +$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, index_granularity_bytes=10485760, index_granularity=8192;" run 1 $CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/02941_variant_type_4.sh b/tests/queries/0_stateless/02941_variant_type_4.sh index e38db8fda54..f9a16847864 100755 --- a/tests/queries/0_stateless/02941_variant_type_4.sh +++ b/tests/queries/0_stateless/02941_variant_type_4.sh @@ -7,7 +7,8 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspicious_variant_types=1 --index_granularity_bytes=10485760 --index_granularity=8192 " + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspicious_variant_types=1" function test6_insert() { @@ -57,11 +58,11 @@ run 0 $CH_CLIENT -q "drop table test;" echo "MergeTree compact" -$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;" +$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000, index_granularity_bytes=10485760, index_granularity=8192;" run 1 $CH_CLIENT -q "drop table test;" echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" +$CH_CLIENT -q "create table test (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, index_granularity_bytes=10485760, index_granularity=8192;" run 1 $CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/02942_variant_cast.sql b/tests/queries/0_stateless/02942_variant_cast.sql index fc2d1d63657..33587e3e438 100644 --- a/tests/queries/0_stateless/02942_variant_cast.sql +++ b/tests/queries/0_stateless/02942_variant_cast.sql @@ -1,5 +1,4 @@ set allow_experimental_variant_type=1; -set allow_experimental_analyzer=0; -- It's currently doesn't work with analyzer because of the way it works with constants, but it will be refactored and fixed in future select NULL::Variant(String, UInt64); select 42::UInt64::Variant(String, UInt64); diff --git a/tests/queries/0_stateless/02944_variant_as_common_type.sql b/tests/queries/0_stateless/02944_variant_as_common_type.sql index e985cf365dd..49ea5f2769c 100644 --- a/tests/queries/0_stateless/02944_variant_as_common_type.sql +++ b/tests/queries/0_stateless/02944_variant_as_common_type.sql @@ -1,5 +1,3 @@ -set allow_experimental_analyzer=0; -- The result type for if function with constant is different with analyzer. It wil be fixed after refactoring around constants in analyzer. 
- set allow_experimental_variant_type=1; set use_variant_as_common_type=1; diff --git a/tests/queries/0_stateless/02969_auto_format_detection.reference b/tests/queries/0_stateless/02969_auto_format_detection.reference index 865db11defc..4b86be04996 100644 --- a/tests/queries/0_stateless/02969_auto_format_detection.reference +++ b/tests/queries/0_stateless/02969_auto_format_detection.reference @@ -82,7 +82,8 @@ CSV c1 Nullable(UInt64) c2 Nullable(String) c3 Array(Nullable(UInt64)) -c4 Tuple(Nullable(UInt64), Nullable(String)) +c4 Nullable(UInt64) +c5 Nullable(String) a Nullable(String) b Nullable(String) c Array(Nullable(String)) diff --git a/tests/queries/0_stateless/02977_csv_format_support_tuple.sql b/tests/queries/0_stateless/02977_csv_format_support_tuple.sql index d00cc00e097..f30e217ca0f 100644 --- a/tests/queries/0_stateless/02977_csv_format_support_tuple.sql +++ b/tests/queries/0_stateless/02977_csv_format_support_tuple.sql @@ -1,5 +1,9 @@ -- Tags: no-parallel +SET output_format_csv_serialize_tuple_into_separate_columns = false; +SET input_format_csv_deserialize_separate_columns_into_tuple = false; +SET input_format_csv_try_infer_strings_from_quoted_tuples = false; + insert into function file('02977_1.csv') select '20240305', 1, ['s', 'd'], map('a', 2), tuple('222', 33, map('abc', 5)) SETTINGS engine_file_truncate_on_insert=1; desc file('02977_1.csv'); select * from file('02977_1.csv') settings max_threads=1; diff --git a/tests/queries/0_stateless/02998_pretty_format_print_readable_number_on_single_value.sql b/tests/queries/0_stateless/02998_pretty_format_print_readable_number_on_single_value.sql index 5dc69488cea..46d6bb657c9 100644 --- a/tests/queries/0_stateless/02998_pretty_format_print_readable_number_on_single_value.sql +++ b/tests/queries/0_stateless/02998_pretty_format_print_readable_number_on_single_value.sql @@ -1,3 +1,4 @@ +SET output_format_pretty_display_footer_column_names=0; SELECT 1_000_000 as a FORMAT Pretty; SELECT 1_000_000 as a FORMAT PrettyNoEscapes; SELECT 1_000_000 as a FORMAT PrettyMonoBlock; diff --git a/tests/queries/0_stateless/03015_parser_shortcut_lexer_errors.reference b/tests/queries/0_stateless/03015_parser_shortcut_lexer_errors.reference new file mode 100644 index 00000000000..f83d884fd78 --- /dev/null +++ b/tests/queries/0_stateless/03015_parser_shortcut_lexer_errors.reference @@ -0,0 +1 @@ +Syntax error diff --git a/tests/queries/0_stateless/03015_parser_shortcut_lexer_errors.sh b/tests/queries/0_stateless/03015_parser_shortcut_lexer_errors.sh new file mode 100755 index 00000000000..762201ed5fc --- /dev/null +++ b/tests/queries/0_stateless/03015_parser_shortcut_lexer_errors.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL --query "SELECT((((((((((SELECT(((((((((SELECT((((((((((SELECT(((((((((SELECT((((((((((SELECT(((((((((SELECT 1+)))))))))))))))))))))))))))))))))))))))))))))))))))))))))'" 2>&1 | grep -o -F 'Syntax error' diff --git a/tests/queries/0_stateless/03022_highlight_digit_groups.sql b/tests/queries/0_stateless/03022_highlight_digit_groups.sql index c48a02e712f..8c371c409c4 100644 --- a/tests/queries/0_stateless/03022_highlight_digit_groups.sql +++ b/tests/queries/0_stateless/03022_highlight_digit_groups.sql @@ -1,3 +1,4 @@ +SET output_format_pretty_display_footer_column_names=0; SET output_format_pretty_row_numbers = 0; SELECT exp10(number) * (number % 2 ? 
1 : -1) FROM numbers(30) FORMAT PrettySpace SETTINGS output_format_pretty_color = 1; diff --git a/tests/queries/0_stateless/03023_zeros_generate_random_with_limit_progress_bar.expect b/tests/queries/0_stateless/03023_zeros_generate_random_with_limit_progress_bar.expect deleted file mode 100755 index de15a199132..00000000000 --- a/tests/queries/0_stateless/03023_zeros_generate_random_with_limit_progress_bar.expect +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/expect -f - -set basedir [file dirname $argv0] -set basename [file tail $argv0] -if {[info exists env(CLICKHOUSE_TMP)]} { - set CLICKHOUSE_TMP $env(CLICKHOUSE_TMP) -} else { - set CLICKHOUSE_TMP "." -} -exp_internal -f $CLICKHOUSE_TMP/$basename.debuglog 0 - -log_user 0 -set timeout 60 -match_max 100000 -set stty_init "rows 25 cols 120" - -expect_after { - -i $any_spawn_id eof { exp_continue } - -i $any_spawn_id timeout { exit 1 } -} - -spawn clickhouse-local -expect ":) " - -# Trivial SELECT with LIMIT from system.zeros shows progress bar. -send "SELECT * FROM system.zeros LIMIT 10000000 FORMAT Null SETTINGS max_execution_speed = 1000000, timeout_before_checking_execution_speed = 0, max_block_size = 128\r" -expect "Progress: " -expect "█" -send "\3" -expect "Query was cancelled." -expect ":) " - -send "SELECT * FROM system.zeros_mt LIMIT 10000000 FORMAT Null SETTINGS max_execution_speed = 1000000, timeout_before_checking_execution_speed = 0, max_block_size = 128\r" -expect "Progress: " -expect "█" -send "\3" -expect "Query was cancelled." -expect ":) " - -# As well as from generateRandom -send "SELECT * FROM generateRandom() LIMIT 10000000 FORMAT Null SETTINGS max_execution_speed = 1000000, timeout_before_checking_execution_speed = 0, max_block_size = 128\r" -expect "Progress: " -expect "█" -send "\3" -expect "Query was cancelled." -expect ":) " - -send "exit\r" -expect eof diff --git a/tests/queries/0_stateless/03023_zeros_generate_random_with_limit_progress_bar.reference b/tests/queries/0_stateless/03023_zeros_generate_random_with_limit_progress_bar.reference index e69de29bb2d..6ca5ae94f9a 100644 --- a/tests/queries/0_stateless/03023_zeros_generate_random_with_limit_progress_bar.reference +++ b/tests/queries/0_stateless/03023_zeros_generate_random_with_limit_progress_bar.reference @@ -0,0 +1,3 @@ +Matched +Matched +Matched diff --git a/tests/queries/0_stateless/03023_zeros_generate_random_with_limit_progress_bar.sh b/tests/queries/0_stateless/03023_zeros_generate_random_with_limit_progress_bar.sh new file mode 100755 index 00000000000..500a12587a2 --- /dev/null +++ b/tests/queries/0_stateless/03023_zeros_generate_random_with_limit_progress_bar.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# Tags: no-random-settings + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +function run_with_progress_and_match_total_rows() +{ + CURL_RESPONSE=$(echo "$1" | \ + ${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&wait_end_of_query=1&max_block_size=1&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0&output_format_parallel_formatting=0" --data-binary @- 2>&1) + + echo "$CURL_RESPONSE" | grep -q '"total_rows_to_read":"100"' && echo "Matched" || echo "Expected total_rows_to_read not found: ${CURL_RESPONSE}" +} + +run_with_progress_and_match_total_rows 'SELECT * FROM system.zeros LIMIT 100' +run_with_progress_and_match_total_rows 'SELECT * FROM system.zeros_mt LIMIT 100' +run_with_progress_and_match_total_rows "SELECT * FROM generateRandom('number UInt64') LIMIT 100" diff --git a/tests/queries/0_stateless/03030_system_flush_distributed_settings.sql b/tests/queries/0_stateless/03030_system_flush_distributed_settings.sql index 7961444dbc2..da2a387e07c 100644 --- a/tests/queries/0_stateless/03030_system_flush_distributed_settings.sql +++ b/tests/queries/0_stateless/03030_system_flush_distributed_settings.sql @@ -13,8 +13,6 @@ create table dist_out as data engine=Distributed(test_shard_localhost, currentDa set prefer_localhost_replica=0; -set min_untracked_memory='4Mi'; -- Disable precise memory tracking - insert into dist_in select number/100, number from system.numbers limit 1e6 settings max_memory_usage='20Mi'; system flush distributed dist_in; -- { serverError MEMORY_LIMIT_EXCEEDED } system flush distributed dist_in settings max_memory_usage=0; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal.sh b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal.sh index 7c1ac41cfdc..887b2ed94d7 100755 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal.sh +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal.sh @@ -8,7 +8,7 @@ CLICKHOUSE_LOG_COMMENT= . 
"$CUR_DIR"/../shell_config.sh -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1 --index_granularity_bytes 10485760 --merge_max_block_size 8192 --merge_max_block_size_bytes=10485760 --index_granularity 8192" +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1" function test() { @@ -41,12 +41,12 @@ function test() $CH_CLIENT -q "drop table if exists test;" echo "MergeTree compact" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10;" +$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" test $CH_CLIENT -q "drop table test;" echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10;" +$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" test $CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical.sh b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical.sh index 927ceac72b5..371ae87c2ef 100755 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical.sh +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical.sh @@ -9,7 +9,7 @@ CLICKHOUSE_LOG_COMMENT= -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1 --index_granularity_bytes 10485760 --merge_max_block_size 8192 --merge_max_block_size_bytes=10485760 --index_granularity 8192" +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1" function test() { echo "test" @@ -41,11 +41,11 @@ function test() $CH_CLIENT -q "drop table if exists test;" echo "MergeTree compact" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" +$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" test $CH_CLIENT -q "drop table test;" echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" +$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, 
min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" test $CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh index ca313307a6d..60248f4453a 100755 --- a/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh @@ -7,6 +7,7 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh + # Fix some settings to avoid timeouts because of some settings randomization CH_CLIENT="$CLICKHOUSE_CLIENT --allow_merge_tree_settings --allow_experimental_dynamic_type=1 --index_granularity_bytes 10485760 --index_granularity 8128 --merge_max_block_size 8128" @@ -32,7 +33,7 @@ echo "MergeTree wide + horizontal merge" test "min_rows_for_wide_part=1, min_bytes_for_wide_part=1" echo "MergeTree compact + vertical merge" -test "min_rows_for_wide_part=100000000000, min_bytes_for_wide_part=1000000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1" +test "min_rows_for_wide_part=100000000000, min_bytes_for_wide_part=1000000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" echo "MergeTree wide + vertical merge" -test "min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1" +test "min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" diff --git a/tests/queries/0_stateless/03142_alter_comment_parameterized_view.reference b/tests/queries/0_stateless/03142_alter_comment_parameterized_view.reference new file mode 100644 index 00000000000..9b93c75ea56 --- /dev/null +++ b/tests/queries/0_stateless/03142_alter_comment_parameterized_view.reference @@ -0,0 +1 @@ +CREATE VIEW default.test_table_comment AS (SELECT toString({date_from:String})) COMMENT \'test comment\' diff --git a/tests/queries/0_stateless/03142_alter_comment_parameterized_view.sql b/tests/queries/0_stateless/03142_alter_comment_parameterized_view.sql new file mode 100644 index 00000000000..98318e99e4a --- /dev/null +++ b/tests/queries/0_stateless/03142_alter_comment_parameterized_view.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS test_table_comment; +CREATE VIEW test_table_comment AS SELECT toString({date_from:String}); +ALTER TABLE test_table_comment MODIFY COMMENT 'test comment'; +SELECT create_table_query FROM system.tables WHERE name = 'test_table_comment' AND database = currentDatabase(); +DROP TABLE test_table_comment; diff --git a/tests/queries/0_stateless/03143_prewhere_profile_events.sh b/tests/queries/0_stateless/03143_prewhere_profile_events.sh index 863fcc1fe01..00daa0fe7cc 100755 --- a/tests/queries/0_stateless/03143_prewhere_profile_events.sh +++ b/tests/queries/0_stateless/03143_prewhere_profile_events.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-random-merge-tree-settings +# Tags: no-random-settings, no-random-merge-tree-settings CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname 
"${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} -nq " DROP TABLE IF EXISTS t; - CREATE TABLE t(a UInt32, b UInt32, c UInt32, d UInt32) ENGINE=MergeTree ORDER BY a SETTINGS min_bytes_for_wide_part=1, min_rows_for_wide_part=1; + CREATE TABLE t(a UInt32, b UInt32, c UInt32, d UInt32) ENGINE=MergeTree ORDER BY a SETTINGS min_bytes_for_wide_part=0, min_rows_for_wide_part=0; INSERT INTO t SELECT number, number, number, number FROM numbers_mt(1e7); diff --git a/tests/queries/0_stateless/03146_parameterized_view_with_date.reference b/tests/queries/0_stateless/03146_parameterized_view_with_date.reference new file mode 100644 index 00000000000..1d6227dbbcb --- /dev/null +++ b/tests/queries/0_stateless/03146_parameterized_view_with_date.reference @@ -0,0 +1 @@ +2 2024-04-01 01:00:00 diff --git a/tests/queries/0_stateless/03146_parameterized_view_with_date.sql b/tests/queries/0_stateless/03146_parameterized_view_with_date.sql new file mode 100644 index 00000000000..2cfadb70b24 --- /dev/null +++ b/tests/queries/0_stateless/03146_parameterized_view_with_date.sql @@ -0,0 +1,14 @@ + +drop table if exists table_pv; +create table table_pv (id Int32, timestamp_field DateTime) engine = Memory(); + +insert into table_pv values(1, '2024-03-01 00:00:00'); +insert into table_pv values (2, '2024-04-01 01:00:00'); + +create view pv as select * from table_pv where timestamp_field > {timestamp_param:DateTime}; + +select * from pv (timestamp_param=toDateTime('2024-04-01 00:00:01')); + +select * from pv (timestamp_param=toDateTime('2024-040')); -- { serverError CANNOT_PARSE_DATETIME } + +drop table table_pv; diff --git a/tests/queries/0_stateless/03155_analyzer_interpolate.reference b/tests/queries/0_stateless/03155_analyzer_interpolate.reference index 791aaa5b2a2..eade3b45d26 100644 --- a/tests/queries/0_stateless/03155_analyzer_interpolate.reference +++ b/tests/queries/0_stateless/03155_analyzer_interpolate.reference @@ -11,3 +11,8 @@ 5 [5] 5.5 [5] 7 [7] +2 +100500 +18 +26 +34 diff --git a/tests/queries/0_stateless/03155_analyzer_interpolate.sql b/tests/queries/0_stateless/03155_analyzer_interpolate.sql index b3c1d233f47..30423cb86ff 100644 --- a/tests/queries/0_stateless/03155_analyzer_interpolate.sql +++ b/tests/queries/0_stateless/03155_analyzer_interpolate.sql @@ -10,3 +10,6 @@ SELECT n, number+5 AS inter FROM ( -- { serverError NOT_AN_AGGREGATE } SELECT toFloat32(number % 10) AS n, number, number*2 AS mn FROM numbers(10) WHERE number % 3 = 1 ) GROUP BY n, inter ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5 INTERPOLATE (inter AS mn * 2); + +-- https://github.com/ClickHouse/ClickHouse/issues/64636 +select sum(number) as s from remote('127.0.0.{1,2}', numbers(10)) where (intDiv(number, 2) as key) != 1 group by key order by key with fill interpolate (s as 100500); diff --git a/tests/queries/0_stateless/03155_test_move_to_prewhere.reference b/tests/queries/0_stateless/03155_test_move_to_prewhere.reference new file mode 100644 index 00000000000..0cfbf08886f --- /dev/null +++ b/tests/queries/0_stateless/03155_test_move_to_prewhere.reference @@ -0,0 +1 @@ +2 diff --git a/tests/queries/0_stateless/03155_test_move_to_prewhere.sh b/tests/queries/0_stateless/03155_test_move_to_prewhere.sh new file mode 100755 index 00000000000..b6980b3a23a --- /dev/null +++ b/tests/queries/0_stateless/03155_test_move_to_prewhere.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -nq " + CREATE TABLE event_envoy + ( + timestamp_interval DateTime CODEC(DoubleDelta), + region LowCardinality(String), + cluster LowCardinality(String) + ) + ENGINE = MergeTree + ORDER BY (timestamp_interval) + SETTINGS index_granularity = 8192; + + INSERT INTO event_envoy SELECT now() - number, 'us-east-1', 'ch_super_fast' FROM numbers_mt(1e5); +" + +${CLICKHOUSE_CLIENT} -nq " + CREATE TABLE event_envoy_remote + ( + timestamp_interval DateTime CODEC(DoubleDelta), + region LowCardinality(String), + cluster LowCardinality(String) + ) AS remote('127.0.0.1', '${CLICKHOUSE_DATABASE}', event_envoy); +" + +${CLICKHOUSE_CLIENT} -q " + CREATE TABLE global_event_envoy + ( + timestamp_interval DateTime, + region LowCardinality(String), + cluster LowCardinality(String) + ) + ENGINE = Merge('${CLICKHOUSE_DATABASE}', 'event_envoy.*'); +" + +${CLICKHOUSE_CLIENT} --prefer_localhost_replica 1 -q " + EXPLAIN indexes=1 + SELECT timestamp_interval + FROM global_event_envoy + WHERE timestamp_interval <= now() - 54321 AND region = 'us-east-1' +" | grep -c 'Condition.*timestamp_interval' + diff --git a/tests/queries/0_stateless/03156_group_concat.reference b/tests/queries/0_stateless/03156_group_concat.reference new file mode 100644 index 00000000000..75b347be0c4 --- /dev/null +++ b/tests/queries/0_stateless/03156_group_concat.reference @@ -0,0 +1,14 @@ +0 95 abc [1,2,3] +1 \N a [993,986,979,972] +2 123 makson95 [] +95123 +abcamakson95 +[1,2,3][993,986,979,972][] +95,123 +abc,a,makson95 +[1,2,3],[993,986,979,972] +\N +951239512395123 +abc,a,makson95,abc,a,makson95,abc,a,makson95 +[1,2,3][993,986,979,972][][1,2,3][993,986,979,972][][1,2,3][993,986,979,972][] +488890 diff --git a/tests/queries/0_stateless/03156_group_concat.sql b/tests/queries/0_stateless/03156_group_concat.sql new file mode 100644 index 00000000000..c14fde8943a --- /dev/null +++ b/tests/queries/0_stateless/03156_group_concat.sql @@ -0,0 +1,40 @@ +DROP TABLE IF EXISTS test_groupConcat; +CREATE TABLE test_groupConcat +( + id UInt64, + p_int Int32 NULL, + p_string String, + p_array Array(Int32) +) ENGINE = MergeTree ORDER BY id; + +SET max_insert_threads = 1, max_threads = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +INSERT INTO test_groupConcat VALUES (0, 95, 'abc', [1, 2, 3]), (1, NULL, 'a', [993, 986, 979, 972]), (2, 123, 'makson95', []); + +SELECT * FROM test_groupConcat; + +SELECT groupConcat(p_int) FROM test_groupConcat; +SELECT groupConcat(p_string) FROM test_groupConcat; +SELECT groupConcat(p_array) FROM test_groupConcat; + +SELECT groupConcat(',')(p_int) FROM test_groupConcat; +SELECT groupConcat(',')(p_string) FROM test_groupConcat; +SELECT groupConcat(',', 2)(p_array) FROM test_groupConcat; + +SELECT groupConcat(p_int) FROM test_groupConcat WHERE id = 1; + +INSERT INTO test_groupConcat VALUES (0, 95, 'abc', [1, 2, 3]), (1, NULL, 'a', [993, 986, 979, 972]), (2, 123, 'makson95', []); +INSERT INTO test_groupConcat VALUES (0, 95, 'abc', [1, 2, 3]), (1, NULL, 'a', [993, 986, 979, 972]), (2, 123, 'makson95', []); + +SELECT groupConcat(p_int) FROM test_groupConcat; +SELECT groupConcat(',')(p_string) FROM test_groupConcat; +SELECT groupConcat(p_array) FROM test_groupConcat; + +SELECT groupConcat(123)(number) FROM numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT groupConcat(',', '3')(number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT groupConcat(',', 0)(number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT groupConcat(',', 
-1)(number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT groupConcat(',', 3, 3)(number) FROM numbers(10); -- { serverError TOO_MANY_ARGUMENTS_FOR_FUNCTION } + +SELECT length(groupConcat(number)) FROM numbers(100000); + +DROP TABLE IF EXISTS test_groupConcat; diff --git a/tests/queries/0_stateless/03156_nullable_number_tips.sql b/tests/queries/0_stateless/03156_nullable_number_tips.sql index e6f2fa36d86..9a494e3292b 100644 --- a/tests/queries/0_stateless/03156_nullable_number_tips.sql +++ b/tests/queries/0_stateless/03156_nullable_number_tips.sql @@ -1,3 +1,4 @@ +SET output_format_pretty_display_footer_column_names=0; SELECT 123456789 AS x FORMAT PrettyCompact; SELECT toNullable(123456789) AS x FORMAT PrettyCompact; SELECT toLowCardinality(toNullable(123456789)) AS x FORMAT PrettyCompact; diff --git a/tests/queries/0_stateless/03160_pretty_format_tty.sh b/tests/queries/0_stateless/03160_pretty_format_tty.sh index bbc4b96eb90..200bd52f3fa 100755 --- a/tests/queries/0_stateless/03160_pretty_format_tty.sh +++ b/tests/queries/0_stateless/03160_pretty_format_tty.sh @@ -5,4 +5,4 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh # default output_format_pretty_max_rows is 10K -$CLICKHOUSE_LOCAL -q "select * from numbers(100e3) format PrettySpace settings max_threads=1" | wc -l +$CLICKHOUSE_LOCAL -q "select * from numbers(100e3) format PrettySpace settings max_threads=1, output_format_pretty_display_footer_column_names=0" | wc -l diff --git a/tests/queries/0_stateless/03164_adapting_parquet_reader_output_size.reference b/tests/queries/0_stateless/03164_adapting_parquet_reader_output_size.reference new file mode 100644 index 00000000000..ef9b07ba955 --- /dev/null +++ b/tests/queries/0_stateless/03164_adapting_parquet_reader_output_size.reference @@ -0,0 +1,4 @@ +65409 +16 +128 +2363 diff --git a/tests/queries/0_stateless/03164_adapting_parquet_reader_output_size.sql b/tests/queries/0_stateless/03164_adapting_parquet_reader_output_size.sql new file mode 100644 index 00000000000..fa098b64702 --- /dev/null +++ b/tests/queries/0_stateless/03164_adapting_parquet_reader_output_size.sql @@ -0,0 +1,25 @@ +-- Tags: no-fasttest, no-parallel, no-random-settings + +set max_insert_threads=1; + +DROP TABLE IF EXISTS test_parquet; +CREATE TABLE test_parquet (col1 String, col2 String, col3 String, col4 String, col5 String, col6 String, col7 String) ENGINE=File(Parquet); +INSERT INTO test_parquet SELECT rand(),rand(),rand(),rand(),rand(),rand(),rand() FROM numbers(100000); +SELECT max(blockSize()) FROM test_parquet; + +DROP TABLE IF EXISTS test_parquet; +CREATE TABLE test_parquet (col1 String, col2 String, col3 String, col4 String, col5 String, col6 String, col7 String) ENGINE=File(Parquet) settings input_format_parquet_max_block_size=16; +INSERT INTO test_parquet SELECT rand(),rand(),rand(),rand(),rand(),rand(),rand() FROM numbers(100000); +SELECT max(blockSize()) FROM test_parquet; + +DROP TABLE IF EXISTS test_parquet; +CREATE TABLE test_parquet (col1 String, col2 String, col3 String, col4 String, col5 String, col6 String, col7 String) ENGINE=File(Parquet) settings input_format_parquet_prefer_block_bytes=30; +INSERT INTO test_parquet SELECT rand(),rand(),rand(),rand(),rand(),rand(),rand() FROM numbers(100000); +SELECT max(blockSize()) FROM test_parquet; + +DROP TABLE IF EXISTS test_parquet; +CREATE TABLE test_parquet (col1 String, col2 String, col3 String, col4 String, col5 String, col6 String, col7 String) ENGINE=File(Parquet) settings 
input_format_parquet_prefer_block_bytes=30720;
+INSERT INTO test_parquet SELECT rand(),rand(),rand(),rand(),rand(),rand(),rand() FROM numbers(100000);
+SELECT max(blockSize()) FROM test_parquet;
+
+DROP TABLE IF EXISTS test_parquet;
\ No newline at end of file
diff --git a/tests/queries/0_stateless/03164_early_constant_folding_analyzer.reference b/tests/queries/0_stateless/03164_early_constant_folding_analyzer.reference
new file mode 100644
index 00000000000..227b118bb7f
--- /dev/null
+++ b/tests/queries/0_stateless/03164_early_constant_folding_analyzer.reference
@@ -0,0 +1 @@
+ReadFromPreparedSource (Optimized trivial count)
diff --git a/tests/queries/0_stateless/03164_early_constant_folding_analyzer.sql b/tests/queries/0_stateless/03164_early_constant_folding_analyzer.sql
new file mode 100644
index 00000000000..dbffbc1af71
--- /dev/null
+++ b/tests/queries/0_stateless/03164_early_constant_folding_analyzer.sql
@@ -0,0 +1,30 @@
+CREATE TABLE checks
+(
+    `pull_request_number` UInt32,
+    `commit_sha` LowCardinality(String),
+    `check_name` LowCardinality(String),
+    `check_status` LowCardinality(String),
+    `check_duration_ms` UInt64,
+    `check_start_time` DateTime,
+    `test_name` LowCardinality(String),
+    `test_status` LowCardinality(String),
+    `test_duration_ms` UInt64,
+    `report_url` String,
+    `pull_request_url` String,
+    `commit_url` String,
+    `task_url` String,
+    `base_ref` String,
+    `base_repo` String,
+    `head_ref` String,
+    `head_repo` String,
+    `test_context_raw` String,
+    `instance_type` LowCardinality(String),
+    `instance_id` String,
+    `date` Date MATERIALIZED toDate(check_start_time)
+)
+ENGINE = MergeTree ORDER BY (date, pull_request_number, commit_sha, check_name, test_name, check_start_time);
+
+insert into checks select * from generateRandom() limit 1;
+
+
+select trimLeft(explain) from (explain SELECT count(1) FROM checks WHERE test_name IS NOT NULL) where explain like '%ReadFromPreparedSource%' SETTINGS allow_experimental_analyzer = 1, allow_experimental_parallel_reading_from_replicas = 0;
diff --git a/tests/queries/0_stateless/03164_linestring_geometry.reference b/tests/queries/0_stateless/03164_linestring_geometry.reference
new file mode 100644
index 00000000000..1f68df04614
--- /dev/null
+++ b/tests/queries/0_stateless/03164_linestring_geometry.reference
@@ -0,0 +1,11 @@
+-- { echoOn }
+SELECT readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)');
+[(1,1),(2,2),(3,3),(1,1)]
+SELECT toTypeName(readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)'));
+LineString
+SELECT wkt(readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)'));
+LINESTRING(1 1,2 2,3 3,1 1)
+-- Native Array(Tuple(Float64, Float64)) is treated as Ring, not as LineString.
+WITH wkt(CAST([(1, 1), (2, 2), (3, 3)], 'Array(Tuple(Float64, Float64))')) as x
+SELECT x, toTypeName(x), readWKTRing(x) as y, toTypeName(y);
+POLYGON((1 1,2 2,3 3)) String [(1,1),(2,2),(3,3)] Ring
diff --git a/tests/queries/0_stateless/03164_linestring_geometry.sql b/tests/queries/0_stateless/03164_linestring_geometry.sql
new file mode 100644
index 00000000000..e4f1d1295e7
--- /dev/null
+++ b/tests/queries/0_stateless/03164_linestring_geometry.sql
@@ -0,0 +1,8 @@
+-- { echoOn }
+SELECT readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)');
+SELECT toTypeName(readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)'));
+SELECT wkt(readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)'));
+
+-- Native Array(Tuple(Float64, Float64)) is treated as Ring, not as LineString.
+WITH wkt(CAST([(1, 1), (2, 2), (3, 3)], 'Array(Tuple(Float64, Float64))')) as x +SELECT x, toTypeName(x), readWKTRing(x) as y, toTypeName(y); diff --git a/tests/queries/0_stateless/03164_parallel_replicas_range_filter_min_max.reference b/tests/queries/0_stateless/03164_parallel_replicas_range_filter_min_max.reference new file mode 100644 index 00000000000..4dded9eda81 --- /dev/null +++ b/tests/queries/0_stateless/03164_parallel_replicas_range_filter_min_max.reference @@ -0,0 +1,10 @@ +10 +10 +10 +10 +10 +10 +10 +10 +13 +4 diff --git a/tests/queries/0_stateless/03164_parallel_replicas_range_filter_min_max.sql b/tests/queries/0_stateless/03164_parallel_replicas_range_filter_min_max.sql new file mode 100644 index 00000000000..58143395e44 --- /dev/null +++ b/tests/queries/0_stateless/03164_parallel_replicas_range_filter_min_max.sql @@ -0,0 +1,125 @@ +DROP TABLE IF EXISTS range_filter_custom_range_test; + +CREATE TABLE range_filter_custom_range_test (k UInt64) ENGINE=MergeTree ORDER BY k; + +INSERT INTO range_filter_custom_range_test SELECT number + 5 from numbers(10); + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, max_parallel_replicas = 3, distributed_group_by_no_merge = 0, + parallel_replicas_custom_key = 'k', parallel_replicas_custom_key_filter_type = 'range', + parallel_replicas_custom_key_range_lower = 5, parallel_replicas_custom_key_range_upper = 15 +); + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, max_parallel_replicas = 3, distributed_group_by_no_merge = 0, + parallel_replicas_custom_key = 'k', parallel_replicas_custom_key_filter_type = 'range', + parallel_replicas_custom_key_range_lower = 4, parallel_replicas_custom_key_range_upper = 14 +); + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, max_parallel_replicas = 3, distributed_group_by_no_merge = 0, + parallel_replicas_custom_key = 'k', parallel_replicas_custom_key_filter_type = 'range', + parallel_replicas_custom_key_range_lower = 6, parallel_replicas_custom_key_range_upper = 17 +); + + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, max_parallel_replicas = 3, distributed_group_by_no_merge = 0, + parallel_replicas_custom_key = 'k', parallel_replicas_custom_key_filter_type = 'range', + parallel_replicas_custom_key_range_lower = 0, parallel_replicas_custom_key_range_upper = 15 +); + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, max_parallel_replicas = 3, distributed_group_by_no_merge = 0, + parallel_replicas_custom_key = 'k', parallel_replicas_custom_key_filter_type = 'range', + parallel_replicas_custom_key_range_lower = 15, parallel_replicas_custom_key_range_upper = 25 +); + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, max_parallel_replicas = 3, 
distributed_group_by_no_merge = 0, + parallel_replicas_custom_key = 'k', parallel_replicas_custom_key_filter_type = 'range', + parallel_replicas_custom_key_range_lower = 0, parallel_replicas_custom_key_range_upper = 5 +); + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, max_parallel_replicas = 3, distributed_group_by_no_merge = 0, + parallel_replicas_custom_key = 'k', parallel_replicas_custom_key_filter_type = 'range', + parallel_replicas_custom_key_range_lower = 500, parallel_replicas_custom_key_range_upper = 10000 +); + + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, max_parallel_replicas = 3, distributed_group_by_no_merge = 0, + parallel_replicas_custom_key = 'k', parallel_replicas_custom_key_filter_type = 'range', + parallel_replicas_custom_key_range_lower = 10, parallel_replicas_custom_key_range_upper = 13 +); + +DROP TABLE range_filter_custom_range_test; + +DROP TABLE IF EXISTS range_filter_custom_range_test_2; + +CREATE TABLE range_filter_custom_range_test_2 (k UInt64) ENGINE=MergeTree ORDER BY k; + +INSERT INTO range_filter_custom_range_test_2 SELECT number from numbers(13); + +SELECT count() +FROM +( + SELECT * + FROM cluster(parallel_replicas, currentDatabase(), range_filter_custom_range_test_2) + SETTINGS prefer_localhost_replica = 0, max_parallel_replicas = 12, distributed_group_by_no_merge = 0, + parallel_replicas_custom_key = 'k', parallel_replicas_custom_key_filter_type = 'range', + parallel_replicas_custom_key_range_lower = 0, parallel_replicas_custom_key_range_upper = 13 +); + +DROP TABLE range_filter_custom_range_test_2; + +DROP TABLE IF EXISTS range_filter_custom_range_test_3; + +CREATE TABLE range_filter_custom_range_test_3 (k UInt64) ENGINE=MergeTree ORDER BY k; + +INSERT INTO range_filter_custom_range_test_3 SELECT number from numbers(4); + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test_3) + SETTINGS prefer_localhost_replica = 0, max_parallel_replicas = 12, distributed_group_by_no_merge = 0, + parallel_replicas_custom_key = 'k', parallel_replicas_custom_key_filter_type = 'range', + parallel_replicas_custom_key_range_lower = 0, parallel_replicas_custom_key_range_upper = 4 +); + +DROP TABLE range_filter_custom_range_test_3; \ No newline at end of file diff --git a/tests/queries/0_stateless/03165_round_scale_as_column.reference b/tests/queries/0_stateless/03165_round_scale_as_column.reference new file mode 100644 index 00000000000..9ad25ed466a --- /dev/null +++ b/tests/queries/0_stateless/03165_round_scale_as_column.reference @@ -0,0 +1,2165 @@ +0 0 0 0 0 0 +1 1 1 1 1 1 +2 2 2 2 2 2 +3 3 3 3 3 3 +4 4 4 4 4 4 +5 5 5 5 5 5 +6 6 6 6 6 6 +7 7 7 7 7 7 +8 8 8 8 8 8 +9 9 9 9 9 9 +10 10 10 10 10 10 +11 11 11 11 11 11 +12 12 12 12 12 12 +13 13 13 13 13 13 +14 14 14 14 14 14 +15 15 15 15 15 15 +16 16 16 16 16 16 +17 17 17 17 17 17 +18 18 18 18 18 18 +19 19 19 19 19 19 +0 0 0 0 0 0 +1 1 1 1 1 1 +2 2 2 2 2 2 +3 3 3 3 3 3 +4 4 4 4 4 4 +5 5 5 5 5 5 +6 6 6 6 6 6 +7 7 7 7 7 7 +8 8 8 8 8 8 +9 9 9 9 9 9 +10 10 10 10 10 10 +11 11 11 11 11 11 +12 12 12 12 12 12 +13 13 13 13 13 13 +14 14 14 14 14 14 +15 15 15 15 15 15 +16 16 16 16 16 16 +17 17 17 17 17 17 +18 18 18 18 18 18 +19 19 19 19 19 19 +0 0 0 0 0 0 
+1 1 1 1 1 1 +2 2 2 2 2 2 +3 3 3 3 3 3 +4 4 4 4 4 4 +5 5 5 5 5 5 +6 6 6 6 6 6 +7 7 7 7 7 7 +8 8 8 8 8 8 +9 9 9 9 9 9 +10 10 10 10 10 10 +11 11 11 11 11 11 +12 12 12 12 12 12 +13 13 13 13 13 13 +14 14 14 14 14 14 +15 15 15 15 15 15 +16 16 16 16 16 16 +17 17 17 17 17 17 +18 18 18 18 18 18 +19 19 19 19 19 19 +0 0 0 0 0 0 +1 1 1 1 1 1 +2 2 2 2 2 2 +3 3 3 3 3 3 +4 4 4 4 4 4 +5 5 5 5 5 5 +6 6 6 6 6 6 +7 7 7 7 7 7 +8 8 8 8 8 8 +9 9 9 9 9 9 +10 10 10 10 10 10 +11 11 11 11 11 11 +12 12 12 12 12 12 +13 13 13 13 13 13 +14 14 14 14 14 14 +15 15 15 15 15 15 +16 16 16 16 16 16 +17 17 17 17 17 17 +18 18 18 18 18 18 +19 19 19 19 19 19 +-10 -10 -10 -10 -10 -10 +-9 -9 -9 -9 -9 -9 +-8 -8 -8 -8 -8 -8 +-7 -7 -7 -7 -7 -7 +-6 -6 -6 -6 -6 -6 +-5 -5 -5 -5 -5 -5 +-4 -4 -4 -4 -4 -4 +-3 -3 -3 -3 -3 -3 +-2 -2 -2 -2 -2 -2 +-1 -1 -1 -1 -1 -1 +0 0 0 0 0 0 +1 1 1 1 1 1 +2 2 2 2 2 2 +3 3 3 3 3 3 +4 4 4 4 4 4 +5 5 5 5 5 5 +6 6 6 6 6 6 +7 7 7 7 7 7 +8 8 8 8 8 8 +9 9 9 9 9 9 +-10 -10 -10 -10 -10 -10 +-9 -9 -9 -9 -9 -9 +-8 -8 -8 -8 -8 -8 +-7 -7 -7 -7 -7 -7 +-6 -6 -6 -6 -6 -6 +-5 -5 -5 -5 -5 -5 +-4 -4 -4 -4 -4 -4 +-3 -3 -3 -3 -3 -3 +-2 -2 -2 -2 -2 -2 +-1 -1 -1 -1 -1 -1 +0 0 0 0 0 0 +1 1 1 1 1 1 +2 2 2 2 2 2 +3 3 3 3 3 3 +4 4 4 4 4 4 +5 5 5 5 5 5 +6 6 6 6 6 6 +7 7 7 7 7 7 +8 8 8 8 8 8 +9 9 9 9 9 9 +-10 -10 -10 -10 -10 -10 +-9 -9 -9 -9 -9 -9 +-8 -8 -8 -8 -8 -8 +-7 -7 -7 -7 -7 -7 +-6 -6 -6 -6 -6 -6 +-5 -5 -5 -5 -5 -5 +-4 -4 -4 -4 -4 -4 +-3 -3 -3 -3 -3 -3 +-2 -2 -2 -2 -2 -2 +-1 -1 -1 -1 -1 -1 +0 0 0 0 0 0 +1 1 1 1 1 1 +2 2 2 2 2 2 +3 3 3 3 3 3 +4 4 4 4 4 4 +5 5 5 5 5 5 +6 6 6 6 6 6 +7 7 7 7 7 7 +8 8 8 8 8 8 +9 9 9 9 9 9 +-10 -10 -10 -10 -10 -10 +-9 -9 -9 -9 -9 -9 +-8 -8 -8 -8 -8 -8 +-7 -7 -7 -7 -7 -7 +-6 -6 -6 -6 -6 -6 +-5 -5 -5 -5 -5 -5 +-4 -4 -4 -4 -4 -4 +-3 -3 -3 -3 -3 -3 +-2 -2 -2 -2 -2 -2 +-1 -1 -1 -1 -1 -1 +0 0 0 0 0 0 +1 1 1 1 1 1 +2 2 2 2 2 2 +3 3 3 3 3 3 +4 4 4 4 4 4 +5 5 5 5 5 5 +6 6 6 6 6 6 +7 7 7 7 7 7 +8 8 8 8 8 8 +9 9 9 9 9 9 +-10 -10 -10 -10 -10 -10 +-9 -9 -9 -9 -9 -9 +-8 -8 -8 -8 -8 -8 +-7 -7 -7 -7 -7 -7 +-6 -6 -6 -6 -6 -6 +-5 -5 -5 -5 -5 -5 +-4 -4 -4 -4 -4 -4 +-3 -3 -3 -3 -3 -3 +-2 -2 -2 -2 -2 -2 +-1 -1 -1 -1 -1 -1 +0 0 0 0 0 0 +1 1 1 1 1 1 +2 2 2 2 2 2 +3 3 3 3 3 3 +4 4 4 4 4 4 +5 5 5 5 5 5 +6 6 6 6 6 6 +7 7 7 7 7 7 +8 8 8 8 8 8 +9 9 9 9 9 9 +-10 -10 -10 -10 -10 -10 +-9 -9 -9 -9 -9 -9 +-8 -8 -8 -8 -8 -8 +-7 -7 -7 -7 -7 -7 +-6 -6 -6 -6 -6 -6 +-5 -5 -5 -5 -5 -5 +-4 -4 -4 -4 -4 -4 +-3 -3 -3 -3 -3 -3 +-2 -2 -2 -2 -2 -2 +-1 -1 -1 -1 -1 -1 +0 0 0 0 0 0 +1 1 1 1 1 1 +2 2 2 2 2 2 +3 3 3 3 3 3 +4 4 4 4 4 4 +5 5 5 5 5 5 +6 6 6 6 6 6 +7 7 7 7 7 7 +8 8 8 8 8 8 +9 9 9 9 9 9 +-1 -1 -1 -1 -1 -1 +-0.9 -1 -1 -1 -0 -0 +-0.8 -1 -1 -1 -0 -0 +-0.7 -1 -1 -1 -0 -0 +-0.6 -1 -1 -1 -0 -0 +-0.5 -0 -0 -1 -0 -0 +-0.4 -0 -0 -1 -0 -0 +-0.3 -0 -0 -1 -0 -0 +-0.2 -0 -0 -1 -0 -0 +-0.1 -0 -0 -1 -0 -0 +0 0 0 0 0 0 +0.1 0 0 0 1 0 +0.2 0 0 0 1 0 +0.3 0 0 0 1 0 +0.4 0 0 0 1 0 +0.5 0 0 0 1 0 +0.6 1 1 0 1 0 +0.7 1 1 0 1 0 +0.8 1 1 0 1 0 +0.9 1 1 0 1 0 +-1 -1 -1 -1 -1 -1 +-0.9 -1 -1 -1 -0 -0 +-0.8 -1 -1 -1 -0 -0 +-0.7 -1 -1 -1 -0 -0 +-0.6 -1 -1 -1 -0 -0 +-0.5 -0 -0 -1 -0 -0 +-0.4 -0 -0 -1 -0 -0 +-0.3 -0 -0 -1 -0 -0 +-0.2 -0 -0 -1 -0 -0 +-0.1 -0 -0 -1 -0 -0 +0 0 0 0 0 0 +0.1 0 0 0 1 0 +0.2 0 0 0 1 0 +0.3 0 0 0 1 0 +0.4 0 0 0 1 0 +0.5 0 0 0 1 0 +0.6 1 1 0 1 0 +0.7 1 1 0 1 0 +0.8 1 1 0 1 0 +0.9 1 1 0 1 0 +-1 -1 -1 -1 -1 -1 +-0.9 -0.9 -0.9 -0.9 -0.9 -0.9 +-0.8 -0.8 -0.8 -0.8 -0.8 -0.8 +-0.7 -0.7 -0.7 -0.7 -0.7 -0.7 +-0.6 -0.6 -0.6 -0.6 -0.6 -0.6 +-0.5 -0.5 -0.5 -0.5 -0.5 -0.5 +-0.4 -0.4 -0.4 -0.4 -0.4 -0.4 +-0.3 -0.3 -0.3 -0.3 -0.3 -0.3 +-0.2 -0.2 -0.2 -0.2 
-0.2 -0.2 +-0.1 -0.1 -0.1 -0.1 -0.1 -0.1 +0 0 0 0 0 0 +0.1 0.1 0.1 0.1 0.1 0.1 +0.2 0.2 0.2 0.2 0.2 0.2 +0.3 0.3 0.3 0.3 0.3 0.3 +0.4 0.4 0.4 0.4 0.4 0.4 +0.5 0.5 0.5 0.5 0.5 0.5 +0.6 0.6 0.6 0.6 0.6 0.6 +0.7 0.7 0.7 0.7 0.7 0.7 +0.8 0.8 0.8 0.8 0.8 0.8 +0.9 0.9 0.9 0.9 0.9 0.9 +-1 -1 -1 -1 -1 -1 +-0.9 -0.9 -0.9 -0.9 -0.9 -0.9 +-0.8 -0.8 -0.8 -0.8 -0.8 -0.8 +-0.7 -0.7 -0.7 -0.7 -0.7 -0.7 +-0.6 -0.6 -0.6 -0.6 -0.6 -0.6 +-0.5 -0.5 -0.5 -0.5 -0.5 -0.5 +-0.4 -0.4 -0.4 -0.4 -0.4 -0.4 +-0.3 -0.3 -0.3 -0.3 -0.3 -0.3 +-0.2 -0.2 -0.2 -0.2 -0.2 -0.2 +-0.1 -0.1 -0.1 -0.1 -0.1 -0.1 +0 0 0 0 0 0 +0.1 0.1 0.1 0.1 0.1 0.1 +0.2 0.2 0.2 0.2 0.2 0.2 +0.3 0.3 0.3 0.3 0.3 0.3 +0.4 0.4 0.4 0.4 0.4 0.4 +0.5 0.5 0.5 0.5 0.5 0.5 +0.6 0.6 0.6 0.6 0.6 0.6 +0.7 0.7 0.7 0.7 0.7 0.7 +0.8 0.8 0.8 0.8 0.8 0.8 +0.9 0.9 0.9 0.9 0.9 0.9 +0 0 0 0 0 0 +1 0 0 0 10 0 +2 0 0 0 10 0 +3 0 0 0 10 0 +4 0 0 0 10 0 +5 10 0 0 10 0 +6 10 10 0 10 0 +7 10 10 0 10 0 +8 10 10 0 10 0 +9 10 10 0 10 0 +10 10 10 10 10 10 +11 10 10 10 20 10 +12 10 10 10 20 10 +13 10 10 10 20 10 +14 10 10 10 20 10 +15 20 20 10 20 10 +16 20 20 10 20 10 +17 20 20 10 20 10 +18 20 20 10 20 10 +19 20 20 10 20 10 +0 0 0 0 0 0 +1 0 0 0 10 0 +2 0 0 0 10 0 +3 0 0 0 10 0 +4 0 0 0 10 0 +5 10 0 0 10 0 +6 10 10 0 10 0 +7 10 10 0 10 0 +8 10 10 0 10 0 +9 10 10 0 10 0 +10 10 10 10 10 10 +11 10 10 10 20 10 +12 10 10 10 20 10 +13 10 10 10 20 10 +14 10 10 10 20 10 +15 20 20 10 20 10 +16 20 20 10 20 10 +17 20 20 10 20 10 +18 20 20 10 20 10 +19 20 20 10 20 10 +0 0 0 0 0 0 +1 0 0 0 10 0 +2 0 0 0 10 0 +3 0 0 0 10 0 +4 0 0 0 10 0 +5 10 0 0 10 0 +6 10 10 0 10 0 +7 10 10 0 10 0 +8 10 10 0 10 0 +9 10 10 0 10 0 +10 10 10 10 10 10 +11 10 10 10 20 10 +12 10 10 10 20 10 +13 10 10 10 20 10 +14 10 10 10 20 10 +15 20 20 10 20 10 +16 20 20 10 20 10 +17 20 20 10 20 10 +18 20 20 10 20 10 +19 20 20 10 20 10 +0 0 0 0 0 0 +1 0 0 0 10 0 +2 0 0 0 10 0 +3 0 0 0 10 0 +4 0 0 0 10 0 +5 10 0 0 10 0 +6 10 10 0 10 0 +7 10 10 0 10 0 +8 10 10 0 10 0 +9 10 10 0 10 0 +10 10 10 10 10 10 +11 10 10 10 20 10 +12 10 10 10 20 10 +13 10 10 10 20 10 +14 10 10 10 20 10 +15 20 20 10 20 10 +16 20 20 10 20 10 +17 20 20 10 20 10 +18 20 20 10 20 10 +19 20 20 10 20 10 +-10 -10 -10 -10 -10 -10 +-9 -10 -10 -10 0 0 +-8 -10 -10 -10 0 0 +-7 -10 -10 -10 0 0 +-6 -10 -10 -10 0 0 +-5 -10 0 -10 0 0 +-4 0 0 -10 0 0 +-3 0 0 -10 0 0 +-2 0 0 -10 0 0 +-1 0 0 -10 0 0 +0 0 0 0 0 0 +1 0 0 0 10 0 +2 0 0 0 10 0 +3 0 0 0 10 0 +4 0 0 0 10 0 +5 10 0 0 10 0 +6 10 10 0 10 0 +7 10 10 0 10 0 +8 10 10 0 10 0 +9 10 10 0 10 0 +-10 -10 -10 -10 -10 -10 +-9 -10 -10 -10 0 0 +-8 -10 -10 -10 0 0 +-7 -10 -10 -10 0 0 +-6 -10 -10 -10 0 0 +-5 -10 0 -10 0 0 +-4 0 0 -10 0 0 +-3 0 0 -10 0 0 +-2 0 0 -10 0 0 +-1 0 0 -10 0 0 +0 0 0 0 0 0 +1 0 0 0 10 0 +2 0 0 0 10 0 +3 0 0 0 10 0 +4 0 0 0 10 0 +5 10 0 0 10 0 +6 10 10 0 10 0 +7 10 10 0 10 0 +8 10 10 0 10 0 +9 10 10 0 10 0 +-10 -10 -10 -10 -10 -10 +-9 -10 -10 -10 0 0 +-8 -10 -10 -10 0 0 +-7 -10 -10 -10 0 0 +-6 -10 -10 -10 0 0 +-5 -10 0 -10 0 0 +-4 0 0 -10 0 0 +-3 0 0 -10 0 0 +-2 0 0 -10 0 0 +-1 0 0 -10 0 0 +0 0 0 0 0 0 +1 0 0 0 10 0 +2 0 0 0 10 0 +3 0 0 0 10 0 +4 0 0 0 10 0 +5 10 0 0 10 0 +6 10 10 0 10 0 +7 10 10 0 10 0 +8 10 10 0 10 0 +9 10 10 0 10 0 +-10 -10 -10 -10 -10 -10 +-9 -10 -10 -10 0 0 +-8 -10 -10 -10 0 0 +-7 -10 -10 -10 0 0 +-6 -10 -10 -10 0 0 +-5 -10 0 -10 0 0 +-4 0 0 -10 0 0 +-3 0 0 -10 0 0 +-2 0 0 -10 0 0 +-1 0 0 -10 0 0 +0 0 0 0 0 0 +1 0 0 0 10 0 +2 0 0 0 10 0 +3 0 0 0 10 0 +4 0 0 0 10 0 +5 10 0 0 10 0 +6 10 10 0 10 0 +7 10 10 0 10 0 +8 10 10 0 10 0 +9 10 10 0 10 0 +-10 -10 -10 -10 -10 -10 +-9 -10 -10 -10 -0 -0 
+-8 -10 -10 -10 -0 -0 +-7 -10 -10 -10 -0 -0 +-6 -10 -10 -10 -0 -0 +-5 -0 -0 -10 -0 -0 +-4 -0 -0 -10 -0 -0 +-3 -0 -0 -10 -0 -0 +-2 -0 -0 -10 -0 -0 +-1 -0 -0 -10 -0 -0 +0 0 0 0 0 0 +1 0 0 0 10 0 +2 0 0 0 10 0 +3 0 0 0 10 0 +4 0 0 0 10 0 +5 0 0 0 10 0 +6 10 10 0 10 0 +7 10 10 0 10 0 +8 10 10 0 10 0 +9 10 10 0 10 0 +-10 -10 -10 -10 -10 -10 +-9 -10 -10 -10 -0 -0 +-8 -10 -10 -10 -0 -0 +-7 -10 -10 -10 -0 -0 +-6 -10 -10 -10 -0 -0 +-5 -0 -0 -10 -0 -0 +-4 -0 -0 -10 -0 -0 +-3 -0 -0 -10 -0 -0 +-2 -0 -0 -10 -0 -0 +-1 -0 -0 -10 -0 -0 +0 0 0 0 0 0 +1 0 0 0 10 0 +2 0 0 0 10 0 +3 0 0 0 10 0 +4 0 0 0 10 0 +5 0 0 0 10 0 +6 10 10 0 10 0 +7 10 10 0 10 0 +8 10 10 0 10 0 +9 10 10 0 10 0 +0 0 0 0 0 0 +1 0 0 0 100 0 +2 0 0 0 100 0 +3 0 0 0 100 0 +4 0 0 0 100 0 +5 0 0 0 100 0 +6 0 0 0 100 0 +7 0 0 0 100 0 +8 0 0 0 100 0 +9 0 0 0 100 0 +10 0 0 0 100 0 +11 0 0 0 100 0 +12 0 0 0 100 0 +13 0 0 0 100 0 +14 0 0 0 100 0 +15 0 0 0 100 0 +16 0 0 0 100 0 +17 0 0 0 100 0 +18 0 0 0 100 0 +19 0 0 0 100 0 +0 0 0 0 0 0 +1 0 0 0 100 0 +2 0 0 0 100 0 +3 0 0 0 100 0 +4 0 0 0 100 0 +5 0 0 0 100 0 +6 0 0 0 100 0 +7 0 0 0 100 0 +8 0 0 0 100 0 +9 0 0 0 100 0 +10 0 0 0 100 0 +11 0 0 0 100 0 +12 0 0 0 100 0 +13 0 0 0 100 0 +14 0 0 0 100 0 +15 0 0 0 100 0 +16 0 0 0 100 0 +17 0 0 0 100 0 +18 0 0 0 100 0 +19 0 0 0 100 0 +0 0 0 0 0 0 +1 0 0 0 100 0 +2 0 0 0 100 0 +3 0 0 0 100 0 +4 0 0 0 100 0 +5 0 0 0 100 0 +6 0 0 0 100 0 +7 0 0 0 100 0 +8 0 0 0 100 0 +9 0 0 0 100 0 +10 0 0 0 100 0 +11 0 0 0 100 0 +12 0 0 0 100 0 +13 0 0 0 100 0 +14 0 0 0 100 0 +15 0 0 0 100 0 +16 0 0 0 100 0 +17 0 0 0 100 0 +18 0 0 0 100 0 +19 0 0 0 100 0 +0 0 0 0 0 0 +1 0 0 0 100 0 +2 0 0 0 100 0 +3 0 0 0 100 0 +4 0 0 0 100 0 +5 0 0 0 100 0 +6 0 0 0 100 0 +7 0 0 0 100 0 +8 0 0 0 100 0 +9 0 0 0 100 0 +10 0 0 0 100 0 +11 0 0 0 100 0 +12 0 0 0 100 0 +13 0 0 0 100 0 +14 0 0 0 100 0 +15 0 0 0 100 0 +16 0 0 0 100 0 +17 0 0 0 100 0 +18 0 0 0 100 0 +19 0 0 0 100 0 +-10 0 0 -100 0 0 +-9 0 0 -100 0 0 +-8 0 0 -100 0 0 +-7 0 0 -100 0 0 +-6 0 0 -100 0 0 +-5 0 0 -100 0 0 +-4 0 0 -100 0 0 +-3 0 0 -100 0 0 +-2 0 0 -100 0 0 +-1 0 0 -100 0 0 +0 0 0 0 0 0 +1 0 0 0 100 0 +2 0 0 0 100 0 +3 0 0 0 100 0 +4 0 0 0 100 0 +5 0 0 0 100 0 +6 0 0 0 100 0 +7 0 0 0 100 0 +8 0 0 0 100 0 +9 0 0 0 100 0 +-10 0 0 -100 0 0 +-9 0 0 -100 0 0 +-8 0 0 -100 0 0 +-7 0 0 -100 0 0 +-6 0 0 -100 0 0 +-5 0 0 -100 0 0 +-4 0 0 -100 0 0 +-3 0 0 -100 0 0 +-2 0 0 -100 0 0 +-1 0 0 -100 0 0 +0 0 0 0 0 0 +1 0 0 0 100 0 +2 0 0 0 100 0 +3 0 0 0 100 0 +4 0 0 0 100 0 +5 0 0 0 100 0 +6 0 0 0 100 0 +7 0 0 0 100 0 +8 0 0 0 100 0 +9 0 0 0 100 0 +-10 0 0 -100 0 0 +-9 0 0 -100 0 0 +-8 0 0 -100 0 0 +-7 0 0 -100 0 0 +-6 0 0 -100 0 0 +-5 0 0 -100 0 0 +-4 0 0 -100 0 0 +-3 0 0 -100 0 0 +-2 0 0 -100 0 0 +-1 0 0 -100 0 0 +0 0 0 0 0 0 +1 0 0 0 100 0 +2 0 0 0 100 0 +3 0 0 0 100 0 +4 0 0 0 100 0 +5 0 0 0 100 0 +6 0 0 0 100 0 +7 0 0 0 100 0 +8 0 0 0 100 0 +9 0 0 0 100 0 +-10 0 0 -100 0 0 +-9 0 0 -100 0 0 +-8 0 0 -100 0 0 +-7 0 0 -100 0 0 +-6 0 0 -100 0 0 +-5 0 0 -100 0 0 +-4 0 0 -100 0 0 +-3 0 0 -100 0 0 +-2 0 0 -100 0 0 +-1 0 0 -100 0 0 +0 0 0 0 0 0 +1 0 0 0 100 0 +2 0 0 0 100 0 +3 0 0 0 100 0 +4 0 0 0 100 0 +5 0 0 0 100 0 +6 0 0 0 100 0 +7 0 0 0 100 0 +8 0 0 0 100 0 +9 0 0 0 100 0 +-10 -0 -0 -100 -0 -0 +-9 -0 -0 -100 -0 -0 +-8 -0 -0 -100 -0 -0 +-7 -0 -0 -100 -0 -0 +-6 -0 -0 -100 -0 -0 +-5 -0 -0 -100 -0 -0 +-4 -0 -0 -100 -0 -0 +-3 -0 -0 -100 -0 -0 +-2 -0 -0 -100 -0 -0 +-1 -0 -0 -100 -0 -0 +0 0 0 0 0 0 +1 0 0 0 100 0 +2 0 0 0 100 0 +3 0 0 0 100 0 +4 0 0 0 100 0 +5 0 0 0 100 0 +6 0 0 0 100 0 +7 0 0 0 100 0 +8 0 0 0 100 0 +9 0 0 0 100 0 +-10 -0 -0 
-100 -0 -0 +-9 -0 -0 -100 -0 -0 +-8 -0 -0 -100 -0 -0 +-7 -0 -0 -100 -0 -0 +-6 -0 -0 -100 -0 -0 +-5 -0 -0 -100 -0 -0 +-4 -0 -0 -100 -0 -0 +-3 -0 -0 -100 -0 -0 +-2 -0 -0 -100 -0 -0 +-1 -0 -0 -100 -0 -0 +0 0 0 0 0 0 +1 0 0 0 100 0 +2 0 0 0 100 0 +3 0 0 0 100 0 +4 0 0 0 100 0 +5 0 0 0 100 0 +6 0 0 0 100 0 +7 0 0 0 100 0 +8 0 0 0 100 0 +9 0 0 0 100 0 +CHECKPOINT1 +id u8 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale) +0 0 0 0 0 0 0 0 +1 1 0 1 1 1 1 1 +2 2 0 2 2 2 2 2 +3 3 0 3 3 3 3 3 +4 4 0 4 4 4 4 4 +5 5 0 5 5 5 5 5 +6 6 0 6 6 6 6 6 +7 7 0 7 7 7 7 7 +8 8 0 8 8 8 8 8 +9 9 0 9 9 9 9 9 +10 10 0 10 10 10 10 10 +11 11 0 11 11 11 11 11 +12 12 0 12 12 12 12 12 +13 13 0 13 13 13 13 13 +14 14 0 14 14 14 14 14 +15 15 0 15 15 15 15 15 +16 16 0 16 16 16 16 16 +17 17 0 17 17 17 17 17 +18 18 0 18 18 18 18 18 +19 19 0 19 19 19 19 19 +20 10 0 10 10 10 10 10 +21 11 0 11 11 11 11 11 +22 12 0 12 12 12 12 12 +23 13 0 13 13 13 13 13 +24 14 0 14 14 14 14 14 +25 15 0 15 15 15 15 15 +26 16 0 16 16 16 16 16 +27 17 0 17 17 17 17 17 +28 18 0 18 18 18 18 18 +29 19 0 19 19 19 19 19 +30 20 0 20 20 20 20 20 +31 21 0 21 21 21 21 21 +32 22 0 22 22 22 22 22 +33 23 0 23 23 23 23 23 +34 24 0 24 24 24 24 24 +35 25 0 25 25 25 25 25 +36 26 0 26 26 26 26 26 +37 27 0 27 27 27 27 27 +38 28 0 28 28 28 28 28 +39 29 0 29 29 29 29 29 +40 0 -1 0 0 0 0 0 +41 1 -1 0 0 0 10 0 +42 2 -1 0 0 0 10 0 +43 3 -1 0 0 0 10 0 +44 4 -1 0 0 0 10 0 +45 5 -1 10 0 0 10 0 +46 6 -1 10 10 0 10 0 +47 7 -1 10 10 0 10 0 +48 8 -1 10 10 0 10 0 +49 9 -1 10 10 0 10 0 +50 10 -1 10 10 10 10 10 +51 11 -1 10 10 10 20 10 +52 12 -1 10 10 10 20 10 +53 13 -1 10 10 10 20 10 +54 14 -1 10 10 10 20 10 +55 15 -1 20 20 10 20 10 +56 16 -1 20 20 10 20 10 +57 17 -1 20 20 10 20 10 +58 18 -1 20 20 10 20 10 +59 19 -1 20 20 10 20 10 +60 10 -1 10 10 10 10 10 +61 11 -1 10 10 10 20 10 +62 12 -1 10 10 10 20 10 +63 13 -1 10 10 10 20 10 +64 14 -1 10 10 10 20 10 +65 15 -1 20 20 10 20 10 +66 16 -1 20 20 10 20 10 +67 17 -1 20 20 10 20 10 +68 18 -1 20 20 10 20 10 +69 19 -1 20 20 10 20 10 +70 20 -1 20 20 20 20 20 +71 21 -1 20 20 20 30 20 +72 22 -1 20 20 20 30 20 +73 23 -1 20 20 20 30 20 +74 24 -1 20 20 20 30 20 +75 25 -1 30 20 20 30 20 +76 26 -1 30 30 20 30 20 +77 27 -1 30 30 20 30 20 +78 28 -1 30 30 20 30 20 +79 29 -1 30 30 20 30 20 +80 0 -2 0 0 0 0 0 +81 1 -2 0 0 0 100 0 +82 2 -2 0 0 0 100 0 +83 3 -2 0 0 0 100 0 +84 4 -2 0 0 0 100 0 +85 5 -2 0 0 0 100 0 +86 6 -2 0 0 0 100 0 +87 7 -2 0 0 0 100 0 +88 8 -2 0 0 0 100 0 +89 9 -2 0 0 0 100 0 +90 10 -2 0 0 0 100 0 +91 11 -2 0 0 0 100 0 +92 12 -2 0 0 0 100 0 +93 13 -2 0 0 0 100 0 +94 14 -2 0 0 0 100 0 +95 15 -2 0 0 0 100 0 +96 16 -2 0 0 0 100 0 +97 17 -2 0 0 0 100 0 +98 18 -2 0 0 0 100 0 +99 19 -2 0 0 0 100 0 +100 10 -2 0 0 0 100 0 +101 11 -2 0 0 0 100 0 +102 12 -2 0 0 0 100 0 +103 13 -2 0 0 0 100 0 +104 14 -2 0 0 0 100 0 +105 15 -2 0 0 0 100 0 +106 16 -2 0 0 0 100 0 +107 17 -2 0 0 0 100 0 +108 18 -2 0 0 0 100 0 +109 19 -2 0 0 0 100 0 +110 20 -2 0 0 0 100 0 +111 21 -2 0 0 0 100 0 +112 22 -2 0 0 0 100 0 +113 23 -2 0 0 0 100 0 +114 24 -2 0 0 0 100 0 +115 25 -2 0 0 0 100 0 +116 26 -2 0 0 0 100 0 +117 27 -2 0 0 0 100 0 +118 28 -2 0 0 0 100 0 +119 29 -2 0 0 0 100 0 +200 0 0 0 0 0 0 0 +201 0 -1 0 0 0 0 0 +202 0 -2 0 0 0 0 0 +203 0 -3 0 0 0 0 0 +204 0 -4 0 0 0 0 0 +205 0 -5 0 0 0 0 0 +206 0 -6 0 0 0 0 0 +207 0 -7 0 0 0 0 0 +208 0 -8 0 0 0 0 0 +209 0 -9 0 0 0 0 0 +210 0 0 0 0 0 0 0 +211 0 -1 0 0 0 0 0 +212 0 -2 0 0 0 0 0 +213 0 -3 0 0 0 0 0 +214 0 -4 0 0 0 0 0 +215 0 -5 0 0 0 0 0 +216 0 -6 0 0 0 0 0 +217 0 -7 0 0 0 0 0 
+218 0 -8 0 0 0 0 0 +219 0 -9 0 0 0 0 0 +300 2 4 2 2 2 2 2 +301 20 4 20 20 20 20 20 +302 200 4 200 200 200 200 200 +303 5 4 5 5 5 5 5 +304 50 4 50 50 50 50 50 +305 244 4 244 244 244 244 244 +id u16 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale) +0 0 0 0 0 0 0 0 +1 1 0 1 1 1 1 1 +2 2 0 2 2 2 2 2 +3 3 0 3 3 3 3 3 +4 4 0 4 4 4 4 4 +5 5 0 5 5 5 5 5 +6 6 0 6 6 6 6 6 +7 7 0 7 7 7 7 7 +8 8 0 8 8 8 8 8 +9 9 0 9 9 9 9 9 +10 10 0 10 10 10 10 10 +11 11 0 11 11 11 11 11 +12 12 0 12 12 12 12 12 +13 13 0 13 13 13 13 13 +14 14 0 14 14 14 14 14 +15 15 0 15 15 15 15 15 +16 16 0 16 16 16 16 16 +17 17 0 17 17 17 17 17 +18 18 0 18 18 18 18 18 +19 19 0 19 19 19 19 19 +20 10 0 10 10 10 10 10 +21 11 0 11 11 11 11 11 +22 12 0 12 12 12 12 12 +23 13 0 13 13 13 13 13 +24 14 0 14 14 14 14 14 +25 15 0 15 15 15 15 15 +26 16 0 16 16 16 16 16 +27 17 0 17 17 17 17 17 +28 18 0 18 18 18 18 18 +29 19 0 19 19 19 19 19 +30 20 0 20 20 20 20 20 +31 21 0 21 21 21 21 21 +32 22 0 22 22 22 22 22 +33 23 0 23 23 23 23 23 +34 24 0 24 24 24 24 24 +35 25 0 25 25 25 25 25 +36 26 0 26 26 26 26 26 +37 27 0 27 27 27 27 27 +38 28 0 28 28 28 28 28 +39 29 0 29 29 29 29 29 +40 0 -1 0 0 0 0 0 +41 1 -1 0 0 0 10 0 +42 2 -1 0 0 0 10 0 +43 3 -1 0 0 0 10 0 +44 4 -1 0 0 0 10 0 +45 5 -1 10 0 0 10 0 +46 6 -1 10 10 0 10 0 +47 7 -1 10 10 0 10 0 +48 8 -1 10 10 0 10 0 +49 9 -1 10 10 0 10 0 +50 10 -1 10 10 10 10 10 +51 11 -1 10 10 10 20 10 +52 12 -1 10 10 10 20 10 +53 13 -1 10 10 10 20 10 +54 14 -1 10 10 10 20 10 +55 15 -1 20 20 10 20 10 +56 16 -1 20 20 10 20 10 +57 17 -1 20 20 10 20 10 +58 18 -1 20 20 10 20 10 +59 19 -1 20 20 10 20 10 +60 10 -1 10 10 10 10 10 +61 11 -1 10 10 10 20 10 +62 12 -1 10 10 10 20 10 +63 13 -1 10 10 10 20 10 +64 14 -1 10 10 10 20 10 +65 15 -1 20 20 10 20 10 +66 16 -1 20 20 10 20 10 +67 17 -1 20 20 10 20 10 +68 18 -1 20 20 10 20 10 +69 19 -1 20 20 10 20 10 +70 20 -1 20 20 20 20 20 +71 21 -1 20 20 20 30 20 +72 22 -1 20 20 20 30 20 +73 23 -1 20 20 20 30 20 +74 24 -1 20 20 20 30 20 +75 25 -1 30 20 20 30 20 +76 26 -1 30 30 20 30 20 +77 27 -1 30 30 20 30 20 +78 28 -1 30 30 20 30 20 +79 29 -1 30 30 20 30 20 +80 0 -2 0 0 0 0 0 +81 1 -2 0 0 0 100 0 +82 2 -2 0 0 0 100 0 +83 3 -2 0 0 0 100 0 +84 4 -2 0 0 0 100 0 +85 5 -2 0 0 0 100 0 +86 6 -2 0 0 0 100 0 +87 7 -2 0 0 0 100 0 +88 8 -2 0 0 0 100 0 +89 9 -2 0 0 0 100 0 +90 10 -2 0 0 0 100 0 +91 11 -2 0 0 0 100 0 +92 12 -2 0 0 0 100 0 +93 13 -2 0 0 0 100 0 +94 14 -2 0 0 0 100 0 +95 15 -2 0 0 0 100 0 +96 16 -2 0 0 0 100 0 +97 17 -2 0 0 0 100 0 +98 18 -2 0 0 0 100 0 +99 19 -2 0 0 0 100 0 +100 10 -2 0 0 0 100 0 +101 11 -2 0 0 0 100 0 +102 12 -2 0 0 0 100 0 +103 13 -2 0 0 0 100 0 +104 14 -2 0 0 0 100 0 +105 15 -2 0 0 0 100 0 +106 16 -2 0 0 0 100 0 +107 17 -2 0 0 0 100 0 +108 18 -2 0 0 0 100 0 +109 19 -2 0 0 0 100 0 +110 20 -2 0 0 0 100 0 +111 21 -2 0 0 0 100 0 +112 22 -2 0 0 0 100 0 +113 23 -2 0 0 0 100 0 +114 24 -2 0 0 0 100 0 +115 25 -2 0 0 0 100 0 +116 26 -2 0 0 0 100 0 +117 27 -2 0 0 0 100 0 +118 28 -2 0 0 0 100 0 +119 29 -2 0 0 0 100 0 +200 0 0 0 0 0 0 0 +201 0 -1 0 0 0 0 0 +202 0 -2 0 0 0 0 0 +203 0 -3 0 0 0 0 0 +204 0 -4 0 0 0 0 0 +205 0 -5 0 0 0 0 0 +206 0 -6 0 0 0 0 0 +207 0 -7 0 0 0 0 0 +208 0 -8 0 0 0 0 0 +209 0 -9 0 0 0 0 0 +210 0 0 0 0 0 0 0 +211 0 -1 0 0 0 0 0 +212 0 -2 0 0 0 0 0 +213 0 -3 0 0 0 0 0 +214 0 -4 0 0 0 0 0 +215 0 -5 0 0 0 0 0 +216 0 -6 0 0 0 0 0 +217 0 -7 0 0 0 0 0 +218 0 -8 0 0 0 0 0 +219 0 -9 0 0 0 0 0 +300 2 4 2 2 2 2 2 +301 20 4 20 20 20 20 20 +302 200 4 200 200 200 200 200 +303 5 4 5 5 5 5 5 +304 50 4 50 50 50 50 50 +305 500 4 
500 500 500 500 500 +id u32 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale) +0 0 0 0 0 0 0 0 +1 1 0 1 1 1 1 1 +2 2 0 2 2 2 2 2 +3 3 0 3 3 3 3 3 +4 4 0 4 4 4 4 4 +5 5 0 5 5 5 5 5 +6 6 0 6 6 6 6 6 +7 7 0 7 7 7 7 7 +8 8 0 8 8 8 8 8 +9 9 0 9 9 9 9 9 +10 10 0 10 10 10 10 10 +11 11 0 11 11 11 11 11 +12 12 0 12 12 12 12 12 +13 13 0 13 13 13 13 13 +14 14 0 14 14 14 14 14 +15 15 0 15 15 15 15 15 +16 16 0 16 16 16 16 16 +17 17 0 17 17 17 17 17 +18 18 0 18 18 18 18 18 +19 19 0 19 19 19 19 19 +20 10 0 10 10 10 10 10 +21 11 0 11 11 11 11 11 +22 12 0 12 12 12 12 12 +23 13 0 13 13 13 13 13 +24 14 0 14 14 14 14 14 +25 15 0 15 15 15 15 15 +26 16 0 16 16 16 16 16 +27 17 0 17 17 17 17 17 +28 18 0 18 18 18 18 18 +29 19 0 19 19 19 19 19 +30 20 0 20 20 20 20 20 +31 21 0 21 21 21 21 21 +32 22 0 22 22 22 22 22 +33 23 0 23 23 23 23 23 +34 24 0 24 24 24 24 24 +35 25 0 25 25 25 25 25 +36 26 0 26 26 26 26 26 +37 27 0 27 27 27 27 27 +38 28 0 28 28 28 28 28 +39 29 0 29 29 29 29 29 +40 0 -1 0 0 0 0 0 +41 1 -1 0 0 0 10 0 +42 2 -1 0 0 0 10 0 +43 3 -1 0 0 0 10 0 +44 4 -1 0 0 0 10 0 +45 5 -1 10 0 0 10 0 +46 6 -1 10 10 0 10 0 +47 7 -1 10 10 0 10 0 +48 8 -1 10 10 0 10 0 +49 9 -1 10 10 0 10 0 +50 10 -1 10 10 10 10 10 +51 11 -1 10 10 10 20 10 +52 12 -1 10 10 10 20 10 +53 13 -1 10 10 10 20 10 +54 14 -1 10 10 10 20 10 +55 15 -1 20 20 10 20 10 +56 16 -1 20 20 10 20 10 +57 17 -1 20 20 10 20 10 +58 18 -1 20 20 10 20 10 +59 19 -1 20 20 10 20 10 +60 10 -1 10 10 10 10 10 +61 11 -1 10 10 10 20 10 +62 12 -1 10 10 10 20 10 +63 13 -1 10 10 10 20 10 +64 14 -1 10 10 10 20 10 +65 15 -1 20 20 10 20 10 +66 16 -1 20 20 10 20 10 +67 17 -1 20 20 10 20 10 +68 18 -1 20 20 10 20 10 +69 19 -1 20 20 10 20 10 +70 20 -1 20 20 20 20 20 +71 21 -1 20 20 20 30 20 +72 22 -1 20 20 20 30 20 +73 23 -1 20 20 20 30 20 +74 24 -1 20 20 20 30 20 +75 25 -1 30 20 20 30 20 +76 26 -1 30 30 20 30 20 +77 27 -1 30 30 20 30 20 +78 28 -1 30 30 20 30 20 +79 29 -1 30 30 20 30 20 +80 0 -2 0 0 0 0 0 +81 1 -2 0 0 0 100 0 +82 2 -2 0 0 0 100 0 +83 3 -2 0 0 0 100 0 +84 4 -2 0 0 0 100 0 +85 5 -2 0 0 0 100 0 +86 6 -2 0 0 0 100 0 +87 7 -2 0 0 0 100 0 +88 8 -2 0 0 0 100 0 +89 9 -2 0 0 0 100 0 +90 10 -2 0 0 0 100 0 +91 11 -2 0 0 0 100 0 +92 12 -2 0 0 0 100 0 +93 13 -2 0 0 0 100 0 +94 14 -2 0 0 0 100 0 +95 15 -2 0 0 0 100 0 +96 16 -2 0 0 0 100 0 +97 17 -2 0 0 0 100 0 +98 18 -2 0 0 0 100 0 +99 19 -2 0 0 0 100 0 +100 10 -2 0 0 0 100 0 +101 11 -2 0 0 0 100 0 +102 12 -2 0 0 0 100 0 +103 13 -2 0 0 0 100 0 +104 14 -2 0 0 0 100 0 +105 15 -2 0 0 0 100 0 +106 16 -2 0 0 0 100 0 +107 17 -2 0 0 0 100 0 +108 18 -2 0 0 0 100 0 +109 19 -2 0 0 0 100 0 +110 20 -2 0 0 0 100 0 +111 21 -2 0 0 0 100 0 +112 22 -2 0 0 0 100 0 +113 23 -2 0 0 0 100 0 +114 24 -2 0 0 0 100 0 +115 25 -2 0 0 0 100 0 +116 26 -2 0 0 0 100 0 +117 27 -2 0 0 0 100 0 +118 28 -2 0 0 0 100 0 +119 29 -2 0 0 0 100 0 +200 0 0 0 0 0 0 0 +201 0 -1 0 0 0 0 0 +202 0 -2 0 0 0 0 0 +203 0 -3 0 0 0 0 0 +204 0 -4 0 0 0 0 0 +205 0 -5 0 0 0 0 0 +206 0 -6 0 0 0 0 0 +207 0 -7 0 0 0 0 0 +208 0 -8 0 0 0 0 0 +209 0 -9 0 0 0 0 0 +210 0 0 0 0 0 0 0 +211 0 -1 0 0 0 0 0 +212 0 -2 0 0 0 0 0 +213 0 -3 0 0 0 0 0 +214 0 -4 0 0 0 0 0 +215 0 -5 0 0 0 0 0 +216 0 -6 0 0 0 0 0 +217 0 -7 0 0 0 0 0 +218 0 -8 0 0 0 0 0 +219 0 -9 0 0 0 0 0 +300 2 4 2 2 2 2 2 +301 20 4 20 20 20 20 20 +302 200 4 200 200 200 200 200 +303 5 4 5 5 5 5 5 +304 50 4 50 50 50 50 50 +305 500 4 500 500 500 500 500 +id u64 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale) +0 0 0 0 0 0 0 0 +1 1 0 1 1 1 1 1 +2 2 0 2 2 2 2 
2 +3 3 0 3 3 3 3 3 +4 4 0 4 4 4 4 4 +5 5 0 5 5 5 5 5 +6 6 0 6 6 6 6 6 +7 7 0 7 7 7 7 7 +8 8 0 8 8 8 8 8 +9 9 0 9 9 9 9 9 +10 10 0 10 10 10 10 10 +11 11 0 11 11 11 11 11 +12 12 0 12 12 12 12 12 +13 13 0 13 13 13 13 13 +14 14 0 14 14 14 14 14 +15 15 0 15 15 15 15 15 +16 16 0 16 16 16 16 16 +17 17 0 17 17 17 17 17 +18 18 0 18 18 18 18 18 +19 19 0 19 19 19 19 19 +20 10 0 10 10 10 10 10 +21 11 0 11 11 11 11 11 +22 12 0 12 12 12 12 12 +23 13 0 13 13 13 13 13 +24 14 0 14 14 14 14 14 +25 15 0 15 15 15 15 15 +26 16 0 16 16 16 16 16 +27 17 0 17 17 17 17 17 +28 18 0 18 18 18 18 18 +29 19 0 19 19 19 19 19 +30 20 0 20 20 20 20 20 +31 21 0 21 21 21 21 21 +32 22 0 22 22 22 22 22 +33 23 0 23 23 23 23 23 +34 24 0 24 24 24 24 24 +35 25 0 25 25 25 25 25 +36 26 0 26 26 26 26 26 +37 27 0 27 27 27 27 27 +38 28 0 28 28 28 28 28 +39 29 0 29 29 29 29 29 +40 0 -1 0 0 0 0 0 +41 1 -1 0 0 0 10 0 +42 2 -1 0 0 0 10 0 +43 3 -1 0 0 0 10 0 +44 4 -1 0 0 0 10 0 +45 5 -1 10 0 0 10 0 +46 6 -1 10 10 0 10 0 +47 7 -1 10 10 0 10 0 +48 8 -1 10 10 0 10 0 +49 9 -1 10 10 0 10 0 +50 10 -1 10 10 10 10 10 +51 11 -1 10 10 10 20 10 +52 12 -1 10 10 10 20 10 +53 13 -1 10 10 10 20 10 +54 14 -1 10 10 10 20 10 +55 15 -1 20 20 10 20 10 +56 16 -1 20 20 10 20 10 +57 17 -1 20 20 10 20 10 +58 18 -1 20 20 10 20 10 +59 19 -1 20 20 10 20 10 +60 10 -1 10 10 10 10 10 +61 11 -1 10 10 10 20 10 +62 12 -1 10 10 10 20 10 +63 13 -1 10 10 10 20 10 +64 14 -1 10 10 10 20 10 +65 15 -1 20 20 10 20 10 +66 16 -1 20 20 10 20 10 +67 17 -1 20 20 10 20 10 +68 18 -1 20 20 10 20 10 +69 19 -1 20 20 10 20 10 +70 20 -1 20 20 20 20 20 +71 21 -1 20 20 20 30 20 +72 22 -1 20 20 20 30 20 +73 23 -1 20 20 20 30 20 +74 24 -1 20 20 20 30 20 +75 25 -1 30 20 20 30 20 +76 26 -1 30 30 20 30 20 +77 27 -1 30 30 20 30 20 +78 28 -1 30 30 20 30 20 +79 29 -1 30 30 20 30 20 +80 0 -2 0 0 0 0 0 +81 1 -2 0 0 0 100 0 +82 2 -2 0 0 0 100 0 +83 3 -2 0 0 0 100 0 +84 4 -2 0 0 0 100 0 +85 5 -2 0 0 0 100 0 +86 6 -2 0 0 0 100 0 +87 7 -2 0 0 0 100 0 +88 8 -2 0 0 0 100 0 +89 9 -2 0 0 0 100 0 +90 10 -2 0 0 0 100 0 +91 11 -2 0 0 0 100 0 +92 12 -2 0 0 0 100 0 +93 13 -2 0 0 0 100 0 +94 14 -2 0 0 0 100 0 +95 15 -2 0 0 0 100 0 +96 16 -2 0 0 0 100 0 +97 17 -2 0 0 0 100 0 +98 18 -2 0 0 0 100 0 +99 19 -2 0 0 0 100 0 +100 10 -2 0 0 0 100 0 +101 11 -2 0 0 0 100 0 +102 12 -2 0 0 0 100 0 +103 13 -2 0 0 0 100 0 +104 14 -2 0 0 0 100 0 +105 15 -2 0 0 0 100 0 +106 16 -2 0 0 0 100 0 +107 17 -2 0 0 0 100 0 +108 18 -2 0 0 0 100 0 +109 19 -2 0 0 0 100 0 +110 20 -2 0 0 0 100 0 +111 21 -2 0 0 0 100 0 +112 22 -2 0 0 0 100 0 +113 23 -2 0 0 0 100 0 +114 24 -2 0 0 0 100 0 +115 25 -2 0 0 0 100 0 +116 26 -2 0 0 0 100 0 +117 27 -2 0 0 0 100 0 +118 28 -2 0 0 0 100 0 +119 29 -2 0 0 0 100 0 +200 0 0 0 0 0 0 0 +201 0 -1 0 0 0 0 0 +202 0 -2 0 0 0 0 0 +203 0 -3 0 0 0 0 0 +204 0 -4 0 0 0 0 0 +205 0 -5 0 0 0 0 0 +206 0 -6 0 0 0 0 0 +207 0 -7 0 0 0 0 0 +208 0 -8 0 0 0 0 0 +209 0 -9 0 0 0 0 0 +210 0 0 0 0 0 0 0 +211 0 -1 0 0 0 0 0 +212 0 -2 0 0 0 0 0 +213 0 -3 0 0 0 0 0 +214 0 -4 0 0 0 0 0 +215 0 -5 0 0 0 0 0 +216 0 -6 0 0 0 0 0 +217 0 -7 0 0 0 0 0 +218 0 -8 0 0 0 0 0 +219 0 -9 0 0 0 0 0 +300 2 4 2 2 2 2 2 +301 20 4 20 20 20 20 20 +302 200 4 200 200 200 200 200 +303 5 4 5 5 5 5 5 +304 50 4 50 50 50 50 50 +305 500 4 500 500 500 500 500 +id i8 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale) +0 0 0 0 0 0 0 0 +1 1 0 1 1 1 1 1 +2 2 0 2 2 2 2 2 +3 3 0 3 3 3 3 3 +4 4 0 4 4 4 4 4 +5 5 0 5 5 5 5 5 +6 6 0 6 6 6 6 6 +7 7 0 7 7 7 7 7 +8 8 0 8 8 8 8 8 +9 9 0 9 9 9 9 9 +10 10 0 10 10 10 10 10 +11 11 0 11 11 11 11 11 
+12 12 0 12 12 12 12 12 +13 13 0 13 13 13 13 13 +14 14 0 14 14 14 14 14 +15 15 0 15 15 15 15 15 +16 16 0 16 16 16 16 16 +17 17 0 17 17 17 17 17 +18 18 0 18 18 18 18 18 +19 19 0 19 19 19 19 19 +20 -10 0 -10 -10 -10 -10 -10 +21 -9 0 -9 -9 -9 -9 -9 +22 -8 0 -8 -8 -8 -8 -8 +23 -7 0 -7 -7 -7 -7 -7 +24 -6 0 -6 -6 -6 -6 -6 +25 -5 0 -5 -5 -5 -5 -5 +26 -4 0 -4 -4 -4 -4 -4 +27 -3 0 -3 -3 -3 -3 -3 +28 -2 0 -2 -2 -2 -2 -2 +29 -1 0 -1 -1 -1 -1 -1 +30 0 0 0 0 0 0 0 +31 1 0 1 1 1 1 1 +32 2 0 2 2 2 2 2 +33 3 0 3 3 3 3 3 +34 4 0 4 4 4 4 4 +35 5 0 5 5 5 5 5 +36 6 0 6 6 6 6 6 +37 7 0 7 7 7 7 7 +38 8 0 8 8 8 8 8 +39 9 0 9 9 9 9 9 +40 0 -1 0 0 0 0 0 +41 1 -1 0 0 0 10 0 +42 2 -1 0 0 0 10 0 +43 3 -1 0 0 0 10 0 +44 4 -1 0 0 0 10 0 +45 5 -1 10 0 0 10 0 +46 6 -1 10 10 0 10 0 +47 7 -1 10 10 0 10 0 +48 8 -1 10 10 0 10 0 +49 9 -1 10 10 0 10 0 +50 10 -1 10 10 10 10 10 +51 11 -1 10 10 10 20 10 +52 12 -1 10 10 10 20 10 +53 13 -1 10 10 10 20 10 +54 14 -1 10 10 10 20 10 +55 15 -1 20 20 10 20 10 +56 16 -1 20 20 10 20 10 +57 17 -1 20 20 10 20 10 +58 18 -1 20 20 10 20 10 +59 19 -1 20 20 10 20 10 +60 -10 -1 -10 -10 -10 -10 -10 +61 -9 -1 -10 -10 -10 0 0 +62 -8 -1 -10 -10 -10 0 0 +63 -7 -1 -10 -10 -10 0 0 +64 -6 -1 -10 -10 -10 0 0 +65 -5 -1 -10 0 -10 0 0 +66 -4 -1 0 0 -10 0 0 +67 -3 -1 0 0 -10 0 0 +68 -2 -1 0 0 -10 0 0 +69 -1 -1 0 0 -10 0 0 +70 0 -1 0 0 0 0 0 +71 1 -1 0 0 0 10 0 +72 2 -1 0 0 0 10 0 +73 3 -1 0 0 0 10 0 +74 4 -1 0 0 0 10 0 +75 5 -1 10 0 0 10 0 +76 6 -1 10 10 0 10 0 +77 7 -1 10 10 0 10 0 +78 8 -1 10 10 0 10 0 +79 9 -1 10 10 0 10 0 +80 0 -2 0 0 0 0 0 +81 1 -2 0 0 0 100 0 +82 2 -2 0 0 0 100 0 +83 3 -2 0 0 0 100 0 +84 4 -2 0 0 0 100 0 +85 5 -2 0 0 0 100 0 +86 6 -2 0 0 0 100 0 +87 7 -2 0 0 0 100 0 +88 8 -2 0 0 0 100 0 +89 9 -2 0 0 0 100 0 +90 10 -2 0 0 0 100 0 +91 11 -2 0 0 0 100 0 +92 12 -2 0 0 0 100 0 +93 13 -2 0 0 0 100 0 +94 14 -2 0 0 0 100 0 +95 15 -2 0 0 0 100 0 +96 16 -2 0 0 0 100 0 +97 17 -2 0 0 0 100 0 +98 18 -2 0 0 0 100 0 +99 19 -2 0 0 0 100 0 +100 -10 -2 0 0 -100 0 0 +101 -9 -2 0 0 -100 0 0 +102 -8 -2 0 0 -100 0 0 +103 -7 -2 0 0 -100 0 0 +104 -6 -2 0 0 -100 0 0 +105 -5 -2 0 0 -100 0 0 +106 -4 -2 0 0 -100 0 0 +107 -3 -2 0 0 -100 0 0 +108 -2 -2 0 0 -100 0 0 +109 -1 -2 0 0 -100 0 0 +110 0 -2 0 0 0 0 0 +111 1 -2 0 0 0 100 0 +112 2 -2 0 0 0 100 0 +113 3 -2 0 0 0 100 0 +114 4 -2 0 0 0 100 0 +115 5 -2 0 0 0 100 0 +116 6 -2 0 0 0 100 0 +117 7 -2 0 0 0 100 0 +118 8 -2 0 0 0 100 0 +119 9 -2 0 0 0 100 0 +200 0 0 0 0 0 0 0 +201 0 -1 0 0 0 0 0 +202 0 -2 0 0 0 0 0 +203 0 -3 0 0 0 0 0 +204 0 -4 0 0 0 0 0 +205 0 -5 0 0 0 0 0 +206 0 -6 0 0 0 0 0 +207 0 -7 0 0 0 0 0 +208 0 -8 0 0 0 0 0 +209 0 -9 0 0 0 0 0 +210 0 0 0 0 0 0 0 +211 0 -1 0 0 0 0 0 +212 0 -2 0 0 0 0 0 +213 0 -3 0 0 0 0 0 +214 0 -4 0 0 0 0 0 +215 0 -5 0 0 0 0 0 +216 0 -6 0 0 0 0 0 +217 0 -7 0 0 0 0 0 +218 0 -8 0 0 0 0 0 +219 0 -9 0 0 0 0 0 +300 2 4 2 2 2 2 2 +301 20 4 20 20 20 20 20 +302 -56 4 -56 -56 -56 -56 -56 +303 5 4 5 5 5 5 5 +304 50 4 50 50 50 50 50 +305 -12 4 -12 -12 -12 -12 -12 +id i16 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale) +0 0 0 0 0 0 0 0 +1 1 0 1 1 1 1 1 +2 2 0 2 2 2 2 2 +3 3 0 3 3 3 3 3 +4 4 0 4 4 4 4 4 +5 5 0 5 5 5 5 5 +6 6 0 6 6 6 6 6 +7 7 0 7 7 7 7 7 +8 8 0 8 8 8 8 8 +9 9 0 9 9 9 9 9 +10 10 0 10 10 10 10 10 +11 11 0 11 11 11 11 11 +12 12 0 12 12 12 12 12 +13 13 0 13 13 13 13 13 +14 14 0 14 14 14 14 14 +15 15 0 15 15 15 15 15 +16 16 0 16 16 16 16 16 +17 17 0 17 17 17 17 17 +18 18 0 18 18 18 18 18 +19 19 0 19 19 19 19 19 +20 -10 0 -10 -10 -10 -10 -10 +21 -9 0 -9 -9 -9 -9 -9 +22 -8 0 -8 -8 -8 -8 -8 
+23 -7 0 -7 -7 -7 -7 -7 +24 -6 0 -6 -6 -6 -6 -6 +25 -5 0 -5 -5 -5 -5 -5 +26 -4 0 -4 -4 -4 -4 -4 +27 -3 0 -3 -3 -3 -3 -3 +28 -2 0 -2 -2 -2 -2 -2 +29 -1 0 -1 -1 -1 -1 -1 +30 0 0 0 0 0 0 0 +31 1 0 1 1 1 1 1 +32 2 0 2 2 2 2 2 +33 3 0 3 3 3 3 3 +34 4 0 4 4 4 4 4 +35 5 0 5 5 5 5 5 +36 6 0 6 6 6 6 6 +37 7 0 7 7 7 7 7 +38 8 0 8 8 8 8 8 +39 9 0 9 9 9 9 9 +40 0 -1 0 0 0 0 0 +41 1 -1 0 0 0 10 0 +42 2 -1 0 0 0 10 0 +43 3 -1 0 0 0 10 0 +44 4 -1 0 0 0 10 0 +45 5 -1 10 0 0 10 0 +46 6 -1 10 10 0 10 0 +47 7 -1 10 10 0 10 0 +48 8 -1 10 10 0 10 0 +49 9 -1 10 10 0 10 0 +50 10 -1 10 10 10 10 10 +51 11 -1 10 10 10 20 10 +52 12 -1 10 10 10 20 10 +53 13 -1 10 10 10 20 10 +54 14 -1 10 10 10 20 10 +55 15 -1 20 20 10 20 10 +56 16 -1 20 20 10 20 10 +57 17 -1 20 20 10 20 10 +58 18 -1 20 20 10 20 10 +59 19 -1 20 20 10 20 10 +60 -10 -1 -10 -10 -10 -10 -10 +61 -9 -1 -10 -10 -10 0 0 +62 -8 -1 -10 -10 -10 0 0 +63 -7 -1 -10 -10 -10 0 0 +64 -6 -1 -10 -10 -10 0 0 +65 -5 -1 -10 0 -10 0 0 +66 -4 -1 0 0 -10 0 0 +67 -3 -1 0 0 -10 0 0 +68 -2 -1 0 0 -10 0 0 +69 -1 -1 0 0 -10 0 0 +70 0 -1 0 0 0 0 0 +71 1 -1 0 0 0 10 0 +72 2 -1 0 0 0 10 0 +73 3 -1 0 0 0 10 0 +74 4 -1 0 0 0 10 0 +75 5 -1 10 0 0 10 0 +76 6 -1 10 10 0 10 0 +77 7 -1 10 10 0 10 0 +78 8 -1 10 10 0 10 0 +79 9 -1 10 10 0 10 0 +80 0 -2 0 0 0 0 0 +81 1 -2 0 0 0 100 0 +82 2 -2 0 0 0 100 0 +83 3 -2 0 0 0 100 0 +84 4 -2 0 0 0 100 0 +85 5 -2 0 0 0 100 0 +86 6 -2 0 0 0 100 0 +87 7 -2 0 0 0 100 0 +88 8 -2 0 0 0 100 0 +89 9 -2 0 0 0 100 0 +90 10 -2 0 0 0 100 0 +91 11 -2 0 0 0 100 0 +92 12 -2 0 0 0 100 0 +93 13 -2 0 0 0 100 0 +94 14 -2 0 0 0 100 0 +95 15 -2 0 0 0 100 0 +96 16 -2 0 0 0 100 0 +97 17 -2 0 0 0 100 0 +98 18 -2 0 0 0 100 0 +99 19 -2 0 0 0 100 0 +100 -10 -2 0 0 -100 0 0 +101 -9 -2 0 0 -100 0 0 +102 -8 -2 0 0 -100 0 0 +103 -7 -2 0 0 -100 0 0 +104 -6 -2 0 0 -100 0 0 +105 -5 -2 0 0 -100 0 0 +106 -4 -2 0 0 -100 0 0 +107 -3 -2 0 0 -100 0 0 +108 -2 -2 0 0 -100 0 0 +109 -1 -2 0 0 -100 0 0 +110 0 -2 0 0 0 0 0 +111 1 -2 0 0 0 100 0 +112 2 -2 0 0 0 100 0 +113 3 -2 0 0 0 100 0 +114 4 -2 0 0 0 100 0 +115 5 -2 0 0 0 100 0 +116 6 -2 0 0 0 100 0 +117 7 -2 0 0 0 100 0 +118 8 -2 0 0 0 100 0 +119 9 -2 0 0 0 100 0 +200 0 0 0 0 0 0 0 +201 0 -1 0 0 0 0 0 +202 0 -2 0 0 0 0 0 +203 0 -3 0 0 0 0 0 +204 0 -4 0 0 0 0 0 +205 0 -5 0 0 0 0 0 +206 0 -6 0 0 0 0 0 +207 0 -7 0 0 0 0 0 +208 0 -8 0 0 0 0 0 +209 0 -9 0 0 0 0 0 +210 0 0 0 0 0 0 0 +211 0 -1 0 0 0 0 0 +212 0 -2 0 0 0 0 0 +213 0 -3 0 0 0 0 0 +214 0 -4 0 0 0 0 0 +215 0 -5 0 0 0 0 0 +216 0 -6 0 0 0 0 0 +217 0 -7 0 0 0 0 0 +218 0 -8 0 0 0 0 0 +219 0 -9 0 0 0 0 0 +300 2 4 2 2 2 2 2 +301 20 4 20 20 20 20 20 +302 200 4 200 200 200 200 200 +303 5 4 5 5 5 5 5 +304 50 4 50 50 50 50 50 +305 500 4 500 500 500 500 500 +id i32 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale) +0 0 0 0 0 0 0 0 +1 1 0 1 1 1 1 1 +2 2 0 2 2 2 2 2 +3 3 0 3 3 3 3 3 +4 4 0 4 4 4 4 4 +5 5 0 5 5 5 5 5 +6 6 0 6 6 6 6 6 +7 7 0 7 7 7 7 7 +8 8 0 8 8 8 8 8 +9 9 0 9 9 9 9 9 +10 10 0 10 10 10 10 10 +11 11 0 11 11 11 11 11 +12 12 0 12 12 12 12 12 +13 13 0 13 13 13 13 13 +14 14 0 14 14 14 14 14 +15 15 0 15 15 15 15 15 +16 16 0 16 16 16 16 16 +17 17 0 17 17 17 17 17 +18 18 0 18 18 18 18 18 +19 19 0 19 19 19 19 19 +20 -10 0 -10 -10 -10 -10 -10 +21 -9 0 -9 -9 -9 -9 -9 +22 -8 0 -8 -8 -8 -8 -8 +23 -7 0 -7 -7 -7 -7 -7 +24 -6 0 -6 -6 -6 -6 -6 +25 -5 0 -5 -5 -5 -5 -5 +26 -4 0 -4 -4 -4 -4 -4 +27 -3 0 -3 -3 -3 -3 -3 +28 -2 0 -2 -2 -2 -2 -2 +29 -1 0 -1 -1 -1 -1 -1 +30 0 0 0 0 0 0 0 +31 1 0 1 1 1 1 1 +32 2 0 2 2 2 2 2 +33 3 0 3 3 3 3 3 +34 4 0 4 4 4 4 4 +35 5 0 5 5 
5 5 5 +36 6 0 6 6 6 6 6 +37 7 0 7 7 7 7 7 +38 8 0 8 8 8 8 8 +39 9 0 9 9 9 9 9 +40 0 -1 0 0 0 0 0 +41 1 -1 0 0 0 10 0 +42 2 -1 0 0 0 10 0 +43 3 -1 0 0 0 10 0 +44 4 -1 0 0 0 10 0 +45 5 -1 10 0 0 10 0 +46 6 -1 10 10 0 10 0 +47 7 -1 10 10 0 10 0 +48 8 -1 10 10 0 10 0 +49 9 -1 10 10 0 10 0 +50 10 -1 10 10 10 10 10 +51 11 -1 10 10 10 20 10 +52 12 -1 10 10 10 20 10 +53 13 -1 10 10 10 20 10 +54 14 -1 10 10 10 20 10 +55 15 -1 20 20 10 20 10 +56 16 -1 20 20 10 20 10 +57 17 -1 20 20 10 20 10 +58 18 -1 20 20 10 20 10 +59 19 -1 20 20 10 20 10 +60 -10 -1 -10 -10 -10 -10 -10 +61 -9 -1 -10 -10 -10 0 0 +62 -8 -1 -10 -10 -10 0 0 +63 -7 -1 -10 -10 -10 0 0 +64 -6 -1 -10 -10 -10 0 0 +65 -5 -1 -10 0 -10 0 0 +66 -4 -1 0 0 -10 0 0 +67 -3 -1 0 0 -10 0 0 +68 -2 -1 0 0 -10 0 0 +69 -1 -1 0 0 -10 0 0 +70 0 -1 0 0 0 0 0 +71 1 -1 0 0 0 10 0 +72 2 -1 0 0 0 10 0 +73 3 -1 0 0 0 10 0 +74 4 -1 0 0 0 10 0 +75 5 -1 10 0 0 10 0 +76 6 -1 10 10 0 10 0 +77 7 -1 10 10 0 10 0 +78 8 -1 10 10 0 10 0 +79 9 -1 10 10 0 10 0 +80 0 -2 0 0 0 0 0 +81 1 -2 0 0 0 100 0 +82 2 -2 0 0 0 100 0 +83 3 -2 0 0 0 100 0 +84 4 -2 0 0 0 100 0 +85 5 -2 0 0 0 100 0 +86 6 -2 0 0 0 100 0 +87 7 -2 0 0 0 100 0 +88 8 -2 0 0 0 100 0 +89 9 -2 0 0 0 100 0 +90 10 -2 0 0 0 100 0 +91 11 -2 0 0 0 100 0 +92 12 -2 0 0 0 100 0 +93 13 -2 0 0 0 100 0 +94 14 -2 0 0 0 100 0 +95 15 -2 0 0 0 100 0 +96 16 -2 0 0 0 100 0 +97 17 -2 0 0 0 100 0 +98 18 -2 0 0 0 100 0 +99 19 -2 0 0 0 100 0 +100 -10 -2 0 0 -100 0 0 +101 -9 -2 0 0 -100 0 0 +102 -8 -2 0 0 -100 0 0 +103 -7 -2 0 0 -100 0 0 +104 -6 -2 0 0 -100 0 0 +105 -5 -2 0 0 -100 0 0 +106 -4 -2 0 0 -100 0 0 +107 -3 -2 0 0 -100 0 0 +108 -2 -2 0 0 -100 0 0 +109 -1 -2 0 0 -100 0 0 +110 0 -2 0 0 0 0 0 +111 1 -2 0 0 0 100 0 +112 2 -2 0 0 0 100 0 +113 3 -2 0 0 0 100 0 +114 4 -2 0 0 0 100 0 +115 5 -2 0 0 0 100 0 +116 6 -2 0 0 0 100 0 +117 7 -2 0 0 0 100 0 +118 8 -2 0 0 0 100 0 +119 9 -2 0 0 0 100 0 +200 0 0 0 0 0 0 0 +201 0 -1 0 0 0 0 0 +202 0 -2 0 0 0 0 0 +203 0 -3 0 0 0 0 0 +204 0 -4 0 0 0 0 0 +205 0 -5 0 0 0 0 0 +206 0 -6 0 0 0 0 0 +207 0 -7 0 0 0 0 0 +208 0 -8 0 0 0 0 0 +209 0 -9 0 0 0 0 0 +210 0 0 0 0 0 0 0 +211 0 -1 0 0 0 0 0 +212 0 -2 0 0 0 0 0 +213 0 -3 0 0 0 0 0 +214 0 -4 0 0 0 0 0 +215 0 -5 0 0 0 0 0 +216 0 -6 0 0 0 0 0 +217 0 -7 0 0 0 0 0 +218 0 -8 0 0 0 0 0 +219 0 -9 0 0 0 0 0 +300 2 4 2 2 2 2 2 +301 20 4 20 20 20 20 20 +302 200 4 200 200 200 200 200 +303 5 4 5 5 5 5 5 +304 50 4 50 50 50 50 50 +305 500 4 500 500 500 500 500 +id i64 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale) +0 0 0 0 0 0 0 0 +1 1 0 1 1 1 1 1 +2 2 0 2 2 2 2 2 +3 3 0 3 3 3 3 3 +4 4 0 4 4 4 4 4 +5 5 0 5 5 5 5 5 +6 6 0 6 6 6 6 6 +7 7 0 7 7 7 7 7 +8 8 0 8 8 8 8 8 +9 9 0 9 9 9 9 9 +10 10 0 10 10 10 10 10 +11 11 0 11 11 11 11 11 +12 12 0 12 12 12 12 12 +13 13 0 13 13 13 13 13 +14 14 0 14 14 14 14 14 +15 15 0 15 15 15 15 15 +16 16 0 16 16 16 16 16 +17 17 0 17 17 17 17 17 +18 18 0 18 18 18 18 18 +19 19 0 19 19 19 19 19 +20 -10 0 -10 -10 -10 -10 -10 +21 -9 0 -9 -9 -9 -9 -9 +22 -8 0 -8 -8 -8 -8 -8 +23 -7 0 -7 -7 -7 -7 -7 +24 -6 0 -6 -6 -6 -6 -6 +25 -5 0 -5 -5 -5 -5 -5 +26 -4 0 -4 -4 -4 -4 -4 +27 -3 0 -3 -3 -3 -3 -3 +28 -2 0 -2 -2 -2 -2 -2 +29 -1 0 -1 -1 -1 -1 -1 +30 0 0 0 0 0 0 0 +31 1 0 1 1 1 1 1 +32 2 0 2 2 2 2 2 +33 3 0 3 3 3 3 3 +34 4 0 4 4 4 4 4 +35 5 0 5 5 5 5 5 +36 6 0 6 6 6 6 6 +37 7 0 7 7 7 7 7 +38 8 0 8 8 8 8 8 +39 9 0 9 9 9 9 9 +40 0 -1 0 0 0 0 0 +41 1 -1 0 0 0 10 0 +42 2 -1 0 0 0 10 0 +43 3 -1 0 0 0 10 0 +44 4 -1 0 0 0 10 0 +45 5 -1 10 0 0 10 0 +46 6 -1 10 10 0 10 0 +47 7 -1 10 10 0 10 0 +48 8 -1 10 10 0 10 0 +49 9 
-1 10 10 0 10 0 +50 10 -1 10 10 10 10 10 +51 11 -1 10 10 10 20 10 +52 12 -1 10 10 10 20 10 +53 13 -1 10 10 10 20 10 +54 14 -1 10 10 10 20 10 +55 15 -1 20 20 10 20 10 +56 16 -1 20 20 10 20 10 +57 17 -1 20 20 10 20 10 +58 18 -1 20 20 10 20 10 +59 19 -1 20 20 10 20 10 +60 -10 -1 -10 -10 -10 -10 -10 +61 -9 -1 -10 -10 -10 0 0 +62 -8 -1 -10 -10 -10 0 0 +63 -7 -1 -10 -10 -10 0 0 +64 -6 -1 -10 -10 -10 0 0 +65 -5 -1 -10 0 -10 0 0 +66 -4 -1 0 0 -10 0 0 +67 -3 -1 0 0 -10 0 0 +68 -2 -1 0 0 -10 0 0 +69 -1 -1 0 0 -10 0 0 +70 0 -1 0 0 0 0 0 +71 1 -1 0 0 0 10 0 +72 2 -1 0 0 0 10 0 +73 3 -1 0 0 0 10 0 +74 4 -1 0 0 0 10 0 +75 5 -1 10 0 0 10 0 +76 6 -1 10 10 0 10 0 +77 7 -1 10 10 0 10 0 +78 8 -1 10 10 0 10 0 +79 9 -1 10 10 0 10 0 +80 0 -2 0 0 0 0 0 +81 1 -2 0 0 0 100 0 +82 2 -2 0 0 0 100 0 +83 3 -2 0 0 0 100 0 +84 4 -2 0 0 0 100 0 +85 5 -2 0 0 0 100 0 +86 6 -2 0 0 0 100 0 +87 7 -2 0 0 0 100 0 +88 8 -2 0 0 0 100 0 +89 9 -2 0 0 0 100 0 +90 10 -2 0 0 0 100 0 +91 11 -2 0 0 0 100 0 +92 12 -2 0 0 0 100 0 +93 13 -2 0 0 0 100 0 +94 14 -2 0 0 0 100 0 +95 15 -2 0 0 0 100 0 +96 16 -2 0 0 0 100 0 +97 17 -2 0 0 0 100 0 +98 18 -2 0 0 0 100 0 +99 19 -2 0 0 0 100 0 +100 -10 -2 0 0 -100 0 0 +101 -9 -2 0 0 -100 0 0 +102 -8 -2 0 0 -100 0 0 +103 -7 -2 0 0 -100 0 0 +104 -6 -2 0 0 -100 0 0 +105 -5 -2 0 0 -100 0 0 +106 -4 -2 0 0 -100 0 0 +107 -3 -2 0 0 -100 0 0 +108 -2 -2 0 0 -100 0 0 +109 -1 -2 0 0 -100 0 0 +110 0 -2 0 0 0 0 0 +111 1 -2 0 0 0 100 0 +112 2 -2 0 0 0 100 0 +113 3 -2 0 0 0 100 0 +114 4 -2 0 0 0 100 0 +115 5 -2 0 0 0 100 0 +116 6 -2 0 0 0 100 0 +117 7 -2 0 0 0 100 0 +118 8 -2 0 0 0 100 0 +119 9 -2 0 0 0 100 0 +200 0 0 0 0 0 0 0 +201 0 -1 0 0 0 0 0 +202 0 -2 0 0 0 0 0 +203 0 -3 0 0 0 0 0 +204 0 -4 0 0 0 0 0 +205 0 -5 0 0 0 0 0 +206 0 -6 0 0 0 0 0 +207 0 -7 0 0 0 0 0 +208 0 -8 0 0 0 0 0 +209 0 -9 0 0 0 0 0 +210 0 0 0 0 0 0 0 +211 0 -1 0 0 0 0 0 +212 0 -2 0 0 0 0 0 +213 0 -3 0 0 0 0 0 +214 0 -4 0 0 0 0 0 +215 0 -5 0 0 0 0 0 +216 0 -6 0 0 0 0 0 +217 0 -7 0 0 0 0 0 +218 0 -8 0 0 0 0 0 +219 0 -9 0 0 0 0 0 +300 2 4 2 2 2 2 2 +301 20 4 20 20 20 20 20 +302 200 4 200 200 200 200 200 +303 5 4 5 5 5 5 5 +304 50 4 50 50 50 50 50 +305 500 4 500 500 500 500 500 +id f32 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale) +0 0 0 0 0 0 0 0 +1 1 0 1 1 1 1 1 +2 2 0 2 2 2 2 2 +3 3 0 3 3 3 3 3 +4 4 0 4 4 4 4 4 +5 5 0 5 5 5 5 5 +6 6 0 6 6 6 6 6 +7 7 0 7 7 7 7 7 +8 8 0 8 8 8 8 8 +9 9 0 9 9 9 9 9 +10 10 0 10 10 10 10 10 +11 11 0 11 11 11 11 11 +12 12 0 12 12 12 12 12 +13 13 0 13 13 13 13 13 +14 14 0 14 14 14 14 14 +15 15 0 15 15 15 15 15 +16 16 0 16 16 16 16 16 +17 17 0 17 17 17 17 17 +18 18 0 18 18 18 18 18 +19 19 0 19 19 19 19 19 +20 -1 0 -1 -1 -1 -1 -1 +21 -0.9 0 -1 -1 -1 -0 -0 +22 -0.8 0 -1 -1 -1 -0 -0 +23 -0.7 0 -1 -1 -1 -0 -0 +24 -0.6 0 -1 -1 -1 -0 -0 +25 -0.5 0 -0 -0 -1 -0 -0 +26 -0.4 0 -0 -0 -1 -0 -0 +27 -0.3 0 -0 -0 -1 -0 -0 +28 -0.2 0 -0 -0 -1 -0 -0 +29 -0.1 0 -0 -0 -1 -0 -0 +30 0 0 0 0 0 0 0 +31 0.1 0 0 0 0 1 0 +32 0.2 0 0 0 0 1 0 +33 0.3 0 0 0 0 1 0 +34 0.4 0 0 0 0 1 0 +35 0.5 0 0 0 0 1 0 +36 0.6 0 1 1 0 1 0 +37 0.7 0 1 1 0 1 0 +38 0.8 0 1 1 0 1 0 +39 0.9 0 1 1 0 1 0 +40 0 -1 0 0 0 0 0 +41 1 -1 0 0 0 10 0 +42 2 -1 0 0 0 10 0 +43 3 -1 0 0 0 10 0 +44 4 -1 0 0 0 10 0 +45 5 -1 0 0 0 10 0 +46 6 -1 10 10 0 10 0 +47 7 -1 10 10 0 10 0 +48 8 -1 10 10 0 10 0 +49 9 -1 10 10 0 10 0 +50 10 -1 10 10 10 10 10 +51 11 -1 10 10 10 20 10 +52 12 -1 10 10 10 20 10 +53 13 -1 10 10 10 20 10 +54 14 -1 10 10 10 20 10 +55 15 -1 20 20 10 20 10 +56 16 -1 20 20 10 20 10 +57 17 -1 20 20 10 20 10 +58 18 -1 20 20 10 20 10 
+59 19 -1 20 20 10 20 10 +60 -1 -1 -0 -0 -10 -0 -0 +61 -0.9 -1 -0 -0 -10 -0 -0 +62 -0.8 -1 -0 -0 -10 -0 -0 +63 -0.7 -1 -0 -0 -10 -0 -0 +64 -0.6 -1 -0 -0 -10 -0 -0 +65 -0.5 -1 -0 -0 -10 -0 -0 +66 -0.4 -1 -0 -0 -10 -0 -0 +67 -0.3 -1 -0 -0 -10 -0 -0 +68 -0.2 -1 -0 -0 -10 -0 -0 +69 -0.1 -1 -0 -0 -10 -0 -0 +70 0 -1 0 0 0 0 0 +71 0.1 -1 0 0 0 10 0 +72 0.2 -1 0 0 0 10 0 +73 0.3 -1 0 0 0 10 0 +74 0.4 -1 0 0 0 10 0 +75 0.5 -1 0 0 0 10 0 +76 0.6 -1 0 0 0 10 0 +77 0.7 -1 0 0 0 10 0 +78 0.8 -1 0 0 0 10 0 +79 0.9 -1 0 0 0 10 0 +80 0 -2 0 0 0 0 0 +81 1 -2 0 0 0 100 0 +82 2 -2 0 0 0 100 0 +83 3 -2 0 0 0 100 0 +84 4 -2 0 0 0 100 0 +85 5 -2 0 0 0 100 0 +86 6 -2 0 0 0 100 0 +87 7 -2 0 0 0 100 0 +88 8 -2 0 0 0 100 0 +89 9 -2 0 0 0 100 0 +90 10 -2 0 0 0 100 0 +91 11 -2 0 0 0 100 0 +92 12 -2 0 0 0 100 0 +93 13 -2 0 0 0 100 0 +94 14 -2 0 0 0 100 0 +95 15 -2 0 0 0 100 0 +96 16 -2 0 0 0 100 0 +97 17 -2 0 0 0 100 0 +98 18 -2 0 0 0 100 0 +99 19 -2 0 0 0 100 0 +100 -1 -2 -0 -0 -100 -0 -0 +101 -0.9 -2 -0 -0 -100 -0 -0 +102 -0.8 -2 -0 -0 -100 -0 -0 +103 -0.7 -2 -0 -0 -100 -0 -0 +104 -0.6 -2 -0 -0 -100 -0 -0 +105 -0.5 -2 -0 -0 -100 -0 -0 +106 -0.4 -2 -0 -0 -100 -0 -0 +107 -0.3 -2 -0 -0 -100 -0 -0 +108 -0.2 -2 -0 -0 -100 -0 -0 +109 -0.1 -2 -0 -0 -100 -0 -0 +110 0 -2 0 0 0 0 0 +111 0.1 -2 0 0 0 100 0 +112 0.2 -2 0 0 0 100 0 +113 0.3 -2 0 0 0 100 0 +114 0.4 -2 0 0 0 100 0 +115 0.5 -2 0 0 0 100 0 +116 0.6 -2 0 0 0 100 0 +117 0.7 -2 0 0 0 100 0 +118 0.8 -2 0 0 0 100 0 +119 0.9 -2 0 0 0 100 0 +200 12345.679 0 12346 12346 12345 12346 12345 +201 12345.679 -1 12350 12350 12340 12350 12340 +202 12345.679 -2 12300 12300 12300 12400 12300 +203 12345.679 -3 12000 12000 12000 13000 12000 +204 12345.679 -4 10000 10000 10000 20000 10000 +205 12345.679 -5 0 0 0 100000 0 +206 12345.679 -6 0 0 0 1000000 0 +207 12345.679 -7 0 0 0 10000000 0 +208 12345.679 -8 0 0 0 100000000 0 +209 12345.679 -9 0 0 0 1000000000 0 +210 12345.679 0 12346 12346 12345 12346 12345 +211 12345.679 -1 12350 12350 12340 12350 12340 +212 12345.679 -2 12300 12300 12300 12400 12300 +213 12345.679 -3 12000 12000 12000 13000 12000 +214 12345.679 -4 10000 10000 10000 20000 10000 +215 12345.679 -5 0 0 0 100000 0 +216 12345.679 -6 0 0 0 1000000 0 +217 12345.679 -7 0 0 0 10000000 0 +218 12345.679 -8 0 0 0 100000000 0 +219 12345.679 -9 0 0 0 1000000000 0 +300 2 4 2 2 2 2 2 +301 20 4 20 20 20 20 20 +302 200 4 200 200 200 200 200 +303 5 4 5 5 5 5 5 +304 50 4 50 50 50 50 50 +305 500 4 500 500 500 500 500 +id f64 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale) +0 0 0 0 0 0 0 0 +1 1 0 1 1 1 1 1 +2 2 0 2 2 2 2 2 +3 3 0 3 3 3 3 3 +4 4 0 4 4 4 4 4 +5 5 0 5 5 5 5 5 +6 6 0 6 6 6 6 6 +7 7 0 7 7 7 7 7 +8 8 0 8 8 8 8 8 +9 9 0 9 9 9 9 9 +10 10 0 10 10 10 10 10 +11 11 0 11 11 11 11 11 +12 12 0 12 12 12 12 12 +13 13 0 13 13 13 13 13 +14 14 0 14 14 14 14 14 +15 15 0 15 15 15 15 15 +16 16 0 16 16 16 16 16 +17 17 0 17 17 17 17 17 +18 18 0 18 18 18 18 18 +19 19 0 19 19 19 19 19 +20 -1 0 -1 -1 -1 -1 -1 +21 -0.9 0 -1 -1 -1 -0 -0 +22 -0.8 0 -1 -1 -1 -0 -0 +23 -0.7 0 -1 -1 -1 -0 -0 +24 -0.6 0 -1 -1 -1 -0 -0 +25 -0.5 0 -0 -0 -1 -0 -0 +26 -0.4 0 -0 -0 -1 -0 -0 +27 -0.3 0 -0 -0 -1 -0 -0 +28 -0.2 0 -0 -0 -1 -0 -0 +29 -0.1 0 -0 -0 -1 -0 -0 +30 0 0 0 0 0 0 0 +31 0.1 0 0 0 0 1 0 +32 0.2 0 0 0 0 1 0 +33 0.3 0 0 0 0 1 0 +34 0.4 0 0 0 0 1 0 +35 0.5 0 0 0 0 1 0 +36 0.6 0 1 1 0 1 0 +37 0.7 0 1 1 0 1 0 +38 0.8 0 1 1 0 1 0 +39 0.9 0 1 1 0 1 0 +40 0 -1 0 0 0 0 0 +41 1 -1 0 0 0 10 0 +42 2 -1 0 0 0 10 0 +43 3 -1 0 0 0 10 0 +44 4 -1 0 0 0 10 0 +45 5 -1 0 0 0 10 0 +46 6 -1 
10 10 0 10 0 +47 7 -1 10 10 0 10 0 +48 8 -1 10 10 0 10 0 +49 9 -1 10 10 0 10 0 +50 10 -1 10 10 10 10 10 +51 11 -1 10 10 10 20 10 +52 12 -1 10 10 10 20 10 +53 13 -1 10 10 10 20 10 +54 14 -1 10 10 10 20 10 +55 15 -1 20 20 10 20 10 +56 16 -1 20 20 10 20 10 +57 17 -1 20 20 10 20 10 +58 18 -1 20 20 10 20 10 +59 19 -1 20 20 10 20 10 +60 -1 -1 -0 -0 -10 -0 -0 +61 -0.9 -1 -0 -0 -10 -0 -0 +62 -0.8 -1 -0 -0 -10 -0 -0 +63 -0.7 -1 -0 -0 -10 -0 -0 +64 -0.6 -1 -0 -0 -10 -0 -0 +65 -0.5 -1 -0 -0 -10 -0 -0 +66 -0.4 -1 -0 -0 -10 -0 -0 +67 -0.3 -1 -0 -0 -10 -0 -0 +68 -0.2 -1 -0 -0 -10 -0 -0 +69 -0.1 -1 -0 -0 -10 -0 -0 +70 0 -1 0 0 0 0 0 +71 0.1 -1 0 0 0 10 0 +72 0.2 -1 0 0 0 10 0 +73 0.3 -1 0 0 0 10 0 +74 0.4 -1 0 0 0 10 0 +75 0.5 -1 0 0 0 10 0 +76 0.6 -1 0 0 0 10 0 +77 0.7 -1 0 0 0 10 0 +78 0.8 -1 0 0 0 10 0 +79 0.9 -1 0 0 0 10 0 +80 0 -2 0 0 0 0 0 +81 1 -2 0 0 0 100 0 +82 2 -2 0 0 0 100 0 +83 3 -2 0 0 0 100 0 +84 4 -2 0 0 0 100 0 +85 5 -2 0 0 0 100 0 +86 6 -2 0 0 0 100 0 +87 7 -2 0 0 0 100 0 +88 8 -2 0 0 0 100 0 +89 9 -2 0 0 0 100 0 +90 10 -2 0 0 0 100 0 +91 11 -2 0 0 0 100 0 +92 12 -2 0 0 0 100 0 +93 13 -2 0 0 0 100 0 +94 14 -2 0 0 0 100 0 +95 15 -2 0 0 0 100 0 +96 16 -2 0 0 0 100 0 +97 17 -2 0 0 0 100 0 +98 18 -2 0 0 0 100 0 +99 19 -2 0 0 0 100 0 +100 -1 -2 -0 -0 -100 -0 -0 +101 -0.9 -2 -0 -0 -100 -0 -0 +102 -0.8 -2 -0 -0 -100 -0 -0 +103 -0.7 -2 -0 -0 -100 -0 -0 +104 -0.6 -2 -0 -0 -100 -0 -0 +105 -0.5 -2 -0 -0 -100 -0 -0 +106 -0.4 -2 -0 -0 -100 -0 -0 +107 -0.3 -2 -0 -0 -100 -0 -0 +108 -0.2 -2 -0 -0 -100 -0 -0 +109 -0.1 -2 -0 -0 -100 -0 -0 +110 0 -2 0 0 0 0 0 +111 0.1 -2 0 0 0 100 0 +112 0.2 -2 0 0 0 100 0 +113 0.3 -2 0 0 0 100 0 +114 0.4 -2 0 0 0 100 0 +115 0.5 -2 0 0 0 100 0 +116 0.6 -2 0 0 0 100 0 +117 0.7 -2 0 0 0 100 0 +118 0.8 -2 0 0 0 100 0 +119 0.9 -2 0 0 0 100 0 +200 12345.6789 0 12346 12346 12345 12346 12345 +201 12345.6789 -1 12350 12350 12340 12350 12340 +202 12345.6789 -2 12300 12300 12300 12400 12300 +203 12345.6789 -3 12000 12000 12000 13000 12000 +204 12345.6789 -4 10000 10000 10000 20000 10000 +205 12345.6789 -5 0 0 0 100000 0 +206 12345.6789 -6 0 0 0 1000000 0 +207 12345.6789 -7 0 0 0 10000000 0 +208 12345.6789 -8 0 0 0 100000000 0 +209 12345.6789 -9 0 0 0 1000000000 0 +210 12345.6789 0 12346 12346 12345 12346 12345 +211 12345.6789 -1 12350 12350 12340 12350 12340 +212 12345.6789 -2 12300 12300 12300 12400 12300 +213 12345.6789 -3 12000 12000 12000 13000 12000 +214 12345.6789 -4 10000 10000 10000 20000 10000 +215 12345.6789 -5 0 0 0 100000 0 +216 12345.6789 -6 0 0 0 1000000 0 +217 12345.6789 -7 0 0 0 10000000 0 +218 12345.6789 -8 0 0 0 100000000 0 +219 12345.6789 -9 0 0 0 1000000000 0 +300 2 4 2 2 2 2 2 +301 20 4 20 20 20 20 20 +302 200 4 200 200 200 200 200 +303 5 4 5 5 5 5 5 +304 50 4 50 50 50 50 50 +305 500 4 500 500 500 500 500 +CHECKPOINT2 +1 42.42 42.42 42.42 42.42 +2 0.0084 0.0084 0.0084 0.0084 +3 6.513 6.513 6.513 6.513 +4 115.6011 115.6011 115.6011 115.6011 +5 1.6029 1.6029 1.6029 1.6029 +6 -0.9999 -0.9999 -0.9999 -0.9999 +7 0.0084 0.0084 0.0084 0.0084 +8 3.7476 3.7476 3.7476 3.7476 +9 5.4066 5.4066 5.4066 5.4066 +10 1.6275 1.6275 1.6275 1.6275 +1 +1 +1 diff --git a/tests/queries/0_stateless/03165_round_scale_as_column.sql b/tests/queries/0_stateless/03165_round_scale_as_column.sql new file mode 100644 index 00000000000..229f705808d --- /dev/null +++ b/tests/queries/0_stateless/03165_round_scale_as_column.sql @@ -0,0 +1,123 @@ +-- Tests functions round(), roundBankers(), floor(), ceil() and trunc() with default 'scale' argument +SELECT toUInt8(number) AS x, round(x), 
roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20;
+SELECT toUInt16(number) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20;
+SELECT toUInt32(number) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20;
+SELECT toUInt64(number) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20;
+SELECT toInt8(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20;
+SELECT toInt16(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20;
+SELECT toInt32(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20;
+SELECT toInt64(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20;
+
+SELECT toFloat32(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20;
+SELECT toFloat64(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20;
+SELECT toFloat32((number - 10) / 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20;
+SELECT toFloat64((number - 10) / 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20;
+
+-- Functions round(), roundBankers(), floor(), ceil() and trunc() accept non-const 'scale' arguments
+SELECT toFloat32((number - 10) / 10) AS x, round(x, materialize(1)), roundBankers(x, materialize(1)), floor(x, materialize(1)), ceil(x, materialize(1)), trunc(x, materialize(1)) FROM system.numbers LIMIT 20;
+SELECT toFloat64((number - 10) / 10) AS x, round(x, materialize(1)), roundBankers(x, materialize(1)), floor(x, materialize(1)), ceil(x, materialize(1)), trunc(x, materialize(1)) FROM system.numbers LIMIT 20;
+SELECT toUInt8(number) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20;
+SELECT toUInt16(number) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20;
+SELECT toUInt32(number) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20;
+SELECT toUInt64(number) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20;
+
+SELECT toInt8(number - 10) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20;
+SELECT toInt16(number - 10) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20;
+SELECT toInt32(number - 10) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20;
+SELECT toInt64(number - 10) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20;
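+
+-- Illustrative sketch (not part of the patch's test queries; kept entirely in comments so
+-- the reference output above is unaffected, and table 't' is hypothetical): the point of
+-- this change is that 'scale' no longer has to be a constant, so each row can carry its
+-- own rounding scale:
+--     CREATE TABLE t (x Float64, s Int8) ENGINE = Memory;
+--     INSERT INTO t VALUES (123.456, 1), (123.456, -1), (123.456, 2);
+--     SELECT x, s, round(x, s) FROM t;  -- yields 123.5, 120 and 123.46 respectively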
+SELECT toFloat32(number - 10) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20;
+SELECT toFloat64(number - 10) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20;
+
+SELECT toUInt8(number) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20;
+SELECT toUInt16(number) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20;
+SELECT toUInt32(number) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20;
+SELECT toUInt64(number) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20;
+SELECT toInt8(number - 10) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20;
+SELECT toInt16(number - 10) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20;
+SELECT toInt32(number - 10) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20;
+SELECT toInt64(number - 10) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20;
+SELECT toFloat32(number - 10) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20;
+SELECT toFloat64(number - 10) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20;
+
+SELECT toString('CHECKPOINT1');
+
+DROP TABLE IF EXISTS tab;
+
+CREATE TABLE tab (
+    id Int32,
+    scale Int16,
+    u8 UInt8, u16 UInt16, u32 UInt32, u64 UInt64,
+    i8 Int8, i16 Int16, i32 Int32, i64 Int64,
+    f32 Float32, f64 Float64
+) ENGINE = Memory;
+
+INSERT INTO tab SELECT number , 0, number, number, number, number, number, number, number, number, number, number, FROM system.numbers LIMIT 20;
+INSERT INTO tab SELECT number+20 , 0, number+10, number+10, number+10, number+10, number-10, number-10, number-10, number-10, (toFloat32(number)-10)/10, (toFloat64(number)-10)/10, FROM system.numbers LIMIT 20;
+INSERT INTO tab SELECT number+40 , -1, number, number, number, number, number, number, number, number, number, number, FROM system.numbers LIMIT 20;
+INSERT INTO tab SELECT number+60 , -1, number+10, number+10, number+10, number+10, number-10, number-10, number-10, number-10, (toFloat32(number)-10)/10, (toFloat64(number)-10)/10, FROM system.numbers LIMIT 20;
+INSERT INTO tab SELECT number+80 , -2, number, number, number, number, number, number, number, number, number, number, FROM system.numbers LIMIT 20;
+INSERT INTO tab SELECT number+100, -2, number+10, number+10, number+10, number+10, number-10, number-10, number-10, number-10, (toFloat32(number)-10)/10, (toFloat64(number)-10)/10, FROM system.numbers LIMIT 20;
+
+INSERT INTO tab SELECT number+200, -number, 0, 0, 0, 0, 0, 0, 0, 0, 12345.6789, 12345.6789, FROM system.numbers LIMIT 10;
+INSERT INTO tab SELECT number+210, -number, 0, 0, 0, 0, 0, 0, 0, 0, 12345.6789, 12345.6789, FROM system.numbers LIMIT 10;
+
+INSERT INTO tab VALUES (300, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0);
+INSERT INTO tab VALUES (301, 4, 20, 20, 20, 20, 20, 20, 20, 20, 20.0, 20.0);
+INSERT INTO tab VALUES (302, 4, 200, 200, 200, 200, 200, 200, 200, 200, 200.0, 200.0);
+INSERT INTO tab VALUES (303, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0);
+INSERT INTO tab VALUES (304, 4, 50, 50, 50, 50, 50, 50, 50, 50, 50.0, 50.0);
+INSERT INTO tab VALUES (305, 4, 500, 500, 500, 500, 500, 500, 500, 500, 500.0, 500.0);
+
+SELECT toString('id u8 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)');
+SELECT id, u8 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id;
+SELECT toString('id u16 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)');
+SELECT id, u16 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id;
+SELECT toString('id u32 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)');
+SELECT id, u32 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id;
+SELECT toString('id u64 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)');
+SELECT id, u64 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id;
+SELECT toString('id i8 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)');
+SELECT id, i8 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id;
+SELECT toString('id i16 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)');
+SELECT id, i16 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id;
+SELECT toString('id i32 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)');
+SELECT id, i32 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id;
+SELECT toString('id i64 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)');
+SELECT id, i64 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id;
+SELECT toString('id f32 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)');
+SELECT id, f32 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id;
+SELECT toString('id f64 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)');
+SELECT id, f64 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale)
FROM tab ORDER BY id;
+
+DROP TABLE tab;
+--
+SELECT toString('CHECKPOINT2');
+
+DROP TABLE IF EXISTS tab;
+
+CREATE TABLE tab (
+    id Int32,
+    scale Int16,
+    d32 Decimal32(4), d64 Decimal64(4), d128 Decimal128(4), d256 Decimal256(4)
+) ENGINE = Memory;
+
+INSERT INTO tab VALUES (1, 6, toDecimal32('42.42', 4), toDecimal64('42.42', 4), toDecimal128('42.42', 4), toDecimal256('42.42', 4));
+INSERT INTO tab SELECT 2 , 6, cos(d32), cos(d64), cos(d128), cos(d256) FROM tab WHERE id = 1;
+INSERT INTO tab SELECT 3 , 6, sqrt(d32), sqrt(d64), sqrt(d128), sqrt(d256) FROM tab WHERE id = 1;
+INSERT INTO tab SELECT 4 , 6, lgamma(d32), lgamma(d64), lgamma(d128), lgamma(d256) FROM tab WHERE id = 1;
+INSERT INTO tab SELECT 5 , 6, tgamma(d32)/1e50, tgamma(d64)/1e50, tgamma(d128)/1e50, tgamma(d256)/1e50 FROM tab WHERE id = 1;
+INSERT INTO tab SELECT 6 , 8, sin(d32), sin(d64), sin(d128), sin(d256) FROM tab WHERE id = 1;
+INSERT INTO tab SELECT 7 , 8, cos(d32), cos(d64), cos(d128), cos(d256) FROM tab WHERE id = 1;
+INSERT INTO tab SELECT 8 , 8, log(d32), log(d64), log(d128), log(d256) FROM tab WHERE id = 1;
+INSERT INTO tab SELECT 9 , 8, log2(d32), log2(d64), log2(d128), log2(d256) FROM tab WHERE id = 1;
+INSERT INTO tab SELECT 10, 8, log10(d32), log10(d64), log10(d128), log10(d256) FROM tab WHERE id = 1;
+
+SELECT id, round(d32, scale), round(d64, scale), round(d128, scale), round(d256, scale) FROM tab ORDER BY id;
+
+DROP TABLE tab;
+
+SELECT round(1, 1);
+SELECT round(materialize(1), materialize(1));
+SELECT round(1, materialize(1)); --{serverError ILLEGAL_COLUMN}
+SELECT round(materialize(1), 1);
+SELECT materialize(10.1) AS x, ceil(x, toUInt256(123)); --{serverError ILLEGAL_TYPE_OF_ARGUMENT}
diff --git a/tests/queries/0_stateless/03165_storage_merge_view_prewhere.reference b/tests/queries/0_stateless/03165_storage_merge_view_prewhere.reference
new file mode 100644
index 00000000000..4cd7f2cb141
--- /dev/null
+++ b/tests/queries/0_stateless/03165_storage_merge_view_prewhere.reference
@@ -0,0 +1,8 @@
+a1451105-722e-4fe7-bfaa-65ad2ae249c2 whatever
+a1451105-722e-4fe7-bfaa-65ad2ae249c2 whatever
+a1451105-722e-4fe7-bfaa-65ad2ae249c2 whatever
+a1451105-722e-4fe7-bfaa-65ad2ae249c2 whatever
+a1451105-722e-4fe7-bfaa-65ad2ae249c2 whatever
+a1451105-722e-4fe7-bfaa-65ad2ae249c2 whatever
+a1451105-722e-4fe7-bfaa-65ad2ae249c2 whatever
+a1451105-722e-4fe7-bfaa-65ad2ae249c2 whatever
diff --git a/tests/queries/0_stateless/03165_storage_merge_view_prewhere.sql b/tests/queries/0_stateless/03165_storage_merge_view_prewhere.sql
new file mode 100644
index 00000000000..97651d1b0fd
--- /dev/null
+++ b/tests/queries/0_stateless/03165_storage_merge_view_prewhere.sql
@@ -0,0 +1,41 @@
+-- Tags: distributed
+
+DROP TABLE IF EXISTS ids;
+DROP TABLE IF EXISTS data;
+DROP TABLE IF EXISTS data2;
+
+CREATE TABLE ids (id UUID, whatever String) Engine=MergeTree ORDER BY tuple();
+INSERT INTO ids VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', 'whatever');
+
+CREATE TABLE data (id UUID, event_time DateTime, status String) Engine=MergeTree ORDER BY tuple();
+INSERT INTO data VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', '2000-01-01', 'CREATED');
+
+CREATE TABLE data2 (id UUID, event_time DateTime, status String) Engine=MergeTree ORDER BY tuple();
+INSERT INTO data2 VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', '2000-01-02', 'CREATED');
+
+SELECT
+    id,
+    whatever
+FROM ids AS l
+INNER JOIN merge(currentDatabase(), 'data*') AS s ON l.id = s.id
+WHERE (status IN ['CREATED', 'CREATING'])
+ORDER BY event_time DESC
+;
+
+SELECT
+    id,
+    whatever
+FROM ids AS l
+INNER JOIN clusterAllReplicas(test_cluster_two_shards, merge(currentDatabase(), 'data*')) AS s ON l.id = s.id
+WHERE (status IN ['CREATED', 'CREATING'])
+ORDER BY event_time DESC
+;
+
+SELECT
+    id,
+    whatever
+FROM ids AS l
+INNER JOIN view(SELECT * FROM merge(currentDatabase(), 'data*')) AS s ON l.id = s.id
+WHERE (status IN ['CREATED', 'CREATING'])
+ORDER BY event_time DESC
+;
diff --git a/tests/queries/0_stateless/03165_string_functions_with_token_text_indexes.sql b/tests/queries/0_stateless/03165_string_functions_with_token_text_indexes.sql
index fee30af0245..bae98bd1eb6
--- a/tests/queries/0_stateless/03165_string_functions_with_token_text_indexes.sql
+++ b/tests/queries/0_stateless/03165_string_functions_with_token_text_indexes.sql
@@ -127,7 +127,9 @@ CREATE TABLE 03165_token_ft
     INDEX idx_message message TYPE full_text() GRANULARITY 1
 ) ENGINE = MergeTree
-ORDER BY id;
+ORDER BY id
+-- Full text index works only with full parts.
+SETTINGS min_bytes_for_full_part_storage=0;
 
 INSERT INTO 03165_token_ft VALUES(1, 'Service is not ready');
diff --git a/tests/queries/0_stateless/03166_mv_prewhere_duplicating_name_bug.reference b/tests/queries/0_stateless/03166_mv_prewhere_duplicating_name_bug.reference
new file mode 100644
index 00000000000..b50fdcee209
--- /dev/null
+++ b/tests/queries/0_stateless/03166_mv_prewhere_duplicating_name_bug.reference
@@ -0,0 +1,2 @@
+a b
+a b
diff --git a/tests/queries/0_stateless/03166_mv_prewhere_duplicating_name_bug.sql b/tests/queries/0_stateless/03166_mv_prewhere_duplicating_name_bug.sql
new file mode 100644
index 00000000000..e32d23920dd
--- /dev/null
+++ b/tests/queries/0_stateless/03166_mv_prewhere_duplicating_name_bug.sql
@@ -0,0 +1,7 @@
+create table src (x Int64) engine = Log;
+create table dst (s String, lc LowCardinality(String)) engine MergeTree order by s;
+create materialized view mv to dst (s String, lc String) as select 'a' as s, toLowCardinality('b') as lc from src;
+insert into src values (1);
+
+select s, lc from mv where not ignore(lc) settings allow_experimental_analyzer=0;
+select s, lc from mv where not ignore(lc) settings allow_experimental_analyzer=1;
diff --git a/tests/queries/0_stateless/03166_optimize_row_order_during_insert.sql b/tests/queries/0_stateless/03166_optimize_row_order_during_insert.sql
index bb2f5e94d05..5fc71598e47
--- a/tests/queries/0_stateless/03166_optimize_row_order_during_insert.sql
+++ b/tests/queries/0_stateless/03166_optimize_row_order_during_insert.sql
@@ -14,7 +14,7 @@ CREATE TABLE tab (
     event Int8
 ) ENGINE = MergeTree
 ORDER BY name
-SETTINGS allow_experimental_optimized_row_order = true;
+SETTINGS optimize_row_order = true;
 
 INSERT INTO tab VALUES ('Igor', 3), ('Egor', 1), ('Egor', 2), ('Igor', 2), ('Igor', 1);
 SELECT * FROM tab ORDER BY name SETTINGS max_threads=1;
@@ -34,7 +34,7 @@ CREATE TABLE tab (
     flag String
 ) ENGINE = MergeTree
 ORDER BY ()
-SETTINGS allow_experimental_optimized_row_order = True;
+SETTINGS optimize_row_order = True;
 
 INSERT INTO tab VALUES ('Bob', 4, 100, '1'), ('Nikita', 2, 54, '1'), ('Nikita', 1, 228, '1'), ('Alex', 4, 83, '1'), ('Alex', 4, 134, '1'), ('Alex', 1, 65, '0'), ('Alex', 4, 134, '1'), ('Bob', 2, 53, '0'), ('Alex', 4, 83, '0'), ('Alex', 1, 63, '1'), ('Bob', 2, 53, '1'), ('Alex', 4, 192, '1'), ('Alex', 2, 128, '1'), ('Nikita', 2, 148, '0'), ('Bob', 4, 177, '0'), ('Nikita', 1, 173, '0'), ('Alex', 1, 239, '0'), ('Alex', 1, 63, '0'), ('Alex', 2, 224, '1'), ('Bob', 4, 177, '0'), ('Alex', 2, 128, '1'), ('Alex', 4, 134, '0'), ('Alex', 4, 83, '1'), ('Bob', 4, 100, '0'), ('Nikita', 2, 54, '1'), ('Alex', 1, 239, '1'), ('Bob', 2, 187, '1'), ('Alex', 1, 65, '1'), ('Bob', 2, 53, '1'), ('Alex', 2, 224, '0'), ('Alex', 4, 192, '0'), ('Nikita', 1, 173, '1'), ('Nikita', 2, 148, '1'), ('Bob', 2, 187, '1'), ('Nikita', 2, 208, '1'), ('Nikita', 2, 208, '0'), ('Nikita', 1, 228, '0'), ('Nikita', 2, 148, '0');
 SELECT * FROM tab SETTINGS max_threads=1;
@@ -58,7 +58,7 @@ CREATE TABLE tab (
     flag Nullable(Int32)
 ) ENGINE = MergeTree
 ORDER BY (flag, money)
-SETTINGS allow_experimental_optimized_row_order = True, allow_nullable_key = True;
+SETTINGS optimize_row_order = True, allow_nullable_key = True;
 
 INSERT INTO tab VALUES ('AB', 0, 42, Null), ('AB', 0, 42, Null), ('A', 1, 42, Null), ('AB', 1, 9.81, 0), ('B', 0, 42, Null), ('B', -1, 3.14, Null), ('B', 1, 2.7, 1), ('B', 0, 42, 1), ('A', 1, 42, 1), ('B', 1, 42, Null), ('B', 0, 2.7, 1), ('A', 0, 2.7, 1), ('B', 2, 3.14, Null), ('A', 0, 3.14, Null), ('A', 1, 2.7, 1), ('A', 1, 42, Null);
 SELECT * FROM tab ORDER BY (flag, money) SETTINGS max_threads=1;
@@ -89,7 +89,7 @@ CREATE TABLE tab (
     tuple_column Tuple(UInt256)
 ) ENGINE = MergeTree()
 ORDER BY (fixed_str, event_date)
-SETTINGS allow_experimental_optimized_row_order = True;
+SETTINGS optimize_row_order = True;
 
 INSERT INTO tab VALUES ('A', '2020-01-01', [0.0, 1.1], 10, 'some string', {'key':'value'}, (123)), ('A', '2020-01-01', [0.0, 1.1], NULL, 'example', {}, (26)), ('A', '2020-01-01', [2.2, 1.1], 1, 'some other string', {'key2':'value2'}, (5)), ('A', '2020-01-02', [0.0, 1.1], 10, 'some string', {'key':'value'}, (123)), ('A', '2020-01-02', [0.0, 2.2], 10, 'example', {}, (26)), ('A', '2020-01-02', [2.2, 1.1], 1, 'some other string', {'key2':'value2'}, (5)), ('B', '2020-01-04', [0.0, 1.1], 10, 'some string', {'key':'value'}, (123)), ('B', '2020-01-04', [0.0, 2.2], Null, 'example', {}, (26)), ('B', '2020-01-04', [2.2, 1.1], 1, 'some string', {'key2':'value2'}, (5)), ('B', '2020-01-05', [0.0, 1.1], 10, 'some string', {'key':'value'}, (123)), ('B', '2020-01-05', [0.0, 2.2], Null, 'example', {}, (26)), ('B', '2020-01-05', [2.2, 1.1], 1, 'some other string', {'key':'value'}, (5)), ('C', '2020-01-04', [0.0, 1.1], 10, 'some string', {'key':'value'}, (5)), ('C', '2020-01-04', [0.0, 2.2], Null, 'example', {}, (26)), ('C', '2020-01-04', [2.2, 1.1], 1, 'some other string', {'key2':'value2'}, (5));
diff --git a/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_1.reference b/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_1.reference
new file mode 100644
index 00000000000..86f79bea4ba
--- /dev/null
+++ b/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_1.reference
@@ -0,0 +1,33 @@
+200
+Expression ((Project names + Projection))
+  Aggregating
+    Expression (Before GROUP BY)
+      Filter ((WHERE + Change column names to column identifiers))
+        ReadFromMergeTree (default.t_ind_merge_1)
+        Indexes:
+          PrimaryKey
+            Condition: true
+            Parts: 2/2
+            Granules: 32/32
+          Skip
+            Name: idx_b
+            Description: minmax GRANULARITY 1
+            Parts: 2/2
+            Granules: 4/32
+200
+Expression ((Project names + Projection))
+  Aggregating
+    Expression (Before GROUP BY)
+      Filter ((WHERE + Change column names to column identifiers))
+        ReadFromMergeTree (default.t_ind_merge_1)
+        Indexes:
+          PrimaryKey
+            Condition: true
+            Parts: 1/1
+            Granules: 32/32
+          Skip
+            Name: idx_b
+            Description: minmax GRANULARITY 1
+            Parts: 1/1
+            Granules: 4/32
+4 1 3
diff --git a/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_1.sql b/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_1.sql
new file mode 100644 index 00000000000..d3e3b38a3cb --- /dev/null +++ b/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_1.sql @@ -0,0 +1,39 @@ +DROP TABLE IF EXISTS t_ind_merge_1; + +SET allow_experimental_analyzer = 1; + +CREATE TABLE t_ind_merge_1 (a UInt64, b UInt64, c UInt64, d UInt64, INDEX idx_b b TYPE minmax) +ENGINE = MergeTree +ORDER BY a SETTINGS + index_granularity = 64, + merge_max_block_size = 8192, + vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 1, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + +INSERT INTO t_ind_merge_1 SELECT number, number, rand(), rand() FROM numbers(1000); +INSERT INTO t_ind_merge_1 SELECT number, number, rand(), rand() FROM numbers(1000); + +SELECT count() FROM t_ind_merge_1 WHERE b < 100 SETTINGS force_data_skipping_indices = 'idx_b'; +EXPLAIN indexes = 1 SELECT count() FROM t_ind_merge_1 WHERE b < 100; + +OPTIMIZE TABLE t_ind_merge_1 FINAL; + +SELECT count() FROM t_ind_merge_1 WHERE b < 100 SETTINGS force_data_skipping_indices = 'idx_b'; +EXPLAIN indexes = 1 SELECT count() FROM t_ind_merge_1 WHERE b < 100; + +SYSTEM FLUSH LOGS; + +WITH + (SELECT uuid FROM system.tables WHERE database = currentDatabase() AND table = 't_ind_merge_1') AS uuid, + extractAllGroupsVertical(message, 'containing (\\d+) columns \((\\d+) merged, (\\d+) gathered\)')[1] AS groups +SELECT + groups[1] AS total, + groups[2] AS merged, + groups[3] AS gathered +FROM system.text_log +WHERE ((query_id = uuid || '::all_1_2_1') OR (query_id = currentDatabase() || '.t_ind_merge_1::all_1_2_1')) AND notEmpty(groups) +ORDER BY event_time_microseconds; + +DROP TABLE t_ind_merge_1; diff --git a/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_2.reference b/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_2.reference new file mode 100644 index 00000000000..4c2f01294a4 --- /dev/null +++ b/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_2.reference @@ -0,0 +1 @@ +6 3 3 diff --git a/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_2.sql b/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_2.sql new file mode 100644 index 00000000000..b749e0c84b0 --- /dev/null +++ b/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_2.sql @@ -0,0 +1,42 @@ +DROP TABLE IF EXISTS t_ind_merge_2; + +CREATE TABLE t_ind_merge_2 ( + a UInt64, + b UInt64, + c UInt64, + d UInt64, + e UInt64, + f UInt64, + INDEX idx_a a TYPE minmax, + INDEX idx_b b TYPE minmax, + INDEX idx_cd c * d TYPE minmax, + INDEX idx_d1 d TYPE minmax, + INDEX idx_d2 d + 7 TYPE set(3), + INDEX idx_e e * 3 TYPE set(3)) +ENGINE = MergeTree +ORDER BY a SETTINGS + index_granularity = 64, + vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 1, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + +INSERT INTO t_ind_merge_2 SELECT number, number, rand(), rand(), rand(), rand() FROM numbers(1000); +INSERT INTO t_ind_merge_2 SELECT number, number, rand(), rand(), rand(), rand() FROM numbers(1000); + +OPTIMIZE TABLE t_ind_merge_2 FINAL; +SYSTEM FLUSH LOGS; + +--- merged: a, c, d; gathered: b, e, f +WITH + (SELECT uuid FROM system.tables WHERE database = currentDatabase() AND table = 't_ind_merge_2') AS uuid, + extractAllGroupsVertical(message, 'containing (\\d+) columns \((\\d+) merged, (\\d+) gathered\)')[1] AS groups +SELECT + groups[1] AS total, + groups[2] AS merged, + groups[3] AS gathered +FROM system.text_log +WHERE ((query_id = 
uuid || '::all_1_2_1') OR (query_id = currentDatabase() || '.t_ind_merge_2::all_1_2_1')) AND notEmpty(groups) +ORDER BY event_time_microseconds; + +DROP TABLE t_ind_merge_2; diff --git a/tests/queries/0_stateless/03167_base64_url_functions.reference b/tests/queries/0_stateless/03167_base64_url_functions.reference new file mode 100644 index 00000000000..2a0d0013609 --- /dev/null +++ b/tests/queries/0_stateless/03167_base64_url_functions.reference @@ -0,0 +1,10 @@ +https://clickhouse.com aHR0cHM6Ly9jbGlja2hvdXNlLmNvbQ https://clickhouse.com https://clickhouse.com +12? MTI_ 12? 12? +https://www.google.com/search?q=clickhouse+base64+decode&sca_esv=739f8bb380e4c7ed&ei=TfRiZqCDIrmnwPAP2KLRkA8&ved=0ahUKEwjg3ZHitsmGAxW5ExAIHVhRFPIQ4dUDCBA&uact=5&oq=clickhouse+base64+decode aHR0cHM6Ly93d3cuZ29vZ2xlLmNvbS9zZWFyY2g_cT1jbGlja2hvdXNlK2Jhc2U2NCtkZWNvZGUmc2NhX2Vzdj03MzlmOGJiMzgwZTRjN2VkJmVpPVRmUmlacUNESXJtbndQQVAyS0xSa0E4JnZlZD0wYWhVS0V3amczWkhpdHNtR0F4VzVFeEFJSFZoUkZQSVE0ZFVEQ0JBJnVhY3Q9NSZvcT1jbGlja2hvdXNlK2Jhc2U2NCtkZWNvZGU https://www.google.com/search?q=clickhouse+base64+decode&sca_esv=739f8bb380e4c7ed&ei=TfRiZqCDIrmnwPAP2KLRkA8&ved=0ahUKEwjg3ZHitsmGAxW5ExAIHVhRFPIQ4dUDCBA&uact=5&oq=clickhouse+base64+decode https://www.google.com/search?q=clickhouse+base64+decode&sca_esv=739f8bb380e4c7ed&ei=TfRiZqCDIrmnwPAP2KLRkA8&ved=0ahUKEwjg3ZHitsmGAxW5ExAIHVhRFPIQ4dUDCBA&uact=5&oq=clickhouse+base64+decode +aHR0cHM6Ly9jbGlj https://clic https://clic +aHR0cHM6Ly9jbGlja2g https://clickh https://clickh +aHR0cHM6Ly9jbGljaw https://click https://click + + + +https://clickhouse.com aHR0cHM6Ly9jbGlja2hvdXNlLmNvbQ https://clickhouse.com https://clickhouse.com diff --git a/tests/queries/0_stateless/03167_base64_url_functions.sql b/tests/queries/0_stateless/03167_base64_url_functions.sql new file mode 100644 index 00000000000..674f1ae498b --- /dev/null +++ b/tests/queries/0_stateless/03167_base64_url_functions.sql @@ -0,0 +1,36 @@ +-- Tags: no-fasttest +-- no-fasttest because aklomp-base64 library is required + +-- incorrect number of arguments +SELECT base64UrlEncode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT base64UrlDecode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tryBase64UrlDecode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT base64UrlEncode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT base64UrlDecode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tryBase64UrlDecode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- test with valid inputs + +SELECT 'https://clickhouse.com' AS original, base64UrlEncode(original) AS encoded, base64UrlDecode(encoded), tryBase64UrlDecode(encoded); +SELECT '12?' 
+SELECT 'https://clickhouse.com' AS original, base64UrlEncode(original) AS encoded, base64UrlDecode(encoded), tryBase64UrlDecode(encoded); +SELECT '12?' AS original, base64UrlEncode(original) AS encoded, base64UrlDecode(encoded), tryBase64UrlDecode(encoded); +SELECT 'https://www.google.com/search?q=clickhouse+base64+decode&sca_esv=739f8bb380e4c7ed&ei=TfRiZqCDIrmnwPAP2KLRkA8&ved=0ahUKEwjg3ZHitsmGAxW5ExAIHVhRFPIQ4dUDCBA&uact=5&oq=clickhouse+base64+decode' AS original, base64UrlEncode(original) AS encoded, base64UrlDecode(encoded), tryBase64UrlDecode(encoded); + +-- encoded value has no padding +SELECT 'aHR0cHM6Ly9jbGlj' AS encoded, base64UrlDecode(encoded), tryBase64UrlDecode(encoded); +-- encoded value has one-byte padding +SELECT 'aHR0cHM6Ly9jbGlja2g' AS encoded, base64UrlDecode(encoded), tryBase64UrlDecode(encoded); +-- encoded value has two-byte padding +SELECT 'aHR0cHM6Ly9jbGljaw' AS encoded, base64UrlDecode(encoded), tryBase64UrlDecode(encoded); + +-- test with invalid inputs (characters outside the URL-safe alphabet, or a length of 4k+1, which no base64 string can have) + +SELECT base64UrlDecode('https://clickhouse.com'); -- { serverError INCORRECT_DATA } +SELECT tryBase64UrlDecode('https://clickhouse.com'); +SELECT base64UrlDecode('12?'); -- { serverError INCORRECT_DATA } +SELECT tryBase64UrlDecode('12?'); +SELECT base64UrlDecode('aHR0cHM6Ly9jbGlja'); -- { serverError INCORRECT_DATA } +SELECT tryBase64UrlDecode('aHR0cHM6Ly9jbGlja'); + +-- test FixedString argument + +SELECT toFixedString('https://clickhouse.com', 22) AS original, base64UrlEncode(original) AS encoded, base64UrlDecode(encoded), tryBase64UrlDecode(encoded);
diff --git a/tests/queries/0_stateless/03167_boom_filter_index_with_map.reference.j2 b/tests/queries/0_stateless/03167_boom_filter_index_with_map.reference.j2 new file mode 100644 index 00000000000..71dc879f28e --- /dev/null +++ b/tests/queries/0_stateless/03167_boom_filter_index_with_map.reference.j2 @@ -0,0 +1,4 @@ +{% for type in ['Int8', 'Int16', 'Int32', 'Int64', 'UInt8', 'UInt16', 'UInt32', 'UInt64'] -%} +{'xxx':56} +{56:'xxx'} +{% endfor -%}
diff --git a/tests/queries/0_stateless/03167_boom_filter_index_with_map.sql.j2 b/tests/queries/0_stateless/03167_boom_filter_index_with_map.sql.j2 new file mode 100644 index 00000000000..4147bd84e8e --- /dev/null +++ b/tests/queries/0_stateless/03167_boom_filter_index_with_map.sql.j2 @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS boom_filter_map_1; +DROP TABLE IF EXISTS boom_filter_map_2; + +{% for type in ['Int8', 'Int16', 'Int32', 'Int64', 'UInt8', 'UInt16', 'UInt32', 'UInt64'] -%} + +CREATE TABLE boom_filter_map_1 +( + `m` Map(String, {{ type }}), + INDEX index_models_value_bloom_filter mapValues(m) TYPE bloom_filter GRANULARITY 1 +) + ENGINE = MergeTree +ORDER BY tuple(); + +CREATE TABLE boom_filter_map_2 +( + `m` Map({{ type }}, String), + INDEX index_models_value_bloom_filter mapKeys(m) TYPE bloom_filter GRANULARITY 1 +) + ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO boom_filter_map_1 (m) VALUES (map('xxx', 56)); +INSERT INTO boom_filter_map_2 (m) VALUES (map(56, 'xxx')); + +SELECT m FROM boom_filter_map_1 WHERE (m['xxx']) = 56; +SELECT m FROM boom_filter_map_2 WHERE (m[56]) = 'xxx'; + +DROP TABLE IF EXISTS boom_filter_map_1; +DROP TABLE IF EXISTS boom_filter_map_2; + +{% endfor -%}
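+-- The point lookups above are the cases the bloom_filter indexes are built over: boom_filter_map_1
+-- filters on a map value via mapValues(m), boom_filter_map_2 on a map key via mapKeys(m); the
+-- {% for %} loop repeats the roundtrip for each listed integer type.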
diff --git a/tests/queries/0_stateless/03167_transactions_are_really_disabled.reference b/tests/queries/0_stateless/03167_transactions_are_really_disabled.reference new file mode 100644 index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/03167_transactions_are_really_disabled.sql b/tests/queries/0_stateless/03167_transactions_are_really_disabled.sql new file mode 100644 index 00000000000..e3c86a2d5be --- /dev/null +++ b/tests/queries/0_stateless/03167_transactions_are_really_disabled.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS mv_table; +DROP TABLE IF EXISTS null_table; + +CREATE TABLE null_table (str String) ENGINE = Null; +CREATE MATERIALIZED VIEW mv_table (str String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/transactions_disabled_rmt', '{replica}') ORDER BY str AS SELECT str AS str FROM null_table; + +SET implicit_transaction = 1; +SET throw_on_unsupported_query_inside_transaction = 0; + +INSERT INTO null_table VALUES ('test'); -- { serverError NOT_IMPLEMENTED } + +DROP TABLE IF EXISTS mv_table; +DROP TABLE IF EXISTS null_table;
diff --git a/tests/queries/0_stateless/03168_cld2_tsan.reference b/tests/queries/0_stateless/03168_cld2_tsan.reference new file mode 100644 index 00000000000..6c3cafd4a6d --- /dev/null +++ b/tests/queries/0_stateless/03168_cld2_tsan.reference @@ -0,0 +1,2 @@ +{'ja':0.62,'fr':0.36} +{'ja':0.62,'fr':0.36}
diff --git a/tests/queries/0_stateless/03168_cld2_tsan.sql b/tests/queries/0_stateless/03168_cld2_tsan.sql new file mode 100644 index 00000000000..701a781c472 --- /dev/null +++ b/tests/queries/0_stateless/03168_cld2_tsan.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: depends on cld2 + +-- https://github.com/ClickHouse/ClickHouse/issues/64931 +SELECT detectLanguageMixed(materialize('二兎を追う者は一兎をも得ず二兎を追う者は一兎をも得ず A vaincre sans peril, on triomphe sans gloire.')) +GROUP BY + GROUPING SETS ( + ('a', toUInt256(1)), + (stringToH3(toFixedString(toFixedString('85283473ffffff', 14), 14)))) +SETTINGS allow_experimental_nlp_functions = 1;
diff --git a/tests/queries/0_stateless/03168_fuzz_multiIf_short_circuit.reference b/tests/queries/0_stateless/03168_fuzz_multiIf_short_circuit.reference new file mode 100644 index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/03168_fuzz_multiIf_short_circuit.sql b/tests/queries/0_stateless/03168_fuzz_multiIf_short_circuit.sql new file mode 100644 index 00000000000..4e4cc291e9b --- /dev/null +++ b/tests/queries/0_stateless/03168_fuzz_multiIf_short_circuit.sql @@ -0,0 +1,6 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/64946 +SELECT + multiIf((number % toLowCardinality(toNullable(toUInt128(2)))) = (number % toNullable(2)), toInt8(1), (number % materialize(toLowCardinality(3))) = toUInt128(toNullable(0)), toInt8(materialize(materialize(2))), toInt64(toUInt128(3))) +FROM system.numbers +LIMIT 44857 +FORMAT Null;
diff --git a/tests/queries/0_stateless/03168_loop_engine_with_parallel_replicas.reference b/tests/queries/0_stateless/03168_loop_engine_with_parallel_replicas.reference new file mode 100644 index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/03168_loop_engine_with_parallel_replicas.sql b/tests/queries/0_stateless/03168_loop_engine_with_parallel_replicas.sql new file mode 100644 index 00000000000..da4626ad897 --- /dev/null +++ b/tests/queries/0_stateless/03168_loop_engine_with_parallel_replicas.sql @@ -0,0 +1,11 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 03147_db; +CREATE DATABASE IF NOT EXISTS 03147_db; +CREATE TABLE 03147_db.t (n Int8) ENGINE=MergeTree ORDER BY n; +INSERT INTO 03147_db.t SELECT * FROM numbers(10); +USE 03147_db; + +SET allow_experimental_parallel_reading_from_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'parallel_replicas', max_parallel_replicas = 100; + +SELECT * FROM loop(03147_db.t) LIMIT 15 FORMAT Null; diff --git a/tests/queries/0_stateless/03169_cache_complex_dict_short_circuit_bug.reference 
b/tests/queries/0_stateless/03169_cache_complex_dict_short_circuit_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03169_cache_complex_dict_short_circuit_bug.sql b/tests/queries/0_stateless/03169_cache_complex_dict_short_circuit_bug.sql new file mode 100644 index 00000000000..8463d13d251 --- /dev/null +++ b/tests/queries/0_stateless/03169_cache_complex_dict_short_circuit_bug.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS complex_key_simple_attributes_source_short_circuit_table; +DROP DICTIONARY IF EXISTS cache_dictionary_complex_key_simple_attributes_short_circuit; + +CREATE TABLE complex_key_simple_attributes_source_short_circuit_table +( + id UInt64, + id_key String, + value_first String, + value_second String +) + ENGINE = TinyLog; + +INSERT INTO complex_key_simple_attributes_source_short_circuit_table VALUES(0, 'id_key_0', 'value_0', 'value_second_0'); + +CREATE DICTIONARY cache_dictionary_complex_key_simple_attributes_short_circuit +( + `id` UInt64, + `id_key` String, + `value_first` String DEFAULT 'value_first_default', + `value_second` String DEFAULT 'value_second_default' +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(TABLE 'complex_key_simple_attributes_source_short_circuit_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 10)); + +SELECT dictGetOrDefault('cache_dictionary_complex_key_simple_attributes_short_circuit', 'value_first', (number, concat(toString(number))), toString(materialize('default'))) AS value_first FROM system.numbers LIMIT 20 FORMAT Null; +SELECT dictGetOrDefault('cache_dictionary_complex_key_simple_attributes_short_circuit', 'value_first', (number, concat(toString(number))), toString(materialize('default'))) AS value_first FROM system.numbers LIMIT 20 FORMAT Null; + +DROP TABLE IF EXISTS complex_key_simple_attributes_source_short_circuit_table; +DROP DICTIONARY IF EXISTS cache_dictionary_complex_key_simple_attributes_short_circuit; diff --git a/tests/queries/0_stateless/03169_display_column_names_in_footer.reference b/tests/queries/0_stateless/03169_display_column_names_in_footer.reference new file mode 100644 index 00000000000..7a9f413a900 --- /dev/null +++ b/tests/queries/0_stateless/03169_display_column_names_in_footer.reference @@ -0,0 +1,2382 @@ + ┏━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━┓ + ┃ number ┃ toTypeName(number) ┃ + ┡━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━┩ + 1. │ 0 │ UInt64 │ + ├────────┼────────────────────┤ + 2. │ 1 │ UInt64 │ + ├────────┼────────────────────┤ + 3. │ 2 │ UInt64 │ + ├────────┼────────────────────┤ + 4. │ 3 │ UInt64 │ + ├────────┼────────────────────┤ + 5. │ 4 │ UInt64 │ + ├────────┼────────────────────┤ + 6. │ 5 │ UInt64 │ + ├────────┼────────────────────┤ + 7. │ 6 │ UInt64 │ + ├────────┼────────────────────┤ + 8. │ 7 │ UInt64 │ + ├────────┼────────────────────┤ + 9. │ 8 │ UInt64 │ + ├────────┼────────────────────┤ +10. │ 9 │ UInt64 │ + ├────────┼────────────────────┤ +11. │ 10 │ UInt64 │ + ├────────┼────────────────────┤ +12. │ 11 │ UInt64 │ + ├────────┼────────────────────┤ +13. │ 12 │ UInt64 │ + ├────────┼────────────────────┤ +14. │ 13 │ UInt64 │ + ├────────┼────────────────────┤ +15. │ 14 │ UInt64 │ + ├────────┼────────────────────┤ +16. │ 15 │ UInt64 │ + ├────────┼────────────────────┤ +17. │ 16 │ UInt64 │ + ├────────┼────────────────────┤ +18. │ 17 │ UInt64 │ + ├────────┼────────────────────┤ +19. │ 18 │ UInt64 │ + ├────────┼────────────────────┤ +20. │ 19 │ UInt64 │ + ├────────┼────────────────────┤ +21. │ 20 │ UInt64 │ + ├────────┼────────────────────┤ +22. 
│ 21 │ UInt64 │ + ├────────┼────────────────────┤ +23. │ 22 │ UInt64 │ + ├────────┼────────────────────┤ +24. │ 23 │ UInt64 │ + ├────────┼────────────────────┤ +25. │ 24 │ UInt64 │ + ├────────┼────────────────────┤ +26. │ 25 │ UInt64 │ + ├────────┼────────────────────┤ +27. │ 26 │ UInt64 │ + ├────────┼────────────────────┤ +28. │ 27 │ UInt64 │ + ├────────┼────────────────────┤ +29. │ 28 │ UInt64 │ + ├────────┼────────────────────┤ +30. │ 29 │ UInt64 │ + ├────────┼────────────────────┤ +31. │ 30 │ UInt64 │ + ├────────┼────────────────────┤ +32. │ 31 │ UInt64 │ + ├────────┼────────────────────┤ +33. │ 32 │ UInt64 │ + ├────────┼────────────────────┤ +34. │ 33 │ UInt64 │ + ├────────┼────────────────────┤ +35. │ 34 │ UInt64 │ + ├────────┼────────────────────┤ +36. │ 35 │ UInt64 │ + ├────────┼────────────────────┤ +37. │ 36 │ UInt64 │ + ├────────┼────────────────────┤ +38. │ 37 │ UInt64 │ + ├────────┼────────────────────┤ +39. │ 38 │ UInt64 │ + ├────────┼────────────────────┤ +40. │ 39 │ UInt64 │ + ├────────┼────────────────────┤ +41. │ 40 │ UInt64 │ + ├────────┼────────────────────┤ +42. │ 41 │ UInt64 │ + ├────────┼────────────────────┤ +43. │ 42 │ UInt64 │ + ├────────┼────────────────────┤ +44. │ 43 │ UInt64 │ + ├────────┼────────────────────┤ +45. │ 44 │ UInt64 │ + ├────────┼────────────────────┤ +46. │ 45 │ UInt64 │ + ├────────┼────────────────────┤ +47. │ 46 │ UInt64 │ + ├────────┼────────────────────┤ +48. │ 47 │ UInt64 │ + ├────────┼────────────────────┤ +49. │ 48 │ UInt64 │ + └────────┴────────────────────┘ + ┏━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━┓ + ┃ number ┃ toTypeName(number) ┃ + ┡━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━┩ + 1. │ 0 │ UInt64 │ + ├────────┼────────────────────┤ + 2. │ 1 │ UInt64 │ + ├────────┼────────────────────┤ + 3. │ 2 │ UInt64 │ + ├────────┼────────────────────┤ + 4. │ 3 │ UInt64 │ + ├────────┼────────────────────┤ + 5. │ 4 │ UInt64 │ + ├────────┼────────────────────┤ + 6. │ 5 │ UInt64 │ + ├────────┼────────────────────┤ + 7. │ 6 │ UInt64 │ + ├────────┼────────────────────┤ + 8. │ 7 │ UInt64 │ + ├────────┼────────────────────┤ + 9. │ 8 │ UInt64 │ + ├────────┼────────────────────┤ +10. │ 9 │ UInt64 │ + ┣━━━━━━━━╋━━━━━━━━━━━━━━━━━━━━┫ + ┃ number ┃ toTypeName(number) ┃ + ┗━━━━━━━━┻━━━━━━━━━━━━━━━━━━━━┛ + ┏━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━┓ + ┃ number ┃ toTypeName(number) ┃ + ┡━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━┩ + 1. │ 0 │ UInt64 │ + ├────────┼────────────────────┤ + 2. │ 1 │ UInt64 │ + ├────────┼────────────────────┤ + 3. │ 2 │ UInt64 │ + ├────────┼────────────────────┤ + 4. │ 3 │ UInt64 │ + ├────────┼────────────────────┤ + 5. │ 4 │ UInt64 │ + ├────────┼────────────────────┤ + 6. │ 5 │ UInt64 │ + ├────────┼────────────────────┤ + 7. │ 6 │ UInt64 │ + ├────────┼────────────────────┤ + 8. │ 7 │ UInt64 │ + ├────────┼────────────────────┤ + 9. │ 8 │ UInt64 │ + ├────────┼────────────────────┤ + 10. │ 9 │ UInt64 │ + ├────────┼────────────────────┤ + 11. │ 10 │ UInt64 │ + ├────────┼────────────────────┤ + 12. │ 11 │ UInt64 │ + ├────────┼────────────────────┤ + 13. │ 12 │ UInt64 │ + ├────────┼────────────────────┤ + 14. │ 13 │ UInt64 │ + ├────────┼────────────────────┤ + 15. │ 14 │ UInt64 │ + ├────────┼────────────────────┤ + 16. │ 15 │ UInt64 │ + ├────────┼────────────────────┤ + 17. │ 16 │ UInt64 │ + ├────────┼────────────────────┤ + 18. │ 17 │ UInt64 │ + ├────────┼────────────────────┤ + 19. │ 18 │ UInt64 │ + ├────────┼────────────────────┤ + 20. │ 19 │ UInt64 │ + ├────────┼────────────────────┤ + 21. │ 20 │ UInt64 │ + ├────────┼────────────────────┤ + 22. 
│ 21 │ UInt64 │ + ├────────┼────────────────────┤ + 23. │ 22 │ UInt64 │ + ├────────┼────────────────────┤ + 24. │ 23 │ UInt64 │ + ├────────┼────────────────────┤ + 25. │ 24 │ UInt64 │ + ├────────┼────────────────────┤ + 26. │ 25 │ UInt64 │ + ├────────┼────────────────────┤ + 27. │ 26 │ UInt64 │ + ├────────┼────────────────────┤ + 28. │ 27 │ UInt64 │ + ├────────┼────────────────────┤ + 29. │ 28 │ UInt64 │ + ├────────┼────────────────────┤ + 30. │ 29 │ UInt64 │ + ├────────┼────────────────────┤ + 31. │ 30 │ UInt64 │ + ├────────┼────────────────────┤ + 32. │ 31 │ UInt64 │ + ├────────┼────────────────────┤ + 33. │ 32 │ UInt64 │ + ├────────┼────────────────────┤ + 34. │ 33 │ UInt64 │ + ├────────┼────────────────────┤ + 35. │ 34 │ UInt64 │ + ├────────┼────────────────────┤ + 36. │ 35 │ UInt64 │ + ├────────┼────────────────────┤ + 37. │ 36 │ UInt64 │ + ├────────┼────────────────────┤ + 38. │ 37 │ UInt64 │ + ├────────┼────────────────────┤ + 39. │ 38 │ UInt64 │ + ├────────┼────────────────────┤ + 40. │ 39 │ UInt64 │ + ├────────┼────────────────────┤ + 41. │ 40 │ UInt64 │ + ├────────┼────────────────────┤ + 42. │ 41 │ UInt64 │ + ├────────┼────────────────────┤ + 43. │ 42 │ UInt64 │ + ├────────┼────────────────────┤ + 44. │ 43 │ UInt64 │ + ├────────┼────────────────────┤ + 45. │ 44 │ UInt64 │ + ├────────┼────────────────────┤ + 46. │ 45 │ UInt64 │ + ├────────┼────────────────────┤ + 47. │ 46 │ UInt64 │ + ├────────┼────────────────────┤ + 48. │ 47 │ UInt64 │ + ├────────┼────────────────────┤ + 49. │ 48 │ UInt64 │ + ├────────┼────────────────────┤ + 50. │ 49 │ UInt64 │ + ├────────┼────────────────────┤ + 51. │ 50 │ UInt64 │ + ├────────┼────────────────────┤ + 52. │ 51 │ UInt64 │ + ├────────┼────────────────────┤ + 53. │ 52 │ UInt64 │ + ├────────┼────────────────────┤ + 54. │ 53 │ UInt64 │ + ├────────┼────────────────────┤ + 55. │ 54 │ UInt64 │ + ├────────┼────────────────────┤ + 56. │ 55 │ UInt64 │ + ├────────┼────────────────────┤ + 57. │ 56 │ UInt64 │ + ├────────┼────────────────────┤ + 58. │ 57 │ UInt64 │ + ├────────┼────────────────────┤ + 59. │ 58 │ UInt64 │ + ├────────┼────────────────────┤ + 60. │ 59 │ UInt64 │ + ├────────┼────────────────────┤ + 61. │ 60 │ UInt64 │ + ├────────┼────────────────────┤ + 62. │ 61 │ UInt64 │ + ├────────┼────────────────────┤ + 63. │ 62 │ UInt64 │ + ├────────┼────────────────────┤ + 64. │ 63 │ UInt64 │ + ├────────┼────────────────────┤ + 65. │ 64 │ UInt64 │ + ├────────┼────────────────────┤ + 66. │ 65 │ UInt64 │ + ├────────┼────────────────────┤ + 67. │ 66 │ UInt64 │ + ├────────┼────────────────────┤ + 68. │ 67 │ UInt64 │ + ├────────┼────────────────────┤ + 69. │ 68 │ UInt64 │ + ├────────┼────────────────────┤ + 70. │ 69 │ UInt64 │ + ├────────┼────────────────────┤ + 71. │ 70 │ UInt64 │ + ├────────┼────────────────────┤ + 72. │ 71 │ UInt64 │ + ├────────┼────────────────────┤ + 73. │ 72 │ UInt64 │ + ├────────┼────────────────────┤ + 74. │ 73 │ UInt64 │ + ├────────┼────────────────────┤ + 75. │ 74 │ UInt64 │ + ├────────┼────────────────────┤ + 76. │ 75 │ UInt64 │ + ├────────┼────────────────────┤ + 77. │ 76 │ UInt64 │ + ├────────┼────────────────────┤ + 78. │ 77 │ UInt64 │ + ├────────┼────────────────────┤ + 79. │ 78 │ UInt64 │ + ├────────┼────────────────────┤ + 80. │ 79 │ UInt64 │ + ├────────┼────────────────────┤ + 81. │ 80 │ UInt64 │ + ├────────┼────────────────────┤ + 82. │ 81 │ UInt64 │ + ├────────┼────────────────────┤ + 83. │ 82 │ UInt64 │ + ├────────┼────────────────────┤ + 84. │ 83 │ UInt64 │ + ├────────┼────────────────────┤ + 85. 
│ 84 │ UInt64 │ + ├────────┼────────────────────┤ + 86. │ 85 │ UInt64 │ + ├────────┼────────────────────┤ + 87. │ 86 │ UInt64 │ + ├────────┼────────────────────┤ + 88. │ 87 │ UInt64 │ + ├────────┼────────────────────┤ + 89. │ 88 │ UInt64 │ + ├────────┼────────────────────┤ + 90. │ 89 │ UInt64 │ + ├────────┼────────────────────┤ + 91. │ 90 │ UInt64 │ + ├────────┼────────────────────┤ + 92. │ 91 │ UInt64 │ + ├────────┼────────────────────┤ + 93. │ 92 │ UInt64 │ + ├────────┼────────────────────┤ + 94. │ 93 │ UInt64 │ + ├────────┼────────────────────┤ + 95. │ 94 │ UInt64 │ + ├────────┼────────────────────┤ + 96. │ 95 │ UInt64 │ + ├────────┼────────────────────┤ + 97. │ 96 │ UInt64 │ + ├────────┼────────────────────┤ + 98. │ 97 │ UInt64 │ + ├────────┼────────────────────┤ + 99. │ 98 │ UInt64 │ + ├────────┼────────────────────┤ +100. │ 99 │ UInt64 │ + └────────┴────────────────────┘ + ┏━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━┓ + ┃ number ┃ toTypeName(number) ┃ + ┡━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━┩ + 1. │ 0 │ UInt64 │ + ├────────┼────────────────────┤ + 2. │ 1 │ UInt64 │ + ├────────┼────────────────────┤ + 3. │ 2 │ UInt64 │ + ├────────┼────────────────────┤ + 4. │ 3 │ UInt64 │ + ├────────┼────────────────────┤ + 5. │ 4 │ UInt64 │ + ├────────┼────────────────────┤ + 6. │ 5 │ UInt64 │ + ├────────┼────────────────────┤ + 7. │ 6 │ UInt64 │ + ├────────┼────────────────────┤ + 8. │ 7 │ UInt64 │ + ├────────┼────────────────────┤ + 9. │ 8 │ UInt64 │ + ├────────┼────────────────────┤ + 10. │ 9 │ UInt64 │ + ├────────┼────────────────────┤ + 11. │ 10 │ UInt64 │ + ├────────┼────────────────────┤ + 12. │ 11 │ UInt64 │ + ├────────┼────────────────────┤ + 13. │ 12 │ UInt64 │ + ├────────┼────────────────────┤ + 14. │ 13 │ UInt64 │ + ├────────┼────────────────────┤ + 15. │ 14 │ UInt64 │ + ├────────┼────────────────────┤ + 16. │ 15 │ UInt64 │ + ├────────┼────────────────────┤ + 17. │ 16 │ UInt64 │ + ├────────┼────────────────────┤ + 18. │ 17 │ UInt64 │ + ├────────┼────────────────────┤ + 19. │ 18 │ UInt64 │ + ├────────┼────────────────────┤ + 20. │ 19 │ UInt64 │ + ├────────┼────────────────────┤ + 21. │ 20 │ UInt64 │ + ├────────┼────────────────────┤ + 22. │ 21 │ UInt64 │ + ├────────┼────────────────────┤ + 23. │ 22 │ UInt64 │ + ├────────┼────────────────────┤ + 24. │ 23 │ UInt64 │ + ├────────┼────────────────────┤ + 25. │ 24 │ UInt64 │ + ├────────┼────────────────────┤ + 26. │ 25 │ UInt64 │ + ├────────┼────────────────────┤ + 27. │ 26 │ UInt64 │ + ├────────┼────────────────────┤ + 28. │ 27 │ UInt64 │ + ├────────┼────────────────────┤ + 29. │ 28 │ UInt64 │ + ├────────┼────────────────────┤ + 30. │ 29 │ UInt64 │ + ├────────┼────────────────────┤ + 31. │ 30 │ UInt64 │ + ├────────┼────────────────────┤ + 32. │ 31 │ UInt64 │ + ├────────┼────────────────────┤ + 33. │ 32 │ UInt64 │ + ├────────┼────────────────────┤ + 34. │ 33 │ UInt64 │ + ├────────┼────────────────────┤ + 35. │ 34 │ UInt64 │ + ├────────┼────────────────────┤ + 36. │ 35 │ UInt64 │ + ├────────┼────────────────────┤ + 37. │ 36 │ UInt64 │ + ├────────┼────────────────────┤ + 38. │ 37 │ UInt64 │ + ├────────┼────────────────────┤ + 39. │ 38 │ UInt64 │ + ├────────┼────────────────────┤ + 40. │ 39 │ UInt64 │ + ├────────┼────────────────────┤ + 41. │ 40 │ UInt64 │ + ├────────┼────────────────────┤ + 42. │ 41 │ UInt64 │ + ├────────┼────────────────────┤ + 43. │ 42 │ UInt64 │ + ├────────┼────────────────────┤ + 44. │ 43 │ UInt64 │ + ├────────┼────────────────────┤ + 45. │ 44 │ UInt64 │ + ├────────┼────────────────────┤ + 46. │ 45 │ UInt64 │ + ├────────┼────────────────────┤ + 47. 
│ 46 │ UInt64 │ + ├────────┼────────────────────┤ + 48. │ 47 │ UInt64 │ + ├────────┼────────────────────┤ + 49. │ 48 │ UInt64 │ + ├────────┼────────────────────┤ + 50. │ 49 │ UInt64 │ + ├────────┼────────────────────┤ + 51. │ 50 │ UInt64 │ + ├────────┼────────────────────┤ + 52. │ 51 │ UInt64 │ + ├────────┼────────────────────┤ + 53. │ 52 │ UInt64 │ + ├────────┼────────────────────┤ + 54. │ 53 │ UInt64 │ + ├────────┼────────────────────┤ + 55. │ 54 │ UInt64 │ + ├────────┼────────────────────┤ + 56. │ 55 │ UInt64 │ + ├────────┼────────────────────┤ + 57. │ 56 │ UInt64 │ + ├────────┼────────────────────┤ + 58. │ 57 │ UInt64 │ + ├────────┼────────────────────┤ + 59. │ 58 │ UInt64 │ + ├────────┼────────────────────┤ + 60. │ 59 │ UInt64 │ + ├────────┼────────────────────┤ + 61. │ 60 │ UInt64 │ + ├────────┼────────────────────┤ + 62. │ 61 │ UInt64 │ + ├────────┼────────────────────┤ + 63. │ 62 │ UInt64 │ + ├────────┼────────────────────┤ + 64. │ 63 │ UInt64 │ + ├────────┼────────────────────┤ + 65. │ 64 │ UInt64 │ + ├────────┼────────────────────┤ + 66. │ 65 │ UInt64 │ + ├────────┼────────────────────┤ + 67. │ 66 │ UInt64 │ + ├────────┼────────────────────┤ + 68. │ 67 │ UInt64 │ + ├────────┼────────────────────┤ + 69. │ 68 │ UInt64 │ + ├────────┼────────────────────┤ + 70. │ 69 │ UInt64 │ + ├────────┼────────────────────┤ + 71. │ 70 │ UInt64 │ + ├────────┼────────────────────┤ + 72. │ 71 │ UInt64 │ + ├────────┼────────────────────┤ + 73. │ 72 │ UInt64 │ + ├────────┼────────────────────┤ + 74. │ 73 │ UInt64 │ + ├────────┼────────────────────┤ + 75. │ 74 │ UInt64 │ + ├────────┼────────────────────┤ + 76. │ 75 │ UInt64 │ + ├────────┼────────────────────┤ + 77. │ 76 │ UInt64 │ + ├────────┼────────────────────┤ + 78. │ 77 │ UInt64 │ + ├────────┼────────────────────┤ + 79. │ 78 │ UInt64 │ + ├────────┼────────────────────┤ + 80. │ 79 │ UInt64 │ + ├────────┼────────────────────┤ + 81. │ 80 │ UInt64 │ + ├────────┼────────────────────┤ + 82. │ 81 │ UInt64 │ + ├────────┼────────────────────┤ + 83. │ 82 │ UInt64 │ + ├────────┼────────────────────┤ + 84. │ 83 │ UInt64 │ + ├────────┼────────────────────┤ + 85. │ 84 │ UInt64 │ + ├────────┼────────────────────┤ + 86. │ 85 │ UInt64 │ + ├────────┼────────────────────┤ + 87. │ 86 │ UInt64 │ + ├────────┼────────────────────┤ + 88. │ 87 │ UInt64 │ + ├────────┼────────────────────┤ + 89. │ 88 │ UInt64 │ + ├────────┼────────────────────┤ + 90. │ 89 │ UInt64 │ + ├────────┼────────────────────┤ + 91. │ 90 │ UInt64 │ + ├────────┼────────────────────┤ + 92. │ 91 │ UInt64 │ + ├────────┼────────────────────┤ + 93. │ 92 │ UInt64 │ + ├────────┼────────────────────┤ + 94. │ 93 │ UInt64 │ + ├────────┼────────────────────┤ + 95. │ 94 │ UInt64 │ + ├────────┼────────────────────┤ + 96. │ 95 │ UInt64 │ + ├────────┼────────────────────┤ + 97. │ 96 │ UInt64 │ + ├────────┼────────────────────┤ + 98. │ 97 │ UInt64 │ + ├────────┼────────────────────┤ + 99. │ 98 │ UInt64 │ + ├────────┼────────────────────┤ +100. │ 99 │ UInt64 │ + ┣━━━━━━━━╋━━━━━━━━━━━━━━━━━━━━┫ + ┃ number ┃ toTypeName(number) ┃ + ┗━━━━━━━━┻━━━━━━━━━━━━━━━━━━━━┛ + ┏━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━┓ + ┃ number ┃ toTypeName(number) ┃ + ┡━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━┩ + 1. │ 0 │ UInt64 │ + ├────────┼────────────────────┤ + 2. │ 1 │ UInt64 │ + ├────────┼────────────────────┤ + 3. │ 2 │ UInt64 │ + ├────────┼────────────────────┤ + 4. │ 3 │ UInt64 │ + ├────────┼────────────────────┤ + 5. │ 4 │ UInt64 │ + ├────────┼────────────────────┤ + 6. │ 5 │ UInt64 │ + ├────────┼────────────────────┤ + 7. 
│ 6 │ UInt64 │ + ├────────┼────────────────────┤ + 8. │ 7 │ UInt64 │ + ├────────┼────────────────────┤ + 9. │ 8 │ UInt64 │ + ├────────┼────────────────────┤ + 10. │ 9 │ UInt64 │ + ├────────┼────────────────────┤ + 11. │ 10 │ UInt64 │ + ├────────┼────────────────────┤ + 12. │ 11 │ UInt64 │ + ├────────┼────────────────────┤ + 13. │ 12 │ UInt64 │ + ├────────┼────────────────────┤ + 14. │ 13 │ UInt64 │ + ├────────┼────────────────────┤ + 15. │ 14 │ UInt64 │ + ├────────┼────────────────────┤ + 16. │ 15 │ UInt64 │ + ├────────┼────────────────────┤ + 17. │ 16 │ UInt64 │ + ├────────┼────────────────────┤ + 18. │ 17 │ UInt64 │ + ├────────┼────────────────────┤ + 19. │ 18 │ UInt64 │ + ├────────┼────────────────────┤ + 20. │ 19 │ UInt64 │ + ├────────┼────────────────────┤ + 21. │ 20 │ UInt64 │ + ├────────┼────────────────────┤ + 22. │ 21 │ UInt64 │ + ├────────┼────────────────────┤ + 23. │ 22 │ UInt64 │ + ├────────┼────────────────────┤ + 24. │ 23 │ UInt64 │ + ├────────┼────────────────────┤ + 25. │ 24 │ UInt64 │ + ├────────┼────────────────────┤ + 26. │ 25 │ UInt64 │ + ├────────┼────────────────────┤ + 27. │ 26 │ UInt64 │ + ├────────┼────────────────────┤ + 28. │ 27 │ UInt64 │ + ├────────┼────────────────────┤ + 29. │ 28 │ UInt64 │ + ├────────┼────────────────────┤ + 30. │ 29 │ UInt64 │ + ├────────┼────────────────────┤ + 31. │ 30 │ UInt64 │ + ├────────┼────────────────────┤ + 32. │ 31 │ UInt64 │ + ├────────┼────────────────────┤ + 33. │ 32 │ UInt64 │ + ├────────┼────────────────────┤ + 34. │ 33 │ UInt64 │ + ├────────┼────────────────────┤ + 35. │ 34 │ UInt64 │ + ├────────┼────────────────────┤ + 36. │ 35 │ UInt64 │ + ├────────┼────────────────────┤ + 37. │ 36 │ UInt64 │ + ├────────┼────────────────────┤ + 38. │ 37 │ UInt64 │ + ├────────┼────────────────────┤ + 39. │ 38 │ UInt64 │ + ├────────┼────────────────────┤ + 40. │ 39 │ UInt64 │ + ├────────┼────────────────────┤ + 41. │ 40 │ UInt64 │ + ├────────┼────────────────────┤ + 42. │ 41 │ UInt64 │ + ├────────┼────────────────────┤ + 43. │ 42 │ UInt64 │ + ├────────┼────────────────────┤ + 44. │ 43 │ UInt64 │ + ├────────┼────────────────────┤ + 45. │ 44 │ UInt64 │ + ├────────┼────────────────────┤ + 46. │ 45 │ UInt64 │ + ├────────┼────────────────────┤ + 47. │ 46 │ UInt64 │ + ├────────┼────────────────────┤ + 48. │ 47 │ UInt64 │ + ├────────┼────────────────────┤ + 49. │ 48 │ UInt64 │ + ├────────┼────────────────────┤ + 50. │ 49 │ UInt64 │ + ├────────┼────────────────────┤ + 51. │ 50 │ UInt64 │ + ├────────┼────────────────────┤ + 52. │ 51 │ UInt64 │ + ├────────┼────────────────────┤ + 53. │ 52 │ UInt64 │ + ├────────┼────────────────────┤ + 54. │ 53 │ UInt64 │ + ├────────┼────────────────────┤ + 55. │ 54 │ UInt64 │ + ├────────┼────────────────────┤ + 56. │ 55 │ UInt64 │ + ├────────┼────────────────────┤ + 57. │ 56 │ UInt64 │ + ├────────┼────────────────────┤ + 58. │ 57 │ UInt64 │ + ├────────┼────────────────────┤ + 59. │ 58 │ UInt64 │ + ├────────┼────────────────────┤ + 60. │ 59 │ UInt64 │ + ├────────┼────────────────────┤ + 61. │ 60 │ UInt64 │ + ├────────┼────────────────────┤ + 62. │ 61 │ UInt64 │ + ├────────┼────────────────────┤ + 63. │ 62 │ UInt64 │ + ├────────┼────────────────────┤ + 64. │ 63 │ UInt64 │ + ├────────┼────────────────────┤ + 65. │ 64 │ UInt64 │ + ├────────┼────────────────────┤ + 66. │ 65 │ UInt64 │ + ├────────┼────────────────────┤ + 67. │ 66 │ UInt64 │ + ├────────┼────────────────────┤ + 68. │ 67 │ UInt64 │ + ├────────┼────────────────────┤ + 69. │ 68 │ UInt64 │ + ├────────┼────────────────────┤ + 70. 
│ 69 │ UInt64 │ + ├────────┼────────────────────┤ + 71. │ 70 │ UInt64 │ + ├────────┼────────────────────┤ + 72. │ 71 │ UInt64 │ + ├────────┼────────────────────┤ + 73. │ 72 │ UInt64 │ + ├────────┼────────────────────┤ + 74. │ 73 │ UInt64 │ + ├────────┼────────────────────┤ + 75. │ 74 │ UInt64 │ + ├────────┼────────────────────┤ + 76. │ 75 │ UInt64 │ + ├────────┼────────────────────┤ + 77. │ 76 │ UInt64 │ + ├────────┼────────────────────┤ + 78. │ 77 │ UInt64 │ + ├────────┼────────────────────┤ + 79. │ 78 │ UInt64 │ + ├────────┼────────────────────┤ + 80. │ 79 │ UInt64 │ + ├────────┼────────────────────┤ + 81. │ 80 │ UInt64 │ + ├────────┼────────────────────┤ + 82. │ 81 │ UInt64 │ + ├────────┼────────────────────┤ + 83. │ 82 │ UInt64 │ + ├────────┼────────────────────┤ + 84. │ 83 │ UInt64 │ + ├────────┼────────────────────┤ + 85. │ 84 │ UInt64 │ + ├────────┼────────────────────┤ + 86. │ 85 │ UInt64 │ + ├────────┼────────────────────┤ + 87. │ 86 │ UInt64 │ + ├────────┼────────────────────┤ + 88. │ 87 │ UInt64 │ + ├────────┼────────────────────┤ + 89. │ 88 │ UInt64 │ + ├────────┼────────────────────┤ + 90. │ 89 │ UInt64 │ + ├────────┼────────────────────┤ + 91. │ 90 │ UInt64 │ + ├────────┼────────────────────┤ + 92. │ 91 │ UInt64 │ + ├────────┼────────────────────┤ + 93. │ 92 │ UInt64 │ + ├────────┼────────────────────┤ + 94. │ 93 │ UInt64 │ + ├────────┼────────────────────┤ + 95. │ 94 │ UInt64 │ + ├────────┼────────────────────┤ + 96. │ 95 │ UInt64 │ + ├────────┼────────────────────┤ + 97. │ 96 │ UInt64 │ + ├────────┼────────────────────┤ + 98. │ 97 │ UInt64 │ + ├────────┼────────────────────┤ + 99. │ 98 │ UInt64 │ + ├────────┼────────────────────┤ +100. │ 99 │ UInt64 │ + ┣━━━━━━━━╋━━━━━━━━━━━━━━━━━━━━┫ + ┃ number ┃ toTypeName(number) ┃ + ┗━━━━━━━━┻━━━━━━━━━━━━━━━━━━━━┛ + ┏━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━┓ + ┃ number ┃ toTypeName(number) ┃ + ┡━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━┩ + 1. │ 0 │ UInt64 │ + ├────────┼────────────────────┤ + 2. │ 1 │ UInt64 │ + ├────────┼────────────────────┤ + 3. │ 2 │ UInt64 │ + ├────────┼────────────────────┤ + 4. │ 3 │ UInt64 │ + ├────────┼────────────────────┤ + 5. │ 4 │ UInt64 │ + ├────────┼────────────────────┤ + 6. │ 5 │ UInt64 │ + ├────────┼────────────────────┤ + 7. │ 6 │ UInt64 │ + ├────────┼────────────────────┤ + 8. │ 7 │ UInt64 │ + ├────────┼────────────────────┤ + 9. │ 8 │ UInt64 │ + ├────────┼────────────────────┤ + 10. │ 9 │ UInt64 │ + ├────────┼────────────────────┤ + 11. │ 10 │ UInt64 │ + ├────────┼────────────────────┤ + 12. │ 11 │ UInt64 │ + ├────────┼────────────────────┤ + 13. │ 12 │ UInt64 │ + ├────────┼────────────────────┤ + 14. │ 13 │ UInt64 │ + ├────────┼────────────────────┤ + 15. │ 14 │ UInt64 │ + ├────────┼────────────────────┤ + 16. │ 15 │ UInt64 │ + ├────────┼────────────────────┤ + 17. │ 16 │ UInt64 │ + ├────────┼────────────────────┤ + 18. │ 17 │ UInt64 │ + ├────────┼────────────────────┤ + 19. │ 18 │ UInt64 │ + ├────────┼────────────────────┤ + 20. │ 19 │ UInt64 │ + ├────────┼────────────────────┤ + 21. │ 20 │ UInt64 │ + ├────────┼────────────────────┤ + 22. │ 21 │ UInt64 │ + ├────────┼────────────────────┤ + 23. │ 22 │ UInt64 │ + ├────────┼────────────────────┤ + 24. │ 23 │ UInt64 │ + ├────────┼────────────────────┤ + 25. │ 24 │ UInt64 │ + ├────────┼────────────────────┤ + 26. │ 25 │ UInt64 │ + ├────────┼────────────────────┤ + 27. │ 26 │ UInt64 │ + ├────────┼────────────────────┤ + 28. │ 27 │ UInt64 │ + ├────────┼────────────────────┤ + 29. │ 28 │ UInt64 │ + ├────────┼────────────────────┤ + 30. 
│ 29 │ UInt64 │ + ├────────┼────────────────────┤ + 31. │ 30 │ UInt64 │ + ├────────┼────────────────────┤ + 32. │ 31 │ UInt64 │ + ├────────┼────────────────────┤ + 33. │ 32 │ UInt64 │ + ├────────┼────────────────────┤ + 34. │ 33 │ UInt64 │ + ├────────┼────────────────────┤ + 35. │ 34 │ UInt64 │ + ├────────┼────────────────────┤ + 36. │ 35 │ UInt64 │ + ├────────┼────────────────────┤ + 37. │ 36 │ UInt64 │ + ├────────┼────────────────────┤ + 38. │ 37 │ UInt64 │ + ├────────┼────────────────────┤ + 39. │ 38 │ UInt64 │ + ├────────┼────────────────────┤ + 40. │ 39 │ UInt64 │ + ├────────┼────────────────────┤ + 41. │ 40 │ UInt64 │ + ├────────┼────────────────────┤ + 42. │ 41 │ UInt64 │ + ├────────┼────────────────────┤ + 43. │ 42 │ UInt64 │ + ├────────┼────────────────────┤ + 44. │ 43 │ UInt64 │ + ├────────┼────────────────────┤ + 45. │ 44 │ UInt64 │ + ├────────┼────────────────────┤ + 46. │ 45 │ UInt64 │ + ├────────┼────────────────────┤ + 47. │ 46 │ UInt64 │ + ├────────┼────────────────────┤ + 48. │ 47 │ UInt64 │ + ├────────┼────────────────────┤ + 49. │ 48 │ UInt64 │ + ├────────┼────────────────────┤ + 50. │ 49 │ UInt64 │ + ├────────┼────────────────────┤ + 51. │ 50 │ UInt64 │ + ├────────┼────────────────────┤ + 52. │ 51 │ UInt64 │ + ├────────┼────────────────────┤ + 53. │ 52 │ UInt64 │ + ├────────┼────────────────────┤ + 54. │ 53 │ UInt64 │ + ├────────┼────────────────────┤ + 55. │ 54 │ UInt64 │ + ├────────┼────────────────────┤ + 56. │ 55 │ UInt64 │ + ├────────┼────────────────────┤ + 57. │ 56 │ UInt64 │ + ├────────┼────────────────────┤ + 58. │ 57 │ UInt64 │ + ├────────┼────────────────────┤ + 59. │ 58 │ UInt64 │ + ├────────┼────────────────────┤ + 60. │ 59 │ UInt64 │ + ├────────┼────────────────────┤ + 61. │ 60 │ UInt64 │ + ├────────┼────────────────────┤ + 62. │ 61 │ UInt64 │ + ├────────┼────────────────────┤ + 63. │ 62 │ UInt64 │ + ├────────┼────────────────────┤ + 64. │ 63 │ UInt64 │ + ├────────┼────────────────────┤ + 65. │ 64 │ UInt64 │ + ├────────┼────────────────────┤ + 66. │ 65 │ UInt64 │ + ├────────┼────────────────────┤ + 67. │ 66 │ UInt64 │ + ├────────┼────────────────────┤ + 68. │ 67 │ UInt64 │ + ├────────┼────────────────────┤ + 69. │ 68 │ UInt64 │ + ├────────┼────────────────────┤ + 70. │ 69 │ UInt64 │ + ├────────┼────────────────────┤ + 71. │ 70 │ UInt64 │ + ├────────┼────────────────────┤ + 72. │ 71 │ UInt64 │ + ├────────┼────────────────────┤ + 73. │ 72 │ UInt64 │ + ├────────┼────────────────────┤ + 74. │ 73 │ UInt64 │ + ├────────┼────────────────────┤ + 75. │ 74 │ UInt64 │ + ├────────┼────────────────────┤ + 76. │ 75 │ UInt64 │ + ├────────┼────────────────────┤ + 77. │ 76 │ UInt64 │ + ├────────┼────────────────────┤ + 78. │ 77 │ UInt64 │ + ├────────┼────────────────────┤ + 79. │ 78 │ UInt64 │ + ├────────┼────────────────────┤ + 80. │ 79 │ UInt64 │ + ├────────┼────────────────────┤ + 81. │ 80 │ UInt64 │ + ├────────┼────────────────────┤ + 82. │ 81 │ UInt64 │ + ├────────┼────────────────────┤ + 83. │ 82 │ UInt64 │ + ├────────┼────────────────────┤ + 84. │ 83 │ UInt64 │ + ├────────┼────────────────────┤ + 85. │ 84 │ UInt64 │ + ├────────┼────────────────────┤ + 86. │ 85 │ UInt64 │ + ├────────┼────────────────────┤ + 87. │ 86 │ UInt64 │ + ├────────┼────────────────────┤ + 88. │ 87 │ UInt64 │ + ├────────┼────────────────────┤ + 89. │ 88 │ UInt64 │ + ├────────┼────────────────────┤ + 90. │ 89 │ UInt64 │ + ├────────┼────────────────────┤ + 91. │ 90 │ UInt64 │ + ├────────┼────────────────────┤ + 92. │ 91 │ UInt64 │ + ├────────┼────────────────────┤ + 93. 
│ 92 │ UInt64 │ + ├────────┼────────────────────┤ + 94. │ 93 │ UInt64 │ + ├────────┼────────────────────┤ + 95. │ 94 │ UInt64 │ + ├────────┼────────────────────┤ + 96. │ 95 │ UInt64 │ + ├────────┼────────────────────┤ + 97. │ 96 │ UInt64 │ + ├────────┼────────────────────┤ + 98. │ 97 │ UInt64 │ + ├────────┼────────────────────┤ + 99. │ 98 │ UInt64 │ + ├────────┼────────────────────┤ +100. │ 99 │ UInt64 │ + ┣━━━━━━━━╋━━━━━━━━━━━━━━━━━━━━┫ + ┃ number ┃ toTypeName(number) ┃ + ┗━━━━━━━━┻━━━━━━━━━━━━━━━━━━━━┛ + ┏━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━┓ + ┃ number ┃ toTypeName(number) ┃ + ┡━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━┩ + 1. │ 0 │ UInt64 │ + ├────────┼────────────────────┤ + 2. │ 1 │ UInt64 │ + ├────────┼────────────────────┤ + 3. │ 2 │ UInt64 │ + ├────────┼────────────────────┤ + 4. │ 3 │ UInt64 │ + ├────────┼────────────────────┤ + 5. │ 4 │ UInt64 │ + ├────────┼────────────────────┤ + 6. │ 5 │ UInt64 │ + ├────────┼────────────────────┤ + 7. │ 6 │ UInt64 │ + ├────────┼────────────────────┤ + 8. │ 7 │ UInt64 │ + ├────────┼────────────────────┤ + 9. │ 8 │ UInt64 │ + ├────────┼────────────────────┤ + 10. │ 9 │ UInt64 │ + ├────────┼────────────────────┤ + 11. │ 10 │ UInt64 │ + ├────────┼────────────────────┤ + 12. │ 11 │ UInt64 │ + ├────────┼────────────────────┤ + 13. │ 12 │ UInt64 │ + ├────────┼────────────────────┤ + 14. │ 13 │ UInt64 │ + ├────────┼────────────────────┤ + 15. │ 14 │ UInt64 │ + ├────────┼────────────────────┤ + 16. │ 15 │ UInt64 │ + ├────────┼────────────────────┤ + 17. │ 16 │ UInt64 │ + ├────────┼────────────────────┤ + 18. │ 17 │ UInt64 │ + ├────────┼────────────────────┤ + 19. │ 18 │ UInt64 │ + ├────────┼────────────────────┤ + 20. │ 19 │ UInt64 │ + ├────────┼────────────────────┤ + 21. │ 20 │ UInt64 │ + ├────────┼────────────────────┤ + 22. │ 21 │ UInt64 │ + ├────────┼────────────────────┤ + 23. │ 22 │ UInt64 │ + ├────────┼────────────────────┤ + 24. │ 23 │ UInt64 │ + ├────────┼────────────────────┤ + 25. │ 24 │ UInt64 │ + ├────────┼────────────────────┤ + 26. │ 25 │ UInt64 │ + ├────────┼────────────────────┤ + 27. │ 26 │ UInt64 │ + ├────────┼────────────────────┤ + 28. │ 27 │ UInt64 │ + ├────────┼────────────────────┤ + 29. │ 28 │ UInt64 │ + ├────────┼────────────────────┤ + 30. │ 29 │ UInt64 │ + ├────────┼────────────────────┤ + 31. │ 30 │ UInt64 │ + ├────────┼────────────────────┤ + 32. │ 31 │ UInt64 │ + ├────────┼────────────────────┤ + 33. │ 32 │ UInt64 │ + ├────────┼────────────────────┤ + 34. │ 33 │ UInt64 │ + ├────────┼────────────────────┤ + 35. │ 34 │ UInt64 │ + ├────────┼────────────────────┤ + 36. │ 35 │ UInt64 │ + ├────────┼────────────────────┤ + 37. │ 36 │ UInt64 │ + ├────────┼────────────────────┤ + 38. │ 37 │ UInt64 │ + ├────────┼────────────────────┤ + 39. │ 38 │ UInt64 │ + ├────────┼────────────────────┤ + 40. │ 39 │ UInt64 │ + ├────────┼────────────────────┤ + 41. │ 40 │ UInt64 │ + ├────────┼────────────────────┤ + 42. │ 41 │ UInt64 │ + ├────────┼────────────────────┤ + 43. │ 42 │ UInt64 │ + ├────────┼────────────────────┤ + 44. │ 43 │ UInt64 │ + ├────────┼────────────────────┤ + 45. │ 44 │ UInt64 │ + ├────────┼────────────────────┤ + 46. │ 45 │ UInt64 │ + ├────────┼────────────────────┤ + 47. │ 46 │ UInt64 │ + ├────────┼────────────────────┤ + 48. │ 47 │ UInt64 │ + ├────────┼────────────────────┤ + 49. │ 48 │ UInt64 │ + ├────────┼────────────────────┤ + 50. │ 49 │ UInt64 │ + ├────────┼────────────────────┤ + 51. │ 50 │ UInt64 │ + ├────────┼────────────────────┤ + 52. │ 51 │ UInt64 │ + ├────────┼────────────────────┤ + 53. 
│ 52 │ UInt64 │ + ├────────┼────────────────────┤ + 54. │ 53 │ UInt64 │ + ├────────┼────────────────────┤ + 55. │ 54 │ UInt64 │ + ├────────┼────────────────────┤ + 56. │ 55 │ UInt64 │ + ├────────┼────────────────────┤ + 57. │ 56 │ UInt64 │ + ├────────┼────────────────────┤ + 58. │ 57 │ UInt64 │ + ├────────┼────────────────────┤ + 59. │ 58 │ UInt64 │ + ├────────┼────────────────────┤ + 60. │ 59 │ UInt64 │ + ├────────┼────────────────────┤ + 61. │ 60 │ UInt64 │ + ├────────┼────────────────────┤ + 62. │ 61 │ UInt64 │ + ├────────┼────────────────────┤ + 63. │ 62 │ UInt64 │ + ├────────┼────────────────────┤ + 64. │ 63 │ UInt64 │ + ├────────┼────────────────────┤ + 65. │ 64 │ UInt64 │ + ├────────┼────────────────────┤ + 66. │ 65 │ UInt64 │ + ├────────┼────────────────────┤ + 67. │ 66 │ UInt64 │ + ├────────┼────────────────────┤ + 68. │ 67 │ UInt64 │ + ├────────┼────────────────────┤ + 69. │ 68 │ UInt64 │ + ├────────┼────────────────────┤ + 70. │ 69 │ UInt64 │ + ├────────┼────────────────────┤ + 71. │ 70 │ UInt64 │ + ├────────┼────────────────────┤ + 72. │ 71 │ UInt64 │ + ├────────┼────────────────────┤ + 73. │ 72 │ UInt64 │ + ├────────┼────────────────────┤ + 74. │ 73 │ UInt64 │ + ├────────┼────────────────────┤ + 75. │ 74 │ UInt64 │ + ├────────┼────────────────────┤ + 76. │ 75 │ UInt64 │ + ├────────┼────────────────────┤ + 77. │ 76 │ UInt64 │ + ├────────┼────────────────────┤ + 78. │ 77 │ UInt64 │ + ├────────┼────────────────────┤ + 79. │ 78 │ UInt64 │ + ├────────┼────────────────────┤ + 80. │ 79 │ UInt64 │ + ├────────┼────────────────────┤ + 81. │ 80 │ UInt64 │ + ├────────┼────────────────────┤ + 82. │ 81 │ UInt64 │ + ├────────┼────────────────────┤ + 83. │ 82 │ UInt64 │ + ├────────┼────────────────────┤ + 84. │ 83 │ UInt64 │ + ├────────┼────────────────────┤ + 85. │ 84 │ UInt64 │ + ├────────┼────────────────────┤ + 86. │ 85 │ UInt64 │ + ├────────┼────────────────────┤ + 87. │ 86 │ UInt64 │ + ├────────┼────────────────────┤ + 88. │ 87 │ UInt64 │ + ├────────┼────────────────────┤ + 89. │ 88 │ UInt64 │ + ├────────┼────────────────────┤ + 90. │ 89 │ UInt64 │ + ├────────┼────────────────────┤ + 91. │ 90 │ UInt64 │ + ├────────┼────────────────────┤ + 92. │ 91 │ UInt64 │ + ├────────┼────────────────────┤ + 93. │ 92 │ UInt64 │ + ├────────┼────────────────────┤ + 94. │ 93 │ UInt64 │ + ├────────┼────────────────────┤ + 95. │ 94 │ UInt64 │ + ├────────┼────────────────────┤ + 96. │ 95 │ UInt64 │ + ├────────┼────────────────────┤ + 97. │ 96 │ UInt64 │ + ├────────┼────────────────────┤ + 98. │ 97 │ UInt64 │ + ├────────┼────────────────────┤ + 99. │ 98 │ UInt64 │ + ├────────┼────────────────────┤ +100. │ 99 │ UInt64 │ + ┣━━━━━━━━╋━━━━━━━━━━━━━━━━━━━━┫ + ┃ number ┃ toTypeName(number) ┃ + ┗━━━━━━━━┻━━━━━━━━━━━━━━━━━━━━┛ + ┏━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━┓ + ┃ number ┃ toTypeName(number) ┃ + ┡━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━┩ + 1. │ 0 │ UInt64 │ + ├────────┼────────────────────┤ + 2. │ 1 │ UInt64 │ + ├────────┼────────────────────┤ + 3. │ 2 │ UInt64 │ + ├────────┼────────────────────┤ + 4. │ 3 │ UInt64 │ + ├────────┼────────────────────┤ + 5. │ 4 │ UInt64 │ + ├────────┼────────────────────┤ + 6. │ 5 │ UInt64 │ + ├────────┼────────────────────┤ + 7. │ 6 │ UInt64 │ + ├────────┼────────────────────┤ + 8. │ 7 │ UInt64 │ + ├────────┼────────────────────┤ + 9. │ 8 │ UInt64 │ + ├────────┼────────────────────┤ + 10. │ 9 │ UInt64 │ + ├────────┼────────────────────┤ + 11. │ 10 │ UInt64 │ + ├────────┼────────────────────┤ + 12. │ 11 │ UInt64 │ + ├────────┼────────────────────┤ + 13. 
│ 12 │ UInt64 │ + ├────────┼────────────────────┤ + 14. │ 13 │ UInt64 │ + ├────────┼────────────────────┤ + 15. │ 14 │ UInt64 │ + ├────────┼────────────────────┤ + 16. │ 15 │ UInt64 │ + ├────────┼────────────────────┤ + 17. │ 16 │ UInt64 │ + ├────────┼────────────────────┤ + 18. │ 17 │ UInt64 │ + ├────────┼────────────────────┤ + 19. │ 18 │ UInt64 │ + ├────────┼────────────────────┤ + 20. │ 19 │ UInt64 │ + ├────────┼────────────────────┤ + 21. │ 20 │ UInt64 │ + ├────────┼────────────────────┤ + 22. │ 21 │ UInt64 │ + ├────────┼────────────────────┤ + 23. │ 22 │ UInt64 │ + ├────────┼────────────────────┤ + 24. │ 23 │ UInt64 │ + ├────────┼────────────────────┤ + 25. │ 24 │ UInt64 │ + ├────────┼────────────────────┤ + 26. │ 25 │ UInt64 │ + ├────────┼────────────────────┤ + 27. │ 26 │ UInt64 │ + ├────────┼────────────────────┤ + 28. │ 27 │ UInt64 │ + ├────────┼────────────────────┤ + 29. │ 28 │ UInt64 │ + ├────────┼────────────────────┤ + 30. │ 29 │ UInt64 │ + ├────────┼────────────────────┤ + 31. │ 30 │ UInt64 │ + ├────────┼────────────────────┤ + 32. │ 31 │ UInt64 │ + ├────────┼────────────────────┤ + 33. │ 32 │ UInt64 │ + ├────────┼────────────────────┤ + 34. │ 33 │ UInt64 │ + ├────────┼────────────────────┤ + 35. │ 34 │ UInt64 │ + ├────────┼────────────────────┤ + 36. │ 35 │ UInt64 │ + ├────────┼────────────────────┤ + 37. │ 36 │ UInt64 │ + ├────────┼────────────────────┤ + 38. │ 37 │ UInt64 │ + ├────────┼────────────────────┤ + 39. │ 38 │ UInt64 │ + ├────────┼────────────────────┤ + 40. │ 39 │ UInt64 │ + ├────────┼────────────────────┤ + 41. │ 40 │ UInt64 │ + ├────────┼────────────────────┤ + 42. │ 41 │ UInt64 │ + ├────────┼────────────────────┤ + 43. │ 42 │ UInt64 │ + ├────────┼────────────────────┤ + 44. │ 43 │ UInt64 │ + ├────────┼────────────────────┤ + 45. │ 44 │ UInt64 │ + ├────────┼────────────────────┤ + 46. │ 45 │ UInt64 │ + ├────────┼────────────────────┤ + 47. │ 46 │ UInt64 │ + ├────────┼────────────────────┤ + 48. │ 47 │ UInt64 │ + ├────────┼────────────────────┤ + 49. │ 48 │ UInt64 │ + ├────────┼────────────────────┤ + 50. │ 49 │ UInt64 │ + ├────────┼────────────────────┤ + 51. │ 50 │ UInt64 │ + ├────────┼────────────────────┤ + 52. │ 51 │ UInt64 │ + ├────────┼────────────────────┤ + 53. │ 52 │ UInt64 │ + ├────────┼────────────────────┤ + 54. │ 53 │ UInt64 │ + ├────────┼────────────────────┤ + 55. │ 54 │ UInt64 │ + ├────────┼────────────────────┤ + 56. │ 55 │ UInt64 │ + ├────────┼────────────────────┤ + 57. │ 56 │ UInt64 │ + ├────────┼────────────────────┤ + 58. │ 57 │ UInt64 │ + ├────────┼────────────────────┤ + 59. │ 58 │ UInt64 │ + ├────────┼────────────────────┤ + 60. │ 59 │ UInt64 │ + ├────────┼────────────────────┤ + 61. │ 60 │ UInt64 │ + ├────────┼────────────────────┤ + 62. │ 61 │ UInt64 │ + ├────────┼────────────────────┤ + 63. │ 62 │ UInt64 │ + ├────────┼────────────────────┤ + 64. │ 63 │ UInt64 │ + ├────────┼────────────────────┤ + 65. │ 64 │ UInt64 │ + ├────────┼────────────────────┤ + 66. │ 65 │ UInt64 │ + ├────────┼────────────────────┤ + 67. │ 66 │ UInt64 │ + ├────────┼────────────────────┤ + 68. │ 67 │ UInt64 │ + ├────────┼────────────────────┤ + 69. │ 68 │ UInt64 │ + ├────────┼────────────────────┤ + 70. │ 69 │ UInt64 │ + ├────────┼────────────────────┤ + 71. │ 70 │ UInt64 │ + ├────────┼────────────────────┤ + 72. │ 71 │ UInt64 │ + ├────────┼────────────────────┤ + 73. │ 72 │ UInt64 │ + ├────────┼────────────────────┤ + 74. │ 73 │ UInt64 │ + ├────────┼────────────────────┤ + 75. │ 74 │ UInt64 │ + ├────────┼────────────────────┤ + 76. 
│ 75 │ UInt64 │ + ├────────┼────────────────────┤ + 77. │ 76 │ UInt64 │ + ├────────┼────────────────────┤ + 78. │ 77 │ UInt64 │ + ├────────┼────────────────────┤ + 79. │ 78 │ UInt64 │ + ├────────┼────────────────────┤ + 80. │ 79 │ UInt64 │ + ├────────┼────────────────────┤ + 81. │ 80 │ UInt64 │ + ├────────┼────────────────────┤ + 82. │ 81 │ UInt64 │ + ├────────┼────────────────────┤ + 83. │ 82 │ UInt64 │ + ├────────┼────────────────────┤ + 84. │ 83 │ UInt64 │ + ├────────┼────────────────────┤ + 85. │ 84 │ UInt64 │ + ├────────┼────────────────────┤ + 86. │ 85 │ UInt64 │ + ├────────┼────────────────────┤ + 87. │ 86 │ UInt64 │ + ├────────┼────────────────────┤ + 88. │ 87 │ UInt64 │ + ├────────┼────────────────────┤ + 89. │ 88 │ UInt64 │ + ├────────┼────────────────────┤ + 90. │ 89 │ UInt64 │ + ├────────┼────────────────────┤ + 91. │ 90 │ UInt64 │ + ├────────┼────────────────────┤ + 92. │ 91 │ UInt64 │ + ├────────┼────────────────────┤ + 93. │ 92 │ UInt64 │ + ├────────┼────────────────────┤ + 94. │ 93 │ UInt64 │ + ├────────┼────────────────────┤ + 95. │ 94 │ UInt64 │ + ├────────┼────────────────────┤ + 96. │ 95 │ UInt64 │ + ├────────┼────────────────────┤ + 97. │ 96 │ UInt64 │ + ├────────┼────────────────────┤ + 98. │ 97 │ UInt64 │ + ├────────┼────────────────────┤ + 99. │ 98 │ UInt64 │ + ├────────┼────────────────────┤ +100. │ 99 │ UInt64 │ + ┣━━━━━━━━╋━━━━━━━━━━━━━━━━━━━━┫ + ┃ number ┃ toTypeName(number) ┃ + ┗━━━━━━━━┻━━━━━━━━━━━━━━━━━━━━┛ + ┌─number─┬─toTypeName(number)─┐ + 1. │ 0 │ UInt64 │ + 2. │ 1 │ UInt64 │ + 3. │ 2 │ UInt64 │ + 4. │ 3 │ UInt64 │ + 5. │ 4 │ UInt64 │ + 6. │ 5 │ UInt64 │ + 7. │ 6 │ UInt64 │ + 8. │ 7 │ UInt64 │ + 9. │ 8 │ UInt64 │ + 10. │ 9 │ UInt64 │ + 11. │ 10 │ UInt64 │ + 12. │ 11 │ UInt64 │ + 13. │ 12 │ UInt64 │ + 14. │ 13 │ UInt64 │ + 15. │ 14 │ UInt64 │ + 16. │ 15 │ UInt64 │ + 17. │ 16 │ UInt64 │ + 18. │ 17 │ UInt64 │ + 19. │ 18 │ UInt64 │ + 20. │ 19 │ UInt64 │ + 21. │ 20 │ UInt64 │ + 22. │ 21 │ UInt64 │ + 23. │ 22 │ UInt64 │ + 24. │ 23 │ UInt64 │ + 25. │ 24 │ UInt64 │ + 26. │ 25 │ UInt64 │ + 27. │ 26 │ UInt64 │ + 28. │ 27 │ UInt64 │ + 29. │ 28 │ UInt64 │ + 30. │ 29 │ UInt64 │ + 31. │ 30 │ UInt64 │ + 32. │ 31 │ UInt64 │ + 33. │ 32 │ UInt64 │ + 34. │ 33 │ UInt64 │ + 35. │ 34 │ UInt64 │ + 36. │ 35 │ UInt64 │ + 37. │ 36 │ UInt64 │ + 38. │ 37 │ UInt64 │ + 39. │ 38 │ UInt64 │ + 40. │ 39 │ UInt64 │ + 41. │ 40 │ UInt64 │ + 42. │ 41 │ UInt64 │ + 43. │ 42 │ UInt64 │ + 44. │ 43 │ UInt64 │ + 45. │ 44 │ UInt64 │ + 46. │ 45 │ UInt64 │ + 47. │ 46 │ UInt64 │ + 48. │ 47 │ UInt64 │ + 49. │ 48 │ UInt64 │ + 50. │ 49 │ UInt64 │ + 51. │ 50 │ UInt64 │ + 52. │ 51 │ UInt64 │ + 53. │ 52 │ UInt64 │ + 54. │ 53 │ UInt64 │ + 55. │ 54 │ UInt64 │ + 56. │ 55 │ UInt64 │ + 57. │ 56 │ UInt64 │ + 58. │ 57 │ UInt64 │ + 59. │ 58 │ UInt64 │ + 60. │ 59 │ UInt64 │ + 61. │ 60 │ UInt64 │ + 62. │ 61 │ UInt64 │ + 63. │ 62 │ UInt64 │ + 64. │ 63 │ UInt64 │ + 65. │ 64 │ UInt64 │ + 66. │ 65 │ UInt64 │ + 67. │ 66 │ UInt64 │ + 68. │ 67 │ UInt64 │ + 69. │ 68 │ UInt64 │ + 70. │ 69 │ UInt64 │ + 71. │ 70 │ UInt64 │ + 72. │ 71 │ UInt64 │ + 73. │ 72 │ UInt64 │ + 74. │ 73 │ UInt64 │ + 75. │ 74 │ UInt64 │ + 76. │ 75 │ UInt64 │ + 77. │ 76 │ UInt64 │ + 78. │ 77 │ UInt64 │ + 79. │ 78 │ UInt64 │ + 80. │ 79 │ UInt64 │ + 81. │ 80 │ UInt64 │ + 82. │ 81 │ UInt64 │ + 83. │ 82 │ UInt64 │ + 84. │ 83 │ UInt64 │ + 85. │ 84 │ UInt64 │ + 86. │ 85 │ UInt64 │ + 87. │ 86 │ UInt64 │ + 88. │ 87 │ UInt64 │ + 89. │ 88 │ UInt64 │ + 90. │ 89 │ UInt64 │ + 91. │ 90 │ UInt64 │ + 92. │ 91 │ UInt64 │ + 93. │ 92 │ UInt64 │ + 94. │ 93 │ UInt64 │ + 95. 
│ 94 │ UInt64 │ + 96. │ 95 │ UInt64 │ + 97. │ 96 │ UInt64 │ + 98. │ 97 │ UInt64 │ + 99. │ 98 │ UInt64 │ +100. │ 99 │ UInt64 │ + └────────┴────────────────────┘ + ┌─number─┬─toTypeName(number)─┐ + 1. │ 0 │ UInt64 │ + 2. │ 1 │ UInt64 │ + 3. │ 2 │ UInt64 │ + 4. │ 3 │ UInt64 │ + 5. │ 4 │ UInt64 │ + 6. │ 5 │ UInt64 │ + 7. │ 6 │ UInt64 │ + 8. │ 7 │ UInt64 │ + 9. │ 8 │ UInt64 │ + 10. │ 9 │ UInt64 │ + 11. │ 10 │ UInt64 │ + 12. │ 11 │ UInt64 │ + 13. │ 12 │ UInt64 │ + 14. │ 13 │ UInt64 │ + 15. │ 14 │ UInt64 │ + 16. │ 15 │ UInt64 │ + 17. │ 16 │ UInt64 │ + 18. │ 17 │ UInt64 │ + 19. │ 18 │ UInt64 │ + 20. │ 19 │ UInt64 │ + 21. │ 20 │ UInt64 │ + 22. │ 21 │ UInt64 │ + 23. │ 22 │ UInt64 │ + 24. │ 23 │ UInt64 │ + 25. │ 24 │ UInt64 │ + 26. │ 25 │ UInt64 │ + 27. │ 26 │ UInt64 │ + 28. │ 27 │ UInt64 │ + 29. │ 28 │ UInt64 │ + 30. │ 29 │ UInt64 │ + 31. │ 30 │ UInt64 │ + 32. │ 31 │ UInt64 │ + 33. │ 32 │ UInt64 │ + 34. │ 33 │ UInt64 │ + 35. │ 34 │ UInt64 │ + 36. │ 35 │ UInt64 │ + 37. │ 36 │ UInt64 │ + 38. │ 37 │ UInt64 │ + 39. │ 38 │ UInt64 │ + 40. │ 39 │ UInt64 │ + 41. │ 40 │ UInt64 │ + 42. │ 41 │ UInt64 │ + 43. │ 42 │ UInt64 │ + 44. │ 43 │ UInt64 │ + 45. │ 44 │ UInt64 │ + 46. │ 45 │ UInt64 │ + 47. │ 46 │ UInt64 │ + 48. │ 47 │ UInt64 │ + 49. │ 48 │ UInt64 │ + 50. │ 49 │ UInt64 │ + 51. │ 50 │ UInt64 │ + 52. │ 51 │ UInt64 │ + 53. │ 52 │ UInt64 │ + 54. │ 53 │ UInt64 │ + 55. │ 54 │ UInt64 │ + 56. │ 55 │ UInt64 │ + 57. │ 56 │ UInt64 │ + 58. │ 57 │ UInt64 │ + 59. │ 58 │ UInt64 │ + 60. │ 59 │ UInt64 │ + 61. │ 60 │ UInt64 │ + 62. │ 61 │ UInt64 │ + 63. │ 62 │ UInt64 │ + 64. │ 63 │ UInt64 │ + 65. │ 64 │ UInt64 │ + 66. │ 65 │ UInt64 │ + 67. │ 66 │ UInt64 │ + 68. │ 67 │ UInt64 │ + 69. │ 68 │ UInt64 │ + 70. │ 69 │ UInt64 │ + 71. │ 70 │ UInt64 │ + 72. │ 71 │ UInt64 │ + 73. │ 72 │ UInt64 │ + 74. │ 73 │ UInt64 │ + 75. │ 74 │ UInt64 │ + 76. │ 75 │ UInt64 │ + 77. │ 76 │ UInt64 │ + 78. │ 77 │ UInt64 │ + 79. │ 78 │ UInt64 │ + 80. │ 79 │ UInt64 │ + 81. │ 80 │ UInt64 │ + 82. │ 81 │ UInt64 │ + 83. │ 82 │ UInt64 │ + 84. │ 83 │ UInt64 │ + 85. │ 84 │ UInt64 │ + 86. │ 85 │ UInt64 │ + 87. │ 86 │ UInt64 │ + 88. │ 87 │ UInt64 │ + 89. │ 88 │ UInt64 │ + 90. │ 89 │ UInt64 │ + 91. │ 90 │ UInt64 │ + 92. │ 91 │ UInt64 │ + 93. │ 92 │ UInt64 │ + 94. │ 93 │ UInt64 │ + 95. │ 94 │ UInt64 │ + 96. │ 95 │ UInt64 │ + 97. │ 96 │ UInt64 │ + 98. │ 97 │ UInt64 │ + 99. │ 98 │ UInt64 │ +100. │ 99 │ UInt64 │ + └─number─┴─toTypeName(number)─┘ + ┌─number─┬─toTypeName(number)─┐ + 1. │ 0 │ UInt64 │ + 2. │ 1 │ UInt64 │ + 3. │ 2 │ UInt64 │ + 4. │ 3 │ UInt64 │ + 5. │ 4 │ UInt64 │ + 6. │ 5 │ UInt64 │ + 7. │ 6 │ UInt64 │ + 8. │ 7 │ UInt64 │ + 9. │ 8 │ UInt64 │ + 10. │ 9 │ UInt64 │ + 11. │ 10 │ UInt64 │ + 12. │ 11 │ UInt64 │ + 13. │ 12 │ UInt64 │ + 14. │ 13 │ UInt64 │ + 15. │ 14 │ UInt64 │ + 16. │ 15 │ UInt64 │ + 17. │ 16 │ UInt64 │ + 18. │ 17 │ UInt64 │ + 19. │ 18 │ UInt64 │ + 20. │ 19 │ UInt64 │ + 21. │ 20 │ UInt64 │ + 22. │ 21 │ UInt64 │ + 23. │ 22 │ UInt64 │ + 24. │ 23 │ UInt64 │ + 25. │ 24 │ UInt64 │ + 26. │ 25 │ UInt64 │ + 27. │ 26 │ UInt64 │ + 28. │ 27 │ UInt64 │ + 29. │ 28 │ UInt64 │ + 30. │ 29 │ UInt64 │ + 31. │ 30 │ UInt64 │ + 32. │ 31 │ UInt64 │ + 33. │ 32 │ UInt64 │ + 34. │ 33 │ UInt64 │ + 35. │ 34 │ UInt64 │ + 36. │ 35 │ UInt64 │ + 37. │ 36 │ UInt64 │ + 38. │ 37 │ UInt64 │ + 39. │ 38 │ UInt64 │ + 40. │ 39 │ UInt64 │ + 41. │ 40 │ UInt64 │ + 42. │ 41 │ UInt64 │ + 43. │ 42 │ UInt64 │ + 44. │ 43 │ UInt64 │ + 45. │ 44 │ UInt64 │ + 46. │ 45 │ UInt64 │ + 47. │ 46 │ UInt64 │ + 48. │ 47 │ UInt64 │ + 49. │ 48 │ UInt64 │ + 50. │ 49 │ UInt64 │ + 51. │ 50 │ UInt64 │ + 52. 
│ 51 │ UInt64 │ + 53. │ 52 │ UInt64 │ + 54. │ 53 │ UInt64 │ + 55. │ 54 │ UInt64 │ + 56. │ 55 │ UInt64 │ + 57. │ 56 │ UInt64 │ + 58. │ 57 │ UInt64 │ + 59. │ 58 │ UInt64 │ + 60. │ 59 │ UInt64 │ + 61. │ 60 │ UInt64 │ + 62. │ 61 │ UInt64 │ + 63. │ 62 │ UInt64 │ + 64. │ 63 │ UInt64 │ + 65. │ 64 │ UInt64 │ + 66. │ 65 │ UInt64 │ + 67. │ 66 │ UInt64 │ + 68. │ 67 │ UInt64 │ + 69. │ 68 │ UInt64 │ + 70. │ 69 │ UInt64 │ + 71. │ 70 │ UInt64 │ + 72. │ 71 │ UInt64 │ + 73. │ 72 │ UInt64 │ + 74. │ 73 │ UInt64 │ + 75. │ 74 │ UInt64 │ + 76. │ 75 │ UInt64 │ + 77. │ 76 │ UInt64 │ + 78. │ 77 │ UInt64 │ + 79. │ 78 │ UInt64 │ + 80. │ 79 │ UInt64 │ + 81. │ 80 │ UInt64 │ + 82. │ 81 │ UInt64 │ + 83. │ 82 │ UInt64 │ + 84. │ 83 │ UInt64 │ + 85. │ 84 │ UInt64 │ + 86. │ 85 │ UInt64 │ + 87. │ 86 │ UInt64 │ + 88. │ 87 │ UInt64 │ + 89. │ 88 │ UInt64 │ + 90. │ 89 │ UInt64 │ + 91. │ 90 │ UInt64 │ + 92. │ 91 │ UInt64 │ + 93. │ 92 │ UInt64 │ + 94. │ 93 │ UInt64 │ + 95. │ 94 │ UInt64 │ + 96. │ 95 │ UInt64 │ + 97. │ 96 │ UInt64 │ + 98. │ 97 │ UInt64 │ + 99. │ 98 │ UInt64 │ +100. │ 99 │ UInt64 │ + └─number─┴─toTypeName(number)─┘ + ┌─number─┬─toTypeName(number)─┐ + 1. │ 0 │ UInt64 │ + 2. │ 1 │ UInt64 │ + 3. │ 2 │ UInt64 │ + 4. │ 3 │ UInt64 │ + 5. │ 4 │ UInt64 │ + 6. │ 5 │ UInt64 │ + 7. │ 6 │ UInt64 │ + 8. │ 7 │ UInt64 │ + 9. │ 8 │ UInt64 │ + 10. │ 9 │ UInt64 │ + 11. │ 10 │ UInt64 │ + 12. │ 11 │ UInt64 │ + 13. │ 12 │ UInt64 │ + 14. │ 13 │ UInt64 │ + 15. │ 14 │ UInt64 │ + 16. │ 15 │ UInt64 │ + 17. │ 16 │ UInt64 │ + 18. │ 17 │ UInt64 │ + 19. │ 18 │ UInt64 │ + 20. │ 19 │ UInt64 │ + 21. │ 20 │ UInt64 │ + 22. │ 21 │ UInt64 │ + 23. │ 22 │ UInt64 │ + 24. │ 23 │ UInt64 │ + 25. │ 24 │ UInt64 │ + 26. │ 25 │ UInt64 │ + 27. │ 26 │ UInt64 │ + 28. │ 27 │ UInt64 │ + 29. │ 28 │ UInt64 │ + 30. │ 29 │ UInt64 │ + 31. │ 30 │ UInt64 │ + 32. │ 31 │ UInt64 │ + 33. │ 32 │ UInt64 │ + 34. │ 33 │ UInt64 │ + 35. │ 34 │ UInt64 │ + 36. │ 35 │ UInt64 │ + 37. │ 36 │ UInt64 │ + 38. │ 37 │ UInt64 │ + 39. │ 38 │ UInt64 │ + 40. │ 39 │ UInt64 │ + 41. │ 40 │ UInt64 │ + 42. │ 41 │ UInt64 │ + 43. │ 42 │ UInt64 │ + 44. │ 43 │ UInt64 │ + 45. │ 44 │ UInt64 │ + 46. │ 45 │ UInt64 │ + 47. │ 46 │ UInt64 │ + 48. │ 47 │ UInt64 │ + 49. │ 48 │ UInt64 │ + 50. │ 49 │ UInt64 │ + 51. │ 50 │ UInt64 │ + 52. │ 51 │ UInt64 │ + 53. │ 52 │ UInt64 │ + 54. │ 53 │ UInt64 │ + 55. │ 54 │ UInt64 │ + 56. │ 55 │ UInt64 │ + 57. │ 56 │ UInt64 │ + 58. │ 57 │ UInt64 │ + 59. │ 58 │ UInt64 │ + 60. │ 59 │ UInt64 │ + 61. │ 60 │ UInt64 │ + 62. │ 61 │ UInt64 │ + 63. │ 62 │ UInt64 │ + 64. │ 63 │ UInt64 │ + 65. │ 64 │ UInt64 │ + 66. │ 65 │ UInt64 │ + 67. │ 66 │ UInt64 │ + 68. │ 67 │ UInt64 │ + 69. │ 68 │ UInt64 │ + 70. │ 69 │ UInt64 │ + 71. │ 70 │ UInt64 │ + 72. │ 71 │ UInt64 │ + 73. │ 72 │ UInt64 │ + 74. │ 73 │ UInt64 │ + 75. │ 74 │ UInt64 │ + 76. │ 75 │ UInt64 │ + 77. │ 76 │ UInt64 │ + 78. │ 77 │ UInt64 │ + 79. │ 78 │ UInt64 │ + 80. │ 79 │ UInt64 │ + 81. │ 80 │ UInt64 │ + 82. │ 81 │ UInt64 │ + 83. │ 82 │ UInt64 │ + 84. │ 83 │ UInt64 │ + 85. │ 84 │ UInt64 │ + 86. │ 85 │ UInt64 │ + 87. │ 86 │ UInt64 │ + 88. │ 87 │ UInt64 │ + 89. │ 88 │ UInt64 │ + 90. │ 89 │ UInt64 │ + 91. │ 90 │ UInt64 │ + 92. │ 91 │ UInt64 │ + 93. │ 92 │ UInt64 │ + 94. │ 93 │ UInt64 │ + 95. │ 94 │ UInt64 │ + 96. │ 95 │ UInt64 │ + 97. │ 96 │ UInt64 │ + 98. │ 97 │ UInt64 │ + 99. │ 98 │ UInt64 │ +100. │ 99 │ UInt64 │ + └─number─┴─toTypeName(number)─┘ + ┌─number─┬─toTypeName(number)─┐ + 1. │ 0 │ UInt64 │ + 2. │ 1 │ UInt64 │ + 3. │ 2 │ UInt64 │ + 4. │ 3 │ UInt64 │ + 5. │ 4 │ UInt64 │ + 6. │ 5 │ UInt64 │ + 7. │ 6 │ UInt64 │ + 8. │ 7 │ UInt64 │ + 9. 
│ 8 │ UInt64 │ + 10. │ 9 │ UInt64 │ + 11. │ 10 │ UInt64 │ + 12. │ 11 │ UInt64 │ + 13. │ 12 │ UInt64 │ + 14. │ 13 │ UInt64 │ + 15. │ 14 │ UInt64 │ + 16. │ 15 │ UInt64 │ + 17. │ 16 │ UInt64 │ + 18. │ 17 │ UInt64 │ + 19. │ 18 │ UInt64 │ + 20. │ 19 │ UInt64 │ + 21. │ 20 │ UInt64 │ + 22. │ 21 │ UInt64 │ + 23. │ 22 │ UInt64 │ + 24. │ 23 │ UInt64 │ + 25. │ 24 │ UInt64 │ + 26. │ 25 │ UInt64 │ + 27. │ 26 │ UInt64 │ + 28. │ 27 │ UInt64 │ + 29. │ 28 │ UInt64 │ + 30. │ 29 │ UInt64 │ + 31. │ 30 │ UInt64 │ + 32. │ 31 │ UInt64 │ + 33. │ 32 │ UInt64 │ + 34. │ 33 │ UInt64 │ + 35. │ 34 │ UInt64 │ + 36. │ 35 │ UInt64 │ + 37. │ 36 │ UInt64 │ + 38. │ 37 │ UInt64 │ + 39. │ 38 │ UInt64 │ + 40. │ 39 │ UInt64 │ + 41. │ 40 │ UInt64 │ + 42. │ 41 │ UInt64 │ + 43. │ 42 │ UInt64 │ + 44. │ 43 │ UInt64 │ + 45. │ 44 │ UInt64 │ + 46. │ 45 │ UInt64 │ + 47. │ 46 │ UInt64 │ + 48. │ 47 │ UInt64 │ + 49. │ 48 │ UInt64 │ + 50. │ 49 │ UInt64 │ + 51. │ 50 │ UInt64 │ + 52. │ 51 │ UInt64 │ + 53. │ 52 │ UInt64 │ + 54. │ 53 │ UInt64 │ + 55. │ 54 │ UInt64 │ + 56. │ 55 │ UInt64 │ + 57. │ 56 │ UInt64 │ + 58. │ 57 │ UInt64 │ + 59. │ 58 │ UInt64 │ + 60. │ 59 │ UInt64 │ + 61. │ 60 │ UInt64 │ + 62. │ 61 │ UInt64 │ + 63. │ 62 │ UInt64 │ + 64. │ 63 │ UInt64 │ + 65. │ 64 │ UInt64 │ + 66. │ 65 │ UInt64 │ + 67. │ 66 │ UInt64 │ + 68. │ 67 │ UInt64 │ + 69. │ 68 │ UInt64 │ + 70. │ 69 │ UInt64 │ + 71. │ 70 │ UInt64 │ + 72. │ 71 │ UInt64 │ + 73. │ 72 │ UInt64 │ + 74. │ 73 │ UInt64 │ + 75. │ 74 │ UInt64 │ + 76. │ 75 │ UInt64 │ + 77. │ 76 │ UInt64 │ + 78. │ 77 │ UInt64 │ + 79. │ 78 │ UInt64 │ + 80. │ 79 │ UInt64 │ + 81. │ 80 │ UInt64 │ + 82. │ 81 │ UInt64 │ + 83. │ 82 │ UInt64 │ + 84. │ 83 │ UInt64 │ + 85. │ 84 │ UInt64 │ + 86. │ 85 │ UInt64 │ + 87. │ 86 │ UInt64 │ + 88. │ 87 │ UInt64 │ + 89. │ 88 │ UInt64 │ + 90. │ 89 │ UInt64 │ + 91. │ 90 │ UInt64 │ + 92. │ 91 │ UInt64 │ + 93. │ 92 │ UInt64 │ + 94. │ 93 │ UInt64 │ + 95. │ 94 │ UInt64 │ + 96. │ 95 │ UInt64 │ + 97. │ 96 │ UInt64 │ + 98. │ 97 │ UInt64 │ + 99. │ 98 │ UInt64 │ +100. │ 99 │ UInt64 │ + └─number─┴─toTypeName(number)─┘ + number toTypeName(number) + + 1. 0 UInt64 + 2. 1 UInt64 + 3. 2 UInt64 + 4. 3 UInt64 + 5. 4 UInt64 + 6. 5 UInt64 + 7. 6 UInt64 + 8. 7 UInt64 + 9. 8 UInt64 + 10. 9 UInt64 + 11. 10 UInt64 + 12. 11 UInt64 + 13. 12 UInt64 + 14. 13 UInt64 + 15. 14 UInt64 + 16. 15 UInt64 + 17. 16 UInt64 + 18. 17 UInt64 + 19. 18 UInt64 + 20. 19 UInt64 + 21. 20 UInt64 + 22. 21 UInt64 + 23. 22 UInt64 + 24. 23 UInt64 + 25. 24 UInt64 + 26. 25 UInt64 + 27. 26 UInt64 + 28. 27 UInt64 + 29. 28 UInt64 + 30. 29 UInt64 + 31. 30 UInt64 + 32. 31 UInt64 + 33. 32 UInt64 + 34. 33 UInt64 + 35. 34 UInt64 + 36. 35 UInt64 + 37. 36 UInt64 + 38. 37 UInt64 + 39. 38 UInt64 + 40. 39 UInt64 + 41. 40 UInt64 + 42. 41 UInt64 + 43. 42 UInt64 + 44. 43 UInt64 + 45. 44 UInt64 + 46. 45 UInt64 + 47. 46 UInt64 + 48. 47 UInt64 + 49. 48 UInt64 + 50. 49 UInt64 + 51. 50 UInt64 + 52. 51 UInt64 + 53. 52 UInt64 + 54. 53 UInt64 + 55. 54 UInt64 + 56. 55 UInt64 + 57. 56 UInt64 + 58. 57 UInt64 + 59. 58 UInt64 + 60. 59 UInt64 + 61. 60 UInt64 + 62. 61 UInt64 + 63. 62 UInt64 + 64. 63 UInt64 + 65. 64 UInt64 + 66. 65 UInt64 + 67. 66 UInt64 + 68. 67 UInt64 + 69. 68 UInt64 + 70. 69 UInt64 + 71. 70 UInt64 + 72. 71 UInt64 + 73. 72 UInt64 + 74. 73 UInt64 + 75. 74 UInt64 + 76. 75 UInt64 + 77. 76 UInt64 + 78. 77 UInt64 + 79. 78 UInt64 + 80. 79 UInt64 + 81. 80 UInt64 + 82. 81 UInt64 + 83. 82 UInt64 + 84. 83 UInt64 + 85. 84 UInt64 + 86. 85 UInt64 + 87. 86 UInt64 + 88. 87 UInt64 + 89. 88 UInt64 + 90. 89 UInt64 + 91. 90 UInt64 + 92. 91 UInt64 + 93. 92 UInt64 + 94. 
93 UInt64 + 95. 94 UInt64 + 96. 95 UInt64 + 97. 96 UInt64 + 98. 97 UInt64 + 99. 98 UInt64 +100. 99 UInt64 + number toTypeName(number) + + 1. 0 UInt64 + 2. 1 UInt64 + 3. 2 UInt64 + 4. 3 UInt64 + 5. 4 UInt64 + 6. 5 UInt64 + 7. 6 UInt64 + 8. 7 UInt64 + 9. 8 UInt64 + 10. 9 UInt64 + 11. 10 UInt64 + 12. 11 UInt64 + 13. 12 UInt64 + 14. 13 UInt64 + 15. 14 UInt64 + 16. 15 UInt64 + 17. 16 UInt64 + 18. 17 UInt64 + 19. 18 UInt64 + 20. 19 UInt64 + 21. 20 UInt64 + 22. 21 UInt64 + 23. 22 UInt64 + 24. 23 UInt64 + 25. 24 UInt64 + 26. 25 UInt64 + 27. 26 UInt64 + 28. 27 UInt64 + 29. 28 UInt64 + 30. 29 UInt64 + 31. 30 UInt64 + 32. 31 UInt64 + 33. 32 UInt64 + 34. 33 UInt64 + 35. 34 UInt64 + 36. 35 UInt64 + 37. 36 UInt64 + 38. 37 UInt64 + 39. 38 UInt64 + 40. 39 UInt64 + 41. 40 UInt64 + 42. 41 UInt64 + 43. 42 UInt64 + 44. 43 UInt64 + 45. 44 UInt64 + 46. 45 UInt64 + 47. 46 UInt64 + 48. 47 UInt64 + 49. 48 UInt64 + 50. 49 UInt64 + 51. 50 UInt64 + 52. 51 UInt64 + 53. 52 UInt64 + 54. 53 UInt64 + 55. 54 UInt64 + 56. 55 UInt64 + 57. 56 UInt64 + 58. 57 UInt64 + 59. 58 UInt64 + 60. 59 UInt64 + 61. 60 UInt64 + 62. 61 UInt64 + 63. 62 UInt64 + 64. 63 UInt64 + 65. 64 UInt64 + 66. 65 UInt64 + 67. 66 UInt64 + 68. 67 UInt64 + 69. 68 UInt64 + 70. 69 UInt64 + 71. 70 UInt64 + 72. 71 UInt64 + 73. 72 UInt64 + 74. 73 UInt64 + 75. 74 UInt64 + 76. 75 UInt64 + 77. 76 UInt64 + 78. 77 UInt64 + 79. 78 UInt64 + 80. 79 UInt64 + 81. 80 UInt64 + 82. 81 UInt64 + 83. 82 UInt64 + 84. 83 UInt64 + 85. 84 UInt64 + 86. 85 UInt64 + 87. 86 UInt64 + 88. 87 UInt64 + 89. 88 UInt64 + 90. 89 UInt64 + 91. 90 UInt64 + 92. 91 UInt64 + 93. 92 UInt64 + 94. 93 UInt64 + 95. 94 UInt64 + 96. 95 UInt64 + 97. 96 UInt64 + 98. 97 UInt64 + 99. 98 UInt64 +100. 99 UInt64 + + number toTypeName(number) + number toTypeName(number) + + 1. 0 UInt64 + 2. 1 UInt64 + 3. 2 UInt64 + 4. 3 UInt64 + 5. 4 UInt64 + 6. 5 UInt64 + 7. 6 UInt64 + 8. 7 UInt64 + 9. 8 UInt64 + 10. 9 UInt64 + 11. 10 UInt64 + 12. 11 UInt64 + 13. 12 UInt64 + 14. 13 UInt64 + 15. 14 UInt64 + 16. 15 UInt64 + 17. 16 UInt64 + 18. 17 UInt64 + 19. 18 UInt64 + 20. 19 UInt64 + 21. 20 UInt64 + 22. 21 UInt64 + 23. 22 UInt64 + 24. 23 UInt64 + 25. 24 UInt64 + 26. 25 UInt64 + 27. 26 UInt64 + 28. 27 UInt64 + 29. 28 UInt64 + 30. 29 UInt64 + 31. 30 UInt64 + 32. 31 UInt64 + 33. 32 UInt64 + 34. 33 UInt64 + 35. 34 UInt64 + 36. 35 UInt64 + 37. 36 UInt64 + 38. 37 UInt64 + 39. 38 UInt64 + 40. 39 UInt64 + 41. 40 UInt64 + 42. 41 UInt64 + 43. 42 UInt64 + 44. 43 UInt64 + 45. 44 UInt64 + 46. 45 UInt64 + 47. 46 UInt64 + 48. 47 UInt64 + 49. 48 UInt64 + 50. 49 UInt64 + 51. 50 UInt64 + 52. 51 UInt64 + 53. 52 UInt64 + 54. 53 UInt64 + 55. 54 UInt64 + 56. 55 UInt64 + 57. 56 UInt64 + 58. 57 UInt64 + 59. 58 UInt64 + 60. 59 UInt64 + 61. 60 UInt64 + 62. 61 UInt64 + 63. 62 UInt64 + 64. 63 UInt64 + 65. 64 UInt64 + 66. 65 UInt64 + 67. 66 UInt64 + 68. 67 UInt64 + 69. 68 UInt64 + 70. 69 UInt64 + 71. 70 UInt64 + 72. 71 UInt64 + 73. 72 UInt64 + 74. 73 UInt64 + 75. 74 UInt64 + 76. 75 UInt64 + 77. 76 UInt64 + 78. 77 UInt64 + 79. 78 UInt64 + 80. 79 UInt64 + 81. 80 UInt64 + 82. 81 UInt64 + 83. 82 UInt64 + 84. 83 UInt64 + 85. 84 UInt64 + 86. 85 UInt64 + 87. 86 UInt64 + 88. 87 UInt64 + 89. 88 UInt64 + 90. 89 UInt64 + 91. 90 UInt64 + 92. 91 UInt64 + 93. 92 UInt64 + 94. 93 UInt64 + 95. 94 UInt64 + 96. 95 UInt64 + 97. 96 UInt64 + 98. 97 UInt64 + 99. 98 UInt64 +100. 99 UInt64 + + number toTypeName(number) + number toTypeName(number) + + 1. 0 UInt64 + 2. 1 UInt64 + 3. 2 UInt64 + 4. 3 UInt64 + 5. 4 UInt64 + 6. 5 UInt64 + 7. 6 UInt64 + 8. 7 UInt64 + 9. 8 UInt64 + 10. 
9 UInt64 + 11. 10 UInt64 + 12. 11 UInt64 + 13. 12 UInt64 + 14. 13 UInt64 + 15. 14 UInt64 + 16. 15 UInt64 + 17. 16 UInt64 + 18. 17 UInt64 + 19. 18 UInt64 + 20. 19 UInt64 + 21. 20 UInt64 + 22. 21 UInt64 + 23. 22 UInt64 + 24. 23 UInt64 + 25. 24 UInt64 + 26. 25 UInt64 + 27. 26 UInt64 + 28. 27 UInt64 + 29. 28 UInt64 + 30. 29 UInt64 + 31. 30 UInt64 + 32. 31 UInt64 + 33. 32 UInt64 + 34. 33 UInt64 + 35. 34 UInt64 + 36. 35 UInt64 + 37. 36 UInt64 + 38. 37 UInt64 + 39. 38 UInt64 + 40. 39 UInt64 + 41. 40 UInt64 + 42. 41 UInt64 + 43. 42 UInt64 + 44. 43 UInt64 + 45. 44 UInt64 + 46. 45 UInt64 + 47. 46 UInt64 + 48. 47 UInt64 + 49. 48 UInt64 + 50. 49 UInt64 + 51. 50 UInt64 + 52. 51 UInt64 + 53. 52 UInt64 + 54. 53 UInt64 + 55. 54 UInt64 + 56. 55 UInt64 + 57. 56 UInt64 + 58. 57 UInt64 + 59. 58 UInt64 + 60. 59 UInt64 + 61. 60 UInt64 + 62. 61 UInt64 + 63. 62 UInt64 + 64. 63 UInt64 + 65. 64 UInt64 + 66. 65 UInt64 + 67. 66 UInt64 + 68. 67 UInt64 + 69. 68 UInt64 + 70. 69 UInt64 + 71. 70 UInt64 + 72. 71 UInt64 + 73. 72 UInt64 + 74. 73 UInt64 + 75. 74 UInt64 + 76. 75 UInt64 + 77. 76 UInt64 + 78. 77 UInt64 + 79. 78 UInt64 + 80. 79 UInt64 + 81. 80 UInt64 + 82. 81 UInt64 + 83. 82 UInt64 + 84. 83 UInt64 + 85. 84 UInt64 + 86. 85 UInt64 + 87. 86 UInt64 + 88. 87 UInt64 + 89. 88 UInt64 + 90. 89 UInt64 + 91. 90 UInt64 + 92. 91 UInt64 + 93. 92 UInt64 + 94. 93 UInt64 + 95. 94 UInt64 + 96. 95 UInt64 + 97. 96 UInt64 + 98. 97 UInt64 + 99. 98 UInt64 +100. 99 UInt64 + + number toTypeName(number) + number toTypeName(number) + + 1. 0 UInt64 + 2. 1 UInt64 + 3. 2 UInt64 + 4. 3 UInt64 + 5. 4 UInt64 + 6. 5 UInt64 + 7. 6 UInt64 + 8. 7 UInt64 + 9. 8 UInt64 + 10. 9 UInt64 + 11. 10 UInt64 + 12. 11 UInt64 + 13. 12 UInt64 + 14. 13 UInt64 + 15. 14 UInt64 + 16. 15 UInt64 + 17. 16 UInt64 + 18. 17 UInt64 + 19. 18 UInt64 + 20. 19 UInt64 + 21. 20 UInt64 + 22. 21 UInt64 + 23. 22 UInt64 + 24. 23 UInt64 + 25. 24 UInt64 + 26. 25 UInt64 + 27. 26 UInt64 + 28. 27 UInt64 + 29. 28 UInt64 + 30. 29 UInt64 + 31. 30 UInt64 + 32. 31 UInt64 + 33. 32 UInt64 + 34. 33 UInt64 + 35. 34 UInt64 + 36. 35 UInt64 + 37. 36 UInt64 + 38. 37 UInt64 + 39. 38 UInt64 + 40. 39 UInt64 + 41. 40 UInt64 + 42. 41 UInt64 + 43. 42 UInt64 + 44. 43 UInt64 + 45. 44 UInt64 + 46. 45 UInt64 + 47. 46 UInt64 + 48. 47 UInt64 + 49. 48 UInt64 + 50. 49 UInt64 + 51. 50 UInt64 + 52. 51 UInt64 + 53. 52 UInt64 + 54. 53 UInt64 + 55. 54 UInt64 + 56. 55 UInt64 + 57. 56 UInt64 + 58. 57 UInt64 + 59. 58 UInt64 + 60. 59 UInt64 + 61. 60 UInt64 + 62. 61 UInt64 + 63. 62 UInt64 + 64. 63 UInt64 + 65. 64 UInt64 + 66. 65 UInt64 + 67. 66 UInt64 + 68. 67 UInt64 + 69. 68 UInt64 + 70. 69 UInt64 + 71. 70 UInt64 + 72. 71 UInt64 + 73. 72 UInt64 + 74. 73 UInt64 + 75. 74 UInt64 + 76. 75 UInt64 + 77. 76 UInt64 + 78. 77 UInt64 + 79. 78 UInt64 + 80. 79 UInt64 + 81. 80 UInt64 + 82. 81 UInt64 + 83. 82 UInt64 + 84. 83 UInt64 + 85. 84 UInt64 + 86. 85 UInt64 + 87. 86 UInt64 + 88. 87 UInt64 + 89. 88 UInt64 + 90. 89 UInt64 + 91. 90 UInt64 + 92. 91 UInt64 + 93. 92 UInt64 + 94. 93 UInt64 + 95. 94 UInt64 + 96. 95 UInt64 + 97. 96 UInt64 + 98. 97 UInt64 + 99. 98 UInt64 +100. 
99 UInt64
+
+ number toTypeName(number)
diff --git a/tests/queries/0_stateless/03169_display_column_names_in_footer.sql b/tests/queries/0_stateless/03169_display_column_names_in_footer.sql
new file mode 100644
index 00000000000..9e4ec09c21c
--- /dev/null
+++ b/tests/queries/0_stateless/03169_display_column_names_in_footer.sql
@@ -0,0 +1,19 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/65035
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 49) FORMAT Pretty;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 10) FORMAT Pretty SETTINGS output_format_pretty_display_footer_column_names_min_rows=9;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT Pretty SETTINGS output_format_pretty_display_footer_column_names=0;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT Pretty;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyNoEscapes;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyMonoBlock;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyNoEscapesMonoBlock;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyNoEscapesMonoBlock;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyCompact SETTINGS output_format_pretty_display_footer_column_names=0;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyCompact;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyCompactNoEscapes;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyCompactMonoBlock;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyCompactNoEscapesMonoBlock;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettySpace SETTINGS output_format_pretty_display_footer_column_names=0;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettySpace;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettySpaceNoEscapes;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettySpaceMonoBlock;
+SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettySpaceNoEscapesMonoBlock;
diff --git a/tests/queries/0_stateless/03169_modify_column_data_loss.reference b/tests/queries/0_stateless/03169_modify_column_data_loss.reference
new file mode 100644
index 00000000000..2126a658c16
--- /dev/null
+++ b/tests/queries/0_stateless/03169_modify_column_data_loss.reference
@@ -0,0 +1,4 @@
+1 one 0
+2 two 0
+3 \N 0
+1 one 1 0
diff --git a/tests/queries/0_stateless/03169_modify_column_data_loss.sql b/tests/queries/0_stateless/03169_modify_column_data_loss.sql
new file mode 100644
index 00000000000..def0a25a1b4
--- /dev/null
+++ b/tests/queries/0_stateless/03169_modify_column_data_loss.sql
@@ -0,0 +1,19 @@
+DROP TABLE IF EXISTS column_modify_test;
+
+CREATE TABLE column_modify_test (id UInt64, val String, other_col UInt64) engine=MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part=0;
+INSERT INTO column_modify_test VALUES (1,'one',0);
+INSERT INTO column_modify_test VALUES (2,'two',0);
+
+-- on 21.9 that was done via mutations mechanism
+ALTER TABLE column_modify_test MODIFY COLUMN val Nullable(String);
+
+INSERT INTO column_modify_test VALUES (3,Null,0);
+
+-- till now everything looks ok
+SELECT * FROM column_modify_test order by id, val, other_col;
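-- [A hedged editorial aside, not part of the patch: one way to observe the per-part
-- state this test exercises is system.parts_columns, which reports the column type
-- recorded in each active part, so parts written before and after the MODIFY COLUMN
-- above can be compared directly]
SELECT name AS part, column, type
FROM system.parts_columns
WHERE database = currentDatabase() AND table = 'column_modify_test' AND active
ORDER BY part, column;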
+
+-- Now we run a mutation. It will affect one of the parts, and will update columns.txt to the latest / correct state without updating the column file!
+alter table column_modify_test update other_col=1 where id = 1 SETTINGS mutations_sync=1;
+
+-- row 1 is damaged now: the column file and columns.txt are out of sync!
+SELECT *, throwIf(val <> 'one') as issue FROM column_modify_test WHERE id = 1;
diff --git a/tests/queries/0_stateless/03169_optimize_injective_functions_inside_uniq_crash.reference b/tests/queries/0_stateless/03169_optimize_injective_functions_inside_uniq_crash.reference
new file mode 100644
index 00000000000..e58e9764b39
--- /dev/null
+++ b/tests/queries/0_stateless/03169_optimize_injective_functions_inside_uniq_crash.reference
@@ -0,0 +1,2 @@
+100
+100
diff --git a/tests/queries/0_stateless/03169_optimize_injective_functions_inside_uniq_crash.sql b/tests/queries/0_stateless/03169_optimize_injective_functions_inside_uniq_crash.sql
new file mode 100644
index 00000000000..50d99b851a6
--- /dev/null
+++ b/tests/queries/0_stateless/03169_optimize_injective_functions_inside_uniq_crash.sql
@@ -0,0 +1,21 @@
+SELECT sum(u)
+FROM
+(
+    SELECT
+        intDiv(number, 4096) AS k,
+        uniqCombined(tuple(materialize(toLowCardinality(toNullable(16))))) AS u
+    FROM numbers(4096 * 100)
+    GROUP BY k
+)
+SETTINGS allow_experimental_analyzer = 1, optimize_injective_functions_inside_uniq=0;
+
+SELECT sum(u)
+FROM
+(
+    SELECT
+        intDiv(number, 4096) AS k,
+        uniqCombined(tuple(materialize(toLowCardinality(toNullable(16))))) AS u
+    FROM numbers(4096 * 100)
+    GROUP BY k
+)
+SETTINGS allow_experimental_analyzer = 1, optimize_injective_functions_inside_uniq=1;
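A hedged editorial aside (not part of the patch): the two queries above must agree with and without the rewrite, hence the two identical reference lines of 100. As an illustration of what the optimize_injective_functions_inside_uniq setting controls (the exact rewritten form is an assumption, not taken from this diff):

-- tuple(x) is injective, so with the optimization enabled uniq(tuple(number))
-- can be computed as uniq(number); both queries should return 1000
SELECT uniq(tuple(number)) FROM numbers(1000)
SETTINGS allow_experimental_analyzer = 1, optimize_injective_functions_inside_uniq = 1;
SELECT uniq(number) FROM numbers(1000);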
"$CURDIR"/../shell_config.sh + +echo "1,2" > $CLICKHOUSE_TEST_UNIQUE_NAME.csv +sleep 1 +$CLICKHOUSE_LOCAL -nm -q " +select _size, (dateDiff('millisecond', _time, now()) < 600000 AND dateDiff('millisecond', _time, now()) > 0) from file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv'); +" +rm $CLICKHOUSE_TEST_UNIQUE_NAME.csv diff --git a/tests/queries/0_stateless/03170_float_schema_inference_small_block.reference b/tests/queries/0_stateless/03170_float_schema_inference_small_block.reference new file mode 100644 index 00000000000..9ee16da8728 --- /dev/null +++ b/tests/queries/0_stateless/03170_float_schema_inference_small_block.reference @@ -0,0 +1,15 @@ +Int64 +x Nullable(Int64) +x Nullable(Int64) +x Nullable(Int64) +Float64 +x Nullable(Float64) +x Nullable(Float64) +x Nullable(Float64) +x Nullable(Float64) +Float64.explicit File +x Nullable(Float64) +Float64.pipe +x Nullable(Float64) +Float64.default max_read_buffer_size +x Nullable(Float64) diff --git a/tests/queries/0_stateless/03170_float_schema_inference_small_block.sh b/tests/queries/0_stateless/03170_float_schema_inference_small_block.sh new file mode 100755 index 00000000000..88f9bfad7ed --- /dev/null +++ b/tests/queries/0_stateless/03170_float_schema_inference_small_block.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# do not fallback to float always +echo "Int64" +$CLICKHOUSE_LOCAL --storage_file_read_method read --max_read_buffer_size 1 --input-format JSONEachRow 'desc "table"' <<<'{"x" : 1}' +$CLICKHOUSE_LOCAL --storage_file_read_method read --max_read_buffer_size 1 --input-format JSONEachRow 'desc "table"' <<<'{"x" : +1}' +$CLICKHOUSE_LOCAL --storage_file_read_method read --max_read_buffer_size 1 --input-format JSONEachRow 'desc "table"' <<<'{"x" : -1}' + +echo "Float64" +$CLICKHOUSE_LOCAL --storage_file_read_method read --max_read_buffer_size 1 --input-format JSONEachRow 'desc "table"' <<<'{"x" : 1.1}' +$CLICKHOUSE_LOCAL --storage_file_read_method read --max_read_buffer_size 1 --input-format JSONEachRow 'desc "table"' <<<'{"x" : +1.1}' +$CLICKHOUSE_LOCAL --storage_file_read_method read --max_read_buffer_size 1 --input-format JSONEachRow 'desc "table"' <<<'{"x" : 1.111}' +$CLICKHOUSE_LOCAL --storage_file_read_method read --max_read_buffer_size 1 --input-format JSONEachRow 'desc "table"' <<<'{"x" : +1.111}' + +# this is requried due to previously clickhouse-local does not interprets +# --max_read_buffer_size for fds [1] +# +# [1]: https://github.com/ClickHouse/ClickHouse/pull/64532 +echo "Float64.explicit File" +tmp_path=$(mktemp "$CUR_DIR/03170_float_schema_inference_small_block.json.XXXXXX") +trap 'rm -f $tmp_path' EXIT +cat > "$tmp_path" <<<'{"x" : 1.111}' +$CLICKHOUSE_LOCAL --storage_file_read_method read --max_read_buffer_size 1 --input-format JSONEachRow 'desc "table"' --file "$tmp_path" + +echo "Float64.pipe" +echo '{"x" : 1.1}' | $CLICKHOUSE_LOCAL --storage_file_read_method read --max_read_buffer_size 1 --input-format JSONEachRow 'desc "table"' +echo "Float64.default max_read_buffer_size" +echo '{"x" : 1.1}' | $CLICKHOUSE_LOCAL --storage_file_read_method read --input-format JSONEachRow 'desc "table"' diff --git a/tests/queries/0_stateless/03170_part_offset_as_table_column.reference b/tests/queries/0_stateless/03170_part_offset_as_table_column.reference new file mode 100644 index 00000000000..435187cb39b --- /dev/null +++ b/tests/queries/0_stateless/03170_part_offset_as_table_column.reference @@ -0,0 +1,30 @@ 
diff --git a/tests/queries/0_stateless/03170_part_offset_as_table_column.reference b/tests/queries/0_stateless/03170_part_offset_as_table_column.reference
new file mode 100644
index 00000000000..435187cb39b
--- /dev/null
+++ b/tests/queries/0_stateless/03170_part_offset_as_table_column.reference
@@ -0,0 +1,30 @@
+0 0
+1 0
+2 0
+3 0
+4 0
+5 0
+6 0
+7 0
+8 0
+9 0
+0 0
+1 0
+2 0
+3 0
+4 0
+5 0
+6 0
+7 0
+8 0
+9 0
+0 0
+1 0
+2 0
+3 0
+4 0
+5 0
+6 0
+7 0
+8 0
+9 0
diff --git a/tests/queries/0_stateless/03170_part_offset_as_table_column.sql b/tests/queries/0_stateless/03170_part_offset_as_table_column.sql
new file mode 100644
index 00000000000..36cbc156744
--- /dev/null
+++ b/tests/queries/0_stateless/03170_part_offset_as_table_column.sql
@@ -0,0 +1,25 @@
+CREATE TABLE test_table
+(
+    `key` UInt32,
+    `_part_offset` DEFAULT 0
+)
+ENGINE = MergeTree
+ORDER BY key;
+
+INSERT INTO test_table (key) SELECT number
+FROM numbers(10);
+
+set allow_experimental_analyzer=0;
+
+SELECT *
+FROM test_table;
+
+set allow_experimental_analyzer=1;
+
+SELECT *
+FROM test_table;
+
+SELECT
+    key,
+    _part_offset
+FROM test_table;
diff --git a/tests/queries/0_stateless/03171_condition_pushdown.reference b/tests/queries/0_stateless/03171_condition_pushdown.reference
new file mode 100644
index 00000000000..c7e6479af3b
--- /dev/null
+++ b/tests/queries/0_stateless/03171_condition_pushdown.reference
@@ -0,0 +1,2 @@
+123 123
+456 456
diff --git a/tests/queries/0_stateless/03171_condition_pushdown.sql b/tests/queries/0_stateless/03171_condition_pushdown.sql
new file mode 100644
index 00000000000..9cfe41ce921
--- /dev/null
+++ b/tests/queries/0_stateless/03171_condition_pushdown.sql
@@ -0,0 +1,6 @@
+-- This query succeeds only if it is correctly optimized.
+SET allow_experimental_analyzer = 1;
+SELECT * FROM (SELECT * FROM numbers(1e19)) AS t1, (SELECT * FROM numbers(1e19)) AS t2 WHERE t1.number IN (123, 456) AND t2.number = t1.number ORDER BY ALL;
+
+-- Still TODO:
+-- SELECT * FROM (SELECT * FROM numbers(1e19)) AS t1, (SELECT * FROM numbers(1e19)) AS t2 WHERE t1.number IN (SELECT 123 UNION ALL SELECT 456) AND t2.number = t1.number ORDER BY ALL;
diff --git a/tests/queries/0_stateless/03171_direct_dict_short_circuit_bug.reference b/tests/queries/0_stateless/03171_direct_dict_short_circuit_bug.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/03171_direct_dict_short_circuit_bug.sql b/tests/queries/0_stateless/03171_direct_dict_short_circuit_bug.sql
new file mode 100644
index 00000000000..034d48d2b8b
--- /dev/null
+++ b/tests/queries/0_stateless/03171_direct_dict_short_circuit_bug.sql
@@ -0,0 +1,37 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/65201
+SET short_circuit_function_evaluation='enable';
+
+DROP DICTIONARY IF EXISTS direct_dictionary_simple_key_simple_attributes;
+DROP TABLE IF EXISTS simple_key_simple_attributes_source_table;
+
+CREATE TABLE simple_key_simple_attributes_source_table
+(
+    id UInt64,
+    value_first String,
+    value_second String
+)
+    ENGINE = TinyLog;
+
+INSERT INTO simple_key_simple_attributes_source_table VALUES(0, 'value_0', 'value_second_0');
+INSERT INTO simple_key_simple_attributes_source_table VALUES(1, 'value_1', 'value_second_1');
+INSERT INTO simple_key_simple_attributes_source_table VALUES(2, 'value_2', 'value_second_2');
+
+
+CREATE DICTIONARY direct_dictionary_simple_key_simple_attributes
+(
+    `id` UInt64,
+    `value_first` String DEFAULT 'value_first_default',
+    `value_second` String DEFAULT 'value_second_default'
+)
+    PRIMARY KEY id
+    SOURCE(CLICKHOUSE(TABLE 'simple_key_simple_attributes_source_table'))
+    LAYOUT(DIRECT());
+
+SELECT
+    toUInt128(1),
+    dictGetOrDefault('direct_dictionary_simple_key_simple_attributes', 'value_second', number, toString(toFixedString(toFixedString(toFixedString(materialize(toNullable('default')), 7), 7),
toNullable(toNullable(toNullable(toUInt128(7))))))) AS value_second +FROM system.numbers LIMIT 255 +FORMAT Null; + +DROP DICTIONARY IF EXISTS direct_dictionary_simple_key_simple_attributes; +DROP TABLE IF EXISTS simple_key_simple_attributes_source_table; diff --git a/tests/queries/0_stateless/03171_hashed_dictionary_short_circuit_bug_fix.reference b/tests/queries/0_stateless/03171_hashed_dictionary_short_circuit_bug_fix.reference new file mode 100644 index 00000000000..a2ac115060f --- /dev/null +++ b/tests/queries/0_stateless/03171_hashed_dictionary_short_circuit_bug_fix.reference @@ -0,0 +1,6 @@ +100 1 1 +300 3 0 +200 2 2 +100 1 1 +300 3 0 +200 2 2 diff --git a/tests/queries/0_stateless/03171_hashed_dictionary_short_circuit_bug_fix.sql b/tests/queries/0_stateless/03171_hashed_dictionary_short_circuit_bug_fix.sql new file mode 100644 index 00000000000..e1b5531a442 --- /dev/null +++ b/tests/queries/0_stateless/03171_hashed_dictionary_short_circuit_bug_fix.sql @@ -0,0 +1,30 @@ +-- Tags: no-parallel + +CREATE TABLE x ( hash_id UInt64, user_result Decimal(3, 2) ) ENGINE = Memory(); + +CREATE TABLE y ( hash_id UInt64, user_result DECIMAL(18, 6) ) ENGINE = Memory(); + +INSERT INTO x values (100, 1), (200, 2); +INSERT INTO y values (100, 1), (300, 3), (200, 2); + +CREATE DICTIONARY d1 (hash_id UInt64, user_result Decimal(3, 2) ) +PRIMARY KEY hash_id +SOURCE(CLICKHOUSE(TABLE 'x')) +LIFETIME(0) +LAYOUT(HASHED()); + +SELECT hash_id, + dictGetOrDefault(d1, 'user_result', toUInt64(hash_id), toFloat64(user_result)), + dictGet(d1, 'user_result', toUInt64(hash_id)) +FROM y; + +CREATE DICTIONARY d2 (hash_id UInt64, user_result Decimal(3, 2) ) +PRIMARY KEY hash_id +SOURCE(CLICKHOUSE(TABLE 'x')) +LIFETIME(0) +LAYOUT(HASHED_ARRAY()); + +SELECT hash_id, + dictGetOrDefault(d2, 'user_result', toUInt64(hash_id), toFloat64(user_result)), + dictGet(d2, 'user_result', toUInt64(hash_id)) +FROM y; diff --git a/tests/queries/0_stateless/03172_bcrypt_validation.reference b/tests/queries/0_stateless/03172_bcrypt_validation.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03172_bcrypt_validation.sql b/tests/queries/0_stateless/03172_bcrypt_validation.sql new file mode 100644 index 00000000000..37dd0c9bb5d --- /dev/null +++ b/tests/queries/0_stateless/03172_bcrypt_validation.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest +DROP USER IF EXISTS 03172_user_invalid_bcrypt_hash; +CREATE USER 03172_user_invalid_bcrypt_hash IDENTIFIED WITH bcrypt_hash BY '012345678901234567890123456789012345678901234567890123456789'; -- { serverError BAD_ARGUMENTS } diff --git a/tests/queries/0_stateless/mergetree_mutations.lib b/tests/queries/0_stateless/mergetree_mutations.lib index b11b2e6b852..9eeea87b52d 100644 --- a/tests/queries/0_stateless/mergetree_mutations.lib +++ b/tests/queries/0_stateless/mergetree_mutations.lib @@ -37,7 +37,7 @@ function wait_for_all_mutations() echo "Timed out while waiting for mutation to execute!" 
fi - sleep 0.1 + sleep 0.3 done } diff --git a/tests/queries/1_stateful/00166_explain_estimate.sql b/tests/queries/1_stateful/00166_explain_estimate.sql index c4071271736..136433c16ee 100644 --- a/tests/queries/1_stateful/00166_explain_estimate.sql +++ b/tests/queries/1_stateful/00166_explain_estimate.sql @@ -1,6 +1,8 @@ -- Tags: no-replicated-database -- Tag no-replicated-database: Requires investigation +SET optimize_use_implicit_projections = 0; + EXPLAIN ESTIMATE SELECT count() FROM test.hits WHERE CounterID = 29103473; EXPLAIN ESTIMATE SELECT count() FROM test.hits WHERE CounterID != 29103473; EXPLAIN ESTIMATE SELECT count() FROM test.hits WHERE CounterID > 29103473; diff --git a/tests/queries/1_stateful/00175_counting_resources_in_subqueries.sql b/tests/queries/1_stateful/00175_counting_resources_in_subqueries.sql index 63eca96414f..5733bf6dd18 100644 --- a/tests/queries/1_stateful/00175_counting_resources_in_subqueries.sql +++ b/tests/queries/1_stateful/00175_counting_resources_in_subqueries.sql @@ -1,3 +1,5 @@ +SET optimize_use_implicit_projections = 0; + -- the work for scalar subquery is properly accounted: SET max_rows_to_read = 1000000; SELECT 1 = (SELECT count() FROM test.hits WHERE NOT ignore(AdvEngineID)); -- { serverError TOO_MANY_ROWS } diff --git a/utils/changelog/changelog.py b/utils/changelog/changelog.py index 314461a6b3a..b79e4139bcc 100755 --- a/utils/changelog/changelog.py +++ b/utils/changelog/changelog.py @@ -1,427 +1,15 @@ #!/usr/bin/env python3 # In our CI this script runs in style-test containers -import argparse -import logging -import os -import os.path as p -import re -from datetime import date, timedelta -from subprocess import DEVNULL, CalledProcessError -from typing import Dict, List, Optional, TextIO +# The main script is moved to tests/ci/changelog.py +# It depends on the ci scripts too hard to keep it here +# Here's only a wrapper around it for the people who used to it -from github.GithubException import RateLimitExceededException, UnknownObjectException -from github.NamedUser import NamedUser -from thefuzz.fuzz import ratio # type: ignore - -from git_helper import git_runner as runner -from git_helper import is_shallow -from github_helper import GitHub, PullRequest, PullRequests, Repository - -# This array gives the preferred category order, and is also used to -# normalize category names. -# Categories are used in .github/PULL_REQUEST_TEMPLATE.md, keep comments there -# updated accordingly -categories_preferred_order = ( - "Backward Incompatible Change", - "New Feature", - "Performance Improvement", - "Improvement", - "Critical Bug Fix", - "Bug Fix", - "Build/Testing/Packaging Improvement", - "Other", -) - -FROM_REF = "" -TO_REF = "" -SHA_IN_CHANGELOG = [] # type: List[str] -gh = GitHub(create_cache_dir=False) -CACHE_PATH = p.join(p.dirname(p.realpath(__file__)), "gh_cache") - - -class Description: - def __init__( - self, number: int, user: NamedUser, html_url: str, entry: str, category: str - ): - self.number = number - self.html_url = html_url - self.user = gh.get_user_cached(user._rawData["login"]) # type: ignore - self.entry = entry - self.category = category - - @property - def formatted_entry(self) -> str: - # Substitute issue links. 
- # 1) issue number w/o markdown link - entry = re.sub( - r"([^[])#([0-9]{4,})", - r"\1[#\2](https://github.com/ClickHouse/ClickHouse/issues/\2)", - self.entry, - ) - # 2) issue URL w/o markdown link - # including #issuecomment-1 or #event-12 - entry = re.sub( - r"([^(])(https://github.com/ClickHouse/ClickHouse/issues/([0-9]{4,})[-#a-z0-9]*)", - r"\1[#\3](\2)", - entry, - ) - # It's possible that we face a secondary rate limit. - # In this case we should sleep until we get it - while True: - try: - user_name = self.user.name if self.user.name else self.user.login - break - except UnknownObjectException: - user_name = self.user.login - break - except RateLimitExceededException: - gh.sleep_on_rate_limit() - return ( - f"* {entry} [#{self.number}]({self.html_url}) " - f"([{user_name}]({self.user.html_url}))." - ) - - # Sort PR descriptions by numbers - def __eq__(self, other) -> bool: - if not isinstance(self, type(other)): - return NotImplemented - return self.number == other.number - - def __lt__(self, other: "Description") -> bool: - return self.number < other.number - - -def get_descriptions(prs: PullRequests) -> Dict[str, List[Description]]: - descriptions = {} # type: Dict[str, List[Description]] - repos = {} # type: Dict[str, Repository] - for pr in prs: - # See https://github.com/PyGithub/PyGithub/issues/2202, - # obj._rawData doesn't spend additional API requests - # We'll save some requests - # pylint: disable=protected-access - repo_name = pr._rawData["base"]["repo"]["full_name"] - # pylint: enable=protected-access - if repo_name not in repos: - repos[repo_name] = pr.base.repo - in_changelog = False - merge_commit = pr.merge_commit_sha - if merge_commit is None: - logging.warning("PR %s does not have merge-commit, skipping", pr.number) - continue - - in_changelog = merge_commit in SHA_IN_CHANGELOG - if in_changelog: - desc = generate_description(pr, repos[repo_name]) - if desc: - if desc.category not in descriptions: - descriptions[desc.category] = [] - descriptions[desc.category].append(desc) - - for descs in descriptions.values(): - descs.sort() - - return descriptions - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - description="Generate a changelog in Markdown format between given tags. 
" - "It fetches all tags and unshallow the git repository automatically", - ) - parser.add_argument( - "-v", - "--verbose", - action="count", - default=0, - help="set the script verbosity, could be used multiple", - ) - parser.add_argument( - "--debug-helpers", - action="store_true", - help="add debug logging for git_helper and github_helper", - ) - parser.add_argument( - "--output", - type=argparse.FileType("w"), - default="-", - help="output file for changelog", - ) - parser.add_argument( - "--repo", - default="ClickHouse/ClickHouse", - help="a repository to query for pull-requests from GitHub", - ) - parser.add_argument( - "--jobs", - type=int, - default=10, - help="number of jobs to get pull-requests info from GitHub API", - ) - parser.add_argument( - "--gh-user-or-token", - help="user name or GH token to authenticate", - ) - parser.add_argument( - "--gh-password", - help="a password that should be used when user is given", - ) - parser.add_argument( - "--with-testing-tags", - action="store_true", - help="by default '*-testing' tags are ignored, this argument enables them too", - ) - parser.add_argument( - "--from", - dest="from_ref", - help="git ref for a starting point of changelog, by default is calculated " - "automatically to match a previous tag in history", - ) - parser.add_argument( - "to_ref", - metavar="TO_REF", - help="git ref for the changelog end", - ) - args = parser.parse_args() - return args - - -# This function mirrors the PR description checks in ClickhousePullRequestTrigger. -# Returns None if the PR should not be mentioned in changelog. -def generate_description(item: PullRequest, repo: Repository) -> Optional[Description]: - backport_number = item.number - if item.head.ref.startswith("backport/"): - branch_parts = item.head.ref.split("/") - if len(branch_parts) == 3: - try: - item = gh.get_pull_cached(repo, int(branch_parts[-1])) - except Exception as e: - logging.warning("unable to get backpoted PR, exception: %s", e) - else: - logging.warning( - "The branch %s doesn't match backport template, using PR %s as is", - item.head.ref, - item.number, - ) - description = item.body - # Don't skip empty lines because they delimit parts of description - lines = [x.strip() for x in (description.split("\n") if description else [])] - lines = [re.sub(r"\s+", " ", ln) for ln in lines] - - category = "" - entry = "" - - if lines: - i = 0 - while i < len(lines): - if re.match(r"(?i)^[#>*_ ]*change\s*log\s*category", lines[i]): - i += 1 - if i >= len(lines): - break - # Can have one empty line between header and the category itself. - # Filter it out. - if not lines[i]: - i += 1 - if i >= len(lines): - break - category = re.sub(r"^[-*\s]*", "", lines[i]) - i += 1 - elif re.match( - r"(?i)^[#>*_ ]*(short\s*description|change\s*log\s*entry)", lines[i] - ): - i += 1 - # Can have one empty line between header and the entry itself. - # Filter it out. - if i < len(lines) and not lines[i]: - i += 1 - # All following lines until empty one are the changelog entry. - entry_lines = [] - while i < len(lines) and lines[i]: - entry_lines.append(lines[i]) - i += 1 - entry = " ".join(entry_lines) - else: - i += 1 - - # Remove excessive bullets from the entry. - if re.match(r"^[\-\*] ", entry): - entry = entry[2:] - - # Better style. - if re.match(r"^[a-z]", entry): - entry = entry.capitalize() - - if not category: - # Shouldn't happen, because description check in CI should catch such PRs. - # Fall through, so that it shows up in output and the user can fix it. 
- category = "NO CL CATEGORY" - - # Filter out the PR categories that are not for changelog. - if re.match( - r"(?i)((non|in|not|un)[-\s]*significant)|(not[ ]*for[ ]*changelog)", - category, - ): - category = "NOT FOR CHANGELOG / INSIGNIFICANT" - return Description(item.number, item.user, item.html_url, item.title, category) - - # Normalize bug fixes - if re.match( - r"(?i)bug\Wfix", - category, - ): - category = "Bug Fix (user-visible misbehavior in an official stable release)" - - # Filter out documentations changelog - if re.match( - r"(?i)doc", - category, - ): - return None - - if backport_number != item.number: - entry = f"Backported in #{backport_number}: {entry}" - - if not entry: - # Shouldn't happen, because description check in CI should catch such PRs. - category = "NO CL ENTRY" - entry = "NO CL ENTRY: '" + item.title + "'" - - entry = entry.strip() - if entry[-1] != ".": - entry += "." - - for c in categories_preferred_order: - if ratio(category.lower(), c.lower()) >= 90: - category = c - break - - return Description(item.number, item.user, item.html_url, entry, category) - - -def write_changelog( - fd: TextIO, descriptions: Dict[str, List[Description]], year: int -) -> None: - to_commit = runner(f"git rev-parse {TO_REF}^{{}}")[:11] - from_commit = runner(f"git rev-parse {FROM_REF}^{{}}")[:11] - fd.write( - f"---\nsidebar_position: 1\nsidebar_label: {year}\n---\n\n" - f"# {year} Changelog\n\n" - f"### ClickHouse release {TO_REF} ({to_commit}) FIXME " - f"as compared to {FROM_REF} ({from_commit})\n\n" - ) - - seen_categories = [] # type: List[str] - for category in categories_preferred_order: - if category in descriptions: - seen_categories.append(category) - fd.write(f"#### {category}\n") - for desc in descriptions[category]: - fd.write(f"{desc.formatted_entry}\n") - - fd.write("\n") - - for category in sorted(descriptions): - if category not in seen_categories: - fd.write(f"#### {category}\n\n") - for desc in descriptions[category]: - fd.write(f"{desc.formatted_entry}\n") - - fd.write("\n") - - -def check_refs(from_ref: Optional[str], to_ref: str, with_testing_tags: bool): - global FROM_REF, TO_REF - TO_REF = to_ref - - # Check TO_REF - runner.run(f"git rev-parse {TO_REF}") - - # Check from_ref - if from_ref is None: - # Get all tags pointing to TO_REF - tags = runner.run(f"git tag --points-at '{TO_REF}^{{}}'").split("\n") - logging.info("All tags pointing to %s:\n%s", TO_REF, tags) - if not with_testing_tags: - tags.append("*-testing") - exclude = " ".join([f"--exclude='{tag}'" for tag in tags]) - cmd = f"git describe --abbrev=0 --tags {exclude} '{TO_REF}'" - FROM_REF = runner.run(cmd) - else: - runner.run(f"git rev-parse {FROM_REF}") - FROM_REF = from_ref - - -def set_sha_in_changelog(): - global SHA_IN_CHANGELOG - SHA_IN_CHANGELOG = runner.run( - f"git log --format=format:%H {FROM_REF}..{TO_REF}" - ).split("\n") - - -def get_year(prs: PullRequests) -> int: - if not prs: - return date.today().year - return max(pr.created_at.year for pr in prs) - - -def main(): - log_levels = [logging.WARN, logging.INFO, logging.DEBUG] - args = parse_args() - logging.basicConfig( - format="%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d]:\n%(message)s", - level=log_levels[min(args.verbose, 2)], - ) - if args.debug_helpers: - logging.getLogger("github_helper").setLevel(logging.DEBUG) - logging.getLogger("git_helper").setLevel(logging.DEBUG) - # Create a cache directory - if not p.isdir(CACHE_PATH): - os.mkdir(CACHE_PATH, 0o700) - - # Get the full repo - if is_shallow(): - 
logging.info("Unshallow repository") - runner.run("git fetch --unshallow", stderr=DEVNULL) - logging.info("Fetching all tags") - runner.run("git fetch --tags", stderr=DEVNULL) - - check_refs(args.from_ref, args.to_ref, args.with_testing_tags) - set_sha_in_changelog() - - logging.info("Using %s..%s as changelog interval", FROM_REF, TO_REF) - - # use merge-base commit as a starting point, if used ref in another branch - base_commit = runner.run(f"git merge-base '{FROM_REF}^{{}}' '{TO_REF}^{{}}'") - # Get starting and ending dates for gathering PRs - # Add one day after and before to mitigate TZ possible issues - # `tag^{}` format gives commit ref when we have annotated tags - # format %cs gives a committer date, works better for cherry-picked commits - from_date = runner.run(f"git log -1 --format=format:%cs '{base_commit}'") - to_date = runner.run(f"git log -1 --format=format:%cs '{TO_REF}^{{}}'") - merged = ( - date.fromisoformat(from_date) - timedelta(1), - date.fromisoformat(to_date) + timedelta(1), - ) - - # Get all PRs for the given time frame - global gh - gh = GitHub( - args.gh_user_or_token, - args.gh_password, - create_cache_dir=False, - per_page=100, - pool_size=args.jobs, - ) - gh.cache_path = CACHE_PATH - query = f"type:pr repo:{args.repo} is:merged" - prs = gh.get_pulls_from_search(query=query, merged=merged, sort="created") - - descriptions = get_descriptions(prs) - changelog_year = get_year(prs) - - write_changelog(args.output, descriptions, changelog_year) +import subprocess +import sys +from pathlib import Path +SCRIPT_PATH = (Path(__file__).parents[2] / "tests/ci/changelog.py").absolute() if __name__ == "__main__": - main() + subprocess.check_call(["python3", SCRIPT_PATH, *sys.argv[1:]]) diff --git a/utils/changelog/git_helper.py b/utils/changelog/git_helper.py deleted file mode 120000 index 03b05a7eddd..00000000000 --- a/utils/changelog/git_helper.py +++ /dev/null @@ -1 +0,0 @@ -../../tests/ci/git_helper.py \ No newline at end of file diff --git a/utils/changelog/github_helper.py b/utils/changelog/github_helper.py deleted file mode 120000 index 2d44dfe8000..00000000000 --- a/utils/changelog/github_helper.py +++ /dev/null @@ -1 +0,0 @@ -../../tests/ci/github_helper.py \ No newline at end of file diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index c7d567c4168..eaeeb038aad 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -222,6 +222,7 @@ DatabaseOrdinaryThreadsActive DateTime DateTimes DbCL +deallocated Decrypted Deduplicate Deduplication @@ -293,6 +294,7 @@ FilesystemMainPathUsedBytes FilesystemMainPathUsedINodes FixedString FlameGraph +flameGraph Flink ForEach FreeBSD @@ -473,6 +475,7 @@ Liao LibFuzzer LightHouse LineAsString +LineString Linf LinfDistance LinfNorm @@ -1009,6 +1012,8 @@ Updatable Uppercased Uptime Uptrace +UrlDecode +UrlEncode UserID Util VARCHAR @@ -1702,6 +1707,7 @@ groupBitmap groupBitmapAnd groupBitmapOr groupBitmapXor +groupConcat groupUniqArray grouparray grouparrayinsertat @@ -1718,6 +1724,7 @@ groupbitmapor groupbitmapxor groupbitor groupbitxor +groupconcat groupuniqarray grpc grpcio @@ -1935,6 +1942,8 @@ london lookups loongarch lowcardinality +lowCardinalityIndices +lowCardinalityKeys lowerUTF lowercased lttb @@ -2311,6 +2320,7 @@ randomStringUTF rankCorr rapidjson rawblob +readWKTLineString readWKTMultiPolygon readWKTPoint readWKTPolygon diff --git a/utils/check-style/check-mypy 
b/utils/check-style/check-mypy
index 42cb7fbbd15..4434377e627 100755
--- a/utils/check-style/check-mypy
+++ b/utils/check-style/check-mypy
@@ -11,13 +11,15 @@ GIT_ROOT=${GIT_ROOT:-.}
CONFIG="$GIT_ROOT/tests/ci/.mypy.ini"
DIRS=("$GIT_ROOT/tests/ci/" "$GIT_ROOT/tests/ci/"*/)
tmp=$(mktemp)
+
for dir in "${DIRS[@]}"; do
    if ! compgen -G "$dir"/*.py > /dev/null; then
        continue
    fi
-    if ! mypy --config-file="$CONFIG" --sqlite-cache "$dir"/*.py > "$tmp" 2>&1; then
+    if ! mypy --config-file="$CONFIG" --sqlite-cache $(find "$dir" -maxdepth 1 -name "*.py" | grep -v "test_") > "$tmp" 2>&1; then
        echo "Errors while processing $dir":
        cat "$tmp"
    fi
done
+
rm -rf "$tmp"
diff --git a/utils/check-style/check-pylint b/utils/check-style/check-pylint
index 7959a414023..8cfbc68ac96 100755
--- a/utils/check-style/check-pylint
+++ b/utils/check-style/check-pylint
@@ -10,6 +10,7 @@ function xargs-pylint {
    xargs -P "$(nproc)" -n "$1" pylint --rcfile="$ROOT_PATH/pyproject.toml" --persistent=no --score=n
}
-find "$ROOT_PATH/tests" -maxdepth 2 -type f -exec file -F' ' --mime-type {} + | xargs-pylint 50
+# exclude ci unittest scripts from check: test_*
+find "$ROOT_PATH/tests" -maxdepth 2 -type f -exec file -F' ' --mime-type {} + | grep -v "/test_" | xargs-pylint 50
# Beware, there lambdas are checked. All of them contain `app`, and it causes brain-cucumber-zalgo
find "$ROOT_PATH/tests/ci" -mindepth 2 -type f -exec file -F' ' --mime-type {} + | xargs-pylint 1
diff --git a/utils/check-style/check-style b/utils/check-style/check-style
index 5c05907e9dd..7f25ca4607c 100755
--- a/utils/check-style/check-style
+++ b/utils/check-style/check-style
@@ -65,6 +65,7 @@ EXTERN_TYPES_EXCLUDES=(
    ProfileEvents::increment
    ProfileEvents::incrementForLogMessage
    ProfileEvents::getName
+    ProfileEvents::Timer
    ProfileEvents::Type
    ProfileEvents::TypeEnum
    ProfileEvents::dumpToMapColumn
@@ -240,8 +241,22 @@ for test_case in "${tests_with_replicated_merge_tree[@]}"; do
    esac
done
-# All the submodules should be from https://github.com/
-find $ROOT_PATH -name '.gitmodules' | while read i; do grep -F 'url = ' $i | grep -v -F 'https://github.com/' && echo 'All the submodules should be from https://github.com/'; done
+# All submodules should be from https://github.com/
+git config --file "$ROOT_PATH/.gitmodules" --get-regexp 'submodule\..+\.url' | \
+while read -r line; do
+    name=${line#submodule.}; name=${name%.url*}
+    url=${line#* }
+    [[ "$url" != 'https://github.com/'* ]] && echo "All submodules should be from https://github.com/, submodule '$name' has '$url'"
+done
+
+# All submodules should be of this form: [submodule "contrib/libxyz"] (for consistency, the submodule name doesn't matter too much)
+# - restrict the check to top-level .gitmodules file
+git config --file "$ROOT_PATH/.gitmodules" --get-regexp 'submodule\..+\.path' | \
+while read -r line; do
+    name=${line#submodule.}; name=${name%.path*}
+    path=${line#* }
+    [ "$name" != "$path" ] && echo "Submodule name '$name' is not equal to its path '$path'"
+done
# There shouldn't be any code snippets under GPL or LGPL
find $ROOT_PATH/{src,base,programs} -name '*.h' -or -name '*.cpp' 2>/dev/null | xargs grep -i -F 'General Public License' && echo "There shouldn't be any code snippets under GPL or LGPL"
@@ -309,6 +324,7 @@ std_cerr_cout_excludes=(
    src/Bridge/IBridge.cpp
    src/Daemon/BaseDaemon.cpp
    src/Loggers/Loggers.cpp
+    src/Common/GWPAsan.cpp
)
sources_with_std_cerr_cout=( $(
    find $ROOT_PATH/{src,base} -name '*.h' -or -name '*.cpp' | \
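A hedged editorial aside (not part of the patch): the new submodule checks lean on the fact that git config --get-regexp prints one "key value" pair per line, so the submodule name can be recovered with plain parameter expansion. A minimal sketch of the idiom, assuming a checkout with a top-level .gitmodules:

#!/usr/bin/env bash
# print each submodule as "name -> url", the same shape the url check consumes
git config --file .gitmodules --get-regexp 'submodule\..+\.url' |
while read -r key url; do
    name=${key#submodule.}   # strip the leading "submodule."
    name=${name%.url}        # strip the trailing ".url"
    echo "submodule '$name' -> '$url'"
done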
diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv
index f7d84cce4b1..1bff4cb0b09 100644
--- a/utils/list-versions/version_date.tsv
+++ b/utils/list-versions/version_date.tsv
@@ -1,5 +1,9 @@
+v24.5.3.5-stable 2024-06-13
+v24.5.2.34-stable 2024-06-13
v24.5.1.1763-stable 2024-06-01
+v24.4.2.141-stable 2024-06-07
v24.4.1.2088-stable 2024-05-01
+v24.3.4.147-lts 2024-06-13
v24.3.3.102-lts 2024-05-01
v24.3.2.23-lts 2024-04-03
v24.3.1.2672-lts 2024-03-27
@@ -37,6 +41,7 @@ v23.9.4.11-stable 2023-11-08
v23.9.3.12-stable 2023-10-31
v23.9.2.56-stable 2023-10-19
v23.9.1.1854-stable 2023-09-29
+v23.8.15.35-lts 2024-06-14
v23.8.14.6-lts 2024-05-02
v23.8.13.25-lts 2024-04-26
v23.8.12.13-lts 2024-03-26