Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-09-19 16:20:50 +00:00

commit 102f2cf3e9: Merge branch 'master' of github.com:ClickHouse/ClickHouse into fix-dynamic-tests

.gitmodules (vendored), 2 changes
@@ -108,7 +108,7 @@
 	url = https://github.com/ClickHouse/icudata
 [submodule "contrib/icu"]
 	path = contrib/icu
-	url = https://github.com/unicode-org/icu
+	url = https://github.com/ClickHouse/icu
 [submodule "contrib/flatbuffers"]
 	path = contrib/flatbuffers
 	url = https://github.com/ClickHouse/flatbuffers
@@ -605,7 +605,9 @@ if (NATIVE_BUILD_TARGETS

     execute_process(
         COMMAND ${CMAKE_COMMAND} -E make_directory "${NATIVE_BUILD_DIR}"
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )

     execute_process(
         COMMAND ${CMAKE_COMMAND}
@@ -617,9 +619,13 @@ if (NATIVE_BUILD_TARGETS
         "-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}"
         ${PROJECT_SOURCE_DIR}
         WORKING_DIRECTORY "${NATIVE_BUILD_DIR}"
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )

     execute_process(
         COMMAND ${CMAKE_COMMAND} --build "${NATIVE_BUILD_DIR}" --target ${NATIVE_BUILD_TARGETS}
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )
 endif ()
@@ -51,8 +51,14 @@ if (NOT "$ENV{CFLAGS}" STREQUAL ""
 endif()

 # Default toolchain - this is needed to avoid dependency on OS files.
-execute_process(COMMAND uname -s OUTPUT_VARIABLE OS)
-execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCH)
+execute_process(COMMAND uname -s
+    OUTPUT_VARIABLE OS
+    COMMAND_ERROR_IS_FATAL ANY
+)
+execute_process(COMMAND uname -m
+    OUTPUT_VARIABLE ARCH
+    COMMAND_ERROR_IS_FATAL ANY
+)

 # By default, prefer clang on Linux
 # But note, that you still may change the compiler with -DCMAKE_C_COMPILER/-DCMAKE_CXX_COMPILER.
@@ -9,10 +9,18 @@ endif ()
 file(GLOB bprefix "/usr/local/llvm${COMPILER_VERSION_MAJOR}/lib/clang/${COMPILER_VERSION_MAJOR}/lib/${system_processor}-portbld-freebsd*/")
 message(STATUS "-Bprefix: ${bprefix}")

-execute_process(COMMAND ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins-${system_processor}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
+execute_process(COMMAND
+    ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins-${system_processor}.a
+    OUTPUT_VARIABLE BUILTINS_LIBRARY
+    COMMAND_ERROR_IS_FATAL ANY
+    OUTPUT_STRIP_TRAILING_WHITESPACE)
 # --print-file-name simply prints what you passed in case of nothing was resolved, so let's try one other possible option
 if (BUILTINS_LIBRARY STREQUAL "libclang_rt.builtins-${system_processor}.a")
-    execute_process(COMMAND ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
+    execute_process(COMMAND
+        ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins.a
+        OUTPUT_VARIABLE BUILTINS_LIBRARY
+        COMMAND_ERROR_IS_FATAL ANY
+        OUTPUT_STRIP_TRAILING_WHITESPACE)
 endif()
 if (BUILTINS_LIBRARY STREQUAL "libclang_rt.builtins.a")
     message(FATAL_ERROR "libclang_rt.builtins had not been found")
@@ -5,7 +5,11 @@ set (DEFAULT_LIBS "-nodefaultlibs")

 # We need builtins from Clang's RT even without libcxx - for ubsan+int128.
 # See https://bugs.llvm.org/show_bug.cgi?id=16404
-execute_process (COMMAND ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
+execute_process (COMMAND
+    ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt
+    OUTPUT_VARIABLE BUILTINS_LIBRARY
+    COMMAND_ERROR_IS_FATAL ANY
+    OUTPUT_STRIP_TRAILING_WHITESPACE)

 # Apparently, in clang-19, the UBSan support library for C++ was moved out into ubsan_standalone_cxx.a, so we have to include both.
 if (SANITIZE STREQUAL undefined)
@@ -5,7 +5,11 @@ if (NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang")
 endif ()

 # Print details to output
-execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version OUTPUT_VARIABLE COMPILER_SELF_IDENTIFICATION OUTPUT_STRIP_TRAILING_WHITESPACE)
+execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version
+    OUTPUT_VARIABLE COMPILER_SELF_IDENTIFICATION
+    COMMAND_ERROR_IS_FATAL ANY
+    OUTPUT_STRIP_TRAILING_WHITESPACE
+)
 message (STATUS "Using compiler:\n${COMPILER_SELF_IDENTIFICATION}")

 # Require minimum compiler versions
@@ -90,7 +90,10 @@ endfunction()

 # Function get_cmake_properties returns list of all propreties that cmake supports
 function(get_cmake_properties outvar)
-    execute_process(COMMAND cmake --help-property-list OUTPUT_VARIABLE cmake_properties)
+    execute_process(COMMAND cmake --help-property-list
+        OUTPUT_VARIABLE cmake_properties
+        COMMAND_ERROR_IS_FATAL ANY
+    )
     # Convert command output into a CMake list
     string(REGEX REPLACE ";" "\\\\;" cmake_properties "${cmake_properties}")
     string(REGEX REPLACE "\n" ";" cmake_properties "${cmake_properties}")
@@ -37,7 +37,9 @@ message(STATUS "Packaging with tzdata version: ${TZDATA_VERSION}")
 execute_process(COMMAND
     bash -c "cd ${TZDIR} && find * -type f -and ! -name '*.tab' -and ! -name 'localtime' | LC_ALL=C sort | paste -sd ';' -"
     OUTPUT_STRIP_TRAILING_WHITESPACE
-    OUTPUT_VARIABLE TIMEZONES)
+    OUTPUT_VARIABLE TIMEZONES
+    COMMAND_ERROR_IS_FATAL ANY
+)

 file(APPEND ${TIMEZONES_FILE} "// autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt\n")
 file(APPEND ${TIMEZONES_FILE} "#include <incbin.h>\n")
@@ -359,7 +359,9 @@ else ()

     execute_process(
         COMMAND mkdir -p ${PROTOC_BUILD_DIR}
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )

     execute_process(
         COMMAND ${CMAKE_COMMAND}
@@ -375,11 +377,15 @@ else ()
         "-DABSL_ENABLE_INSTALL=0"
         "${protobuf_source_dir}"
         WORKING_DIRECTORY "${PROTOC_BUILD_DIR}"
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )

     execute_process(
         COMMAND ${CMAKE_COMMAND} --build "${PROTOC_BUILD_DIR}"
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )
 endif ()

 add_executable(protoc IMPORTED GLOBAL)
@@ -51,8 +51,9 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
     set(OPENSSL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/openssl-cmake")

     execute_process(
-        COMMAND mkdir -p ${OPENSSL_BUILD_DIR}
-        COMMAND_ECHO STDOUT
+        COMMAND mkdir -p ${OPENSSL_BUILD_DIR}
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
     )

     if (CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "amd64|x86_64")
@@ -89,15 +90,21 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
         "-DClickHouse_SOURCE_DIR=${ClickHouse_SOURCE_DIR}"
         "${OPENSSL_SOURCE_DIR}"
         WORKING_DIRECTORY "${OPENSSL_BUILD_DIR}"
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )

     execute_process(
         COMMAND ${CMAKE_COMMAND} --build "${OPENSSL_BUILD_DIR}"
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )

     execute_process(
         COMMAND ${CMAKE_COMMAND} --install "${OPENSSL_BUILD_DIR}"
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )

     # It's not important on which file we depend, we just want to specify right order
     add_library(openssl_for_grpc STATIC IMPORTED GLOBAL)
@@ -108,8 +115,9 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
     set (GRPC_CPP_PLUGIN_BUILD_DIR "${_gRPC_BINARY_DIR}/build")

     execute_process(
-        COMMAND mkdir -p ${GRPC_CPP_PLUGIN_BUILD_DIR}
-        COMMAND_ECHO STDOUT
+        COMMAND mkdir -p ${GRPC_CPP_PLUGIN_BUILD_DIR}
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
     )

     set(abseil_source_dir "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp")
@@ -140,11 +148,15 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
         "-DgRPC_SSL_PROVIDER=package"
         "${_gRPC_SOURCE_DIR}"
         WORKING_DIRECTORY "${GRPC_CPP_PLUGIN_BUILD_DIR}"
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )

     execute_process(
         COMMAND ${CMAKE_COMMAND} --build "${GRPC_CPP_PLUGIN_BUILD_DIR}"
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )

     add_executable(grpc_cpp_plugin IMPORTED GLOBAL)
     set_target_properties (grpc_cpp_plugin PROPERTIES IMPORTED_LOCATION "${GRPC_CPP_PLUGIN_BUILD_DIR}/grpc_cpp_plugin")
contrib/icu (vendored), 2 changes

@@ -1 +1 @@
-Subproject commit 7750081bda4b3bc1768ae03849ec70f67ea10625
+Subproject commit 4216173eeeb39c1d4caaa54a68860e800412d273
@@ -47,8 +47,7 @@
     "docker/test/stateful": {
         "name": "clickhouse/stateful-test",
         "dependent": [
-            "docker/test/stress",
-            "docker/test/upgrade"
+            "docker/test/stress"
         ]
     },
     "docker/test/unit": {
@@ -59,10 +58,6 @@
         "name": "clickhouse/stress-test",
         "dependent": []
     },
-    "docker/test/upgrade": {
-        "name": "clickhouse/upgrade-check",
-        "dependent": []
-    },
     "docker/test/integration/runner": {
         "name": "clickhouse/integration-tests-runner",
         "dependent": []
@@ -93,6 +93,3 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
 ENV COMMIT_SHA=''
 ENV PULL_REQUEST_NUMBER=''
 ENV COPY_CLICKHOUSE_BINARY_TO_OUTPUT=0
-
-COPY run.sh /
-CMD ["/bin/bash", "/run.sh"]
|
@ -10,7 +10,3 @@ RUN apt-get update -y \
|
||||
npm \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
|
||||
|
||||
COPY create.sql /
|
||||
COPY run.sh /
|
||||
CMD ["/bin/bash", "/run.sh"]
|
||||
|
@@ -1 +0,0 @@
-../stateless/setup_minio.sh
@@ -85,18 +85,6 @@ RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoo
 ENV MINIO_ROOT_USER="clickhouse"
 ENV MINIO_ROOT_PASSWORD="clickhouse"
 ENV EXPORT_S3_STORAGE_POLICIES=1
-ENV CLICKHOUSE_GRPC_CLIENT="/usr/share/clickhouse-utils/grpc-client/clickhouse-grpc-client.py"
-
-RUN npm install -g azurite@3.30.0 \
-    && npm install -g tslib && npm install -g node
-
-COPY run.sh /
-COPY setup_minio.sh /
-COPY setup_hdfs_minicluster.sh /
-COPY attach_gdb.lib /
-COPY utils.lib /
-
-# We store stress_tests.lib in stateless image to avoid duplication of this file in stress and upgrade tests
 COPY stress_tests.lib /

 CMD ["/bin/bash", "/run.sh"]
@@ -22,8 +22,5 @@ RUN apt-get update -y \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

-COPY run.sh /
-
-ENV EXPORT_S3_STORAGE_POLICIES=1

 CMD ["/bin/bash", "/run.sh"]
@@ -1,29 +0,0 @@
-# rebuild in #33610
-# docker build -t clickhouse/upgrade-check .
-ARG FROM_TAG=latest
-FROM clickhouse/stateful-test:$FROM_TAG
-
-RUN apt-get update -y \
-    && env DEBIAN_FRONTEND=noninteractive \
-        apt-get install --yes --no-install-recommends \
-            bash \
-            tzdata \
-            parallel \
-            expect \
-            python3 \
-            python3-lxml \
-            python3-termcolor \
-            python3-requests \
-            curl \
-            sudo \
-            openssl \
-            netcat-openbsd \
-            brotli \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
-
-COPY run.sh /
-
-ENV EXPORT_S3_STORAGE_POLICIES=1
-
-CMD ["/bin/bash", "/run.sh"]
@@ -56,7 +56,5 @@ RUN apt-get update \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

-COPY process_functional_tests_result.py /
-
 COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
 ENV PATH="/opt/gdb/bin:${PATH}"
docs/changelogs/v24.3.8.13-lts.md (new file), 16 lines

@@ -0,0 +1,16 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.3.8.13-lts (84bbfc70f5d) FIXME as compared to v24.3.7.30-lts (c8a28cf4331)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#68562](https://github.com/ClickHouse/ClickHouse/issues/68562): Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
+* Backported in [#68114](https://github.com/ClickHouse/ClickHouse/issues/68114): Fix possible PARAMETER_OUT_OF_BOUND error during reading variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#67989](https://github.com/ClickHouse/ClickHouse/issues/67989): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#68335](https://github.com/ClickHouse/ClickHouse/issues/68335): Try fix postgres crash when query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Backported in [#68392](https://github.com/ClickHouse/ClickHouse/issues/68392): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
+
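The last entry concerns the mode keywords of `SYSTEM SYNC REPLICA`. For reference, a minimal sketch of how those modes are spelled; the table name is a placeholder, not something from this commit:

``` sql
-- Default: process the pending entries in the replication queue.
SYSTEM SYNC REPLICA my_replicated_table;
-- STRICT: wait until the replication queue becomes completely empty.
SYSTEM SYNC REPLICA my_replicated_table STRICT;
-- LIGHTWEIGHT: wait only for a light subset of queue entries (fetch-like operations).
SYSTEM SYNC REPLICA my_replicated_table LIGHTWEIGHT;
```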
@@ -240,7 +240,7 @@ libhdfs3 support HDFS namenode HA.
 ## Storage Settings {#storage-settings}

 - [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
-- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [hdfs_create_new_file_on_insert](/docs/en/operations/settings/settings.md#hdfs_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
 - [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.

 **See Also**
@@ -225,7 +225,7 @@ CREATE TABLE table_with_asterisk (name String, value UInt32)
 ## Storage Settings {#storage-settings}

 - [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
-- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [s3_create_new_file_on_insert](/docs/en/operations/settings/settings.md#s3_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
 - [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.

 ## S3-related Settings {#settings}
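The hunk above only renames the documented setting; the behavior is unchanged. A minimal sketch of how `s3_create_new_file_on_insert` is used with the S3 engine; the bucket URL and table name are placeholders, not taken from the commit:

``` sql
CREATE TABLE s3_example (name String, value UInt32)
    ENGINE = S3('https://my-bucket.s3.amazonaws.com/data.Parquet', 'Parquet');

INSERT INTO s3_example VALUES ('one', 1);
-- Without the setting, a second insert into the same key fails (unless
-- s3_truncate_on_insert is enabled); with it, a new file such as
-- data.1.Parquet is created instead.
INSERT INTO s3_example SETTINGS s3_create_new_file_on_insert = 1 VALUES ('two', 2);
```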
@@ -6103,30 +6103,23 @@ Result:
 └───────┴───────────────┴──────┴──────────────┴──────────────┴──────────────────────┘
 ```

-## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second)
+## toIntervalYear

-Converts a Number type argument to an [Interval](../data-types/special-data-types/interval.md) data type.
+Returns an interval of `n` years of data type [IntervalYear](../data-types/special-data-types/interval.md).

 **Syntax**

 ``` sql
-toIntervalSecond(number)
-toIntervalMinute(number)
-toIntervalHour(number)
-toIntervalDay(number)
-toIntervalWeek(number)
-toIntervalMonth(number)
-toIntervalQuarter(number)
-toIntervalYear(number)
+toIntervalYear(n)
 ```

 **Arguments**

-- `number` — Duration of interval. Positive integer number.
+- `n` — Number of years. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).

 **Returned values**

-- The value in `Interval` data type.
+- Interval of `n` years. [IntervalYear](../data-types/special-data-types/interval.md).

 **Example**

@@ -6134,20 +6127,387 @@ Query:

 ``` sql
 WITH
-    toDate('2019-01-01') AS date,
-    INTERVAL 1 WEEK AS interval_week,
-    toIntervalWeek(1) AS interval_to_week
-SELECT
-    date + interval_week,
-    date + interval_to_week;
+    toDate('2024-06-15') AS date,
+    toIntervalYear(1) AS interval_to_year
+SELECT date + interval_to_year AS result
 ```

 Result:

 ```response
-┌─plus(date, interval_week)─┬─plus(date, interval_to_week)─┐
-│                2019-01-08 │                   2019-01-08 │
-└───────────────────────────┴──────────────────────────────┘
+┌─────result─┐
+│ 2025-06-15 │
+└────────────┘
 ```
+
+## toIntervalQuarter
+
+Returns an interval of `n` quarters of data type [IntervalQuarter](../data-types/special-data-types/interval.md).
+
+**Syntax**
+
+``` sql
+toIntervalQuarter(n)
+```
+
+**Arguments**
+
+- `n` — Number of quarters. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
+
+**Returned values**
+
+- Interval of `n` quarters. [IntervalQuarter](../data-types/special-data-types/interval.md).
+
+**Example**
+
+Query:
+
+``` sql
+WITH
+    toDate('2024-06-15') AS date,
+    toIntervalQuarter(1) AS interval_to_quarter
+SELECT date + interval_to_quarter AS result
+```
+
+Result:
+
+```response
+┌─────result─┐
+│ 2024-09-15 │
+└────────────┘
+```
+
+## toIntervalMonth
+
+Returns an interval of `n` months of data type [IntervalMonth](../data-types/special-data-types/interval.md).
+
+**Syntax**
+
+``` sql
+toIntervalMonth(n)
+```
+
+**Arguments**
+
+- `n` — Number of months. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
+
+**Returned values**
+
+- Interval of `n` months. [IntervalMonth](../data-types/special-data-types/interval.md).
+
+**Example**
+
+Query:
+
+``` sql
+WITH
+    toDate('2024-06-15') AS date,
+    toIntervalMonth(1) AS interval_to_month
+SELECT date + interval_to_month AS result
+```
+
+Result:
+
+```response
+┌─────result─┐
+│ 2024-07-15 │
+└────────────┘
+```
+
+## toIntervalWeek
+
+Returns an interval of `n` weeks of data type [IntervalWeek](../data-types/special-data-types/interval.md).
+
+**Syntax**
+
+``` sql
+toIntervalWeek(n)
+```
+
+**Arguments**
+
+- `n` — Number of weeks. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
+
+**Returned values**
+
+- Interval of `n` weeks. [IntervalWeek](../data-types/special-data-types/interval.md).
+
+**Example**
+
+Query:
+
+``` sql
+WITH
+    toDate('2024-06-15') AS date,
+    toIntervalWeek(1) AS interval_to_week
+SELECT date + interval_to_week AS result
+```
+
+Result:
+
+```response
+┌─────result─┐
+│ 2024-06-22 │
+└────────────┘
+```
+
+## toIntervalDay
+
+Returns an interval of `n` days of data type [IntervalDay](../data-types/special-data-types/interval.md).
+
+**Syntax**
+
+``` sql
+toIntervalDay(n)
+```
+
+**Arguments**
+
+- `n` — Number of days. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
+
+**Returned values**
+
+- Interval of `n` days. [IntervalDay](../data-types/special-data-types/interval.md).
+
+**Example**
+
+Query:
+
+``` sql
+WITH
+    toDate('2024-06-15') AS date,
+    toIntervalDay(5) AS interval_to_days
+SELECT date + interval_to_days AS result
+```
+
+Result:
+
+```response
+┌─────result─┐
+│ 2024-06-20 │
+└────────────┘
+```
+
+## toIntervalHour
+
+Returns an interval of `n` hours of data type [IntervalHour](../data-types/special-data-types/interval.md).
+
+**Syntax**
+
+``` sql
+toIntervalHour(n)
+```
+
+**Arguments**
+
+- `n` — Number of hours. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
+
+**Returned values**
+
+- Interval of `n` hours. [IntervalHour](../data-types/special-data-types/interval.md).
+
+**Example**
+
+Query:
+
+``` sql
+WITH
+    toDate('2024-06-15') AS date,
+    toIntervalHour(12) AS interval_to_hours
+SELECT date + interval_to_hours AS result
+```
+
+Result:
+
+```response
+┌──────────────result─┐
+│ 2024-06-15 12:00:00 │
+└─────────────────────┘
+```
+
+## toIntervalMinute
+
+Returns an interval of `n` minutes of data type [IntervalMinute](../data-types/special-data-types/interval.md).
+
+**Syntax**
+
+``` sql
+toIntervalMinute(n)
+```
+
+**Arguments**
+
+- `n` — Number of minutes. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
+
+**Returned values**
+
+- Interval of `n` minutes. [IntervalMinute](../data-types/special-data-types/interval.md).
+
+**Example**
+
+Query:
+
+``` sql
+WITH
+    toDate('2024-06-15') AS date,
+    toIntervalMinute(12) AS interval_to_minutes
+SELECT date + interval_to_minutes AS result
+```
+
+Result:
+
+```response
+┌──────────────result─┐
+│ 2024-06-15 00:12:00 │
+└─────────────────────┘
+```
+
+## toIntervalSecond
+
+Returns an interval of `n` seconds of data type [IntervalSecond](../data-types/special-data-types/interval.md).
+
+**Syntax**
+
+``` sql
+toIntervalSecond(n)
+```
+
+**Arguments**
+
+- `n` — Number of seconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
+
+**Returned values**
+
+- Interval of `n` seconds. [IntervalSecond](../data-types/special-data-types/interval.md).
+
+**Example**
+
+Query:
+
+``` sql
+WITH
+    toDate('2024-06-15') AS date,
+    toIntervalSecond(30) AS interval_to_seconds
+SELECT date + interval_to_seconds AS result
+```
+
+Result:
+
+```response
+┌──────────────result─┐
+│ 2024-06-15 00:00:30 │
+└─────────────────────┘
+```
+
+## toIntervalMillisecond
+
+Returns an interval of `n` milliseconds of data type [IntervalMillisecond](../data-types/special-data-types/interval.md).
+
+**Syntax**
+
+``` sql
+toIntervalMillisecond(n)
+```
+
+**Arguments**
+
+- `n` — Number of milliseconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
+
+**Returned values**
+
+- Interval of `n` milliseconds. [IntervalMillisecond](../data-types/special-data-types/interval.md).
+
+**Example**
+
+Query:
+
+``` sql
+WITH
+    toDateTime('2024-06-15') AS date,
+    toIntervalMillisecond(30) AS interval_to_milliseconds
+SELECT date + interval_to_milliseconds AS result
+```
+
+Result:
+
+```response
+┌──────────────────result─┐
+│ 2024-06-15 00:00:00.030 │
+└─────────────────────────┘
+```
+
+## toIntervalMicrosecond
+
+Returns an interval of `n` microseconds of data type [IntervalMicrosecond](../data-types/special-data-types/interval.md).
+
+**Syntax**
+
+``` sql
+toIntervalMicrosecond(n)
+```
+
+**Arguments**
+
+- `n` — Number of microseconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
+
+**Returned values**
+
+- Interval of `n` microseconds. [IntervalMicrosecond](../data-types/special-data-types/interval.md).
+
+**Example**
+
+Query:
+
+``` sql
+WITH
+    toDateTime('2024-06-15') AS date,
+    toIntervalMicrosecond(30) AS interval_to_microseconds
+SELECT date + interval_to_microseconds AS result
+```
+
+Result:
+
+```response
+┌─────────────────────result─┐
+│ 2024-06-15 00:00:00.000030 │
+└────────────────────────────┘
+```
+
+## toIntervalNanosecond
+
+Returns an interval of `n` nanoseconds of data type [IntervalNanosecond](../data-types/special-data-types/interval.md).
+
+**Syntax**
+
+``` sql
+toIntervalNanosecond(n)
+```
+
+**Arguments**
+
+- `n` — Number of nanoseconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
+
+**Returned values**
+
+- Interval of `n` nanoseconds. [IntervalNanosecond](../data-types/special-data-types/interval.md).
+
+**Example**
+
+Query:
+
+``` sql
+WITH
+    toDateTime('2024-06-15') AS date,
+    toIntervalNanosecond(30) AS interval_to_nanoseconds
+SELECT date + interval_to_nanoseconds AS result
+```
+
+Result:
+
+```response
+┌────────────────────────result─┐
+│ 2024-06-15 00:00:00.000000030 │
+└───────────────────────────────┘
+```

 ## parseDateTime
@@ -200,6 +200,7 @@ Hierarchy of privileges:
         - `JDBC`
         - `HDFS`
        - `S3`
+        - `POSTGRES`
 - [dictGet](#dictget)
 - [displaySecretsInShowAndSelect](#displaysecretsinshowandselect)
 - [NAMED COLLECTION ADMIN](#named-collection-admin)
@@ -476,6 +477,7 @@ Allows using external data sources. Applies to [table engines](../../engines/tab
 - `JDBC`. Level: `GLOBAL`
 - `HDFS`. Level: `GLOBAL`
 - `S3`. Level: `GLOBAL`
+- `POSTGRES`. Level: `GLOBAL`

 The `SOURCES` privilege enables use of all the sources. Also you can grant a privilege for each source individually. To use sources, you need additional privileges.

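`POSTGRES` joins the other source privileges, so it is granted the same way. A minimal sketch; the user name is a placeholder:

``` sql
-- Allow one user to use external PostgreSQL sources only.
GRANT POSTGRES ON *.* TO ingest_user;
-- Or grant all external sources at once.
GRANT SOURCES ON *.* TO ingest_user;
```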
@@ -116,7 +116,7 @@ SELECT * from HDFS('hdfs://hdfs1:9000/data/path/date=*/country=*/code=*/*.parque
 ## Storage Settings {#storage-settings}

 - [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
-- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [hdfs_create_new_file_on_insert](/docs/en/operations/settings/settings.md#hdfs_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
 - [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
 - [ignore_access_denied_multidirectory_globs](/docs/en/operations/settings/settings.md#ignore_access_denied_multidirectory_globs) - allows to ignore permission denied errors for multi-directory globs.

@@ -290,7 +290,7 @@ SELECT * from s3('s3://data/path/date=*/country=*/code=*/*.parquet') where _date
 ## Storage Settings {#storage-settings}

 - [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
-- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [s3_create_new_file_on_insert](/docs/en/operations/settings/settings.md#s3_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
 - [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.

 **See Also**
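With the table function, these storage settings are typically supplied per query. A minimal sketch using one of the settings listed above; the bucket URL is a placeholder:

``` sql
-- Count rows across all Parquet objects, skipping zero-byte files instead of failing.
SELECT count()
FROM s3('https://my-bucket.s3.amazonaws.com/data/*.parquet', 'Parquet')
SETTINGS s3_skip_empty_files = 1;
```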
@@ -6,6 +6,7 @@ macro(configure_bash_completion)
         COMMAND ${PKG_CONFIG_BIN} --variable=completionsdir bash-completion
         OUTPUT_VARIABLE ${out}
         OUTPUT_STRIP_TRAILING_WHITESPACE
+        COMMAND_ERROR_IS_FATAL ANY
     )
     endif()
     string(REPLACE /usr "${CMAKE_INSTALL_PREFIX}" out "${out}")
@@ -1164,9 +1164,6 @@ void Client::processOptions(const OptionsDescription & options_description,
     /// (There is no need to copy the context because clickhouse-client has no background tasks so it won't use that context in parallel.)
     client_context = global_context;
     initClientContext();
-
-    /// Allow to pass-through unknown settings to the server.
-    client_context->getAccessControl().allowAllSettings();
 }


@@ -713,7 +713,7 @@ void LocalServer::processConfig()
     if (index_uncompressed_cache_size > max_cache_size)
     {
         index_uncompressed_cache_size = max_cache_size;
-        LOG_INFO(log, "Lowered index uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
+        LOG_INFO(log, "Lowered index uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_uncompressed_cache_size));
     }
     global_context->setIndexUncompressedCache(index_uncompressed_cache_policy, index_uncompressed_cache_size, index_uncompressed_cache_size_ratio);

@@ -723,7 +723,7 @@ void LocalServer::processConfig()
     if (index_mark_cache_size > max_cache_size)
     {
         index_mark_cache_size = max_cache_size;
-        LOG_INFO(log, "Lowered index mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
+        LOG_INFO(log, "Lowered index mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_mark_cache_size));
     }
     global_context->setIndexMarkCache(index_mark_cache_policy, index_mark_cache_size, index_mark_cache_size_ratio);

@@ -731,7 +731,7 @@ void LocalServer::processConfig()
     if (mmap_cache_size > max_cache_size)
     {
         mmap_cache_size = max_cache_size;
-        LOG_INFO(log, "Lowered mmap file cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
+        LOG_INFO(log, "Lowered mmap file cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(mmap_cache_size));
     }
     global_context->setMMappedFileCache(mmap_cache_size);

@@ -1420,7 +1420,7 @@ try
     if (index_uncompressed_cache_size > max_cache_size)
     {
         index_uncompressed_cache_size = max_cache_size;
-        LOG_INFO(log, "Lowered index uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
+        LOG_INFO(log, "Lowered index uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_uncompressed_cache_size));
     }
     global_context->setIndexUncompressedCache(index_uncompressed_cache_policy, index_uncompressed_cache_size, index_uncompressed_cache_size_ratio);

@@ -1430,7 +1430,7 @@ try
     if (index_mark_cache_size > max_cache_size)
     {
         index_mark_cache_size = max_cache_size;
-        LOG_INFO(log, "Lowered index mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
+        LOG_INFO(log, "Lowered index mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_mark_cache_size));
     }
     global_context->setIndexMarkCache(index_mark_cache_policy, index_mark_cache_size, index_mark_cache_size_ratio);

@@ -1438,7 +1438,7 @@ try
     if (mmap_cache_size > max_cache_size)
     {
         mmap_cache_size = max_cache_size;
-        LOG_INFO(log, "Lowered mmap file cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
+        LOG_INFO(log, "Lowered mmap file cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(mmap_cache_size));
     }
     global_context->setMMappedFileCache(mmap_cache_size);

@@ -1449,7 +1449,7 @@ try
     if (query_cache_max_size_in_bytes > max_cache_size)
     {
         query_cache_max_size_in_bytes = max_cache_size;
-        LOG_INFO(log, "Lowered query cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
+        LOG_INFO(log, "Lowered query cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(query_cache_max_size_in_bytes));
     }
     global_context->setQueryCache(query_cache_max_size_in_bytes, query_cache_max_entries, query_cache_query_cache_max_entry_size_in_bytes, query_cache_max_entry_size_in_rows);

@@ -1922,7 +1922,7 @@ try
     auto & access_control = global_context->getAccessControl();
     try
     {
-        access_control.setupFromMainConfig(config(), config_path, [&] { return global_context->getZooKeeper(); });
+        access_control.setUpFromMainConfig(config(), config_path, [&] { return global_context->getZooKeeper(); });
     }
     catch (...)
     {
@@ -280,7 +280,7 @@ void AccessControl::shutdown()
 }


-void AccessControl::setupFromMainConfig(const Poco::Util::AbstractConfiguration & config_, const String & config_path_,
+void AccessControl::setUpFromMainConfig(const Poco::Util::AbstractConfiguration & config_, const String & config_path_,
                                         const zkutil::GetZooKeeper & get_zookeeper_function_)
 {
     if (config_.has("custom_settings_prefixes"))
@@ -868,10 +868,4 @@ const ExternalAuthenticators & AccessControl::getExternalAuthenticators() const
     return *external_authenticators;
 }

-
-void AccessControl::allowAllSettings()
-{
-    custom_settings_prefixes->registerPrefixes({""});
-}
-
 }
@@ -57,7 +57,7 @@ public:
     void shutdown() override;

     /// Initializes access storage (user directories).
-    void setupFromMainConfig(const Poco::Util::AbstractConfiguration & config_, const String & config_path_,
+    void setUpFromMainConfig(const Poco::Util::AbstractConfiguration & config_, const String & config_path_,
                              const zkutil::GetZooKeeper & get_zookeeper_function_);

     /// Parses access entities from a configuration loaded from users.xml.
@@ -238,9 +238,6 @@ public:
     /// Gets manager of notifications.
     AccessChangesNotifier & getChangesNotifier();

-    /// Allow all setting names - this can be used in clients to pass-through unknown settings to the server.
-    void allowAllSettings();
-
 private:
     class ContextAccessCache;
     class CustomSettingsPrefixes;
@@ -219,8 +219,8 @@ void SettingsConstraints::clamp(const Settings & current_settings, SettingsChang
     });
 }

-template <typename SettingsT>
-bool getNewValueToCheck(const SettingsT & current_settings, SettingChange & change, Field & new_value, bool throw_on_failure)
+template <class T>
+bool getNewValueToCheck(const T & current_settings, SettingChange & change, Field & new_value, bool throw_on_failure)
 {
     Field current_value;
     bool has_current_value = current_settings.tryGet(change.name, current_value);
@@ -230,12 +230,12 @@ bool getNewValueToCheck(const SettingsT & current_settings, SettingChange & chan
         return false;

     if (throw_on_failure)
-        new_value = SettingsT::castValueUtil(change.name, change.value);
+        new_value = T::castValueUtil(change.name, change.value);
     else
     {
         try
         {
-            new_value = SettingsT::castValueUtil(change.name, change.value);
+            new_value = T::castValueUtil(change.name, change.value);
         }
         catch (...)
         {
@@ -58,7 +58,6 @@
 #include <QueryPipeline/QueryPipelineBuilder.h>
 #include <Interpreters/ReplaceQueryParameterVisitor.h>
 #include <Interpreters/ProfileEventsExt.h>
-#include <Interpreters/InterpreterSetQuery.h>
 #include <IO/WriteBufferFromOStream.h>
 #include <IO/WriteBufferFromFileDescriptor.h>
 #include <IO/CompressionMethod.h>
@@ -1609,14 +1608,14 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
     auto metadata = storage->getInMemoryMetadataPtr();
     QueryPlan plan;
     storage->read(
-            plan,
-            sample.getNames(),
-            storage->getStorageSnapshot(metadata, client_context),
-            query_info,
-            client_context,
-            {},
-            client_context->getSettingsRef().max_block_size,
-            getNumberOfPhysicalCPUCores());
+        plan,
+        sample.getNames(),
+        storage->getStorageSnapshot(metadata, client_context),
+        query_info,
+        client_context,
+        {},
+        client_context->getSettingsRef().max_block_size,
+        getNumberOfPhysicalCPUCores());

     auto builder = plan.buildQueryPipeline(
         QueryPlanOptimizationSettings::fromContext(client_context),
@@ -1893,19 +1892,48 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
     profile_events.watch.restart();

     {
-        /// Temporarily apply query settings to the context.
-        Settings old_settings = client_context->getSettingsCopy();
-        SCOPE_EXIT_SAFE(
-        {
-            client_context->setSettings(old_settings);
+        /// Temporarily apply query settings to context.
+        std::optional<Settings> old_settings;
+        SCOPE_EXIT_SAFE({
+            if (old_settings)
+                client_context->setSettings(*old_settings);
         });
-        InterpreterSetQuery::applySettingsFromQuery(parsed_query, client_context);
+
+        auto apply_query_settings = [&](const IAST & settings_ast)
+        {
+            if (!old_settings)
+                old_settings.emplace(client_context->getSettingsRef());
+            client_context->applySettingsChanges(settings_ast.as<ASTSetQuery>()->changes);
+            client_context->resetSettingsToDefaultValue(settings_ast.as<ASTSetQuery>()->default_settings);
+        };
+
+        const auto * insert = parsed_query->as<ASTInsertQuery>();
+        if (const auto * select = parsed_query->as<ASTSelectQuery>(); select && select->settings())
+            apply_query_settings(*select->settings());
+        else if (const auto * select_with_union = parsed_query->as<ASTSelectWithUnionQuery>())
+        {
+            const ASTs & children = select_with_union->list_of_selects->children;
+            if (!children.empty())
+            {
+                // On the client it is enough to apply settings only for the
+                // last SELECT, since the only thing that is important to apply
+                // on the client is format settings.
+                const auto * last_select = children.back()->as<ASTSelectQuery>();
+                if (last_select && last_select->settings())
+                {
+                    apply_query_settings(*last_select->settings());
+                }
+            }
+        }
+        else if (const auto * query_with_output = parsed_query->as<ASTQueryWithOutput>(); query_with_output && query_with_output->settings_ast)
+            apply_query_settings(*query_with_output->settings_ast);
+        else if (insert && insert->settings_ast)
+            apply_query_settings(*insert->settings_ast);

         if (!connection->checkConnected(connection_parameters.timeouts))
             connect();

         ASTPtr input_function;
-        const auto * insert = parsed_query->as<ASTInsertQuery>();
         if (insert && insert->select)
             insert->tryFindInputFunction(input_function);

@@ -92,3 +92,8 @@ endif()

 clickhouse_add_executable (check_pointer_valid check_pointer_valid.cpp)
 target_link_libraries (check_pointer_valid PRIVATE clickhouse_common_io clickhouse_common_config)
+
+if (TARGET ch_contrib::icu)
+    clickhouse_add_executable (utf8_upper_lower utf8_upper_lower.cpp)
+    target_link_libraries (utf8_upper_lower PRIVATE ch_contrib::icu)
+endif ()
src/Common/examples/utf8_upper_lower.cpp (new file), 27 lines

@@ -0,0 +1,27 @@
+#include <iostream>
+#include <unicode/unistr.h>
+
+std::string utf8_to_lower(const std::string & input)
+{
+    icu::UnicodeString unicodeInput(input.c_str(), "UTF-8");
+    unicodeInput.toLower();
+    std::string output;
+    unicodeInput.toUTF8String(output);
+    return output;
+}
+
+std::string utf8_to_upper(const std::string & input)
+{
+    icu::UnicodeString unicodeInput(input.c_str(), "UTF-8");
+    unicodeInput.toUpper();
+    std::string output;
+    unicodeInput.toUTF8String(output);
+    return output;
+}
+
+int main()
+{
+    std::string input = "ır";
+    std::cout << "upper:" << utf8_to_upper(input) << std::endl;
+    return 0;
+}
@@ -35,6 +35,7 @@ namespace ErrorCodes
 {
     extern const int UNEXPECTED_AST_STRUCTURE;
     extern const int BAD_ARGUMENTS;
+    extern const int CANNOT_COMPILE_REGEXP;
 }

 DataTypeObject::DataTypeObject(
@@ -51,6 +52,17 @@ DataTypeObject::DataTypeObject(
     , max_dynamic_paths(max_dynamic_paths_)
     , max_dynamic_types(max_dynamic_types_)
 {
+    /// Check if regular expressions are valid.
+    for (const auto & regexp_str : path_regexps_to_skip)
+    {
+        re2::RE2::Options options;
+        /// Don't log errors to stderr.
+        options.set_log_errors(false);
+        auto regexp = re2::RE2(regexp_str, options);
+        if (!regexp.ok())
+            throw Exception(ErrorCodes::CANNOT_COMPILE_REGEXP, "Invalid regexp '{}': {}", regexp_str, regexp.error());
+    }
+
     for (const auto & [typed_path, type] : typed_paths)
     {
         for (const auto & path_to_skip : paths_to_skip)
@@ -1,7 +1,6 @@
 #pragma once
 #include <Columns/ColumnString.h>
-

 namespace DB
 {

@@ -1,15 +1,14 @@
 #pragma once

+#include "config.h"
+
+#if USE_ICU
+
 #include <Columns/ColumnString.h>
 #include <Functions/LowerUpperImpl.h>
 #include <base/defines.h>
-#include <Poco/UTF8Encoding.h>
+#include <base/find_symbols.h>
+#include <unicode/unistr.h>
 #include <Common/StringUtils.h>
 #include <Common/UTF8Helpers.h>

-#ifdef __SSE2__
-#include <emmintrin.h>
-#endif
-

 namespace DB
 {
@@ -19,71 +18,7 @@ namespace ErrorCodes
     extern const int BAD_ARGUMENTS;
 }

-/// xor or do nothing
-template <bool>
-UInt8 xor_or_identity(const UInt8 c, const int mask)
-{
-    return c ^ mask;
-}
-
-template <>
-inline UInt8 xor_or_identity<false>(const UInt8 c, const int)
-{
-    return c;
-}
-
-/// It is caller's responsibility to ensure the presence of a valid cyrillic sequence in array
-template <bool to_lower>
-inline void UTF8CyrillicToCase(const UInt8 *& src, UInt8 *& dst)
-{
-    if (src[0] == 0xD0u && (src[1] >= 0x80u && src[1] <= 0x8Fu))
-    {
-        /// ЀЁЂЃЄЅІЇЈЉЊЋЌЍЎЏ
-        *dst++ = xor_or_identity<to_lower>(*src++, 0x1);
-        *dst++ = xor_or_identity<to_lower>(*src++, 0x10);
-    }
-    else if (src[0] == 0xD1u && (src[1] >= 0x90u && src[1] <= 0x9Fu))
-    {
-        /// ѐёђѓєѕіїјљњћќѝўџ
-        *dst++ = xor_or_identity<!to_lower>(*src++, 0x1);
-        *dst++ = xor_or_identity<!to_lower>(*src++, 0x10);
-    }
-    else if (src[0] == 0xD0u && (src[1] >= 0x90u && src[1] <= 0x9Fu))
-    {
-        /// А-П
-        *dst++ = *src++;
-        *dst++ = xor_or_identity<to_lower>(*src++, 0x20);
-    }
-    else if (src[0] == 0xD0u && (src[1] >= 0xB0u && src[1] <= 0xBFu))
-    {
-        /// а-п
-        *dst++ = *src++;
-        *dst++ = xor_or_identity<!to_lower>(*src++, 0x20);
-    }
-    else if (src[0] == 0xD0u && (src[1] >= 0xA0u && src[1] <= 0xAFu))
-    {
-        /// Р-Я
-        *dst++ = xor_or_identity<to_lower>(*src++, 0x1);
-        *dst++ = xor_or_identity<to_lower>(*src++, 0x20);
-    }
-    else if (src[0] == 0xD1u && (src[1] >= 0x80u && src[1] <= 0x8Fu))
-    {
-        /// р-я
-        *dst++ = xor_or_identity<!to_lower>(*src++, 0x1);
-        *dst++ = xor_or_identity<!to_lower>(*src++, 0x20);
-    }
-}
-
-
-/** If the string contains UTF-8 encoded text, convert it to the lower (upper) case.
-  * Note: It is assumed that after the character is converted to another case,
-  * the length of its multibyte sequence in UTF-8 does not change.
-  * Otherwise, the behavior is undefined.
-  */
-template <char not_case_lower_bound,
-    char not_case_upper_bound,
-    int to_case(int),
-    void cyrillic_to_case(const UInt8 *&, UInt8 *&)>
+template <char not_case_lower_bound, char not_case_upper_bound, bool upper>
 struct LowerUpperUTF8Impl
 {
     static void vector(
@@ -103,180 +38,46 @@ struct LowerUpperUTF8Impl
             return;
         }

-        res_data.resize_exact(data.size());
-        res_offsets.assign(offsets);
-        array(data.data(), data.data() + data.size(), offsets, res_data.data());
+        res_data.resize(data.size());
+        res_offsets.resize_exact(offsets.size());
+
+        String output;
+        size_t curr_offset = 0;
+        for (size_t i = 0; i < offsets.size(); ++i)
+        {
+            const auto * data_start = reinterpret_cast<const char *>(&data[offsets[i - 1]]);
+            size_t size = offsets[i] - offsets[i - 1];
+
+            icu::UnicodeString input(data_start, static_cast<int32_t>(size), "UTF-8");
+            if constexpr (upper)
+                input.toUpper();
+            else
+                input.toLower();
+
+            output.clear();
+            input.toUTF8String(output);
+
+            /// For valid UTF-8 input strings, ICU sometimes produces output with extra '\0's at the end. Only the data before the first
+            /// '\0' is valid. If the input is not valid UTF-8, then the behavior of lower/upperUTF8 is undefined by definition. In this
+            /// case, the behavior is also reasonable.
+            const char * res_end = find_last_not_symbols_or_null<'\0'>(output.data(), output.data() + output.size());
+            size_t valid_size = res_end ? res_end - output.data() + 1 : 0;
+
+            res_data.resize(curr_offset + valid_size + 1);
+            memcpy(&res_data[curr_offset], output.data(), valid_size);
+            res_data[curr_offset + valid_size] = 0;
+
+            curr_offset += valid_size + 1;
+            res_offsets[i] = curr_offset;
+        }
     }

     static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t)
     {
         throw Exception(ErrorCodes::BAD_ARGUMENTS, "Functions lowerUTF8 and upperUTF8 cannot work with FixedString argument");
     }
-
-    /** Converts a single code point starting at `src` to desired case, storing result starting at `dst`.
-      * `src` and `dst` are incremented by corresponding sequence lengths. */
-    static bool toCase(const UInt8 *& src, const UInt8 * src_end, UInt8 *& dst, bool partial)
-    {
-        if (src[0] <= ascii_upper_bound)
-        {
-            if (*src >= not_case_lower_bound && *src <= not_case_upper_bound)
-                *dst++ = *src++ ^ flip_case_mask;
-            else
-                *dst++ = *src++;
-        }
-        else if (src + 1 < src_end
-            && ((src[0] == 0xD0u && (src[1] >= 0x80u && src[1] <= 0xBFu)) || (src[0] == 0xD1u && (src[1] >= 0x80u && src[1] <= 0x9Fu))))
-        {
-            cyrillic_to_case(src, dst);
-        }
-        else if (src + 1 < src_end && src[0] == 0xC2u)
-        {
-            /// Punctuation U+0080 - U+00BF, UTF-8: C2 80 - C2 BF
-            *dst++ = *src++;
-            *dst++ = *src++;
-        }
-        else if (src + 2 < src_end && src[0] == 0xE2u)
-        {
-            /// Characters U+2000 - U+2FFF, UTF-8: E2 80 80 - E2 BF BF
-            *dst++ = *src++;
-            *dst++ = *src++;
-            *dst++ = *src++;
-        }
-        else
-        {
-            size_t src_sequence_length = UTF8::seqLength(*src);
-            /// In case partial buffer was passed (due to SSE optimization)
-            /// we cannot convert it with current src_end, but we may have more
-            /// bytes to convert and eventually got correct symbol.
-            if (partial && src_sequence_length > static_cast<size_t>(src_end - src))
-                return false;
-
-            auto src_code_point = UTF8::convertUTF8ToCodePoint(src, src_end - src);
-            if (src_code_point)
-            {
-                int dst_code_point = to_case(*src_code_point);
-                if (dst_code_point > 0)
-                {
-                    size_t dst_sequence_length = UTF8::convertCodePointToUTF8(dst_code_point, dst, src_end - src);
-                    assert(dst_sequence_length <= 4);
-
-                    /// We don't support cases when lowercase and uppercase characters occupy different number of bytes in UTF-8.
-                    /// As an example, this happens for ß and ẞ.
-                    if (dst_sequence_length == src_sequence_length)
-                    {
-                        src += dst_sequence_length;
-                        dst += dst_sequence_length;
-                        return true;
-                    }
-                }
-            }
-
-            *dst = *src;
-            ++dst;
-            ++src;
-        }
-
-        return true;
-    }
-
-private:
-    static constexpr auto ascii_upper_bound = '\x7f';
-    static constexpr auto flip_case_mask = 'A' ^ 'a';
-
-    static void array(const UInt8 * src, const UInt8 * src_end, const ColumnString::Offsets & offsets, UInt8 * dst)
-    {
-        const auto * offset_it = offsets.begin();
-        const UInt8 * begin = src;
-
-#ifdef __SSE2__
-        static constexpr auto bytes_sse = sizeof(__m128i);
-
-        /// If we are before this position, we can still read at least bytes_sse.
-        const auto * src_end_sse = src_end - bytes_sse + 1;
-
-        /// SSE2 packed comparison operate on signed types, hence compare (c < 0) instead of (c > 0x7f)
-        const auto v_zero = _mm_setzero_si128();
-        const auto v_not_case_lower_bound = _mm_set1_epi8(not_case_lower_bound - 1);
-        const auto v_not_case_upper_bound = _mm_set1_epi8(not_case_upper_bound + 1);
-        const auto v_flip_case_mask = _mm_set1_epi8(flip_case_mask);
-
-        while (src < src_end_sse)
-        {
-            const auto chars = _mm_loadu_si128(reinterpret_cast<const __m128i *>(src));
-
-            /// check for ASCII
-            const auto is_not_ascii = _mm_cmplt_epi8(chars, v_zero);
-            const auto mask_is_not_ascii = _mm_movemask_epi8(is_not_ascii);
-
-            /// ASCII
-            if (mask_is_not_ascii == 0)
-            {
-                const auto is_not_case
-                    = _mm_and_si128(_mm_cmpgt_epi8(chars, v_not_case_lower_bound), _mm_cmplt_epi8(chars, v_not_case_upper_bound));
-                const auto mask_is_not_case = _mm_movemask_epi8(is_not_case);
-
-                /// everything in correct case ASCII
-                if (mask_is_not_case == 0)
-                    _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), chars);
-                else
-                {
-                    /// ASCII in mixed case
-                    /// keep `flip_case_mask` only where necessary, zero out elsewhere
-                    const auto xor_mask = _mm_and_si128(v_flip_case_mask, is_not_case);
-
-                    /// flip case by applying calculated mask
-                    const auto cased_chars = _mm_xor_si128(chars, xor_mask);
-
-                    /// store result back to destination
-                    _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), cased_chars);
-                }
-
-                src += bytes_sse;
-                dst += bytes_sse;
-            }
-            else
-            {
-                /// UTF-8
-
-                /// Find the offset of the next string after src
-                size_t offset_from_begin = src - begin;
-                while (offset_from_begin >= *offset_it)
-                    ++offset_it;
-
-                /// Do not allow one row influence another (since row may have invalid sequence, and break the next)
-                const UInt8 * row_end = begin + *offset_it;
-                chassert(row_end >= src);
-                const UInt8 * expected_end = std::min(src + bytes_sse, row_end);
-
-                while (src < expected_end)
-                {
-                    if (!toCase(src, expected_end, dst, /* partial= */ true))
-                    {
-                        /// Fallback to handling byte by byte.
-                        src_end_sse = src;
-                        break;
-                    }
-                }
-            }
-        }
-
-        /// Find the offset of the next string after src
-        size_t offset_from_begin = src - begin;
-        while (offset_it != offsets.end() && offset_from_begin >= *offset_it)
-            ++offset_it;
-#endif
-
-        /// handle remaining symbols, row by row (to avoid influence of bad UTF8 symbols from one row, to another)
-        while (src < src_end)
-        {
-            const UInt8 * row_end = begin + *offset_it;
-            chassert(row_end >= src);
-
-            while (src < row_end)
-                toCase(src, row_end, dst, /* partial= */ false);
-            ++offset_it;
-        }
-    }
 };

 }
+
+#endif
@@ -1,9 +1,8 @@
 #include <DataTypes/DataTypeString.h>
 #include <Functions/FunctionStringToString.h>
-#include <Functions/LowerUpperUTF8Impl.h>
 #include <Functions/FunctionFactory.h>
-#include <Poco/Unicode.h>

+#include <Common/UTF8Helpers.h>

 namespace DB
 {
@ -1,9 +1,10 @@
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include "config.h"
|
||||
|
||||
#if USE_ICU
|
||||
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Functions/FunctionStringToString.h>
|
||||
#include <Functions/LowerUpperUTF8Impl.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Poco/Unicode.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -15,13 +16,25 @@ struct NameLowerUTF8
|
||||
static constexpr auto name = "lowerUTF8";
|
||||
};
|
||||
|
||||
using FunctionLowerUTF8 = FunctionStringToString<LowerUpperUTF8Impl<'A', 'Z', Poco::Unicode::toLower, UTF8CyrillicToCase<true>>, NameLowerUTF8>;
|
||||
using FunctionLowerUTF8 = FunctionStringToString<LowerUpperUTF8Impl<'A', 'Z', false>, NameLowerUTF8>;
|
||||
|
||||
}
|
||||
|
||||
REGISTER_FUNCTION(LowerUTF8)
|
||||
{
|
||||
factory.registerFunction<FunctionLowerUTF8>();
|
||||
FunctionDocumentation::Description description
|
||||
= R"(Converts a string to lowercase, assuming that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.)";
|
||||
FunctionDocumentation::Syntax syntax = "lowerUTF8(input)";
|
||||
FunctionDocumentation::Arguments arguments = {{"input", "Input with String type"}};
|
||||
FunctionDocumentation::ReturnedValue returned_value = "A String data type value";
|
||||
FunctionDocumentation::Examples examples = {
|
||||
{"first", "SELECT lowerUTF8('München') as Lowerutf8;", "münchen"},
|
||||
};
|
||||
FunctionDocumentation::Categories categories = {"String"};
|
||||
|
||||
factory.registerFunction<FunctionLowerUTF8>({description, syntax, arguments, returned_value, examples, categories});
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
@ -1,8 +1,10 @@
#include "config.h"

#if USE_ICU

#include <Functions/FunctionFactory.h>
#include <Functions/FunctionStringToString.h>
#include <Functions/LowerUpperUTF8Impl.h>
#include <Functions/FunctionFactory.h>
#include <Poco/Unicode.h>


namespace DB
{
@ -14,13 +16,25 @@ struct NameUpperUTF8
static constexpr auto name = "upperUTF8";
};

using FunctionUpperUTF8 = FunctionStringToString<LowerUpperUTF8Impl<'a', 'z', Poco::Unicode::toUpper, UTF8CyrillicToCase<false>>, NameUpperUTF8>;
using FunctionUpperUTF8 = FunctionStringToString<LowerUpperUTF8Impl<'a', 'z', true>, NameUpperUTF8>;

}

REGISTER_FUNCTION(UpperUTF8)
{
factory.registerFunction<FunctionUpperUTF8>();
FunctionDocumentation::Description description
= R"(Converts a string to lowercase, assuming that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.)";
FunctionDocumentation::Syntax syntax = "upperUTF8(input)";
FunctionDocumentation::Arguments arguments = {{"input", "Input with String type"}};
FunctionDocumentation::ReturnedValue returned_value = "A String data type value";
FunctionDocumentation::Examples examples = {
{"first", "SELECT upperUTF8('München') as Upperutf8;", "MÜNCHEN"},
};
FunctionDocumentation::Categories categories = {"String"};

factory.registerFunction<FunctionUpperUTF8>({description, syntax, arguments, returned_value, examples, categories});
}

}

#endif
@ -9,7 +9,6 @@
#include <Parsers/ASTQueryWithOutput.h>
#include <Parsers/ASTSelectWithUnionQuery.h>


namespace DB
{

@ -46,7 +45,9 @@ static void applySettingsFromSelectWithUnion(const ASTSelectWithUnionQuery & sel
// It is flattened later, when we process UNION ALL/DISTINCT.
const auto * last_select = children.back()->as<ASTSelectQuery>();
if (last_select && last_select->settings())
InterpreterSetQuery(last_select->settings(), context).executeForCurrentContext(/* ignore_setting_constraints= */ false);
{
InterpreterSetQuery(last_select->settings(), context).executeForCurrentContext();
}
}

void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMutablePtr context_)
@ -54,20 +55,10 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta
if (!ast)
return;

/// First apply the outermost settings. Then they could be overridden by deeper settings.
if (const auto * query_with_output = dynamic_cast<const ASTQueryWithOutput *>(ast.get()))
{
if (query_with_output->settings_ast)
InterpreterSetQuery(query_with_output->settings_ast, context_).executeForCurrentContext(/* ignore_setting_constraints= */ false);

if (const auto * create_query = ast->as<ASTCreateQuery>(); create_query && create_query->select)
applySettingsFromSelectWithUnion(create_query->select->as<ASTSelectWithUnionQuery &>(), context_);
}

if (const auto * select_query = ast->as<ASTSelectQuery>())
{
if (auto new_settings = select_query->settings())
InterpreterSetQuery(new_settings, context_).executeForCurrentContext(/* ignore_setting_constraints= */ false);
InterpreterSetQuery(new_settings, context_).executeForCurrentContext();
}
else if (const auto * select_with_union_query = ast->as<ASTSelectWithUnionQuery>())
{
@ -76,15 +67,28 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta
else if (const auto * explain_query = ast->as<ASTExplainQuery>())
{
if (explain_query->settings_ast)
InterpreterSetQuery(explain_query->settings_ast, context_).executeForCurrentContext(/* ignore_setting_constraints= */ false);
InterpreterSetQuery(explain_query->settings_ast, context_).executeForCurrentContext();

applySettingsFromQuery(explain_query->getExplainedQuery(), context_);
}
else if (const auto * query_with_output = dynamic_cast<const ASTQueryWithOutput *>(ast.get()))
{
if (query_with_output->settings_ast)
InterpreterSetQuery(query_with_output->settings_ast, context_).executeForCurrentContext();

if (const auto * create_query = ast->as<ASTCreateQuery>())
{
if (create_query->select)
{
applySettingsFromSelectWithUnion(create_query->select->as<ASTSelectWithUnionQuery &>(), context_);
}
}
}
else if (auto * insert_query = ast->as<ASTInsertQuery>())
{
context_->setInsertFormat(insert_query->format);
if (insert_query->settings_ast)
InterpreterSetQuery(insert_query->settings_ast, context_).executeForCurrentContext(/* ignore_setting_constraints= */ false);
InterpreterSetQuery(insert_query->settings_ast, context_).executeForCurrentContext();
}
}
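Editor's note: the ordering in applySettingsFromQuery matters: the outermost SETTINGS are applied first, so that deeper clauses can override them. A toy model of that precedence (the types and names here are illustrative stand-ins, not ClickHouse APIs):

// Toy model of the precedence above: outer settings are applied first,
// then deeper (inner) SETTINGS clauses overwrite them on conflict.
#include <iostream>
#include <map>
#include <string>

using Settings = std::map<std::string, std::string>;

static void apply(Settings & context, const Settings & clause)
{
    for (const auto & [name, value] : clause)
        context[name] = value; // last writer wins
}

int main()
{
    Settings context;
    Settings outer{{"max_block_size", "3"}}; // e.g. ... FORMAT TSV SETTINGS ...
    Settings inner{{"max_block_size", "1"}}; // e.g. SELECT ... SETTINGS ...

    apply(context, outer); // outermost first
    apply(context, inner); // deeper clause overrides

    std::cout << context["max_block_size"] << '\n'; // prints 1
}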
@ -23,7 +23,7 @@ public:
/** Set setting for current context (query context).
* It is used for interpretation of SETTINGS clause in SELECT query.
*/
void executeForCurrentContext(bool ignore_setting_constraints);
void executeForCurrentContext(bool ignore_setting_constraints = false);

bool supportsTransactions() const override { return true; }
@ -25,6 +25,7 @@
#include <Parsers/ParserTablePropertiesQuery.h>
#include <Parsers/ParserWatchQuery.h>
#include <Parsers/ParserDescribeCacheQuery.h>
#include <Parsers/QueryWithOutputSettingsPushDownVisitor.h>
#include <Parsers/Access/ParserShowAccessEntitiesQuery.h>
#include <Parsers/Access/ParserShowAccessQuery.h>
#include <Parsers/Access/ParserShowCreateAccessEntityQuery.h>
@ -151,55 +152,37 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec

}

/// These two sections are allowed in an arbitrary order.
ParserKeyword s_format(Keyword::FORMAT);
ParserKeyword s_settings(Keyword::SETTINGS);

/** Why: let's take the following example:
* SELECT 1 UNION ALL SELECT 2 FORMAT TSV
* Each subquery can be put in parentheses and have its own settings:
* (SELECT 1 SETTINGS a=b) UNION ALL (SELECT 2 SETTINGS c=d) FORMAT TSV
* And the whole query can have settings:
* (SELECT 1 SETTINGS a=b) UNION ALL (SELECT 2 SETTINGS c=d) FORMAT TSV SETTINGS e=f
* A single query with output is parsed in the same way as the UNION ALL chain:
* SELECT 1 SETTINGS a=b FORMAT TSV SETTINGS e=f
* So while these forms have a slightly different meaning, they both exist:
* SELECT 1 SETTINGS a=b FORMAT TSV
* SELECT 1 FORMAT TSV SETTINGS e=f
* And due to this effect, the users expect that the FORMAT and SETTINGS may go in an arbitrary order.
* But while this works:
* (SELECT 1) UNION ALL (SELECT 2) FORMAT TSV SETTINGS d=f
* This does not work automatically, unless we explicitly allow different orders:
* (SELECT 1) UNION ALL (SELECT 2) SETTINGS d=f FORMAT TSV
* Inevitably, we also allow this:
* SELECT 1 SETTINGS a=b SETTINGS d=f FORMAT TSV
* ^^^^^^^^^^^^^^^^^^^^^
* Because this part is consumed into ASTSelectWithUnionQuery
* and the rest into ASTQueryWithOutput.
*/

for (size_t i = 0; i < 2; ++i)
if (s_format.ignore(pos, expected))
{
if (!query_with_output.format && s_format.ignore(pos, expected))
{
ParserIdentifier format_p;
ParserIdentifier format_p;

if (!format_p.parse(pos, query_with_output.format, expected))
return false;
setIdentifierSpecial(query_with_output.format);
if (!format_p.parse(pos, query_with_output.format, expected))
return false;
setIdentifierSpecial(query_with_output.format);

query_with_output.children.push_back(query_with_output.format);
}
else if (!query_with_output.settings_ast && s_settings.ignore(pos, expected))
query_with_output.children.push_back(query_with_output.format);
}

// SETTINGS key1 = value1, key2 = value2, ...
ParserKeyword s_settings(Keyword::SETTINGS);
if (!query_with_output.settings_ast && s_settings.ignore(pos, expected))
{
ParserSetQuery parser_settings(true);
if (!parser_settings.parse(pos, query_with_output.settings_ast, expected))
return false;
query_with_output.children.push_back(query_with_output.settings_ast);

// SETTINGS after FORMAT is not parsed by the SELECT parser (ParserSelectQuery)
// Pass them manually, to apply in InterpreterSelectQuery::initSettings()
if (query->as<ASTSelectWithUnionQuery>())
{
// SETTINGS key1 = value1, key2 = value2, ...
ParserSetQuery parser_settings(true);
if (!parser_settings.parse(pos, query_with_output.settings_ast, expected))
return false;
query_with_output.children.push_back(query_with_output.settings_ast);
auto settings = query_with_output.settings_ast->clone();
assert_cast<ASTSetQuery *>(settings.get())->print_in_format = false;
QueryWithOutputSettingsPushDownVisitor::Data data{settings};
QueryWithOutputSettingsPushDownVisitor(data).visit(query);
}
else
break;
}

node = std::move(query);
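Editor's note: the removed loop above is the classic pattern for accepting two optional clauses in either order: iterate at most twice, consuming whichever clause matches next and refusing duplicates. A condensed, self-contained sketch of the pattern (the token stream stands in for the real Pos/Expected parser state):

// Toy version of "FORMAT and SETTINGS in arbitrary order": at most two passes,
// each clause accepted at most once.
#include <iostream>
#include <sstream>
#include <string>

int main()
{
    std::istringstream in("SETTINGS FORMAT"); // try also "FORMAT SETTINGS"
    bool have_format = false;
    bool have_settings = false;

    std::string token;
    for (int pass = 0; pass < 2 && (in >> token); ++pass)
    {
        if (!have_format && token == "FORMAT")
            have_format = true;
        else if (!have_settings && token == "SETTINGS")
            have_settings = true;
        else
            break; // neither clause matched (or a duplicate): stop consuming
    }

    std::cout << "format=" << have_format << " settings=" << have_settings << '\n';
    // format=1 settings=1
}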
56
src/Parsers/QueryWithOutputSettingsPushDownVisitor.cpp
Normal file
@ -0,0 +1,56 @@
#include <Common/SettingsChanges.h>
#include <Parsers/QueryWithOutputSettingsPushDownVisitor.h>
#include <Parsers/ASTSelectWithUnionQuery.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/ASTSetQuery.h>
#include <Parsers/ASTSubquery.h>

#include <iterator>
#include <algorithm>

namespace DB
{

bool QueryWithOutputSettingsPushDownMatcher::needChildVisit(ASTPtr & node, const ASTPtr & child)
{
if (node->as<ASTSelectWithUnionQuery>())
return true;
if (node->as<ASTSubquery>())
return true;
if (child->as<ASTSelectQuery>())
return true;
return false;
}

void QueryWithOutputSettingsPushDownMatcher::visit(ASTPtr & ast, Data & data)
{
if (auto * select_query = ast->as<ASTSelectQuery>())
visit(*select_query, ast, data);
}

void QueryWithOutputSettingsPushDownMatcher::visit(ASTSelectQuery & select_query, ASTPtr &, Data & data)
{
ASTPtr select_settings_ast = select_query.settings();
if (!select_settings_ast)
{
select_query.setExpression(ASTSelectQuery::Expression::SETTINGS, data.settings_ast->clone());
return;
}

SettingsChanges & select_settings = select_settings_ast->as<ASTSetQuery &>().changes;
SettingsChanges & settings = data.settings_ast->as<ASTSetQuery &>().changes;

for (auto & setting : settings)
{
auto it = std::find_if(select_settings.begin(), select_settings.end(), [&](auto & select_setting)
{
return select_setting.name == setting.name;
});
if (it == select_settings.end())
select_settings.push_back(setting);
else
it->value = setting.value;
}
}

}
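Editor's note: the merge rule in visit() above is append-or-overwrite: a pushed-down setting is appended if the SELECT does not already mention it, and overwrites the SELECT's value on a name clash (the behavior the updated 01401 shell test below asserts). A self-contained sketch of that rule, with a stand-in SettingChange type rather than the real one:

// Sketch of the push-down merge: pushed settings are appended, clashes overwrite.
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

struct SettingChange { std::string name; std::string value; };

static void mergePushedDown(std::vector<SettingChange> & select_settings,
                            const std::vector<SettingChange> & pushed)
{
    for (const auto & setting : pushed)
    {
        auto it = std::find_if(select_settings.begin(), select_settings.end(),
            [&](const SettingChange & s) { return s.name == setting.name; });
        if (it == select_settings.end())
            select_settings.push_back(setting); // new setting: append
        else
            it->value = setting.value;          // clash: pushed value overwrites
    }
}

int main()
{
    std::vector<SettingChange> select_settings{{"max_block_size", "2"}};
    mergePushedDown(select_settings, {{"max_block_size", "1"}, {"max_threads", "4"}});
    for (const auto & s : select_settings)
        std::cout << s.name << " = " << s.value << '\n';
    // max_block_size = 1
    // max_threads = 4
}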
39
src/Parsers/QueryWithOutputSettingsPushDownVisitor.h
Normal file
@ -0,0 +1,39 @@
#pragma once

#include <Parsers/IAST.h>
#include <Interpreters/InDepthNodeVisitor.h>

namespace DB
{

class ASTSelectQuery;
struct SettingChange;
class SettingsChanges;

/// Pushdown SETTINGS clause that goes after FORMAT to the SELECT query:
/// (since settings after FORMAT are parsed separately, not in ParserSelectQuery but in ParserQueryWithOutput)
///
/// SELECT 1 FORMAT Null SETTINGS max_block_size = 1 ->
/// SELECT 1 SETTINGS max_block_size = 1 FORMAT Null SETTINGS max_block_size = 1
///
/// Otherwise settings after FORMAT will not be applied.
class QueryWithOutputSettingsPushDownMatcher
{
public:
using Visitor = InDepthNodeVisitor<QueryWithOutputSettingsPushDownMatcher, true>;

struct Data
{
const ASTPtr & settings_ast;
};

static bool needChildVisit(ASTPtr & node, const ASTPtr & child);
static void visit(ASTPtr & ast, Data & data);

private:
static void visit(ASTSelectQuery &, ASTPtr &, Data &);
};

using QueryWithOutputSettingsPushDownVisitor = QueryWithOutputSettingsPushDownMatcher::Visitor;

}
@ -369,7 +369,7 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling(
/// If sample and final are used together no need to calculate sampling expression twice.
/// The first time it was calculated for final, because sample key is a part of the PK.
/// So, assume that we already have calculated column.
ASTPtr sampling_key_ast = metadata_snapshot->getSamplingKeyAST();
ASTPtr sampling_key_ast;

if (final)
{
@ -377,6 +377,12 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling(
/// We do spoil available_real_columns here, but it is not used later.
available_real_columns.emplace_back(sampling_key.column_names[0], std::move(sampling_column_type));
}
else
{
sampling_key_ast = metadata_snapshot->getSamplingKeyAST()->clone();
}

chassert(sampling_key_ast != nullptr);

if (has_lower_limit)
{
@ -190,6 +190,8 @@ MergeTreeIndexGranuleVectorSimilarity::MergeTreeIndexGranuleVectorSimilarity(

void MergeTreeIndexGranuleVectorSimilarity::serializeBinary(WriteBuffer & ostr) const
{
LOG_TRACE(logger, "Start writing vector similarity index");

if (empty())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to write empty vector similarity index {}", backQuote(index_name));

@ -207,6 +209,8 @@ void MergeTreeIndexGranuleVectorSimilarity::serializeBinary(WriteBuffer & ostr)

void MergeTreeIndexGranuleVectorSimilarity::deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion /*version*/)
{
LOG_TRACE(logger, "Start loading vector similarity index");

UInt64 file_version;
readIntBinary(file_version, istr);
if (file_version != FILE_FORMAT_VERSION)

@ -15,4 +15,5 @@ warn_return_any = True
no_implicit_reexport = True
strict_equality = True
extra_checks = True
ignore_missing_imports = True
ignore_missing_imports = True
logging-fstring-interpolation = False
@ -535,7 +535,10 @@ class CI:
JobNames.FAST_TEST: JobConfig(
pr_only=True,
digest=DigestConfig(
include_paths=["./tests/queries/0_stateless/"],
include_paths=[
"./tests/queries/0_stateless/",
"./tests/docker_scripts/",
],
exclude_files=[".md"],
docker=["clickhouse/fasttest"],
),

@ -415,6 +415,7 @@ class CommonJobConfigs:
"./tests/clickhouse-test",
"./tests/config",
"./tests/*.txt",
"./tests/docker_scripts/",
],
exclude_files=[".md"],
docker=["clickhouse/stateless-test"],
@ -431,6 +432,7 @@ class CommonJobConfigs:
"./tests/clickhouse-test",
"./tests/config",
"./tests/*.txt",
"./tests/docker_scripts/",
],
exclude_files=[".md"],
docker=["clickhouse/stateful-test"],
@ -448,6 +450,7 @@ class CommonJobConfigs:
"./tests/clickhouse-test",
"./tests/config",
"./tests/*.txt",
"./tests/docker_scripts/",
],
exclude_files=[".md"],
docker=["clickhouse/stress-test"],
@ -459,9 +462,9 @@ class CommonJobConfigs:
UPGRADE_TEST = JobConfig(
job_name_keyword="upgrade",
digest=DigestConfig(
include_paths=["./tests/ci/upgrade_check.py"],
include_paths=["./tests/ci/upgrade_check.py", "./tests/docker_scripts/"],
exclude_files=[".md"],
docker=["clickhouse/upgrade-check"],
docker=["clickhouse/stress-test"],
),
run_command="upgrade_check.py",
runner_type=Runners.STRESS_TESTER,
@ -93,7 +93,7 @@ def process_single_image(
results = []  # type: TestResults
for ver in versions:
stopwatch = Stopwatch()
for i in range(5):
for i in range(2):
success, build_log = build_and_push_one_image(
image, ver, additional_cache, push, from_tag
)
@ -31,15 +31,14 @@ def get_fasttest_cmd(
"--security-opt seccomp=unconfined "  # required to issue io_uring sys-calls
"--network=host "  # required to get access to IAM credentials
f"-e FASTTEST_WORKSPACE=/fasttest-workspace -e FASTTEST_OUTPUT=/test_output "
f"-e FASTTEST_SOURCE=/ClickHouse "
f"-e FASTTEST_SOURCE=/repo "
f"-e FASTTEST_CMAKE_FLAGS='-DCOMPILER_CACHE=sccache' "
f"-e PULL_REQUEST_NUMBER={pr_number} -e COMMIT_SHA={commit_sha} "
f"-e COPY_CLICKHOUSE_BINARY_TO_OUTPUT=1 "
f"-e SCCACHE_BUCKET={S3_BUILDS_BUCKET} -e SCCACHE_S3_KEY_PREFIX=ccache/sccache "
"-e stage=clone_submodules "
f"--volume={workspace}:/fasttest-workspace --volume={repo_path}:/ClickHouse "
f"--volume={repo_path}/tests/analyzer_tech_debt.txt:/analyzer_tech_debt.txt "
f"--volume={output_path}:/test_output {image}"
f"--volume={workspace}:/fasttest-workspace --volume={repo_path}:/repo "
f"--volume={output_path}:/test_output {image} /repo/tests/docker_scripts/fasttest_runner.sh"
)

@ -119,24 +119,24 @@ def get_run_command(
envs += [f"-e {e}" for e in additional_envs]

env_str = " ".join(envs)
volume_with_broken_test = (
f"--volume={repo_path}/tests/analyzer_tech_debt.txt:/analyzer_tech_debt.txt "
if "analyzer" not in check_name
else ""
)

if "stateful" in check_name.lower():
run_script = "/repo/tests/docker_scripts/stateful_runner.sh"
elif "stateless" in check_name.lower():
run_script = "/repo/tests/docker_scripts/stateless_runner.sh"
else:
assert False

return (
f"docker run --rm --name func-tester --volume={builds_path}:/package_folder "
# For dmesg and sysctl
"--privileged "
f"{ci_logs_args}"
f"--volume={repo_path}/tests:/usr/share/clickhouse-test "
f"--volume={repo_path}/utils/grpc-client:/usr/share/clickhouse-utils/grpc-client "
f"{volume_with_broken_test}"
f"{ci_logs_args} "
f"--volume={repo_path}:/repo "
f"--volume={result_path}:/test_output "
f"--volume={server_log_path}:/var/log/clickhouse-server "
"--security-opt seccomp=unconfined "  # required to issue io_uring sys-calls
f"--cap-add=SYS_PTRACE {env_str} {additional_options_str} {image}"
f"--cap-add=SYS_PTRACE {env_str} {additional_options_str} {image} {run_script}"
)

@ -57,10 +57,16 @@ def get_run_command(
additional_envs: List[str],
ci_logs_args: str,
image: DockerImage,
upgrade_check: bool,
) -> str:
envs = [f"-e {e}" for e in additional_envs]
env_str = " ".join(envs)

if upgrade_check:
run_script = "/repo/tests/docker_scripts/upgrade_runner.sh"
else:
run_script = "/repo/tests/docker_scripts/stress_runner.sh"

cmd = (
"docker run --cap-add=SYS_PTRACE "
# For dmesg and sysctl
@ -70,8 +76,8 @@ def get_run_command(
f"{ci_logs_args}"
f"--volume={build_path}:/package_folder "
f"--volume={result_path}:/test_output "
f"--volume={repo_tests_path}:/usr/share/clickhouse-test "
f"--volume={server_log_path}:/var/log/clickhouse-server {env_str} {image} "
f"--volume={repo_tests_path}/..:/repo "
f"--volume={server_log_path}:/var/log/clickhouse-server {env_str} {image} {run_script}"
)

return cmd
@ -128,7 +134,7 @@ def process_results(
return state, description, test_results, additional_files


def run_stress_test(docker_image_name: str) -> None:
def run_stress_test(upgrade_check: bool = False) -> None:
logging.basicConfig(level=logging.INFO)
for handler in logging.root.handlers:
# pylint: disable=protected-access
@ -148,7 +154,7 @@ def run_stress_test(docker_image_name: str) -> None:

pr_info = PRInfo()

docker_image = pull_image(get_docker_image(docker_image_name))
docker_image = pull_image(get_docker_image("clickhouse/stress-test"))

packages_path = temp_path / "packages"
packages_path.mkdir(parents=True, exist_ok=True)
@ -177,6 +183,7 @@ def run_stress_test(docker_image_name: str) -> None:
additional_envs,
ci_logs_args,
docker_image,
upgrade_check,
)
logging.info("Going to run stress test: %s", run_command)

@ -208,4 +215,4 @@ def run_stress_test(docker_image_name: str) -> None:


if __name__ == "__main__":
run_stress_test("clickhouse/stress-test")
run_stress_test()

@ -1,4 +1,4 @@
import stress_check

if __name__ == "__main__":
stress_check.run_stress_test("clickhouse/upgrade-check")
stress_check.run_stress_test(upgrade_check=True)
@ -1,7 +1,7 @@
#!/bin/bash

# shellcheck source=./utils.lib
source /utils.lib
source /repo/tests/docker_scripts/utils.lib

function attach_gdb_to_clickhouse()
{
@ -325,7 +325,7 @@ case "$stage" in
;&
"run_tests")
run_tests ||:
/process_functional_tests_result.py --in-results-dir "$FASTTEST_OUTPUT/" \
/repo/tests/docker_scripts/process_functional_tests_result.py --in-results-dir "$FASTTEST_OUTPUT/" \
--out-results-file "$FASTTEST_OUTPUT/test_results.tsv" \
--out-status-file "$FASTTEST_OUTPUT/check_status.tsv" || echo -e "failure\tCannot parse results" > "$FASTTEST_OUTPUT/check_status.tsv"
;;
@ -32,7 +32,7 @@ def process_test_log(log_path, broken_tests):
success_finish = False
test_results = []
test_end = True
with open(log_path, "r") as test_file:
with open(log_path, "r", encoding="utf-8") as test_file:
for line in test_file:
original_line = line
line = line.strip()
@ -150,7 +150,7 @@ def process_result(result_path, broken_tests):

if result_path and os.path.exists(result_path):
(
total,
_total,
skipped,
unknown,
failed,
@ -191,11 +191,11 @@ def process_result(result_path, broken_tests):
else:
description = ""

description += "fail: {}, passed: {}".format(failed, success)
description += f"fail: {failed}, passed: {success}"
if skipped != 0:
description += ", skipped: {}".format(skipped)
description += f", skipped: {skipped}"
if unknown != 0:
description += ", unknown: {}".format(unknown)
description += f", unknown: {unknown}"
else:
state = "failure"
description = "Output log doesn't exist"
@ -205,10 +205,10 @@ def process_result(result_path, broken_tests):


def write_results(results_file, status_file, results, status):
with open(results_file, "w") as f:
with open(results_file, "w", encoding="utf-8") as f:
out = csv.writer(f, delimiter="\t")
out.writerows(results)
with open(status_file, "w") as f:
with open(status_file, "w", encoding="utf-8") as f:
out = csv.writer(f, delimiter="\t")
out.writerow(status)

@ -221,15 +221,15 @@ if __name__ == "__main__":
parser.add_argument("--in-results-dir", default="/test_output/")
parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
parser.add_argument("--broken-tests", default="/analyzer_tech_debt.txt")
parser.add_argument("--broken-tests", default="/repo/tests/analyzer_tech_debt.txt")
args = parser.parse_args()

broken_tests = list()
broken_tests = []
if os.path.exists(args.broken_tests):
logging.info(f"File {args.broken_tests} with broken tests found")
with open(args.broken_tests) as f:
print(f"File {args.broken_tests} with broken tests found")
with open(args.broken_tests, encoding="utf-8") as f:
broken_tests = f.read().splitlines()
logging.info(f"Broken tests in the list: {len(broken_tests)}")
print(f"Broken tests in the list: {len(broken_tests)}")

state, description, test_results = process_result(args.in_results_dir, broken_tests)
logging.info("Result parsed")
@ -5,7 +5,7 @@ set -e -x -a -u

ls -lha

cd hadoop-3.3.1
cd /hadoop-3.3.1

export JAVA_HOME=/usr
mkdir -p target/test/data
@ -143,7 +143,7 @@ main() {
fi
start_minio
setup_minio "$1"
upload_data "${query_dir}" "${2:-/usr/share/clickhouse-test}"
upload_data "${query_dir}" "${2:-/repo/tests/}"
setup_aws_credentials
}

@ -14,17 +14,17 @@ dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
dpkg -i package_folder/clickhouse-server_*.deb
dpkg -i package_folder/clickhouse-client_*.deb

ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
ln -s /repo/tests/clickhouse-test /usr/bin/clickhouse-test

# shellcheck disable=SC1091
source /utils.lib
source /repo/tests/docker_scripts/utils.lib

# install test configs
/usr/share/clickhouse-test/config/install.sh
/repo/tests/config/install.sh

azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --silent --inMemoryPersistence &

./setup_minio.sh stateful
/repo/tests/docker_scripts/setup_minio.sh stateful
./mc admin trace clickminio > /test_output/minio.log &
MC_ADMIN_PID=$!

@ -105,7 +105,7 @@ setup_logs_replication

clickhouse-client --query "SHOW DATABASES"
clickhouse-client --query "CREATE DATABASE datasets"
clickhouse-client --multiquery < create.sql
clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql
clickhouse-client --query "SHOW TABLES FROM datasets"

if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
@ -237,6 +237,7 @@ function run_tests()
--hung-check
--print-time
--capture-client-stacktrace
--queries "/repo/tests/queries"
"${ADDITIONAL_OPTIONS[@]}"
"$SKIP_TESTS_OPTION"
)
@ -259,7 +260,7 @@ ls -la ./
echo "Files in root directory"
ls -la /

/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
/repo/tests/docker_scripts/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv

sudo clickhouse stop ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
@ -1,10 +1,13 @@
#!/bin/bash

# fail on errors, verbose and export all env variables
set -e -x -a

# shellcheck disable=SC1091
source /setup_export_logs.sh

# shellcheck source=../stateless/stress_tests.lib
source /stress_tests.lib
source /repo/tests/docker_scripts/stress_tests.lib

# Avoid overlaps with previous runs
dmesg --clear
@ -39,20 +42,22 @@ if [[ -z "$BUGFIX_VALIDATE_CHECK" ]]; then
chc --version || exit 1
fi

ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
ln -sf /repo/tests/clickhouse-test /usr/bin/clickhouse-test

export CLICKHOUSE_GRPC_CLIENT="/repo/utils/grpc-client/clickhouse-grpc-client.py"

# shellcheck disable=SC1091
source /attach_gdb.lib
source /repo/tests/docker_scripts/attach_gdb.lib

# shellcheck disable=SC1091
source /utils.lib
source /repo/tests/docker_scripts/utils.lib

# install test configs
/usr/share/clickhouse-test/config/install.sh
/repo/tests/config/install.sh

./setup_minio.sh stateless
/repo/tests/docker_scripts/setup_minio.sh stateless

./setup_hdfs_minicluster.sh
/repo/tests/docker_scripts/setup_hdfs_minicluster.sh

config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

@ -316,6 +321,7 @@ function run_tests()
--print-time
--no-drop-if-fail
--capture-client-stacktrace
--queries "/repo/tests/queries"
--test-runs "$NUM_TRIES"
"${ADDITIONAL_OPTIONS[@]}"
)
@ -341,7 +347,7 @@ ls -la ./
echo "Files in root directory"
ls -la /

/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
/repo/tests/docker_scripts/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv

clickhouse-client -q "system flush logs" ||:
21
docker/test/stress/run.sh → tests/docker_scripts/stress_runner.sh
Normal file → Executable file
@ -3,26 +3,25 @@
# shellcheck disable=SC2086
# shellcheck disable=SC2024

set -x

# Avoid overlaps with previous runs
dmesg --clear
# shellcheck disable=SC1091
source /setup_export_logs.sh

set -x

# we mount tests folder from repo to /usr/share
ln -s /usr/share/clickhouse-test/ci/stress.py /usr/bin/stress
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
ln -s /repo/tests/clickhouse-test/ci/stress.py /usr/bin/stress
ln -s /repo/tests/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test

# Stress tests and upgrade check use similar code that was placed
# in a separate bash library. See tests/ci/stress_tests.lib
# shellcheck source=../stateless/attach_gdb.lib
source /attach_gdb.lib
source /repo/tests/docker_scripts/attach_gdb.lib
# shellcheck source=../stateless/stress_tests.lib
source /stress_tests.lib
source /repo/tests/docker_scripts/stress_tests.lib

# shellcheck disable=SC1091
source /utils.lib
source /repo/tests/docker_scripts/utils.lib

install_packages package_folder

@ -55,7 +54,7 @@ export ZOOKEEPER_FAULT_INJECTION=1
# available for dump via clickhouse-local
configure

./setup_minio.sh stateless # to have a proper environment
/repo/tests/docker_scripts/setup_minio.sh stateless # to have a proper environment

config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

@ -64,7 +63,7 @@ start_server
setup_logs_replication

clickhouse-client --query "CREATE DATABASE datasets"
clickhouse-client --multiquery < create.sql
clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql
clickhouse-client --query "SHOW TABLES FROM datasets"

clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"
@ -267,7 +266,7 @@ fi

start_server

stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \
python3 /repo/tests/ci/stress.py --hung-check --drop-databases --output-folder /test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \
&& echo -e "Test script exit code$OK" >> /test_output/test_results.tsv \
|| echo -e "Test script failed$FAIL script exit code: $?" >> /test_output/test_results.tsv
@ -42,7 +42,7 @@ function configure()
# install test configs
export USE_DATABASE_ORDINARY=1
export EXPORT_S3_STORAGE_POLICIES=1
/usr/share/clickhouse-test/config/install.sh
/repo/tests/config/install.sh

# avoid too slow startup
sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
14
docker/test/upgrade/run.sh → tests/docker_scripts/upgrade_runner.sh
Normal file → Executable file
@ -9,20 +9,20 @@ dmesg --clear
set -x

# we mount tests folder from repo to /usr/share
ln -s /usr/share/clickhouse-test/ci/stress.py /usr/bin/stress
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
ln -s /usr/share/clickhouse-test/ci/download_release_packages.py /usr/bin/download_release_packages
ln -s /usr/share/clickhouse-test/ci/get_previous_release_tag.py /usr/bin/get_previous_release_tag
ln -s /repo/tests/ci/stress.py /usr/bin/stress
ln -s /repo/tests/clickhouse-test /usr/bin/clickhouse-test
ln -s /repo/tests/ci/download_release_packages.py /usr/bin/download_release_packages
ln -s /repo/tests/ci/get_previous_release_tag.py /usr/bin/get_previous_release_tag

# Stress tests and upgrade check use similar code that was placed
# in a separate bash library. See tests/ci/stress_tests.lib
# shellcheck source=../stateless/attach_gdb.lib
source /attach_gdb.lib
source /repo/tests/docker_scripts/attach_gdb.lib
# shellcheck source=../stateless/stress_tests.lib
source /stress_tests.lib
source /repo/tests/docker_scripts/stress_tests.lib

azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
./setup_minio.sh stateless # to have a proper environment
/repo/tests/docker_scripts/setup_minio.sh stateless # to have a proper environment

echo "Get previous release tag"
# shellcheck disable=SC2016
@ -22,3 +22,7 @@
1
1
1
1
1
1
1

@ -1,3 +1,6 @@
-- Tags: no-fasttest
-- no-fasttest: upper/lowerUTF8 use ICU

select lower('aaaaaaaaaaaaaaa012345789,.!aaaa' as str) = str;
select lowerUTF8('aaaaaaaaaaaaaaa012345789,.!aaaa' as str) = str;
select lower('AaAaAaAaAaAaAaA012345789,.!aAaA') = 'aaaaaaaaaaaaaaa012345789,.!aaaa';
@ -27,3 +30,11 @@ select sum(lower(materialize('aaaaАБВГAAAAaaAA')) = materialize('aaaaАБВ
select sum(upper(materialize('aaaaАБВГAAAAaaAA')) = materialize('AAAAАБВГAAAAAAAA')) = count() from system.one array join range(16384) as n;
select sum(lowerUTF8(materialize('aaaaАБВГAAAAaaAA')) = materialize('aaaaабвгaaaaaaaa')) = count() from system.one array join range(16384) as n;
select sum(upperUTF8(materialize('aaaaАБВГAAAAaaAA')) = materialize('AAAAАБВГAAAAAAAA')) = count() from system.one array join range(16384) as n;

-- Turkish language
select upperUTF8('ır') = 'IR';
select lowerUTF8('ır') = 'ır';

-- German language
select upper('öäüß') = 'öäüß';
select lower('ÖÄÜẞ') = 'ÖÄÜẞ';

@ -1,3 +1,6 @@
-- Tags: no-fasttest
-- no-fasttest: upper/lowerUTF8 use ICU

SET send_logs_level = 'fatal';

select 1 = position('', '');

@ -1 +1,4 @@
-- Tags: no-fasttest
-- no-fasttest: upper/lowerUTF8 use ICU

SELECT lowerUTF8('\xF0') = lowerUTF8('\xF0');

@ -1,3 +1,4 @@

DROP TABLE IF EXISTS local_table;
DROP TABLE IF EXISTS other_table;

@ -1,3 +1,6 @@
-- Tags: no-fasttest
-- no-fasttest: upper/lowerUTF8 use ICU

SELECT randomStringUTF8('string'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT lengthUTF8(randomStringUTF8(100));
SELECT toTypeName(randomStringUTF8(10));

@ -1,7 +1,7 @@
1
1
1
1
1
2
1
2
2

@ -13,7 +13,7 @@ ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM numbers(2) FORMAT CSV SETTINGS max_block_size = 1'
# push down append
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM numbers(2) SETTINGS max_compress_block_size = 1 FORMAT CSV SETTINGS max_block_size = 1'
# not overwrite on push down
# overwrite on push down (since these settings go last)
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM numbers(2) SETTINGS max_block_size = 2 FORMAT CSV SETTINGS max_block_size = 1'
# on push-down
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM numbers(2) SETTINGS max_block_size = 1 FORMAT CSV'

@ -1,2 +1,2 @@
FF
FF
EFBFBD
EFBFBD

@ -1,2 +1,5 @@
-- Tags: no-fasttest
-- no-fasttest: upper/lowerUTF8 use ICU

SELECT hex(lowerUTF8('\xFF'));
SELECT hex(upperUTF8('\xFF'));

@ -1,3 +1,6 @@
-- Tags: no-fasttest
-- no-fasttest: upper/lowerUTF8 use ICU

--
-- countSubstrings
--

@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh

${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata"
${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata (data JSON) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1
${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata (data JSON(max_dynamic_paths=100)) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1

cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} \
--max_memory_usage 10G --query "INSERT INTO ghdata FORMAT JSONAsObject"

@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Tags: no-fasttest, no-s3-storage, long
# ^ no-s3-storage: it is memory-hungry
# Tags: no-fasttest, no-s3-storage, long, no-asan
# ^ no-s3-storage: it is memory-hungry, no-asan: too long

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@ -10,9 +10,9 @@ ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2"
${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2_string"
${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2_from_string"

${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2 (data JSON) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1
${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2 (data JSON(max_dynamic_paths=100)) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1
${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_string (data String) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'"
${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_from_string (data JSON) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1
${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_from_string (data JSON(max_dynamic_paths=100)) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1

cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} --max_memory_usage 10G -q "INSERT INTO ghdata_2 FORMAT JSONAsObject"
cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata_2_string FORMAT JSONAsString"

@ -5,9 +5,9 @@ insert into utf8_overlap values ('\xe2'), ('Foo⚊BarBazBam'), ('\xe2'), ('Foo
-- MONOGRAM FOR YANG
with lowerUTF8(str) as l_, upperUTF8(str) as u_, '0x' || hex(str) as h_
select length(str), if(l_ == '\xe2', h_, l_), if(u_ == '\xe2', h_, u_) from utf8_overlap format CSV;
1,"0xE2","0xE2"
1,"<EFBFBD>","<22>"
15,"foo⚊barbazbam","FOO⚊BARBAZBAM"
1,"0xE2","0xE2"
1,"<EFBFBD>","<22>"
15,"foo⚊barbazbam","FOO⚊BARBAZBAM"
-- NOTE: regression test for introduced bug
-- https://github.com/ClickHouse/ClickHouse/issues/42756

@ -1,3 +1,6 @@
-- Tags: no-fasttest
-- no-fasttest: upper/lowerUTF8 use ICU

drop table if exists utf8_overlap;
create table utf8_overlap (str String) engine=Memory();

@ -416,7 +416,6 @@ logTrace
lowCardinalityIndices
lowCardinalityKeys
lower
lowerUTF8
makeDate
makeDate32
makeDateTime
@ -897,7 +896,6 @@ tupleToNameValuePairs
unbin
unhex
upper
upperUTF8
uptime
validateNestedArraySizes
version

@ -1,3 +1,6 @@
-- Tags: no-fasttest
-- no-fasttest: upper/lowerUTF8 use ICU

create table if not exists t (`arr.key` Array(LowCardinality(String)), `arr.value` Array(LowCardinality(String))) engine = Memory;
insert into t (`arr.key`, `arr.value`) values (['a'], ['b']);
select if(true, if(lowerUTF8(arr.key) = 'a', 1, 2), 3) as x from t left array join arr;

@ -1,2 +1,5 @@
-- Tags: no-fasttest
-- no-fasttest: upper/lowerUTF8 use ICU

SELECT lowerUTF8(arrayJoin(['©--------------------------------------', '©--------------------'])) ORDER BY 1;
SELECT upperUTF8(materialize('aaaaАБВГaaaaaaaaaaaaАБВГAAAAaaAA')) FROM numbers(2);

@ -1 +1,2 @@
select 42 settings compatibility=NULL; -- {clientError BAD_GET}
select 42 settings compatibility=NULL; -- {clientError BAD_ARGUMENTS}

@ -1,3 +1,6 @@
-- Tags: no-fasttest
-- no-fasttest: upper/lowerUTF8 use ICU

DROP TABLE IF EXISTS test_data;

CREATE TABLE test_data

@ -1,14 +0,0 @@
1
2
1
2
1
2
1
1
3
3
3
3
3
1
@ -1,30 +0,0 @@
SET max_block_size = 10, max_threads = 1;

-- Take the following example:
SELECT 1 UNION ALL SELECT 2 FORMAT TSV;

-- Each subquery can be put in parentheses and have its own settings:
(SELECT getSetting('max_block_size') SETTINGS max_block_size = 1) UNION ALL (SELECT getSetting('max_block_size') SETTINGS max_block_size = 2) FORMAT TSV;

-- And the whole query can have settings:
(SELECT getSetting('max_block_size') SETTINGS max_block_size = 1) UNION ALL (SELECT getSetting('max_block_size') SETTINGS max_block_size = 2) FORMAT TSV SETTINGS max_block_size = 3;

-- A single query with output is parsed in the same way as the UNION ALL chain:
SELECT getSetting('max_block_size') SETTINGS max_block_size = 1 FORMAT TSV SETTINGS max_block_size = 3;

-- So while these forms have a slightly different meaning, they both exist:
SELECT getSetting('max_block_size') SETTINGS max_block_size = 1 FORMAT TSV;
SELECT getSetting('max_block_size') FORMAT TSV SETTINGS max_block_size = 3;

-- And due to this effect, the users expect that the FORMAT and SETTINGS may go in an arbitrary order.
-- But while this works:
(SELECT getSetting('max_block_size')) UNION ALL (SELECT getSetting('max_block_size')) FORMAT TSV SETTINGS max_block_size = 3;

-- This does not work automatically, unless we explicitly allow different orders:
(SELECT getSetting('max_block_size')) UNION ALL (SELECT getSetting('max_block_size')) SETTINGS max_block_size = 3 FORMAT TSV;

-- Inevitably, we allow this:
SELECT getSetting('max_block_size') SETTINGS max_block_size = 1 SETTINGS max_block_size = 3 FORMAT TSV;
/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-- Because this part is consumed into ASTSelectWithUnionQuery
-- and the rest into ASTQueryWithOutput.
4
tests/queries/0_stateless/03227_json_invalid_regexp.sql
Normal file
@ -0,0 +1,4 @@
set allow_experimental_json_type = 1;
create table test (json JSON(SKIP REGEXP '[]')) engine=Memory(); -- {serverError CANNOT_COMPILE_REGEXP}
create table test (json JSON(SKIP REGEXP '+')) engine=Memory(); -- {serverError CANNOT_COMPILE_REGEXP};

@ -385,6 +385,18 @@ IntelliJ
IntelliSense
InterserverConnection
InterserverThreads
IntervalDay
IntervalHour
IntervalMicrosecond
IntervalMillisecond
IntervalMilliseconds
IntervalMinute
IntervalMonth
IntervalNanosecond
IntervalQuarter
IntervalSecond
IntervalWeek
IntervalYear
IsPentagon
IsResClassIII
IsValid
@ -2740,6 +2752,17 @@ toISOWeek
toISOYear
toInt
toInterval
toIntervalDay
toIntervalHour
toIntervalMicrosecond
toIntervalMillisecond
toIntervalMinute
toIntervalMonth
toIntervalNanosecond
toIntervalQuarter
toIntervalSecond
toIntervalWeek
toIntervalYear
toJSONString
toLastDayOfMonth
toLastDayOfWeek

@ -13,6 +13,7 @@ v24.4.4.113-stable 2024-08-02
v24.4.3.25-stable 2024-06-14
v24.4.2.141-stable 2024-06-07
v24.4.1.2088-stable 2024-05-01
v24.3.8.13-lts 2024-08-20
v24.3.7.30-lts 2024-08-14
v24.3.6.48-lts 2024-08-02
v24.3.5.46-lts 2024-07-03