Merge branch 'master' of https://github.com/ClickHouse/ClickHouse into master

This commit is contained in:
Dmitriy 2020-10-31 16:56:30 +03:00
commit 8b64d7be56
239 changed files with 5156 additions and 1534 deletions


@ -2362,7 +2362,7 @@ No changes compared to v20.4.3.16-stable.
* `Live View` table engine refactoring. [#8519](https://github.com/ClickHouse/ClickHouse/pull/8519) ([vzakaznikov](https://github.com/vzakaznikov))
* Add additional checks for external dictionaries created from DDL-queries. [#8127](https://github.com/ClickHouse/ClickHouse/pull/8127) ([alesapin](https://github.com/alesapin))
* Fix error `Column ... already exists` while using `FINAL` and `SAMPLE` together, e.g. `select count() from table final sample 1/2`. Fixes [#5186](https://github.com/ClickHouse/ClickHouse/issues/5186). [#7907](https://github.com/ClickHouse/ClickHouse/pull/7907) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
* Now table the first argument of `joinGet` function can be table indentifier. [#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([Amos Bird](https://github.com/amosbird))
* Now the first argument of the `joinGet` function can be a table identifier. [#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([Amos Bird](https://github.com/amosbird))
* Allow using `MaterializedView` with subqueries above `Kafka` tables. [#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
* Now background moves between disks run in a separate thread pool. [#7670](https://github.com/ClickHouse/ClickHouse/pull/7670) ([Vladimir Chebotarev](https://github.com/excitoon))
* `SYSTEM RELOAD DICTIONARY` now executes synchronously. [#8240](https://github.com/ClickHouse/ClickHouse/pull/8240) ([Vitaly Baranov](https://github.com/vitlibar))


@ -59,25 +59,6 @@ set(CMAKE_DEBUG_POSTFIX "d" CACHE STRING "Generate debug library name with a pos
# For more info see https://cmake.org/cmake/help/latest/prop_gbl/USE_FOLDERS.html
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
# cmake 3.9+ needed.
# Usually impractical.
# See also ${ENABLE_THINLTO}
option(ENABLE_IPO "Full link time optimization")
if(ENABLE_IPO)
cmake_policy(SET CMP0069 NEW)
include(CheckIPOSupported)
check_ipo_supported(RESULT IPO_SUPPORTED OUTPUT IPO_NOT_SUPPORTED)
if(IPO_SUPPORTED)
message(STATUS "IPO/LTO is supported, enabling")
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
else()
message (${RECONFIGURE_MESSAGE_LEVEL} "IPO/LTO is not supported: <${IPO_NOT_SUPPORTED}>")
endif()
else()
message(STATUS "IPO/LTO not enabled.")
endif()
# Check that submodules are present only if source was downloaded with git
if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git" AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/boost/boost")
message (FATAL_ERROR "Submodules are not initialized. Run\n\tgit submodule update --init --recursive")


@ -51,7 +51,7 @@ struct StringRef
};
/// Here constexpr doesn't implicate inline, see https://www.viva64.com/en/w/v1043/
/// nullptr can't be used because the StringRef values are used in SipHash's pointer arithmetics
/// nullptr can't be used because the StringRef values are used in SipHash's pointer arithmetic
/// and the UBSan thinks that something like nullptr + 8 is UB.
constexpr const inline char empty_string_ref_addr{};
constexpr const inline StringRef EMPTY_STRING_REF{&empty_string_ref_addr, 0};


@ -16,8 +16,4 @@ endif ()
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
set (ARCH_PPC64LE 1)
# FIXME: move this check into tools.cmake
if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8))
message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture")
endif ()
endif ()


@ -84,3 +84,9 @@ if (LINKER_NAME)
message(STATUS "Using custom linker by name: ${LINKER_NAME}")
endif ()
if (ARCH_PPC64LE)
if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8))
message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture")
endif ()
endif ()


@ -15,7 +15,7 @@ CFLAGS (GLOBAL -DVERSION_MAJOR=${VERSION_MAJOR})
CFLAGS (GLOBAL -DVERSION_MINOR=${VERSION_MINOR})
CFLAGS (GLOBAL -DVERSION_PATCH=${VERSION_PATCH})
# TODO: not supported yet, not sure if ya.make supports arithmetics.
# TODO: not supported yet, not sure if ya.make supports arithmetic.
CFLAGS (GLOBAL -DVERSION_INTEGER=0)
CFLAGS (GLOBAL -DVERSION_NAME=\"\\\"${VERSION_NAME}\\\"\")


@ -192,7 +192,7 @@ set(SRCS
${HDFS3_SOURCE_DIR}/common/FileWrapper.h
)
# old kernels (< 3.17) doens't have SYS_getrandom. Always use POSIX implementation to have better compatibility
# old kernels (< 3.17) doesn't have SYS_getrandom. Always use POSIX implementation to have better compatibility
set_source_files_properties(${HDFS3_SOURCE_DIR}/rpc/RpcClient.cpp PROPERTIES COMPILE_FLAGS "-DBOOST_UUID_RANDOM_PROVIDER_FORCE_POSIX=1")
# target

@ -1 +1 @@
Subproject commit f5638e954a79f50bac7c7a5deaa5a241e0ce8b5f
Subproject commit 1485b0de3eaa1508dfe49a5ba1e4aa2a71fd8335


@ -0,0 +1,8 @@
# post / preinstall scripts (not needed, we do it in Dockerfile)
alpine-root/install/*
# docs (looks useless)
alpine-root/usr/share/doc/*
# packages, etc. (used by prepare.sh)
alpine-root/tgz-packages/*

docker/server/.gitignore (vendored, new file)

@ -0,0 +1 @@
alpine-root/*


@ -0,0 +1,26 @@
FROM alpine
ENV LANG=en_US.UTF-8 \
LANGUAGE=en_US:en \
LC_ALL=en_US.UTF-8 \
TZ=UTC \
CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml
COPY alpine-root/ /
# from https://github.com/ClickHouse/ClickHouse/blob/master/debian/clickhouse-server.postinst
RUN addgroup clickhouse \
&& adduser -S -H -h /nonexistent -s /bin/false -G clickhouse -g "ClickHouse server" clickhouse \
&& chown clickhouse:clickhouse /var/lib/clickhouse \
&& chmod 700 /var/lib/clickhouse \
&& chown root:clickhouse /var/log/clickhouse-server \
&& chmod 775 /var/log/clickhouse-server \
&& chmod +x /entrypoint.sh \
&& apk add --no-cache su-exec
EXPOSE 9000 8123 9009
VOLUME /var/lib/clickhouse \
/var/log/clickhouse-server
ENTRYPOINT ["/entrypoint.sh"]

docker/server/alpine-build.sh (new executable file)

@ -0,0 +1,59 @@
#!/bin/bash
set -x
REPO_CHANNEL="${REPO_CHANNEL:-stable}" # lts / testing / prestable / etc
REPO_URL="${REPO_URL:-"https://repo.yandex.ru/clickhouse/tgz/${REPO_CHANNEL}"}"
VERSION="${VERSION:-20.9.3.45}"
# where original files live
DOCKER_BUILD_FOLDER="${BASH_SOURCE%/*}"
# we will create root for our image here
CONTAINER_ROOT_FOLDER="${DOCKER_BUILD_FOLDER}/alpine-root"
# where to put downloaded tgz
TGZ_PACKAGES_FOLDER="${CONTAINER_ROOT_FOLDER}/tgz-packages"
# clean up the root from old runs
rm -rf "$CONTAINER_ROOT_FOLDER"
mkdir -p "$TGZ_PACKAGES_FOLDER"
PACKAGES=( "clickhouse-client" "clickhouse-server" "clickhouse-common-static" )
# download tars from the repo
for package in "${PACKAGES[@]}"
do
wget -q --show-progress "${REPO_URL}/${package}-${VERSION}.tgz" -O "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz"
done
# unpack tars
for package in "${PACKAGES[@]}"
do
tar xvzf "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz" --strip-components=2 -C "$CONTAINER_ROOT_FOLDER"
done
# prepare a few more folders
mkdir -p "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/users.d" \
"${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d" \
"${CONTAINER_ROOT_FOLDER}/var/log/clickhouse-server" \
"${CONTAINER_ROOT_FOLDER}/var/lib/clickhouse" \
"${CONTAINER_ROOT_FOLDER}/docker-entrypoint-initdb.d" \
"${CONTAINER_ROOT_FOLDER}/lib64"
cp "${DOCKER_BUILD_FOLDER}/docker_related_config.xml" "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d/"
cp "${DOCKER_BUILD_FOLDER}/entrypoint.alpine.sh" "${CONTAINER_ROOT_FOLDER}/entrypoint.sh"
## get glibc components from ubuntu 20.04 and put them in the expected place
docker pull ubuntu:20.04
ubuntu20image=$(docker create --rm ubuntu:20.04)
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libc.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libdl.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libm.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libpthread.so.0 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/librt.so.1 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libnss_dns.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libresolv.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib64/ld-linux-x86-64.so.2 "${CONTAINER_ROOT_FOLDER}/lib64"
docker build "$DOCKER_BUILD_FOLDER" -f Dockerfile.alpine -t "yandex/clickhouse-server:${VERSION}-alpine" --pull


@ -0,0 +1,152 @@
#!/bin/sh
#set -x
DO_CHOWN=1
if [ "$CLICKHOUSE_DO_NOT_CHOWN" = 1 ]; then
DO_CHOWN=0
fi
CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
# support --user
if [ "$(id -u)" = "0" ]; then
USER=$CLICKHOUSE_UID
GROUP=$CLICKHOUSE_GID
# busybox has setuidgid & chpst built in
gosu="su-exec $USER:$GROUP"
else
USER="$(id -u)"
GROUP="$(id -g)"
gosu=""
DO_CHOWN=0
fi
# set some vars
CLICKHOUSE_CONFIG="${CLICKHOUSE_CONFIG:-/etc/clickhouse-server/config.xml}"
# port is needed to check if clickhouse-server is ready for connections
HTTP_PORT="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=http_port)"
# get CH directories locations
DATA_DIR="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=path || true)"
TMP_DIR="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=tmp_path || true)"
USER_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=user_files_path || true)"
LOG_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=logger.log || true)"
LOG_DIR="$(dirname $LOG_PATH || true)"
ERROR_LOG_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=logger.errorlog || true)"
ERROR_LOG_DIR="$(dirname $ERROR_LOG_PATH || true)"
FORMAT_SCHEMA_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=format_schema_path || true)"
CLICKHOUSE_USER="${CLICKHOUSE_USER:-default}"
CLICKHOUSE_PASSWORD="${CLICKHOUSE_PASSWORD:-}"
CLICKHOUSE_DB="${CLICKHOUSE_DB:-}"
for dir in "$DATA_DIR" \
"$ERROR_LOG_DIR" \
"$LOG_DIR" \
"$TMP_DIR" \
"$USER_PATH" \
"$FORMAT_SCHEMA_PATH"
do
# skip if the variable is empty
[ -z "$dir" ] && continue
# ensure directories exist
if ! mkdir -p "$dir"; then
echo "Couldn't create necessary directory: $dir"
exit 1
fi
if [ "$DO_CHOWN" = "1" ]; then
# ensure proper directory permissions
chown -R "$USER:$GROUP" "$dir"
elif [ "$(stat -c %u "$dir")" != "$USER" ]; then
echo "Necessary directory '$dir' isn't owned by user with id '$USER'"
exit 1
fi
done
# if clickhouse user is defined - create it (user "default" already exists out of the box)
if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CLICKHOUSE_PASSWORD" ]; then
echo "$0: create new user '$CLICKHOUSE_USER' instead of 'default'"
cat <<EOT > /etc/clickhouse-server/users.d/default-user.xml
<yandex>
<!-- Docs: <https://clickhouse.tech/docs/en/operations/settings/settings_users/> -->
<users>
<!-- Remove default user -->
<default remove="remove">
</default>
<${CLICKHOUSE_USER}>
<profile>default</profile>
<networks>
<ip>::/0</ip>
</networks>
<password>${CLICKHOUSE_PASSWORD}</password>
<quota>default</quota>
</${CLICKHOUSE_USER}>
</users>
</yandex>
EOT
fi
if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
# Listen only on localhost until the initialization is done
$gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG -- --listen_host=127.0.0.1 &
pid="$!"
# check if clickhouse is ready to accept connections
# will try to ping clickhouse via http_port (max 6 retries, with 1 sec timeout and 1 sec delay between retries)
tries=6
while ! wget --spider -T 1 -q "http://localhost:$HTTP_PORT/ping" 2>/dev/null; do
if [ "$tries" -le "0" ]; then
echo >&2 'ClickHouse init process failed.'
exit 1
fi
tries=$(( tries-1 ))
sleep 1
done
if [ ! -z "$CLICKHOUSE_PASSWORD" ]; then
printf -v WITH_PASSWORD '%s %q' "--password" "$CLICKHOUSE_PASSWORD"
fi
clickhouseclient="clickhouse-client --multiquery -u $CLICKHOUSE_USER $WITH_PASSWORD "
# create default database, if defined
if [ -n "$CLICKHOUSE_DB" ]; then
echo "$0: create database '$CLICKHOUSE_DB'"
$clickhouseclient -q "CREATE DATABASE IF NOT EXISTS $CLICKHOUSE_DB";
fi
for f in /docker-entrypoint-initdb.d/*; do
case "$f" in
*.sh)
if [ -x "$f" ]; then
echo "$0: running $f"
"$f"
else
echo "$0: sourcing $f"
. "$f"
fi
;;
*.sql) echo "$0: running $f"; cat "$f" | $clickhouseclient ; echo ;;
*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | $clickhouseclient; echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
if ! kill -s TERM "$pid" || ! wait "$pid"; then
echo >&2 'Finishing of ClickHouse init process failed.'
exit 1
fi
fi
# if no args are passed to `docker run` or the first argument starts with `--`, then the user is passing clickhouse-server arguments
if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
exec $gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG "$@"
fi
# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image
exec "$@"


@ -208,7 +208,7 @@ function run_tests
echo test "$test_name"
# Don't profile if we're past the time limit.
# Use awk because bash doesn't support floating point arithmetics.
# Use awk because bash doesn't support floating point arithmetic.
profile_seconds=$(awk "BEGIN { print ($profile_seconds_left > 0 ? 10 : 0) }")
TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")


@ -35,7 +35,7 @@ RUN apt-get update \
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN pip3 install urllib3 testflows==1.6.57 docker-compose docker dicttoxml kazoo tzlocal
RUN pip3 install urllib3 testflows==1.6.59 docker-compose docker dicttoxml kazoo tzlocal
ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 17.09.1-ce


@ -51,7 +51,7 @@ Optional parameters:
- `rabbitmq_row_delimiter` Delimiter character, which ends the message.
- `rabbitmq_schema` Parameter that must be used if the format requires a schema definition. For example, [Capn Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
- `rabbitmq_num_consumers` The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient.
- `rabbitmq_num_queues` The number of queues per consumer. Default: `1`. Specify more queues if the capacity of one queue per consumer is insufficient.
- `rabbitmq_num_queues` Total number of queues. Default: `1`. Increasing this number can significantly improve performance.
- `rabbitmq_queue_base` - Specify a hint for queue names. Use cases of this setting are described below.
- `rabbitmq_deadletter_exchange` - Specify the name of a [dead letter exchange](https://www.rabbitmq.com/dlx.html). You can create another table with this exchange name and collect messages in cases when they are republished to the dead letter exchange. By default, no dead letter exchange is specified.
- `rabbitmq_persistent` - If set to 1 (true), delivery mode in INSERT queries will be set to 2 (marks messages as 'persistent'). Default: `0`.
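As an illustration, several of the settings above can be combined as in the following sketch (the host, exchange, and table names are assumptions, not taken from this page):

``` sql
CREATE TABLE rabbitmq_queue (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq:5672',  -- assumed broker address
         rabbitmq_exchange_name = 'exchange1',  -- assumed exchange name
         rabbitmq_format = 'JSONEachRow',
         rabbitmq_num_consumers = 2,            -- more consumers for throughput
         rabbitmq_num_queues = 4;               -- total number of queues
```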
@ -148,4 +148,5 @@ Example:
- `_channel_id` - ChannelID on which the consumer that received the message was declared.
- `_delivery_tag` - DeliveryTag of the received message. Scoped per channel.
- `_redelivered` - `redelivered` flag of the message.
- `_message_id` - MessageID of the received message; non-empty if was set, when message was published.
- `_message_id` - messageID of the received message; non-empty if it was set when the message was published.
- `_timestamp` - timestamp of the received message; non-empty if it was set when the message was published.


@ -2148,7 +2148,34 @@ Result:
└───────────────┘
```
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->
## output_format_pretty_row_numbers {#output_format_pretty_row_numbers}
Adds row numbers to output in the [Pretty](../../interfaces/formats.md#pretty) format.
Possible values:
- 0 — Output without row numbers.
- 1 — Output with row numbers.
Default value: `0`.
**Example**
Query:
```sql
SET output_format_pretty_row_numbers = 1;
SELECT TOP 3 name, value FROM system.settings;
```
Result:
```text
┌─name────────────────────┬─value───┐
1. │ min_compress_block_size │ 65536 │
2. │ max_compress_block_size │ 1048576 │
3. │ max_block_size │ 65505 │
└─────────────────────────┴─────────┘
```
## allow_experimental_bigint_types {#allow_experimental_bigint_types}
@ -2160,3 +2187,5 @@ Possible values:
- 0 — The bigint data type is disabled.
Default value: `0`.
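For example (a minimal sketch; `Int256` is one of the types gated by this setting):

```sql
SET allow_experimental_bigint_types = 1;

CREATE TABLE big_numbers (x Int256) ENGINE = Memory;
INSERT INTO big_numbers VALUES (10000000000000000000000000000000000000);
SELECT x FROM big_numbers;
```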
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->


@ -80,4 +80,4 @@ Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argu
## See Also {#see-also}
- [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) operator
- [toInterval](../../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type convertion functions
- [toInterval](../../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type conversion functions


@ -23,8 +23,6 @@ SELECT
└─────────────────────┴────────────┴────────────┴─────────────────────┘
```
Only time zones that differ from UTC by a whole number of hours are supported.
## toTimeZone {#totimezone}
Convert time or date and time to the specified time zone.


@ -626,7 +626,12 @@ neighbor(column, offset[, default_value])
```
The result of the function depends on the affected data blocks and the order of data in the block.
If you make a subquery with ORDER BY and call the function from outside the subquery, you can get the expected result.
!!! warning "Warning"
It can reach the neighbor rows only inside the currently processed data block.
The order of rows used during the calculation of `neighbor` can differ from the order of rows returned to the user.
To prevent that, you can make a subquery with ORDER BY and call the function from outside the subquery.
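A quick sketch of that workaround (an illustrative query, not from the original page):

```sql
SELECT number, neighbor(number, 1, -1) AS next
FROM (SELECT number FROM system.numbers LIMIT 5 ORDER BY number);
```

The inner `ORDER BY` pins the row order before `neighbor` is applied, so the result is deterministic.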
**Parameters**
@ -731,8 +736,13 @@ Result:
Calculates the difference between successive row values in the data block.
Returns 0 for the first row and the difference from the previous row for each subsequent row.
!!! warning "Warning"
It can reach the previous row only inside the currently processed data block.
The result of the function depends on the affected data blocks and the order of data in the block.
If you make a subquery with ORDER BY and call the function from outside the subquery, you can get the expected result.
The order of rows used during the calculation of `runningDifference` can differ from the order of rows returned to the user.
To prevent that, you can make a subquery with ORDER BY and call the function from outside the subquery.
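The same workaround sketched for `runningDifference` (an illustrative query, not the page's own example):

```sql
SELECT number, runningDifference(number) AS diff
FROM (SELECT number FROM system.numbers LIMIT 5 ORDER BY number);
```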
Example:
@ -1584,7 +1594,7 @@ isDecimalOverflow(d, [p])
**Parameters**
- `d` — value. [Decimal](../../sql-reference/data-types/decimal.md).
- `p` — precision. Optional. If omitted, the initial presicion of the first argument is used. Using of this paratemer could be helpful for data extraction to another DBMS or file. [UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges).
- `p` — precision. Optional. If omitted, the initial precision of the first argument is used. Using this parameter could be helpful for data extraction to another DBMS or file. [UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges).
**Returned values**


@ -169,7 +169,7 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL
**See Also**
- [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type
- [toInterval](../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type convertion functions
- [toInterval](../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type conversion functions
## Logical Negation Operator {#logical-negation-operator}


@ -138,7 +138,7 @@ ENGINE = <Engine>
...
```
The `Default` codec can be specified to reference default compression which may dependend on different settings (and properties of data) in runtime.
The `Default` codec can be specified to reference default compression which may depend on different settings (and properties of data) in runtime.
Example: `value UInt64 CODEC(Default)` — the same as omitting the codec specification.
You can also remove the current CODEC from the column and use the default compression from config.xml:
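A sketch of what that can look like (hypothetical table and column names):

``` sql
ALTER TABLE codec_example MODIFY COLUMN float_value CODEC(Default);
```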


@ -13,12 +13,61 @@ Basic query format:
INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
```
The query can specify a list of columns to insert `[(c1, c2, c3)]`. In this case, the rest of the columns are filled with:
You can specify a list of columns to insert using the `(c1, c2, c3)` or `COLUMNS(c1,c2,c3)` syntax.
Instead of listing all the required columns you can use the `(* EXCEPT(column_list))` syntax.
For example, consider the table:
``` sql
SHOW CREATE insert_select_testtable;
```
```
┌─statement────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ CREATE TABLE insert_select_testtable
(
`a` Int8,
`b` String,
`c` Int8
)
ENGINE = MergeTree()
ORDER BY a
SETTINGS index_granularity = 8192 │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
``` sql
INSERT INTO insert_select_testtable (*) VALUES (1, 'a', 1) ;
```
If you want to insert data into all columns except 'b', you need to pass as many values as the number of columns you listed in parentheses:
``` sql
INSERT INTO insert_select_testtable (* EXCEPT(b)) Values (2, 2);
```
``` sql
SELECT * FROM insert_select_testtable;
```
```
┌─a─┬─b─┬─c─┐
│ 2 │ │ 2 │
└───┴───┴───┘
┌─a─┬─b─┬─c─┐
│ 1 │ a │ 1 │
└───┴───┴───┘
```
In this example, we see that the second inserted row has its `a` and `c` columns filled with the passed values, and `b` filled with the default value.
If a list of columns doesn't include all existing columns, the rest of the columns are filled with:
- The values calculated from the `DEFAULT` expressions specified in the table definition.
- Zeros and empty strings, if `DEFAULT` expressions are not defined.
If [strict_insert_defaults=1](../../operations/settings/settings.md), columns that do not have `DEFAULT` defined must be listed in the query.
If [strict\_insert\_defaults=1](../../operations/settings/settings.md), columns that do not have `DEFAULT` defined must be listed in the query.
Data can be passed to the INSERT in any [format](../../interfaces/formats.md#formats) supported by ClickHouse. The format must be specified explicitly in the query:
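For instance, a sketch reusing the table above (`Values` is one of the supported formats; any input format can be named here):

``` sql
INSERT INTO insert_select_testtable FORMAT Values (3, 'c', 3)
```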


@ -4,13 +4,17 @@ toc_title: WITH
# WITH Clause {#with-clause}
This section provides support for Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), so the results of `WITH` clause can be used in the rest of `SELECT` query.
ClickHouse supports Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), that is, it allows using the results of the `WITH` clause in the rest of the `SELECT` query. Named subqueries can be included in the current and child query context wherever table objects are allowed. Recursion is prevented by hiding the current-level CTEs from the `WITH` expression.
## Limitations {#limitations}
## Syntax
1. Recursive queries are not supported.
2. When subquery is used inside WITH section, its result should be scalar with exactly one row.
3. Expressions results are not available in subqueries.
``` sql
WITH <expression> AS <identifier>
```
or
``` sql
WITH <identifier> AS <subquery expression>
```
## Examples {#examples}
@ -22,10 +26,10 @@ SELECT *
FROM hits
WHERE
EventDate = toDate(ts_upper_bound) AND
EventTime <= ts_upper_bound
EventTime <= ts_upper_bound;
```
**Example 2:** Evicting sum(bytes) expression result from SELECT clause column list
**Example 2:** Evicting a sum(bytes) expression result from the SELECT clause column list
``` sql
WITH sum(bytes) as s
@ -34,10 +38,10 @@ SELECT
table
FROM system.parts
GROUP BY table
ORDER BY s
ORDER BY s;
```
**Example 3:** Using results of scalar subquery
**Example 3:** Using results of a scalar subquery
``` sql
/* this example would return TOP 10 of the biggest tables */
@ -53,27 +57,14 @@ SELECT
FROM system.parts
GROUP BY table
ORDER BY table_disk_usage DESC
LIMIT 10
LIMIT 10;
```
**Example 4:** Re-using expression in subquery
As a workaround for current limitation for expression usage in subqueries, you may duplicate it.
**Example 4:** Reusing expression in a subquery
``` sql
WITH ['hello'] AS hello
SELECT
hello,
*
FROM
(
WITH ['hello'] AS hello
SELECT hello
)
WITH test1 AS (SELECT i + 1, j + 1 FROM test1)
SELECT * FROM test1;
```
``` text
┌─hello─────┬─hello─────┐
│ ['hello'] │ ['hello'] │
└───────────┴───────────┘
```
[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/select/with/) <!--hide-->


@ -45,7 +45,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
- `rabbitmq_row_delimiter` – Delimiter character, which ends the message.
- `rabbitmq_schema` – An optional parameter, required if the format needs a schema definition. For example, [Capn Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
- `rabbitmq_num_consumers` – The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient.
- `rabbitmq_num_queues` – The number of queues per consumer. Default: `1`. Specify more queues if the throughput of one queue per consumer is insufficient.
- `rabbitmq_num_queues` – The number of queues. Default: `1`. A larger number of queues can significantly increase throughput.
- `rabbitmq_queue_base` - A hint for queue names. Use cases of this setting are described below.
- `rabbitmq_persistent` - A flag that controls the 'durable' setting for messages in `INSERT` queries. Default: `0`.
- `rabbitmq_skip_broken_messages` – The maximum number of broken messages per block. If `rabbitmq_skip_broken_messages = N`, the engine skips `N` messages that could not be parsed. One message corresponds to exactly one record (row). Default: 0.
@ -140,4 +140,5 @@ Example:
- `_channel_id` - The `ChannelID` of the channel on which the message was received.
- `_delivery_tag` - The `DeliveryTag` of the received message. Unique within a single channel.
- `_redelivered` - The `redelivered` flag. (Non-zero if the message may have been received by more than one channel.)
- `_message_id` - The `MessageID` of the received message. Non-empty if it was set in the parameters when the message was sent.
- `_message_id` - The `messageID` field of the received message. Non-empty if it was set in the parameters when the message was sent.
- `_timestamp` - The `timestamp` field of the received message. Non-empty if it was set in the parameters when the message was sent.


@ -1977,6 +1977,48 @@ SELECT range(number) FROM system.numbers LIMIT 5 FORMAT PrettyCompactNoEscapes;
└───────────────┘
```
## output_format_pretty_row_numbers {#output_format_pretty_row_numbers}
Enables row numbers in the output for queries in the [Pretty](../../interfaces/formats.md#pretty) format.
Possible values:
- 0 — Row numbers are not output.
- 1 — Row numbers are output.
Default value: `0`.
**Example**
Query:
```sql
SET output_format_pretty_row_numbers = 1;
SELECT TOP 3 name, value FROM system.settings;
```
Result:
```text
┌─name────────────────────┬─value───┐
1. │ min_compress_block_size │ 65536 │
2. │ max_compress_block_size │ 1048576 │
3. │ max_block_size │ 65505 │
└─────────────────────────┴─────────┘
```
## allow_experimental_bigint_types {#allow_experimental_bigint_types}
Enables or disables support for integer values that exceed the maximum value allowed for the `int` type.
Possible values:
- 1 — Big integer values are supported.
- 0 — Big integer values are not supported.
Default value: `0`.
## lock_acquire_timeout {#lock_acquire_timeout}
Sets how many seconds the server waits to acquire a table lock.


@ -13,7 +13,55 @@ toc_title: INSERT INTO
INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
```
The query can specify a list of columns to insert `[(c1, c2, c3)]`. In this case, the rest of the columns are filled with:
You can specify a list of columns to insert using the `(c1, c2, c3)` or `COLUMNS(c1,c2,c3)` syntax.
Instead of listing all the required columns you can use the `(* EXCEPT(column_list))` syntax.
As an example, consider the table:
``` sql
SHOW CREATE insert_select_testtable
```
```
┌─statement────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ CREATE TABLE insert_select_testtable
(
`a` Int8,
`b` String,
`c` Int8
)
ENGINE = MergeTree()
ORDER BY a
SETTINGS index_granularity = 8192 │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
``` sql
INSERT INTO insert_select_testtable (*) VALUES (1, 'a', 1)
```
If you want to insert data into all columns except 'b', you need to pass as many values as the number of columns you listed in parentheses:
``` sql
INSERT INTO insert_select_testtable (* EXCEPT(b)) Values (2, 2)
```
``` sql
SELECT * FROM insert_select_testtable
```
```
┌─a─┬─b─┬─c─┐
│ 2 │ │ 2 │
└───┴───┴───┘
┌─a─┬─b─┬─c─┐
│ 1 │ a │ 1 │
└───┴───┴───┘
```
In this example, we see that the second inserted row has its `a` and `c` columns filled with the passed values, and `b` filled with the default value.
If the list of columns doesn't include all existing columns, the rest of the columns are filled with:
- The values calculated from the `DEFAULT` expressions specified in the table definition.
- Zeros and empty strings, if `DEFAULT` expressions are not defined.


@ -2,18 +2,21 @@
toc_title: WITH
---
# WITH Clause {#sektsiia-with}
# WITH Clause {#with-clause}
This section provides [Common Table Expressions](https://ru.wikipedia.org/wiki/Иерархические_и_рекурсивные_запросы_в_SQL), that is, it allows using the results of `WITH` clause expressions in the rest of the `SELECT` query.
ClickHouse supports [Common Table Expressions](https://ru.wikipedia.org/wiki/Иерархические_и_рекурсивные_запросы_в_SQL), that is, it allows using the results of the `WITH` clause in the rest of the `SELECT` query. Named subqueries can be included in the current and child query context wherever table objects are allowed. Recursion is prevented by hiding the current-level CTE from the `WITH` expression.
### Limitations
## Syntax
1. Recursive queries are not supported.
2. If a subquery is used as the expression, the result must contain exactly one row.
3. Expression results cannot be reused in nested subqueries.
The expression results can then be used in the SELECT clause.
``` sql
WITH <expression> AS <identifier>
```
or
``` sql
WITH <identifier> AS <subquery expression>
```
### Examples
## Examples
**Example 1:** Using a constant expression as a "variable"
@ -23,7 +26,7 @@ SELECT *
FROM hits
WHERE
EventDate = toDate(ts_upper_bound) AND
EventTime <= ts_upper_bound
EventTime <= ts_upper_bound;
```
**Example 2:** Evicting the sum(bytes) expression from the SELECT column list
@ -35,7 +38,7 @@ SELECT
table
FROM system.parts
GROUP BY table
ORDER BY s
ORDER BY s;
```
**Example 3:** Using the results of a scalar subquery
@ -54,27 +57,14 @@ SELECT
FROM system.parts
GROUP BY table
ORDER BY table_disk_usage DESC
LIMIT 10
LIMIT 10;
```
**Example 4:** Reusing an expression
Currently, an expression from the WITH clause can be reused inside a subquery only by duplicating it.
``` sql
WITH ['hello'] AS hello
SELECT
hello,
*
FROM
(
WITH ['hello'] AS hello
SELECT hello
)
WITH test1 AS (SELECT i + 1, j + 1 FROM test1)
SELECT * FROM test1;
```
``` text
┌─hello─────┬─hello─────┐
│ ['hello'] │ ['hello'] │
└───────────┴───────────┘
```
[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/select/with/) <!--hide-->


@ -218,6 +218,8 @@ private:
QueryFuzzer fuzzer;
int query_fuzzer_runs = 0;
std::optional<Suggest> suggest;
/// We will format query_id in interactive mode in various ways, the default is just to print Query id: ...
std::vector<std::pair<String, String>> query_id_formats;
@ -577,10 +579,11 @@ private:
if (print_time_to_stderr)
throw Exception("time option could be specified only in non-interactive mode", ErrorCodes::BAD_ARGUMENTS);
suggest.emplace();
if (server_revision >= Suggest::MIN_SERVER_REVISION && !config().getBool("disable_suggestion", false))
{
/// Load suggestion data from the server.
Suggest::instance().load(connection_parameters, config().getInt("suggestion_limit"));
suggest->load(connection_parameters, config().getInt("suggestion_limit"));
}
/// Load command history if present.
@ -607,7 +610,7 @@ private:
highlight_callback = highlight;
ReplxxLineReader lr(
Suggest::instance(),
*suggest,
history_file,
config().has("multiline"),
query_extenders,
@ -615,7 +618,7 @@ private:
highlight_callback);
#elif defined(USE_READLINE) && USE_READLINE
ReadlineLineReader lr(Suggest::instance(), history_file, config().has("multiline"), query_extenders, query_delimiters);
ReadlineLineReader lr(*suggest, history_file, config().has("multiline"), query_extenders, query_delimiters);
#else
LineReader lr(history_file, config().has("multiline"), query_extenders, query_delimiters);
#endif


@ -18,10 +18,11 @@ namespace ErrorCodes
class Suggest : public LineReader::Suggest, boost::noncopyable
{
public:
static Suggest & instance()
Suggest();
~Suggest()
{
static Suggest instance;
return instance;
if (loading_thread.joinable())
loading_thread.join();
}
void load(const ConnectionParameters & connection_parameters, size_t suggestion_limit);
@ -30,12 +31,6 @@ public:
static constexpr int MIN_SERVER_REVISION = 54406;
private:
Suggest();
~Suggest()
{
if (loading_thread.joinable())
loading_thread.join();
}
void loadImpl(Connection & connection, const ConnectionTimeouts & timeouts, size_t suggestion_limit);
void fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query);


@ -152,7 +152,7 @@ void LocalServer::tryInitPath()
default_path = parent_folder / fmt::format("clickhouse-local-{}-{}-{}", getpid(), time(nullptr), randomSeed());
if (exists(default_path))
throw Exception(ErrorCodes::FILE_ALREADY_EXISTS, "Unsuccessfull attempt to create working directory: {} exist!", default_path.string());
throw Exception(ErrorCodes::FILE_ALREADY_EXISTS, "Unsuccessful attempt to create working directory: {} exists!", default_path.string());
create_directory(default_path);
temporary_directory_to_delete = default_path;


@ -270,7 +270,7 @@
This parameter is mandatory and cannot be empty.
roles - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server.
If no roles are specified, the user will not be able to perform any actions after authentication.
If any of the listed roles is not defined locally at the time of authentication, the authenthication attept
If any of the listed roles is not defined locally at the time of authentication, the authentication attempt
will fail as if the provided password was incorrect.
Example:
<ldap>
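<server>my_ldap_server</server>
<!-- sketch of the rest of the example; server and role names are illustrative, the elided original may differ -->
<roles>
<my_local_role1 />
<my_local_role2 />
</roles>
</ldap>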


@ -585,7 +585,7 @@ void IAccessStorage::throwInvalidPassword()
void IAccessStorage::throwCannotAuthenticate(const String & user_name)
{
/// We use the same message for all authentification failures because we don't want to give away any unnecessary information for security reasons,
/// We use the same message for all authentication failures because we don't want to give away any unnecessary information for security reasons,
/// only the log will show the exact reason.
throw Exception(user_name + ": Authentication failed: password is incorrect or there is no user with such name", ErrorCodes::AUTHENTICATION_FAILED);
}


@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
AccessControlManager.cpp


@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | sed 's/^\.\// /' | sort ?>


@ -296,7 +296,7 @@ public:
{
typename ColumnVector<T>::Container & data_to = assert_cast<ColumnVector<T> &>(arr_to.getData()).getData();
if constexpr (is_big_int_v<T>)
// is data_to empty? we should probaly use std::vector::insert then
// is data_to empty? we should probably use std::vector::insert then
for (auto it = this->data(place).value.begin(); it != this->data(place).value.end(); it++)
data_to.push_back(*it);
else


@ -23,6 +23,14 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}
#if defined(OS_DARWIN)
extern "C"
{
double lgammal_r(double x, int * signgamp);
}
#endif
namespace DB
{
@ -150,7 +158,8 @@ struct AggregateFunctionStudentTTestData final
const Float64 t = getTStatisticSquared();
auto f = [&v] (double x) { return std::pow(x, v/2 - 1) / std::sqrt(1 - x); };
Float64 numenator = integrateSimpson(0, v / (t + v), f);
Float64 denominator = std::exp(std::lgammal(v/2) + std::lgammal(0.5) - std::lgammal(v/2 + 0.5));
int unused;
Float64 denominator = std::exp(lgammal_r(v / 2, &unused) + lgammal_r(0.5, &unused) - lgammal_r(v / 2 + 0.5, &unused));
return numenator / denominator;
}


@ -18,11 +18,20 @@
#include <type_traits>
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}
#if defined(OS_DARWIN)
extern "C"
{
double lgammal_r(double x, int * signgamp);
}
#endif
namespace DB
{
@ -161,7 +170,8 @@ struct AggregateFunctionWelchTTestData final
const Float64 t = getTStatisticSquared();
auto f = [&v] (double x) { return std::pow(x, v / 2 - 1) / std::sqrt(1 - x); };
Float64 numenator = integrateSimpson(0, v / (t + v), f);
Float64 denominator = std::exp(std::lgammal(v/2) + std::lgammal(0.5) - std::lgammal(v/2 + 0.5));
int unused;
Float64 denominator = std::exp(lgammal_r(v / 2, &unused) + lgammal_r(0.5, &unused) - lgammal_r(v / 2 + 0.5, &unused));
return numenator / denominator;
}


@ -39,7 +39,7 @@ namespace ErrorCodes
namespace detail
{
const size_t DEFAULT_SAMPLE_COUNT = 8192;
const size_t DEFAULT_MAX_SAMPLE_SIZE = 8192;
const auto MAX_SKIP_DEGREE = sizeof(UInt32) * 8;
}
@ -50,6 +50,7 @@ enum class ReservoirSamplerDeterministicOnEmpty
RETURN_NAN_OR_ZERO,
};
template <typename T,
ReservoirSamplerDeterministicOnEmpty OnEmpty = ReservoirSamplerDeterministicOnEmpty::THROW>
class ReservoirSamplerDeterministic
@ -60,8 +61,8 @@ class ReservoirSamplerDeterministic
}
public:
ReservoirSamplerDeterministic(const size_t sample_count_ = DEFAULT_SAMPLE_COUNT)
: sample_count{sample_count_}
ReservoirSamplerDeterministic(const size_t max_sample_size_ = detail::DEFAULT_MAX_SAMPLE_SIZE)
: max_sample_size{max_sample_size_}
{
}
@ -131,8 +132,8 @@ public:
void merge(const ReservoirSamplerDeterministic & b)
{
if (sample_count != b.sample_count)
throw Poco::Exception("Cannot merge ReservoirSamplerDeterministic's with different sample_count");
if (max_sample_size != b.max_sample_size)
throw Poco::Exception("Cannot merge ReservoirSamplerDeterministic's with different max sample size");
sorted = false;
if (b.skip_degree > skip_degree)
@ -150,11 +151,16 @@ public:
void read(DB::ReadBuffer & buf)
{
DB::readIntBinary<size_t>(sample_count, buf);
size_t size = 0;
DB::readIntBinary<size_t>(size, buf);
DB::readIntBinary<size_t>(total_values, buf);
samples.resize(std::min(total_values, sample_count));
for (size_t i = 0; i < samples.size(); ++i)
/// Compatibility with old versions.
if (size > total_values)
size = total_values;
samples.resize(size);
for (size_t i = 0; i < size; ++i)
DB::readPODBinary(samples[i], buf);
sorted = false;
@ -162,10 +168,11 @@ public:
void write(DB::WriteBuffer & buf) const
{
DB::writeIntBinary<size_t>(sample_count, buf);
size_t size = samples.size();
DB::writeIntBinary<size_t>(size, buf);
DB::writeIntBinary<size_t>(total_values, buf);
for (size_t i = 0; i < std::min(sample_count, total_values); ++i)
for (size_t i = 0; i < size; ++i)
DB::writePODBinary(samples[i], buf);
}
@ -174,18 +181,19 @@ private:
using Element = std::pair<T, UInt32>;
using Array = DB::PODArray<Element, 64>;
size_t sample_count;
size_t total_values{};
bool sorted{};
const size_t max_sample_size; /// Maximum amount of stored values.
size_t total_values = 0; /// How many values were inserted (regardless of whether they remain in the sample or not).
bool sorted = false;
Array samples;
UInt8 skip_degree{};
UInt8 skip_degree = 0; /// The number N determining that we save only one per 2^N elements on average.
void insertImpl(const T & v, const UInt32 hash)
{
/// @todo why + 1? I don't quite recall
while (samples.size() + 1 >= sample_count)
/// Make room for one more element.
while (samples.size() >= max_sample_size)
{
if (++skip_degree > detail::MAX_SKIP_DEGREE)
++skip_degree;
if (skip_degree > detail::MAX_SKIP_DEGREE)
throw DB::Exception{"skip_degree exceeds maximum value", DB::ErrorCodes::MEMORY_LIMIT_EXCEEDED};
thinOut();
}
@ -195,35 +203,17 @@ private:
void thinOut()
{
auto size = samples.size();
for (size_t i = 0; i < size;)
{
if (!good(samples[i].second))
{
/// swap current element with the last one
std::swap(samples[size - 1], samples[i]);
--size;
}
else
++i;
}
if (size != samples.size())
{
samples.resize(size);
samples.resize(std::distance(samples.begin(),
std::remove_if(samples.begin(), samples.end(), [this](const auto & elem){ return !good(elem.second); })));
sorted = false;
}
}
void sortIfNeeded()
{
if (sorted)
return;
std::sort(samples.begin(), samples.end(), [](const auto & lhs, const auto & rhs) { return lhs.first < rhs.first; });
sorted = true;
std::sort(samples.begin(), samples.end(), [] (const std::pair<T, UInt32> & lhs, const std::pair<T, UInt32> & rhs)
{
return lhs.first < rhs.first;
});
}
template <typename ResultType>


@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
AggregateFunctionAggThrow.cpp


@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F GroupBitmap | sed 's/^\.\// /' | sort ?>


@ -6,7 +6,6 @@ PEERDIR(
contrib/libs/poco/NetSSL_OpenSSL
)
CFLAGS(-g0)
SRCS(
Connection.cpp


@ -5,7 +5,6 @@ PEERDIR(
contrib/libs/poco/NetSSL_OpenSSL
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>


@ -82,7 +82,7 @@ public:
* @see DB::ColumnUnique
*
* The most common example uses https://clickhouse.tech/docs/en/sql-reference/data-types/lowcardinality/ columns.
* Consider data type @e LC(String). The inner type here is @e String which is more or less a contigous memory
* Consider data type @e LC(String). The inner type here is @e String which is more or less a contiguous memory
* region, so it can be easily represented as a @e StringRef. So we pass that ref to this function and get its
* index in the dictionary, which can be used to operate with the indices column.
*/
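As a rough SQL-level illustration of that common case (a sketch; the table and values are made up):

```sql
CREATE TABLE lc_example (s LowCardinality(String)) ENGINE = Memory;
INSERT INTO lc_example VALUES ('abc'), ('def'), ('abc');
SELECT s, toTypeName(s) FROM lc_example; -- toTypeName(s) returns LowCardinality(String)
```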


@ -13,7 +13,6 @@ PEERDIR(
contrib/libs/pdqsort
)
CFLAGS(-g0)
SRCS(
Collator.cpp

View File

@ -54,6 +54,7 @@
M(LocalThread, "Number of threads in local thread pools. Should be similar to GlobalThreadActive.") \
M(LocalThreadActive, "Number of threads in local thread pools running a task.") \
M(DistributedFilesToInsert, "Number of pending files to process for asynchronous insertion into Distributed tables. Number of files for every shard is summed.") \
M(TablesToDropQueueSize, "Number of dropped tables that are waiting for background data removal.") \
namespace CurrentMetrics
{


@ -5,15 +5,15 @@
namespace DB
{
/// Helper class, that recieves file descriptor and does fsync for it in destructor.
/// Helper class, that receives file descriptor and does fsync for it in destructor.
/// It's used to keep descriptor open, while doing some operations with it, and do fsync at the end.
/// Guarantees of the 'close-reopen-fsync' sequence may depend on the kernel version.
/// Source: linux-fsdevel mailing-list https://marc.info/?l=linux-fsdevel&m=152535409207496
class FileSyncGuard
{
public:
/// NOTE: If you have already opened descriptor, it's preffered to use
/// this constructor instead of construnctor with path.
/// NOTE: If you have already opened descriptor, it's preferred to use
/// this constructor instead of constructor with path.
FileSyncGuard(const DiskPtr & disk_, int fd_) : disk(disk_), fd(fd_) {}
FileSyncGuard(const DiskPtr & disk_, const String & path)


@ -234,13 +234,13 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
std::is_same_v<Thread, std::thread> ? CurrentMetrics::GlobalThreadActive : CurrentMetrics::LocalThreadActive);
job();
/// job should be reseted before decrementing scheduled_jobs to
/// job should be reset before decrementing scheduled_jobs to
/// ensure that the Job is destroyed before wait() returns.
job = {};
}
catch (...)
{
/// job should be reseted before decrementing scheduled_jobs to
/// job should be reset before decrementing scheduled_jobs to
/// ensure that the Job is destroyed before wait() returns.
job = {};


@ -172,7 +172,7 @@ protected:
void finalizeQueryProfiler();
void logToQueryThreadLog(QueryThreadLog & thread_log);
void logToQueryThreadLog(QueryThreadLog & thread_log, const String & current_database);
void assertState(const std::initializer_list<int> & permitted_states, const char * description = nullptr) const;


@ -1288,7 +1288,7 @@ void ZooKeeper::receiveEvent()
response->removeRootPath(root_path);
}
/// Instead of setting the watch in sendEvent, set it in receiveEvent becuase need to check the response.
/// Instead of setting the watch in sendEvent, set it in receiveEvent because we need to check the response.
/// The watch shouldn't be set if the node does not exist and it will never exist like sequential ephemeral nodes.
/// By using getData() instead of exists(), a watch won't be set if the node doesn't exist.
if (request_info.watch)


@ -21,7 +21,6 @@ PEERDIR(
INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/yandex/ya.make.versions.inc)
CFLAGS(-g0)
SRCS(
ActionLock.cpp


@ -20,7 +20,6 @@ PEERDIR(
INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/yandex/ya.make.versions.inc)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>


@ -185,9 +185,9 @@ void CompressedReadBufferBase::decompress(char * to, size_t size_decompressed, s
}
else
{
throw Exception("Data compressed with different methods, given method byte "
throw Exception("Data compressed with different methods, given method byte 0x"
+ getHexUIntLowercase(method)
+ ", previous method byte "
+ ", previous method byte 0x"
+ getHexUIntLowercase(codec->getMethodByte()),
ErrorCodes::CANNOT_DECOMPRESS);
}


@ -87,7 +87,7 @@ ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST(const ASTPtr
else
throw Exception("Unexpected AST element for compression codec", ErrorCodes::UNEXPECTED_AST_STRUCTURE);
/// Default codec replaced with current default codec which may dependend on different
/// Default codec replaced with current default codec which may depend on different
/// settings (and properties of data) in runtime.
CompressionCodecPtr result_codec;
if (codec_family_name == DEFAULT_CODEC_NAME)


@ -26,7 +26,7 @@ void ICompressionCodec::setCodecDescription(const String & codec_name, const AST
std::shared_ptr<ASTFunction> result = std::make_shared<ASTFunction>();
result->name = "CODEC";
/// Special case for codec Multiple, which doens't have name. It's just list
/// Special case for codec Multiple, which doesn't have a name. It's just a list
/// of other codecs.
if (codec_name.empty())
{


@ -12,7 +12,6 @@ PEERDIR(
contrib/libs/zstd
)
CFLAGS(-g0)
SRCS(
CachedCompressedReadBuffer.cpp


@ -11,7 +11,6 @@ PEERDIR(
contrib/libs/zstd
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>


@ -85,6 +85,9 @@ void GTIDSets::update(const GTID & other)
ErrorCodes::LOGICAL_ERROR);
}
/// Try to shrink the Sequence interval.
GTIDSet::tryShirnk(set, i, current);
/// Sequence, extend the interval.
if (other.seq_no == current.end)
{
@ -116,6 +119,16 @@ void GTIDSets::update(const GTID & other)
sets.emplace_back(set);
}
void GTIDSet::tryShirnk(GTIDSet & set, unsigned int i, GTIDSet::Interval & current)
{
if (i != set.intervals.size() -1)
{
auto & next = set.intervals[i+1];
if (current.end == next.start)
set.tryMerge(i);
}
}
String GTIDSets::toString() const
{
WriteBufferFromOwnString buffer;


@ -26,6 +26,8 @@ public:
std::vector<Interval> intervals;
void tryMerge(size_t i);
static void tryShirnk(GTIDSet & set, unsigned int i, Interval & current);
};
class GTIDSets


@ -705,7 +705,7 @@ namespace MySQLReplication
break;
}
default:
throw ReplicationError("Position update with unsupport event", ErrorCodes::LOGICAL_ERROR);
throw ReplicationError("Position update with unsupported event", ErrorCodes::LOGICAL_ERROR);
}
}


@ -169,6 +169,8 @@ class IColumn;
M(Milliseconds, read_backoff_min_interval_between_events_ms, 1000, "Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time.", 0) \
M(UInt64, read_backoff_min_events, 2, "Settings to reduce the number of threads in case of slow reads. The number of events after which the number of threads will be reduced.", 0) \
\
M(UInt64, read_backoff_min_concurrency, 1, "Settings to try keeping the minimal number of threads in case of slow reads.", 0) \
\
M(Float, memory_tracker_fault_probability, 0., "For testing of `exception safety` - throw an exception every time you allocate memory with the specified probability.", 0) \
\
M(Bool, enable_http_compression, 0, "Compress the result if the client over HTTP said that it understands data compressed by gzip or deflate.", 0) \
@ -390,7 +392,7 @@ class IColumn;
M(Bool, alter_partition_verbose_result, false, "Output information about affected parts. Currently works only for FREEZE and ATTACH commands.", 0) \
M(Bool, allow_experimental_database_materialize_mysql, false, "Allow to create database with Engine=MaterializeMySQL(...).", 0) \
M(Bool, system_events_show_zero_values, false, "Include all metrics, even with zero values", 0) \
M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precison are seen as String on ClickHouse's side.", 0) \
M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \
\
/** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \
\


@ -260,6 +260,17 @@ int main(int argc, char ** argv)
"10662d71-9d91-11ea-bbc2-0242ac110003:6-7",
"20662d71-9d91-11ea-bbc2-0242ac110003:9",
"10662d71-9d91-11ea-bbc2-0242ac110003:6-7,20662d71-9d91-11ea-bbc2-0242ac110003:9"},
{"shirnk-sequence",
"10662d71-9d91-11ea-bbc2-0242ac110003:1-3:4-5:7",
"10662d71-9d91-11ea-bbc2-0242ac110003:6",
"10662d71-9d91-11ea-bbc2-0242ac110003:1-7"},
{"shirnk-sequence",
"10662d71-9d91-11ea-bbc2-0242ac110003:1-3:4-5:10",
"10662d71-9d91-11ea-bbc2-0242ac110003:8",
"10662d71-9d91-11ea-bbc2-0242ac110003:1-5:8:10"
}
};
for (auto & tc : cases)


@ -7,7 +7,6 @@ PEERDIR(
contrib/restricted/boost/libs
)
CFLAGS(-g0)
SRCS(
BackgroundSchedulePool.cpp


@ -6,7 +6,6 @@ PEERDIR(
contrib/restricted/boost/libs
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>


@ -8,7 +8,6 @@ PEERDIR(
NO_COMPILER_WARNINGS()
CFLAGS(-g0)
SRCS(
AddingDefaultBlockOutputStream.cpp


@ -7,7 +7,6 @@ PEERDIR(
NO_COMPILER_WARNINGS()
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>


@ -29,7 +29,7 @@ constexpr size_t min(size_t x, size_t y)
}
/// @note There's no auto scale to larger big integer, only for integral ones.
/// It's cause of (U)Int64 backward compatibilty and very big performance penalties.
/// It's because of (U)Int64 backward compatibility and very big performance penalties.
constexpr size_t nextSize(size_t size)
{
if (size < 8)


@ -6,7 +6,6 @@ PEERDIR(
clickhouse/src/Formats
)
CFLAGS(-g0)
SRCS(
convertMySQLDataType.cpp


@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Formats
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>


@ -116,7 +116,7 @@ void DatabaseAtomic::dropTable(const Context &, const String & table_name, bool
}
tryRemoveSymlink(table_name);
/// Remove the inner table (if any) to avoid deadlock
/// (due to attemp to execute DROP from the worker thread)
/// (due to attempt to execute DROP from the worker thread)
if (auto * mv = dynamic_cast<StorageMaterializedView *>(table.get()))
mv->dropInnerTable(no_delay);
/// Notify DatabaseCatalog that table was dropped. It will remove table data in background.
@ -261,21 +261,29 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora
{
DetachedTables not_in_use;
auto table_data_path = getTableDataPath(query);
bool locked_uuid = false;
try
{
std::unique_lock lock{mutex};
if (query.database != database_name)
throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed to `{}`, cannot create table in `{}`",
database_name, query.database);
/// Do some checks before renaming file from .tmp to .sql
not_in_use = cleanupDetachedTables();
assertDetachedTableNotInUse(query.uuid);
renameNoReplace(table_metadata_tmp_path, table_metadata_path);
/// We will get an exception if some table with the same UUID exists (even if it's a detached table or a table from another database)
DatabaseCatalog::instance().addUUIDMapping(query.uuid);
locked_uuid = true;
/// It throws if `table_metadata_path` already exists (it's possible if table was detached)
renameNoReplace(table_metadata_tmp_path, table_metadata_path); /// Commit point (a sort of)
attachTableUnlocked(query.table, table, lock); /// Should never throw
table_name_to_path.emplace(query.table, table_data_path);
}
catch (...)
{
Poco::File(table_metadata_tmp_path).remove();
if (locked_uuid)
DatabaseCatalog::instance().removeUUIDMappingFinally(query.uuid);
throw;
}
tryCreateSymlink(query.table, table_data_path);


@ -329,10 +329,4 @@ const StoragePtr & DatabaseLazyIterator::table() const
return current_storage;
}
void DatabaseLazyIterator::reset()
{
if (current_storage)
current_storage.reset();
}
}


@ -22,6 +22,10 @@ public:
String getEngineName() const override { return "Lazy"; }
bool canContainMergeTreeTables() const override { return false; }
bool canContainDistributedTables() const override { return false; }
void loadStoredObjects(
Context & context,
bool has_force_restore_data_flag, bool force_attach) override;
@ -122,7 +126,6 @@ public:
bool isValid() const override;
const String & name() const override;
const StoragePtr & table() const override;
void reset() override;
private:
const DatabaseLazy & database;


@ -53,6 +53,9 @@ void DatabaseMemory::dropTable(
}
table->is_dropped = true;
create_queries.erase(table_name);
UUID table_uuid = table->getStorageID().uuid;
if (table_uuid != UUIDHelpers::Nil)
DatabaseCatalog::instance().removeUUIDMappingFinally(table_uuid);
}
ASTPtr DatabaseMemory::getCreateDatabaseQuery() const


@ -223,6 +223,10 @@ void DatabaseWithDictionaries::removeDictionary(const Context &, const String &
attachDictionary(dictionary_name, attach_info);
throw;
}
UUID dict_uuid = attach_info.create_query->as<ASTCreateQuery>()->uuid;
if (dict_uuid != UUIDHelpers::Nil)
DatabaseCatalog::instance().removeUUIDMappingFinally(dict_uuid);
}
DatabaseDictionariesIteratorPtr DatabaseWithDictionaries::getDictionariesIterator(const FilterByNameFunction & filter_by_dictionary_name)

View File

@ -44,8 +44,6 @@ public:
/// (a database with support for lazy tables loading
/// - it maintains a list of tables but tables are loaded lazily).
virtual const StoragePtr & table() const = 0;
/// Reset reference counter to the StoragePtr.
virtual void reset() = 0;
virtual ~IDatabaseTablesIterator() = default;
@ -95,8 +93,6 @@ public:
const String & name() const override { return it->first; }
const StoragePtr & table() const override { return it->second; }
void reset() override { it->second.reset(); }
};
/// Copies list of dictionaries and iterates through such snapshot.
@ -151,6 +147,10 @@ public:
/// Get name of database engine.
virtual String getEngineName() const = 0;
virtual bool canContainMergeTreeTables() const { return true; }
virtual bool canContainDistributedTables() const { return true; }
/// Load a set of existing tables.
/// You can call it only once, right after the object is created.
virtual void loadStoredObjects(Context & /*context*/, bool /*has_force_restore_data_flag*/, bool /*force_attach*/ = false) {}
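The two new virtual flags (overridden to false in the Lazy and MySQL engines elsewhere in this diff) are a plain capability-query pattern: the base interface answers "yes" by default and restricted engines opt out. A minimal sketch with illustrative names:

/// Base interface: permissive defaults.
struct IDatabaseLike
{
    virtual ~IDatabaseLike() = default;
    virtual bool canContainMergeTreeTables() const { return true; }
    virtual bool canContainDistributedTables() const { return true; }
};

/// An engine that cannot host these table types simply opts out.
struct LazyDatabaseLike : IDatabaseLike
{
    bool canContainMergeTreeTables() const override { return false; }
    bool canContainDistributedTables() const override { return false; }
};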

View File

@ -11,7 +11,7 @@ class Context;
class ASTStorage;
#define LIST_OF_CONNECTION_MYSQL_SETTINGS(M) \
M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precison are seen as String on ClickHouse's side.", 0) \
M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \
/// Settings that should not change after the creation of a database.
#define APPLY_FOR_IMMUTABLE_CONNECTION_MYSQL_SETTINGS(M) \

View File

@ -42,6 +42,12 @@ public:
String getEngineName() const override { return "MySQL"; }
bool canContainMergeTreeTables() const override { return false; }
bool canContainDistributedTables() const override { return false; }
bool shouldBeEmptyOnDetach() const override { return false; }
bool empty() const override;
DatabaseTablesIteratorPtr getTablesIterator(const Context & context, const FilterByNameFunction & filter_by_table_name) override;

View File

@ -28,11 +28,6 @@ public:
return tables.emplace_back(storage);
}
void reset() override
{
tables.clear();
}
UUID uuid() const override { return nested_iterator->uuid(); }
DatabaseMaterializeTablesIterator(DatabaseTablesIteratorPtr nested_iterator_, DatabaseMaterializeMySQL * database_)

View File

@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
DatabaseAtomic.cpp

View File

@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | sed 's/^\.\// /' | sort ?>

View File

@ -1467,7 +1467,6 @@ void SSDComplexKeyCacheDictionary::getItemsNumberImpl(
{
assert(dict_struct.key);
assert(key_columns.size() == key_types.size());
assert(key_columns.size() == dict_struct.key->size());
dict_struct.validateKeyTypes(key_types);

View File

@ -12,7 +12,6 @@ PEERDIR(
NO_COMPILER_WARNINGS()
CFLAGS(-g0)
SRCS(
CacheDictionary.cpp

View File

@ -11,7 +11,6 @@ PEERDIR(
NO_COMPILER_WARNINGS()
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F Trie | sed 's/^\.\// /' | sort ?>

View File

@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
DiskS3.cpp

View File

@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
createVolume.cpp

View File

@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F S3 | sed 's/^\.\// /' | sort ?>

View File

@ -7,7 +7,6 @@ PEERDIR(
contrib/libs/protoc
)
CFLAGS(-g0)
SRCS(
FormatFactory.cpp

View File

@ -6,7 +6,6 @@ PEERDIR(
contrib/libs/protoc
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -326,7 +326,7 @@ struct DecimalBinaryOperation
}
private:
/// there's implicit type convertion here
/// there's implicit type conversion here
static NativeResultType apply(NativeResultType a, NativeResultType b)
{
if constexpr (can_overflow && check_overflow)

View File

@ -31,6 +31,7 @@ namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int ILLEGAL_COLUMN;
extern const int BAD_ARGUMENTS;
}
@ -84,6 +85,9 @@ enum class TieBreakingMode
Bankers, // use banker's rounding
};
/// For N, no more than the number of digits in the largest type.
using Scale = Int16;
/** Rounding functions for integer values.
*/
@ -416,7 +420,7 @@ private:
using Container = typename ColumnDecimal<T>::Container;
public:
static NO_INLINE void apply(const Container & in, Container & out, Int64 scale_arg)
static NO_INLINE void apply(const Container & in, Container & out, Scale scale_arg)
{
scale_arg = in.getScale() - scale_arg;
if (scale_arg > 0)
@ -458,7 +462,7 @@ class Dispatcher
FloatRoundingImpl<T, rounding_mode, scale_mode>,
IntegerRoundingImpl<T, rounding_mode, scale_mode, tie_breaking_mode>>;
static ColumnPtr apply(const ColumnVector<T> * col, Int64 scale_arg)
static ColumnPtr apply(const ColumnVector<T> * col, Scale scale_arg)
{
auto col_res = ColumnVector<T>::create();
@ -487,7 +491,7 @@ class Dispatcher
return col_res;
}
static ColumnPtr apply(const ColumnDecimal<T> * col, Int64 scale_arg)
static ColumnPtr apply(const ColumnDecimal<T> * col, Scale scale_arg)
{
const typename ColumnDecimal<T>::Container & vec_src = col->getData();
@ -501,7 +505,7 @@ class Dispatcher
}
public:
static ColumnPtr apply(const IColumn * column, Int64 scale_arg)
static ColumnPtr apply(const IColumn * column, Scale scale_arg)
{
if constexpr (IsNumber<T>)
return apply(checkAndGetColumn<ColumnVector<T>>(column), scale_arg);
@ -544,20 +548,25 @@ public:
return arguments[0];
}
static Int64 getScaleArg(ColumnsWithTypeAndName & arguments)
static Scale getScaleArg(ColumnsWithTypeAndName & arguments)
{
if (arguments.size() == 2)
{
const IColumn & scale_column = *arguments[1].column;
if (!isColumnConst(scale_column))
throw Exception("Scale argument for rounding functions must be constant.", ErrorCodes::ILLEGAL_COLUMN);
throw Exception("Scale argument for rounding functions must be constant", ErrorCodes::ILLEGAL_COLUMN);
Field scale_field = assert_cast<const ColumnConst &>(scale_column).getField();
if (scale_field.getType() != Field::Types::UInt64
&& scale_field.getType() != Field::Types::Int64)
throw Exception("Scale argument for rounding functions must have integer type.", ErrorCodes::ILLEGAL_COLUMN);
throw Exception("Scale argument for rounding functions must have integer type", ErrorCodes::ILLEGAL_COLUMN);
return scale_field.get<Int64>();
Int64 scale64 = scale_field.get<Int64>();
if (scale64 > std::numeric_limits<Scale>::max()
|| scale64 < std::numeric_limits<Scale>::min())
throw Exception("Scale argument for rounding function is too large", ErrorCodes::ARGUMENT_OUT_OF_BOUND);
return scale64;
}
return 0;
}
@ -568,7 +577,7 @@ public:
ColumnPtr executeImpl(ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
{
const ColumnWithTypeAndName & column = arguments[0];
Int64 scale_arg = getScaleArg(arguments);
Scale scale_arg = getScaleArg(arguments);
ColumnPtr res;
auto call = [&](const auto & types) -> bool
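The getScaleArg change above narrows a user-supplied 64-bit scale into the dedicated Scale type (Int16, per the typedef earlier in this hunk) only after an explicit range check. A standalone sketch of that checked narrowing, using standard types in place of ClickHouse's:

#include <cstdint>
#include <limits>
#include <stdexcept>

using Scale = std::int16_t;

/// Accept a 64-bit value but reject anything outside Scale's range
/// before casting, so out-of-range scales fail loudly instead of wrapping.
Scale toScale(std::int64_t scale64)
{
    if (scale64 > std::numeric_limits<Scale>::max()
        || scale64 < std::numeric_limits<Scale>::min())
        throw std::out_of_range("Scale argument for rounding function is too large");
    return static_cast<Scale>(scale64);
}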

View File

@ -131,7 +131,7 @@ public:
for (size_t i = 0; i < input_rows_count; ++i)
{
/// Virtual call is Ok (neglible comparing to the rest of calculations).
/// Virtual call is Ok (negligible comparing to the rest of calculations).
Float64 value = arguments[0].column->getFloat64(i);
bool is_negative = value < 0;

View File

@ -22,7 +22,7 @@ namespace
{
/// Returns 1 if a Decimal value has more digits than its Precision allows, 0 otherwise.
/// Precision could be set as second argument or omitted. If ommited function uses Decimal presicion of the first argument.
/// Precision could be set as second argument or omitted. If omitted function uses Decimal precision of the first argument.
class FunctionIsDecimalOverflow : public IFunction
{
public:

View File

@ -4,7 +4,6 @@
#if defined(OS_DARWIN)
extern "C"
{
/// Is defined in libglibc-compatibility.a
double lgamma_r(double x, int * signgamp);
}
#endif

View File

@ -32,7 +32,6 @@ PEERDIR(
)
# "Arcadia" build is slightly deficient. It lacks many libraries that we need.
CFLAGS(-g0)
SRCS(
abs.cpp

View File

@ -31,7 +31,6 @@ PEERDIR(
)
# "Arcadia" build is slightly deficient. It lacks many libraries that we need.
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -i -v -P 'tests|Bitmap|sumbur|abtesting' | sed 's/^\.\// /' | sort ?>

View File

@ -480,7 +480,7 @@ void readEscapedString(String & s, ReadBuffer & buf)
}
template void readEscapedStringInto<PaddedPODArray<UInt8>>(PaddedPODArray<UInt8> & s, ReadBuffer & buf);
template void readEscapedStringInto<NullSink>(NullSink & s, ReadBuffer & buf);
template void readEscapedStringInto<NullOutput>(NullOutput & s, ReadBuffer & buf);
/** If enable_sql_style_quoting == true,
@ -562,7 +562,7 @@ void readQuotedStringWithSQLStyle(String & s, ReadBuffer & buf)
template void readQuotedStringInto<true>(PaddedPODArray<UInt8> & s, ReadBuffer & buf);
template void readDoubleQuotedStringInto<false>(NullSink & s, ReadBuffer & buf);
template void readDoubleQuotedStringInto<false>(NullOutput & s, ReadBuffer & buf);
void readDoubleQuotedString(String & s, ReadBuffer & buf)
{
@ -742,7 +742,7 @@ void readJSONString(String & s, ReadBuffer & buf)
template void readJSONStringInto<PaddedPODArray<UInt8>, void>(PaddedPODArray<UInt8> & s, ReadBuffer & buf);
template bool readJSONStringInto<PaddedPODArray<UInt8>, bool>(PaddedPODArray<UInt8> & s, ReadBuffer & buf);
template void readJSONStringInto<NullSink>(NullSink & s, ReadBuffer & buf);
template void readJSONStringInto<NullOutput>(NullOutput & s, ReadBuffer & buf);
template void readJSONStringInto<String>(String & s, ReadBuffer & buf);
@ -891,7 +891,7 @@ void skipJSONField(ReadBuffer & buf, const StringRef & name_of_field)
throw Exception("Unexpected EOF for key '" + name_of_field.toString() + "'", ErrorCodes::INCORRECT_DATA);
else if (*buf.position() == '"') /// skip double-quoted string
{
NullSink sink;
NullOutput sink;
readJSONStringInto(sink, buf);
}
else if (isNumericASCII(*buf.position()) || *buf.position() == '-' || *buf.position() == '+' || *buf.position() == '.') /// skip number
@ -955,7 +955,7 @@ void skipJSONField(ReadBuffer & buf, const StringRef & name_of_field)
// field name
if (*buf.position() == '"')
{
NullSink sink;
NullOutput sink;
readJSONStringInto(sink, buf);
}
else

View File

@ -527,7 +527,7 @@ bool tryReadJSONStringInto(Vector & s, ReadBuffer & buf)
}
/// This could be used as a template parameter for functions above, if you want to just skip data.
struct NullSink
struct NullOutput
{
void append(const char *, size_t) {}
void push_back(char) {}
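NullOutput is a discard sink: it exposes the same vector-like surface (append/push_back) that the read*Into templates write through, so input is consumed but never stored, as in the skipJSONField hunks above. A self-contained sketch of the idea with illustrative names:

#include <cstddef>
#include <string>

/// No-op sink: satisfies a vector-like writer interface but keeps nothing.
struct DiscardOutput
{
    void append(const char *, std::size_t) {}
    void push_back(char) {}
};

/// Any routine templated on its output can now parse-and-discard;
/// a real reader would pull from a ReadBuffer instead of a string.
template <typename Vector>
void copyChars(Vector & out, const std::string & src)
{
    for (char c : src)
        out.push_back(c);
}

/// Usage: DiscardOutput sink; copyChars(sink, "skipped");  // consumed, not stored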

View File

@ -533,6 +533,10 @@ ReturnType parseDateTimeBestEffortImpl(
}
}
/// If neither Date nor Time is parsed successfully, it should fail
if (!year && !month && !day_of_month && !has_time)
return on_error("Cannot read DateTime: neither Date nor Time was parsed successfully", ErrorCodes::CANNOT_PARSE_DATETIME);
if (!year)
year = 2000;
if (!month)
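The added guard changes best-effort DateTime parsing from "default everything" to "fail when nothing was recognized"; defaults such as year = 2000 now apply only after at least one date or time component parsed. A hedged, self-contained sketch of that control flow (the real function reports through on_error; the month/day defaults below are assumptions, since the hunk is truncated here):

#include <optional>

struct ParsedParts { int year = 0, month = 0, day_of_month = 0; bool has_time = false; };

/// Returns nullopt when neither Date nor Time was parsed; otherwise fills
/// in the defaults and succeeds.
std::optional<ParsedParts> finalize(ParsedParts p)
{
    if (!p.year && !p.month && !p.day_of_month && !p.has_time)
        return std::nullopt;                        // "Cannot read DateTime: ..."
    if (!p.year)         p.year = 2000;             // defaults only on partial success
    if (!p.month)        p.month = 1;               // assumed default
    if (!p.day_of_month) p.day_of_month = 1;        // assumed default
    return p;
}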

View File

@ -8,7 +8,6 @@ PEERDIR(
contrib/libs/poco/NetSSL_OpenSSL
)
CFLAGS(-g0)
SRCS(
AIOContextPool.cpp

View File

@ -7,7 +7,6 @@ PEERDIR(
contrib/libs/poco/NetSSL_OpenSSL
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -P 'S3|HDFS' | sed 's/^\.\// /' | sort ?>

Some files were not shown because too many files have changed in this diff.