Mirror of https://github.com/ClickHouse/ClickHouse.git — synced 2024-11-29 02:52:13 +00:00

Commit a21e4117da: Merge branch 'ClickHouse:master' into add-tlsv1_3-test

.gitmodules (vendored) — 6 changed lines
@@ -268,3 +268,9 @@
 [submodule "contrib/hashidsxx"]
 	path = contrib/hashidsxx
 	url = https://github.com/schoentoon/hashidsxx.git
+[submodule "contrib/liburing"]
+	path = contrib/liburing
+	url = https://github.com/axboe/liburing.git
+[submodule "contrib/base-x"]
+	path = contrib/base-x
+	url = https://github.com/ClickHouse/base-x.git
@@ -1,102 +1,21 @@
 #!/bin/bash -e
 
-if [[ -n $1 ]]; then
-    SCALE=$1
-else
-    SCALE=100
-fi
-
-TABLE="hits_${SCALE}m_obfuscated"
-DATASET="${TABLE}_v1.tar.xz"
+TABLE="hits_100m_obfuscated"
 QUERIES_FILE="queries.sql"
 TRIES=3
 
-# Note: on older Ubuntu versions, 'axel' does not support IPv6. If you are using IPv6-only servers on very old Ubuntu, just don't install 'axel'.
+mkdir -p clickhouse-benchmark
+pushd clickhouse-benchmark
 
-FASTER_DOWNLOAD=wget
-if command -v axel >/dev/null; then
-    FASTER_DOWNLOAD=axel
-else
-    echo "It's recommended to install 'axel' for faster downloads."
+# Download the binary
+if [[ ! -x clickhouse ]]; then
+    curl https://clickhouse.com/ | sh
 fi
 
-if command -v pixz >/dev/null; then
-    TAR_PARAMS='-Ipixz'
-else
-    echo "It's recommended to install 'pixz' for faster decompression of the dataset."
-fi
-
-mkdir -p clickhouse-benchmark-$SCALE
-pushd clickhouse-benchmark-$SCALE
-
-OS=$(uname -s)
-ARCH=$(uname -m)
-
-DIR=
-
-if [ "${OS}" = "Linux" ]
-then
-    if [ "${ARCH}" = "x86_64" ]
-    then
-        DIR="amd64"
-    elif [ "${ARCH}" = "aarch64" ]
-    then
-        DIR="aarch64"
-    elif [ "${ARCH}" = "powerpc64le" ]
-    then
-        DIR="powerpc64le"
-    fi
-elif [ "${OS}" = "FreeBSD" ]
-then
-    if [ "${ARCH}" = "x86_64" ]
-    then
-        DIR="freebsd"
-    elif [ "${ARCH}" = "aarch64" ]
-    then
-        DIR="freebsd-aarch64"
-    elif [ "${ARCH}" = "powerpc64le" ]
-    then
-        DIR="freebsd-powerpc64le"
-    fi
-elif [ "${OS}" = "Darwin" ]
-then
-    if [ "${ARCH}" = "x86_64" ]
-    then
-        DIR="macos"
-    elif [ "${ARCH}" = "aarch64" -o "${ARCH}" = "arm64" ]
-    then
-        DIR="macos-aarch64"
-    fi
-fi
-
-if [ -z "${DIR}" ]
-then
-    echo "The '${OS}' operating system with the '${ARCH}' architecture is not supported."
-    exit 1
-fi
-
-URL="https://builds.clickhouse.com/master/${DIR}/clickhouse"
-echo
-echo "Will download ${URL}"
-echo
-curl -O "${URL}" && chmod a+x clickhouse || exit 1
-echo
-echo "Successfully downloaded the ClickHouse binary"
-
-chmod a+x clickhouse
-
 if [[ ! -f $QUERIES_FILE ]]; then
     wget "https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/$QUERIES_FILE"
 fi
 
-if [[ ! -d data ]]; then
-    if [[ ! -f $DATASET ]]; then
-        $FASTER_DOWNLOAD "https://datasets.clickhouse.com/hits/partitions/$DATASET"
-    fi
-
-    tar $TAR_PARAMS --strip-components=1 --directory=. -x -v -f $DATASET
-fi
-
 uptime
 
 echo "Starting clickhouse-server"
@@ -114,10 +33,20 @@ echo "Waiting for clickhouse-server to start"
 
 for i in {1..30}; do
     sleep 1
-    ./clickhouse client --query "SELECT 'The dataset size is: ', count() FROM $TABLE" 2>/dev/null && break || echo '.'
+    ./clickhouse client --query "SELECT 'Ok.'" 2>/dev/null && break || echo -n '.'
    if [[ $i == 30 ]]; then exit 1; fi
 done
 
+echo "Will download the dataset"
+./clickhouse client --max_insert_threads $(nproc || 4) --progress --query "
+    CREATE OR REPLACE TABLE ${TABLE} ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID), EventTime)
+    AS SELECT * FROM url('https://datasets.clickhouse.com/hits/native/hits_100m_obfuscated_{0..255}.native.zst')"
+
+./clickhouse client --query "SELECT 'The dataset size is: ', count() FROM ${TABLE}"
+
+echo "Will prepare the dataset"
+./clickhouse client --query "OPTIMIZE TABLE ${TABLE} FINAL"
+
 echo
 echo "Will perform benchmark. Results:"
 echo
@@ -133,7 +62,7 @@ cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read query; do
 
     echo -n "["
    for i in $(seq 1 $TRIES); do
-        RES=$(./clickhouse client --max_memory_usage 100G --time --format=Null --query="$query" 2>&1 ||:)
+        RES=$(./clickhouse client --time --format=Null --query="$query" 2>&1 ||:)
        [[ "$?" == "0" ]] && echo -n "${RES}" || echo -n "null"
        [[ "$i" != $TRIES ]] && echo -n ", "
    done
@@ -180,10 +109,10 @@ else
     cat /proc/meminfo | grep MemTotal
     echo '----RAID Info-------------------'
     cat /proc/mdstat
-    #echo '----PCI-------------------------'
-    #lspci
-    #echo '----All Hardware Info-----------'
-    #lshw
     echo '--------------------------------'
 fi
 echo
 
+echo "Instance type from IMDS (if available):"
+curl --connect-timeout 1 http://169.254.169.254/latest/meta-data/instance-type
+echo
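The dataset-loading step added above leans on the `url()` table function's numeric range expansion: `{0..255}` fans out to 256 URLs that are fetched and inserted in parallel, with the schema inferred from the Native format, just as the `CREATE ... AS SELECT` relies on. A minimal sketch of the same mechanism (the count query is illustrative; only the URL pattern comes from the diff):

```sql
-- {0..255} expands to 256 shard URLs; ClickHouse reads them concurrently.
SELECT count()
FROM url('https://datasets.clickhouse.com/hits/native/hits_100m_obfuscated_{0..255}.native.zst');
```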
contrib/CMakeLists.txt (vendored) — 1 changed line
@@ -155,6 +155,7 @@ endif()
 add_contrib (sqlite-cmake sqlite-amalgamation)
 add_contrib (s2geometry-cmake s2geometry)
+add_contrib (base-x-cmake base-x)
 
 # Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
 # Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
contrib/base-x (vendored submodule) — 1 changed line
@@ -0,0 +1 @@
+Subproject commit a85f98fb4ed52c2f4029a4b6ac1ef0bafdfc56f5
contrib/base-x-cmake/CMakeLists.txt (new file) — 28 lines
@@ -0,0 +1,28 @@
+option (ENABLE_BASEX "Enable base-x" ${ENABLE_LIBRARIES})
+
+if (NOT ENABLE_BASEX)
+    message(STATUS "Not using base-x")
+    return()
+endif()
+
+set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/base-x")
+
+set (SRCS
+    ${LIBRARY_DIR}/base_x.hh
+    ${LIBRARY_DIR}/uinteger_t.hh
+)
+
+add_library(_base-x INTERFACE)
+target_include_directories(_base-x SYSTEM BEFORE INTERFACE "${ClickHouse_SOURCE_DIR}/contrib/base-x")
+
+if (XCODE OR XCODE_VERSION)
+    # https://gitlab.kitware.com/cmake/cmake/issues/17457
+    # Some native build systems may not like targets that have only object files, so consider adding at least one real source file
+    # This applies to Xcode.
+    if (NOT EXISTS "${CMAKE_CURRENT_BINARY_DIR}/dummy.c")
+        file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/dummy.c" "")
+    endif ()
+    target_sources(_base-x PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/dummy.c")
+endif ()
+
+add_library(ch_contrib::base-x ALIAS _base-x)
@@ -18,6 +18,7 @@ The list of third-party libraries:
 | aws-c-common | [Apache](https://github.com/ClickHouse-Extras/aws-c-common/blob/736a82d1697c108b04a277e66438a7f4e19b6857/LICENSE) |
 | aws-c-event-stream | [Apache](https://github.com/ClickHouse-Extras/aws-c-event-stream/blob/3bc33662f9ccff4f4cbcf9509cc78c26e022fde0/LICENSE) |
 | aws-checksums | [Apache](https://github.com/ClickHouse-Extras/aws-checksums/blob/519d6d9093819b6cf89ffff589a27ef8f83d0f65/LICENSE) |
+| base58 | [MIT](https://github.com/ClickHouse/base-x/blob/3e58874643c087f57e82b0ff03825c933fab945a/LICENSE) |
 | base64 | [BSD 2-clause](https://github.com/ClickHouse-Extras/Turbo-Base64/blob/af9b331f2b4f30b41c70f3a571ff904a8251c1d3/LICENSE) |
 | boost | [Boost](https://github.com/ClickHouse-Extras/boost/blob/9cf09dbfd55a5c6202dedbdf40781a51b02c2675/LICENSE_1_0.txt) |
 | boringssl | [BSD](https://github.com/ClickHouse-Extras/boringssl/blob/a6a2e2ab3e44d97ce98e51c558e989f211de7eb3/LICENSE) |
@@ -455,4 +455,3 @@ ORDER BY yr,
 
 The data is also available for interactive queries in the [Playground](https://play.clickhouse.com/play?user=play), [example](https://play.clickhouse.com/play?user=play#U0VMRUNUIG1hY2hpbmVfbmFtZSwKICAgICAgIE1JTihjcHUpIEFTIGNwdV9taW4sCiAgICAgICBNQVgoY3B1KSBBUyBjcHVfbWF4LAogICAgICAgQVZHKGNwdSkgQVMgY3B1X2F2ZywKICAgICAgIE1JTihuZXRfaW4pIEFTIG5ldF9pbl9taW4sCiAgICAgICBNQVgobmV0X2luKSBBUyBuZXRfaW5fbWF4LAogICAgICAgQVZHKG5ldF9pbikgQVMgbmV0X2luX2F2ZywKICAgICAgIE1JTihuZXRfb3V0KSBBUyBuZXRfb3V0X21pbiwKICAgICAgIE1BWChuZXRfb3V0KSBBUyBuZXRfb3V0X21heCwKICAgICAgIEFWRyhuZXRfb3V0KSBBUyBuZXRfb3V0X2F2ZwpGUk9NICgKICBTRUxFQ1QgbWFjaGluZV9uYW1lLAogICAgICAgICBDT0FMRVNDRShjcHVfdXNlciwgMC4wKSBBUyBjcHUsCiAgICAgICAgIENPQUxFU0NFKGJ5dGVzX2luLCAwLjApIEFTIG5ldF9pbiwKICAgICAgICAgQ09BTEVTQ0UoYnl0ZXNfb3V0LCAwLjApIEFTIG5ldF9vdXQKICBGUk9NIG1nYmVuY2gubG9nczEKICBXSEVSRSBtYWNoaW5lX25hbWUgSU4gKCdhbmFuc2knLCdhcmFnb2cnLCd1cmQnKQogICAgQU5EIGxvZ190aW1lID49IFRJTUVTVEFNUCAnMjAxNy0wMS0xMSAwMDowMDowMCcKKSBBUyByCkdST1VQIEJZIG1hY2hpbmVfbmFtZQ==).
 
-[Original article](https://clickhouse.com/docs/en/getting_started/example_datasets/brown-benchmark/) <!--hide-->
@@ -494,6 +494,48 @@ If the ‘s’ string is non-empty and does not contain the ‘c’ character at
 
 Returns the string ‘s’ that was converted from the encoding in ‘from’ to the encoding in ‘to’.
 
+## Base58Encode(plaintext[, alphabet_name]), Base58Decode(encoded_text[, alphabet_name])
+
+Accepts a String and encodes/decodes it using the [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme with the specified alphabet.
+
+**Syntax**
+
+```sql
+base58Encode(decoded[, alphabet_name])
+base58Decode(encoded[, alphabet_name])
+```
+
+**Arguments**
+
+- `decoded` — [String](../../sql-reference/data-types/string.md) column or constant.
+- `encoded` — [String](../../sql-reference/data-types/string.md) column or constant. If the string is not a valid base58-encoded value, an exception is thrown.
+- `alphabet_name` — String constant. Specifies the alphabet used for encoding. Possible values: `gmp`, `bitcoin`, `ripple`, `flickr`. Default: `bitcoin`.
+
+**Returned value**
+
+- A string containing the encoded/decoded value of the 1st argument.
+
+Type: [String](../../sql-reference/data-types/string.md).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT base58Encode('encode', 'flickr');
+SELECT base58Decode('izCFiDUY', 'ripple');
+```
+
+Result:
+
+```text
+┌─base58Encode('encode', 'flickr')─┐
+│ SvyTHb1D                         │
+└──────────────────────────────────┘
+┌─base58Decode('izCFiDUY', 'ripple')─┐
+│ decode                             │
+└────────────────────────────────────┘
+```
+
 ## base64Encode(s)
 
 Encodes ‘s’ string into base64
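Since the first argument may be a full String column rather than a constant, the functions also apply row-wise; a small illustrative query (the generated sample strings are not from the original docs):

```sql
-- Encodes three generated strings with the default 'bitcoin' alphabet.
SELECT base58Encode(concat('str_', toString(number))) FROM numbers(3);
```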
@@ -18,6 +18,7 @@ sidebar_label: "Third-party libraries used
 | aws-c-common | [Apache](https://github.com/ClickHouse-Extras/aws-c-common/blob/736a82d1697c108b04a277e66438a7f4e19b6857/LICENSE) |
 | aws-c-event-stream | [Apache](https://github.com/ClickHouse-Extras/aws-c-event-stream/blob/3bc33662f9ccff4f4cbcf9509cc78c26e022fde0/LICENSE) |
 | aws-checksums | [Apache](https://github.com/ClickHouse-Extras/aws-checksums/blob/519d6d9093819b6cf89ffff589a27ef8f83d0f65/LICENSE) |
+| base58 | [MIT](https://github.com/ClickHouse/base-x/blob/3e58874643c087f57e82b0ff03825c933fab945a/LICENSE) |
 | base64 | [BSD 2-clause](https://github.com/ClickHouse-Extras/Turbo-Base64/blob/af9b331f2b4f30b41c70f3a571ff904a8251c1d3/LICENSE) |
 | boost | [Boost](https://github.com/ClickHouse-Extras/boost/blob/9cf09dbfd55a5c6202dedbdf40781a51b02c2675/LICENSE_1_0.txt) |
 | boringssl | [BSD](https://github.com/ClickHouse-Extras/boringssl/blob/a6a2e2ab3e44d97ce98e51c558e989f211de7eb3/LICENSE) |
@@ -490,6 +490,48 @@ SELECT concat(key1, key2), sum(value) FROM key_val GROUP BY (key1, key2);
 
 Returns the string ‘s’ converted from the ‘from’ encoding to the ‘to’ encoding.
 
+## Base58Encode(plaintext[, alphabet_name]), Base58Decode(plaintext[, alphabet_name]) {#base58}
+
+Accepts a string or a column of strings and encodes/decodes them using the [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme with the specified alphabet.
+
+**Syntax**
+
+```sql
+base58Encode(decoded[, alphabet_name])
+base58Decode(encoded[, alphabet_name])
+```
+
+**Arguments**
+
+- `decoded` — A column or string of type [String](../../sql-reference/data-types/string.md).
+- `encoded` — A column or string of type [String](../../sql-reference/data-types/string.md). If the input string is not a valid Base58 encoding of some string, exception `1001` is thrown.
+- `alphabet_name` — A string constant. Specifies the alphabet to use for encoding. Possible values: `gmp`, `bitcoin`, `ripple`, `flickr`. Default: `bitcoin`.
+
+**Returned value**
+
+- A string containing the decoded/encoded first argument.
+
+Type: [String](../../sql-reference/data-types/string.md).
+
+**Example:**
+
+Query:
+
+``` sql
+SELECT base58Encode('encode', 'flickr');
+SELECT base58Decode('izCFiDUY', 'ripple');
+```
+
+Result:
+
+```text
+┌─base58Encode('encode', 'flickr')─┐
+│ SvyTHb1D                         │
+└──────────────────────────────────┘
+┌─base58Decode('izCFiDUY', 'ripple')─┐
+│ decode                             │
+└────────────────────────────────────┘
+```
+
 ## base64Encode(s) {#base64encode}
 
 Encodes the string ‘s’ into its base64 representation.
@@ -16,13 +16,21 @@ public:
     CommandList()
     {
         command_name = "list";
+        command_option_description.emplace(createOptionsDescription("Allowed options", getTerminalWidth()));
         description = "List files (the default disk is used by default)\nPath should be in format './' or './path' or 'path'";
         usage = "list [OPTION]... <PATH>...";
+        command_option_description->add_options()
+            ("recursive", "recursively list all directories")
+            ;
     }
 
     void processOptions(
-        Poco::Util::LayeredConfiguration &,
-        po::variables_map &) const override{}
+        Poco::Util::LayeredConfiguration & config,
+        po::variables_map & options) const override
+    {
+        if (options.count("recursive"))
+            config.setBool("recursive", true);
+    }
 
     void execute(
         const std::vector<String> & command_arguments,
@@ -39,16 +47,45 @@ public:
 
         String path = command_arguments[0];
 
-        std::vector<String> file_names;
         DiskPtr disk = global_context->getDisk(disk_name);
 
         String full_path = fullPathWithValidate(disk, path);
 
+        bool recursive = config.getBool("recursive", false);
+
+        if (recursive)
+            listRecursive(disk, full_path);
+        else
+            list(disk, full_path);
+    }
+
+private:
+    static void list(const DiskPtr & disk, const std::string & full_path)
+    {
+        std::vector<String> file_names;
         disk->listFiles(full_path, file_names);
 
         for (const auto & file_name : file_names)
             std::cout << file_name << '\n';
     }
+
+    static void listRecursive(const DiskPtr & disk, const std::string & full_path)
+    {
+        std::vector<String> file_names;
+        disk->listFiles(full_path, file_names);
+
+        std::cout << full_path << ":\n";
+        for (const auto & file_name : file_names)
+            std::cout << file_name << '\n';
+        std::cout << "\n";
+
+        for (const auto & file_name : file_names)
+        {
+            auto path = full_path + "/" + file_name;
+            if (disk->isDirectory(path))
+                listRecursive(disk, path);
+        }
+    }
 };
 }
@ -91,7 +91,11 @@ T execute(nanodbc::ConnectionHolderPtr connection_holder, std::function<T(nanodb
|
|||||||
}
|
}
|
||||||
catch (const nanodbc::database_error & e)
|
catch (const nanodbc::database_error & e)
|
||||||
{
|
{
|
||||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
LOG_ERROR(
|
||||||
|
&Poco::Logger::get("ODBCConnection"),
|
||||||
|
"ODBC query failed with error: {}, state: {}, native code: {}",
|
||||||
|
e.what(), e.state(), e.native());
|
||||||
|
|
||||||
/// SQLState, connection related errors start with 08 (main: 08S01), cursor invalid state is 24000.
|
/// SQLState, connection related errors start with 08 (main: 08S01), cursor invalid state is 24000.
|
||||||
/// Invalid cursor state is a retriable error.
|
/// Invalid cursor state is a retriable error.
|
||||||
/// Invalid transaction state 25000. Truncate to 2 letters on purpose.
|
/// Invalid transaction state 25000. Truncate to 2 letters on purpose.
|
||||||
|
@@ -25,6 +25,7 @@ enum class AccessType
     M(SHOW_DICTIONARIES, "", DICTIONARY, SHOW) /* allows to execute SHOW DICTIONARIES, SHOW CREATE DICTIONARY, EXISTS <dictionary>;
                                                   implicitly enabled by any grant on the dictionary */\
     M(SHOW, "", GROUP, ALL) /* allows to execute SHOW, USE, EXISTS, CHECK, DESCRIBE */\
+    M(SHOW_CACHES, "", GROUP, ALL) \
     \
     M(SELECT, "", COLUMN, ALL) \
     M(INSERT, "", COLUMN, ALL) \
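The new `SHOW_CACHES` access type is declared as a grantable group under `ALL`. Assuming the usual derivation of the GRANT keyword from the enum name (this mapping is not shown in the diff, and the user name is illustrative), granting it might look like:

```sql
GRANT SHOW CACHES ON *.* TO monitoring_user;
```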
@@ -250,9 +250,12 @@ ColumnWithTypeAndName ColumnFunction::reduce() const
                         "arguments but " + toString(captured) + " columns were captured.", ErrorCodes::LOGICAL_ERROR);
 
     ColumnsWithTypeAndName columns = captured_columns;
-    if (is_short_circuit_argument)
+    IFunction::ShortCircuitSettings settings;
+    /// Arguments of lazy executed function can also be lazy executed.
+    /// But we shouldn't execute arguments if this function is short circuit,
+    /// because it will handle lazy executed arguments by itself.
+    if (is_short_circuit_argument && !function->isShortCircuit(settings, args))
     {
-        /// Arguments of lazy executed function can also be lazy executed.
         for (auto & col : columns)
         {
             if (const ColumnFunction * arg = checkAndGetShortCircuitArgument(col.column))
@@ -1,5 +1,4 @@
 #include "FileCacheFactory.h"
-#include "IFileCache.h"
 #include "LRUFileCache.h"
 
 namespace DB
@@ -19,51 +18,66 @@ FileCacheFactory & FileCacheFactory::instance()
 FileCacheFactory::CacheByBasePath FileCacheFactory::getAll()
 {
     std::lock_guard lock(mutex);
-    return caches;
+    return caches_by_path;
 }
 
 const FileCacheSettings & FileCacheFactory::getSettings(const std::string & cache_base_path)
 {
     std::lock_guard lock(mutex);
-
-    auto * cache_data = getImpl(cache_base_path, lock);
-    if (cache_data)
-        return cache_data->settings;
-
-    throw Exception(ErrorCodes::BAD_ARGUMENTS, "No cache found by path: {}", cache_base_path);
-}
-
-FileCacheFactory::CacheData * FileCacheFactory::getImpl(const std::string & cache_base_path, std::lock_guard<std::mutex> &)
-{
-    auto it = caches.find(cache_base_path);
-    if (it == caches.end())
-        return nullptr;
-    return &it->second;
+    auto it = caches_by_path.find(cache_base_path);
+    if (it == caches_by_path.end())
+        throw Exception(ErrorCodes::BAD_ARGUMENTS, "No cache found by path: {}", cache_base_path);
+    return it->second->settings;
 }
 
 FileCachePtr FileCacheFactory::get(const std::string & cache_base_path)
 {
     std::lock_guard lock(mutex);
-
-    auto * cache_data = getImpl(cache_base_path, lock);
-    if (cache_data)
-        return cache_data->cache;
-
-    throw Exception(ErrorCodes::BAD_ARGUMENTS, "No cache found by path: {}", cache_base_path);
+    auto it = caches_by_path.find(cache_base_path);
+    if (it == caches_by_path.end())
+        throw Exception(ErrorCodes::BAD_ARGUMENTS, "No cache found by path: {}", cache_base_path);
+    return it->second->cache;
 }
 
 FileCachePtr FileCacheFactory::getOrCreate(
-    const std::string & cache_base_path, const FileCacheSettings & file_cache_settings)
+    const std::string & cache_base_path, const FileCacheSettings & file_cache_settings, const std::string & name)
 {
     std::lock_guard lock(mutex);
 
-    auto * cache_data = getImpl(cache_base_path, lock);
-    if (cache_data)
-        return cache_data->cache;
+    auto it = caches_by_path.find(cache_base_path);
+    if (it != caches_by_path.end())
+    {
+        caches_by_name.emplace(name, it->second);
+        return it->second->cache;
+    }
 
     auto cache = std::make_shared<LRUFileCache>(cache_base_path, file_cache_settings);
-    caches.emplace(cache_base_path, CacheData(cache, file_cache_settings));
+    FileCacheData result{cache, file_cache_settings};
+
+    auto cache_it = caches.insert(caches.end(), std::move(result));
+    caches_by_name.emplace(name, cache_it);
+    caches_by_path.emplace(cache_base_path, cache_it);
 
     return cache;
 }
+
+FileCacheFactory::FileCacheData FileCacheFactory::getByName(const std::string & name)
+{
+    std::lock_guard lock(mutex);
+
+    auto it = caches_by_name.find(name);
+    if (it == caches_by_name.end())
+        throw Exception(ErrorCodes::BAD_ARGUMENTS, "No cache found by name: {}", name);
+
+    return *it->second;
+}
+
+FileCacheFactory::CacheByName FileCacheFactory::getAllByName()
+{
+    std::lock_guard lock(mutex);
+    return caches_by_name;
+}
+
 }
@@ -6,6 +6,7 @@
 #include <boost/noncopyable.hpp>
 #include <unordered_map>
 #include <mutex>
+#include <list>
 
 namespace DB
 {
@@ -15,20 +16,22 @@ namespace DB
 */
 class FileCacheFactory final : private boost::noncopyable
 {
-    struct CacheData
+public:
+    struct FileCacheData
     {
         FileCachePtr cache;
         FileCacheSettings settings;
 
-        CacheData(FileCachePtr cache_, const FileCacheSettings & settings_) : cache(cache_), settings(settings_) {}
+        FileCacheData(FileCachePtr cache_, const FileCacheSettings & settings_) : cache(cache_), settings(settings_) {}
     };
 
-    using CacheByBasePath = std::unordered_map<std::string, CacheData>;
+    using Caches = std::list<FileCacheData>;
+    using CacheByBasePath = std::unordered_map<std::string, Caches::iterator>;
+    using CacheByName = std::unordered_map<std::string, Caches::iterator>;
 
-public:
     static FileCacheFactory & instance();
 
-    FileCachePtr getOrCreate(const std::string & cache_base_path, const FileCacheSettings & file_cache_settings);
+    FileCachePtr getOrCreate(const std::string & cache_base_path, const FileCacheSettings & file_cache_settings, const std::string & name);
 
     FileCachePtr get(const std::string & cache_base_path);
 
@@ -36,11 +39,16 @@ public:
 
     const FileCacheSettings & getSettings(const std::string & cache_base_path);
 
-private:
-    CacheData * getImpl(const std::string & cache_base_path, std::lock_guard<std::mutex> &);
+    FileCacheData getByName(const std::string & name);
+
+    CacheByName getAllByName();
 
+private:
     std::mutex mutex;
-    CacheByBasePath caches;
+    Caches caches;
+
+    CacheByBasePath caches_by_path;
+    CacheByName caches_by_name;
 };
 
 }
@@ -73,7 +73,7 @@ FileCachePtr getCachePtrForDisk(
     FileCacheSettings file_cache_settings;
     file_cache_settings.loadFromConfig(config, config_prefix);
 
-    auto cache = FileCacheFactory::instance().getOrCreate(cache_base_path, file_cache_settings);
+    auto cache = FileCacheFactory::instance().getOrCreate(cache_base_path, file_cache_settings, name);
     cache->initialize();
 
     auto * log = &Poco::Logger::get("Disk(" + name + ")");
|
|||||||
{
|
{
|
||||||
/// Google S3 implementation doesn't support batch delete
|
/// Google S3 implementation doesn't support batch delete
|
||||||
/// TODO: possibly we have to use Google SDK https://github.com/googleapis/google-cloud-cpp/tree/main/google/cloud/storage
|
/// TODO: possibly we have to use Google SDK https://github.com/googleapis/google-cloud-cpp/tree/main/google/cloud/storage
|
||||||
/// because looks like it miss a lot of features like:
|
/// because looks like it misses some features:
|
||||||
/// 1) batch delete
|
/// 1) batch delete (DeleteObjects)
|
||||||
/// 2) list_v2
|
/// 2) upload part copy (UploadPartCopy)
|
||||||
/// 3) multipart upload works differently
|
|
||||||
bool support_batch_delete{true};
|
bool support_batch_delete{true};
|
||||||
|
|
||||||
/// Y.Cloud S3 implementation support proxy for connection
|
/// Y.Cloud S3 implementation support proxy for connection
|
||||||
|
@@ -72,6 +72,10 @@ if (TARGET ch_contrib::llvm)
     target_link_libraries(clickhouse_functions PRIVATE ch_contrib::llvm)
 endif ()
 
+if (TARGET ch_contrib::base-x)
+    target_link_libraries(clickhouse_functions PRIVATE ch_contrib::base-x)
+endif()
+
 if (TARGET ch_contrib::base64)
     target_link_libraries(clickhouse_functions PRIVATE ch_contrib::base64)
 endif()
src/Functions/FunctionBase58Conversion.h (new file) — 256 lines
@@ -0,0 +1,256 @@
+#pragma once
+#include "config_functions.h"
+
+#if USE_BASEX
+#    include <Columns/ColumnConst.h>
+#    include <Common/MemorySanitizer.h>
+#    include <Columns/ColumnString.h>
+#    include <DataTypes/DataTypeString.h>
+#    include <Functions/FunctionFactory.h>
+#    include <Functions/FunctionHelpers.h>
+#    include <IO/WriteHelpers.h>
+#    include <base_x.hh>
+
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int ILLEGAL_COLUMN;
+    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
+    extern const int BAD_ARGUMENTS;
+}
+
+struct Base58Encode
+{
+    static constexpr auto name = "base58Encode";
+
+    static void process(const ColumnString & input, ColumnString::MutablePtr & dst_column, const std::string & alphabet, size_t input_rows_count)
+    {
+        auto & dst_data = dst_column->getChars();
+        auto & dst_offsets = dst_column->getOffsets();
+
+        /// Wikipedia states Base58 has efficiency of 73%, and we take 1.5 scale to avoid reallocation in most cases
+        size_t current_allocated_size = ceil(1.5 * input.getChars().size());
+
+        dst_data.resize(current_allocated_size);
+        dst_offsets.resize(input_rows_count);
+
+        const ColumnString::Offsets & src_offsets = input.getOffsets();
+
+        const auto * source = input.getChars().raw_data();
+        auto * dst = dst_data.data();
+        auto * dst_pos = dst;
+
+        size_t src_offset_prev = 0;
+        size_t processed_size = 0;
+
+        const auto & encoder = (alphabet == "bitcoin") ? Base58::bitcoin() :
+                               ((alphabet == "flickr") ? Base58::flickr() :
+                               ((alphabet == "ripple") ? Base58::ripple() :
+                                Base58::base58())); /// GMP
+
+        std::string encoded;
+        for (size_t row = 0; row < input_rows_count; ++row)
+        {
+            size_t srclen = src_offsets[row] - src_offset_prev - 1;
+            /// Why didn't we use char* here?
+            /// We don't know the size of the result string beforehand (it's not byte-to-byte encoding),
+            /// so we may need to do many resizes (the worst case -- we'll do it for each row)
+            /// This way we do exponential resizes and one final resize after the whole operation is complete
+            encoded.clear();
+            if (srclen)
+                try
+                {
+                    encoder.encode(encoded, source, srclen);
+                }
+                catch (const std::invalid_argument & e)
+                {
+                    throw Exception(e.what(), ErrorCodes::BAD_ARGUMENTS);
+                }
+                catch (const std::domain_error & e)
+                {
+                    throw Exception(e.what(), ErrorCodes::BAD_ARGUMENTS);
+                }
+            size_t outlen = encoded.size();
+
+            if (processed_size + outlen >= current_allocated_size)
+            {
+                current_allocated_size += current_allocated_size;
+                dst_data.resize(current_allocated_size);
+                auto processed_offset = dst_pos - dst;
+                dst = dst_data.data();
+                dst_pos = dst;
+                dst_pos += processed_offset;
+            }
+            std::memcpy(dst_pos, encoded.c_str(), ++outlen);
+
+            source += srclen + 1;
+            dst_pos += outlen;
+
+            dst_offsets[row] = dst_pos - dst;
+            src_offset_prev = src_offsets[row];
+            processed_size += outlen;
+        }
+
+        dst_data.resize(dst_pos - dst);
+    }
+};
+
+struct Base58Decode
+{
+    static constexpr auto name = "base58Decode";
+
+    static void process(const ColumnString & input, ColumnString::MutablePtr & dst_column, const std::string & alphabet, size_t input_rows_count)
+    {
+        auto & dst_data = dst_column->getChars();
+        auto & dst_offsets = dst_column->getOffsets();
+
+        /// We allocate probably even more than needed to avoid many resizes
+        size_t current_allocated_size = input.getChars().size();
+
+        dst_data.resize(current_allocated_size);
+        dst_offsets.resize(input_rows_count);
+
+        const ColumnString::Offsets & src_offsets = input.getOffsets();
+
+        const auto * source = input.getChars().raw_data();
+        auto * dst = dst_data.data();
+        auto * dst_pos = dst;
+
+        size_t src_offset_prev = 0;
+        size_t processed_size = 0;
+
+        const auto & decoder = (alphabet == "bitcoin") ? Base58::bitcoin() :
+                               ((alphabet == "flickr") ? Base58::flickr() :
+                               ((alphabet == "ripple") ? Base58::ripple() :
+                                Base58::base58()));
+
+        std::string decoded;
+        for (size_t row = 0; row < input_rows_count; ++row)
+        {
+            size_t srclen = src_offsets[row] - src_offset_prev - 1;
+            /// Why didn't we use char* here?
+            /// We don't know the size of the result string beforehand (it's not byte-to-byte encoding),
+            /// so we may need to do many resizes (the worst case -- we'll do it for each row)
+            /// This way we do exponential resizes and one final resize after the whole operation is complete
+            decoded.clear();
+            if (srclen)
+                try
+                {
+                    decoder.decode(decoded, source, srclen);
+                }
+                catch (const std::invalid_argument & e)
+                {
+                    throw Exception(e.what(), ErrorCodes::BAD_ARGUMENTS);
+                }
+                catch (const std::domain_error & e)
+                {
+                    throw Exception(e.what(), ErrorCodes::BAD_ARGUMENTS);
+                }
+            size_t outlen = decoded.size();
+
+            if (processed_size + outlen >= current_allocated_size)
+            {
+                current_allocated_size += current_allocated_size;
+                dst_data.resize(current_allocated_size);
+                auto processed_offset = dst_pos - dst;
+                dst = dst_data.data();
+                dst_pos = dst;
+                dst_pos += processed_offset;
+            }
+            std::memcpy(dst_pos, decoded.c_str(), ++outlen);
+
+            source += srclen + 1;
+            dst_pos += outlen;
+
+            dst_offsets[row] = dst_pos - dst;
+            src_offset_prev = src_offsets[row];
+            processed_size += outlen;
+        }
+
+        dst_data.resize(dst_pos - dst);
+    }
+};
+
+template <typename Func>
+class FunctionBase58Conversion : public IFunction
+{
+public:
+    static constexpr auto name = Func::name;
+
+    static FunctionPtr create(ContextPtr)
+    {
+        return std::make_shared<FunctionBase58Conversion>();
+    }
+
+    String getName() const override
+    {
+        return Func::name;
+    }
+
+    bool isVariadic() const override { return true; }
+
+    size_t getNumberOfArguments() const override { return 0; }
+
+    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
+
+    bool useDefaultImplementationForConstants() const override { return true; }
+
+    ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }
+
+    DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
+    {
+        if (arguments.size() != 1 && arguments.size() != 2)
+            throw Exception(
+                "Wrong number of arguments for function " + getName() + ": 1 or 2 expected.",
+                ErrorCodes::BAD_ARGUMENTS);
+
+        if (!isString(arguments[0].type))
+            throw Exception(
+                "Illegal type " + arguments[0].type->getName() + " of 1st argument of function " + getName() + ". Must be String.",
+                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+
+        if (arguments.size() == 2 && !isString(arguments[1].type))
+            throw Exception(
+                "Illegal type " + arguments[1].type->getName() + " of 2nd argument of function " + getName() + ". Must be String.",
+                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+
+        return std::make_shared<DataTypeString>();
+    }
+
+    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
+    {
+        const ColumnPtr column_string = arguments[0].column;
+        const ColumnString * input = checkAndGetColumn<ColumnString>(column_string.get());
+        if (!input)
+            throw Exception(
+                "Illegal column " + arguments[0].column->getName() + " of first argument of function " + getName() + ", must be String",
+                ErrorCodes::ILLEGAL_COLUMN);
+
+        std::string alphabet = "bitcoin";
+
+        if (arguments.size() == 2)
+        {
+            const auto * alphabet_column = checkAndGetColumn<ColumnConst>(arguments[1].column.get());
+
+            if (!alphabet_column)
+                throw Exception("Second argument for function " + getName() + " must be constant String", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+
+            alphabet = alphabet_column->getValue<DB::String>();
+            if (alphabet != "bitcoin" && alphabet != "ripple" && alphabet != "flickr" && alphabet != "gmp")
+                throw Exception("Second argument for function " + getName() + " must be 'bitcoin', 'ripple', 'gmp' or 'flickr'", ErrorCodes::ILLEGAL_COLUMN);
+        }
+
+        auto dst_column = ColumnString::create();
+
+        Func::process(*input, dst_column, alphabet, input_rows_count);
+
+        return dst_column;
+    }
+};
+}
+
+#endif
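A quick sanity check on the 1.5× pre-allocation in `Base58Encode::process` (my arithmetic, not from the source): each Base58 character carries $\log_2 58$ bits, so

$$\frac{\text{encoded bytes}}{\text{input bytes}} \approx \frac{8}{\log_2 58} = \frac{8}{5.858\ldots} \approx 1.37,$$

which matches the quoted ~73% efficiency ($1/1.37 \approx 0.73$) and stays safely below the 1.5 growth factor, so most inputs fit without entering the doubling-resize path.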
@@ -85,7 +85,7 @@ public:
     {
         if (!WhichDataType(arguments[0].type).isString())
             throw Exception(
-                "Illegal type " + arguments[0].type->getName() + " of 1 argument of function " + getName() + ". Must be String.",
+                "Illegal type " + arguments[0].type->getName() + " of 1st argument of function " + getName() + ". Must be String.",
                 ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
 
         return std::make_shared<DataTypeString>();
@@ -98,7 +98,7 @@ public:
 
         if (!input)
             throw Exception(
-                "Illegal column " + arguments[0].column->getName() + " of first argument of function " + getName(),
+                "Illegal column " + arguments[0].column->getName() + " of first argument of function " + getName() + ", must be of type String",
                 ErrorCodes::ILLEGAL_COLUMN);
 
         auto dst_column = ColumnString::create();
src/Functions/FunctionsBase58.cpp (new file) — 17 lines
@@ -0,0 +1,17 @@
+#include <Functions/FunctionBase58Conversion.h>
+#if USE_BASEX
+#include <Functions/FunctionFactory.h>
+
+namespace DB
+{
+void registerFunctionBase58Encode(FunctionFactory & factory)
+{
+    factory.registerFunction<FunctionBase58Conversion<Base58Encode>>();
+}
+
+void registerFunctionBase58Decode(FunctionFactory & factory)
+{
+    factory.registerFunction<FunctionBase58Conversion<Base58Decode>>();
+}
+}
+#endif
@@ -545,6 +545,8 @@ ColumnPtr FunctionAnyArityLogical<Impl, Name>::executeShortCircuit(ColumnsWithTy
     if (Name::name != NameAnd::name && Name::name != NameOr::name)
         throw Exception("Function " + getName() + " doesn't support short circuit execution", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
 
+    executeColumnIfNeeded(arguments[0]);
+
     /// Let's denote x_i' = maskedExecute(x_i, mask).
     /// 1) AND(x_0, x_1, x_2, ..., x_n)
     /// We will support mask_i = x_0 & x_1 & ... & x_i.
@@ -2,6 +2,7 @@
 
 // .h autogenerated by cmake!
 
+#cmakedefine01 USE_BASEX
 #cmakedefine01 USE_BASE64
 #cmakedefine01 USE_SIMDJSON
 #cmakedefine01 USE_RAPIDJSON
@@ -1,6 +1,9 @@
 if (TARGET ch_contrib::fastops)
     set(USE_FASTOPS 1)
 endif()
+if (TARGET ch_contrib::base-x)
+    set(USE_BASEX 1)
+endif()
 if (TARGET ch_contrib::base64)
     set(USE_BASE64 1)
 endif()
@@ -987,6 +987,8 @@ private:
         if (last_short_circuit_argument_index == -1)
             return;
 
+        executeColumnIfNeeded(arguments[0]);
+
         /// Check if condition is const or null to not create full mask from it.
         if ((isColumnConst(*arguments[0].column) || arguments[0].column->onlyNull()) && !arguments[0].column->empty())
         {
@@ -266,6 +266,8 @@ private:
         if (last_short_circuit_argument_index < 0)
             return;
 
+        executeColumnIfNeeded(arguments[0]);
+
         /// Let's denote x_i' = maskedExecute(x_i, mask).
         /// multiIf(x_0, y_0, x_1, y_1, x_2, y_2, ..., x_{n-1}, y_{n-1}, y_n)
         /// We will support mask_i = !x_0 & !x_1 & ... & !x_i
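Both hunks materialize the first argument up front with `executeColumnIfNeeded`, since under short-circuit evaluation the remaining arguments are executed lazily against a mask derived from it. The canonical case this machinery protects (an illustrative query; it relies on the `short_circuit_function_evaluation` setting, whose default is `enable`):

```sql
-- With lazy evaluation of the else-branch, intDiv(1, number) is never
-- computed for the row where number = 0, so no division-by-zero is thrown.
SELECT if(number = 0, 0, intDiv(1, number)) FROM numbers(5);
```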
@@ -49,6 +49,11 @@ void registerFunctionBase64Decode(FunctionFactory &);
 void registerFunctionTryBase64Decode(FunctionFactory &);
 #endif
 
+#if USE_BASEX
+void registerFunctionBase58Encode(FunctionFactory &);
+void registerFunctionBase58Decode(FunctionFactory &);
+#endif
+
 #if USE_NLP
 void registerFunctionStem(FunctionFactory &);
 void registerFunctionSynonyms(FunctionFactory &);
@@ -105,6 +110,11 @@ void registerFunctionsString(FunctionFactory & factory)
     registerFunctionTryBase64Decode(factory);
 #endif
 
+#if USE_BASEX
+    registerFunctionBase58Encode(factory);
+    registerFunctionBase58Decode(factory);
+#endif
+
 #if USE_NLP
     registerFunctionStem(factory);
     registerFunctionSynonyms(factory);
@@ -28,14 +28,6 @@ public:
 
     bool useDefaultImplementationForNothing() const override { return false; }
 
-    bool isShortCircuit(ShortCircuitSettings & settings, size_t /*number_of_arguments*/) const override
-    {
-        settings.enable_lazy_execution_for_first_argument = true;
-        settings.enable_lazy_execution_for_common_descendants_of_arguments = true;
-        settings.force_enable_lazy_execution = true;
-        return true;
-    }
-
     bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
 
     size_t getNumberOfArguments() const override
@@ -32,13 +32,6 @@ public:
 
     bool useDefaultImplementationForNothing() const override { return false; }
 
-    bool isShortCircuit(ShortCircuitSettings & settings, size_t /*number_of_arguments*/) const override
-    {
-        settings.enable_lazy_execution_for_first_argument = false;
-        settings.enable_lazy_execution_for_common_descendants_of_arguments = true;
-        settings.force_enable_lazy_execution = true;
-        return true;
-    }
-
     bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
 
@@ -615,8 +615,8 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti
         auto caches = FileCacheFactory::instance().getAll();
         for (const auto & [_, cache_data] : caches)
         {
-            new_values["FilesystemCacheBytes"] = cache_data.cache->getUsedCacheSize();
-            new_values["FilesystemCacheFiles"] = cache_data.cache->getFileSegmentsNum();
+            new_values["FilesystemCacheBytes"] = cache_data->cache->getUsedCacheSize();
+            new_values["FilesystemCacheFiles"] = cache_data->cache->getFileSegmentsNum();
         }
     }
 
src/Interpreters/InterpreterDescribeCacheQuery.cpp (new file) — 60 lines
@@ -0,0 +1,60 @@
+#include <Interpreters/InterpreterDescribeCacheQuery.h>
+#include <Interpreters/Context.h>
+#include <Processors/Sources/SourceFromSingleChunk.h>
+#include <Parsers/ASTDescribeCacheQuery.h>
+#include <DataTypes/DataTypesNumber.h>
+#include <DataTypes/DataTypeString.h>
+#include <Storages/ColumnsDescription.h>
+#include <Common/FileCacheFactory.h>
+#include <Common/IFileCache.h>
+#include <Access/Common/AccessFlags.h>
+#include <Core/Block.h>
+
+namespace DB
+{
+
+static Block getSampleBlock()
+{
+    ColumnsWithTypeAndName columns{
+        ColumnWithTypeAndName{std::make_shared<DataTypeUInt64>(), "max_size"},
+        ColumnWithTypeAndName{std::make_shared<DataTypeUInt64>(), "max_elements"},
+        ColumnWithTypeAndName{std::make_shared<DataTypeUInt64>(), "max_file_segment_size"},
+        ColumnWithTypeAndName{std::make_shared<DataTypeNumber<UInt8>>(), "cache_on_write_operations"},
+        ColumnWithTypeAndName{std::make_shared<DataTypeNumber<UInt8>>(), "enable_cache_hits_threshold"},
+        ColumnWithTypeAndName{std::make_shared<DataTypeUInt64>(), "current_size"},
+        ColumnWithTypeAndName{std::make_shared<DataTypeUInt64>(), "current_elements"},
+        ColumnWithTypeAndName{std::make_shared<DataTypeString>(), "path"}
+    };
+    return Block(columns);
+}
+
+BlockIO InterpreterDescribeCacheQuery::execute()
+{
+    getContext()->checkAccess(AccessType::SHOW_CACHES);
+
+    const auto & ast = query_ptr->as<ASTDescribeCacheQuery &>();
+    Block sample_block = getSampleBlock();
+    MutableColumns res_columns = sample_block.cloneEmptyColumns();
+
+    auto cache_data = FileCacheFactory::instance().getByName(ast.cache_name);
+    const auto & settings = cache_data.settings;
+    const auto & cache = cache_data.cache;
+
+    res_columns[0]->insert(settings.max_size);
+    res_columns[1]->insert(settings.max_elements);
+    res_columns[2]->insert(settings.max_file_segment_size);
+    res_columns[3]->insert(settings.cache_on_write_operations);
+    res_columns[4]->insert(settings.enable_cache_hits_threshold);
+    res_columns[5]->insert(cache->getUsedCacheSize());
+    res_columns[6]->insert(cache->getFileSegmentsNum());
+    res_columns[7]->insert(cache->getBasePath());
+
+    BlockIO res;
+    size_t num_rows = res_columns[0]->size();
+    auto source = std::make_shared<SourceFromSingleChunk>(sample_block, Chunk(std::move(res_columns), num_rows));
+    res.pipeline = QueryPipeline(std::move(source));
+
+    return res;
+}
+
+}
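Together with the parser and interpreter-factory wiring below, this interpreter answers a cache-introspection statement by name, via the new `FileCacheFactory::getByName`. A hypothetical session, assuming the syntax added elsewhere in this change is `DESCRIBE CACHE '<name>'` (the parser itself is not part of this excerpt, and the cache name is illustrative):

```sql
DESCRIBE CACHE 's3_disk_cache';
-- One row with the columns built in getSampleBlock():
-- max_size, max_elements, max_file_segment_size, cache_on_write_operations,
-- enable_cache_hits_threshold, current_size, current_elements, path
```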
src/Interpreters/InterpreterDescribeCacheQuery.h (new file) — 22 lines
@@ -0,0 +1,22 @@
+#pragma once
+
+#include <Interpreters/IInterpreter.h>
+#include <Parsers/IAST_fwd.h>
+
+
+namespace DB
+{
+
+class InterpreterDescribeCacheQuery : public IInterpreter, WithContext
+{
+public:
+    InterpreterDescribeCacheQuery(const ASTPtr & query_ptr_, ContextPtr context_) : WithContext(context_), query_ptr(query_ptr_) {}
+
+    BlockIO execute() override;
+
+private:
+    ASTPtr query_ptr;
+};
+
+
+}
@@ -35,6 +35,7 @@
 #include <Parsers/Access/ASTShowCreateAccessEntityQuery.h>
 #include <Parsers/Access/ASTShowGrantsQuery.h>
 #include <Parsers/Access/ASTShowPrivilegesQuery.h>
+#include <Parsers/ASTDescribeCacheQuery.h>

 #include <Interpreters/Context.h>
 #include <Interpreters/InterpreterAlterQuery.h>
@@ -43,6 +44,7 @@
 #include <Interpreters/InterpreterCreateFunctionQuery.h>
 #include <Interpreters/InterpreterCreateQuery.h>
 #include <Interpreters/InterpreterDescribeQuery.h>
+#include <Interpreters/InterpreterDescribeCacheQuery.h>
 #include <Interpreters/InterpreterDropFunctionQuery.h>
 #include <Interpreters/InterpreterDropQuery.h>
 #include <Interpreters/InterpreterExistsQuery.h>
@@ -200,6 +202,10 @@ std::unique_ptr<IInterpreter> InterpreterFactory::get(ASTPtr & query, ContextMut
     {
         return std::make_unique<InterpreterDescribeQuery>(query, context);
     }
+    else if (query->as<ASTDescribeCacheQuery>())
+    {
+        return std::make_unique<InterpreterDescribeCacheQuery>(query, context);
+    }
     else if (query->as<ASTExplainQuery>())
     {
         return std::make_unique<InterpreterExplainQuery>(query, context);
@@ -1176,7 +1176,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional<P
             query_plan.getCurrentDataStream(),
             expressions.prewhere_info->row_level_filter,
             expressions.prewhere_info->row_level_column_name,
-            false);
+            true);

         row_level_filter_step->setStepDescription("Row-level security filter (PREWHERE)");
         query_plan.addStep(std::move(row_level_filter_step));
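The only functional change in this hunk is the last constructor argument flipping from false to true. Read against the preceding arguments (the row-level filter DAG and its column name), this is presumably the remove-filter-column flag of the filter step being built, so the internal row-level security column is now dropped from the stream once the filter has run. A hedged reading, with assumed parameter names:

    // FilterStep(input_stream, row_level_filter_dag, row_level_column_name,
    //            /* remove_filter_column = */ true);   // names assumed, not from this diff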
@@ -5,6 +5,11 @@
 #include <Interpreters/DatabaseCatalog.h>
 #include <Interpreters/executeQuery.h>
 #include <Interpreters/InterpreterShowTablesQuery.h>
+#include <DataTypes/DataTypeString.h>
+#include <Storages/ColumnsDescription.h>
+#include <Common/FileCacheFactory.h>
+#include <Processors/Sources/SourceFromSingleChunk.h>
+#include <Access/Common/AccessFlags.h>
 #include <Common/typeid_cast.h>
 #include <IO/Operators.h>

@@ -142,6 +147,24 @@ String InterpreterShowTablesQuery::getRewrittenQuery()

 BlockIO InterpreterShowTablesQuery::execute()
 {
+    const auto & query = query_ptr->as<ASTShowTablesQuery &>();
+    if (query.caches)
+    {
+        getContext()->checkAccess(AccessType::SHOW_CACHES);
+
+        Block sample_block{ColumnWithTypeAndName(std::make_shared<DataTypeString>(), "Caches")};
+        MutableColumns res_columns = sample_block.cloneEmptyColumns();
+        auto caches = FileCacheFactory::instance().getAllByName();
+        for (const auto & [name, _] : caches)
+            res_columns[0]->insert(name);
+        BlockIO res;
+        size_t num_rows = res_columns[0]->size();
+        auto source = std::make_shared<SourceFromSingleChunk>(sample_block, Chunk(std::move(res_columns), num_rows));
+        res.pipeline = QueryPipeline(std::move(source));
+
+        return res;
+    }
+
     return executeQuery(getRewrittenQuery(), getContext(), true);
 }

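With this early return in place, SHOW CACHES bypasses the query-rewriting path used by the other SHOW forms and directly returns a single "Caches" column with one row per registered cache. The structured binding in the loop implies that getAllByName() hands back a name-keyed map, roughly:

    // Assumed signature, inferred from `for (const auto & [name, _] : caches)` above.
    // std::unordered_map<std::string, FileCacheData> FileCacheFactory::getAllByName();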
@@ -317,7 +317,7 @@ BlockIO InterpreterSystemQuery::execute()
         {
             auto caches = FileCacheFactory::instance().getAll();
             for (const auto & [_, cache_data] : caches)
-                cache_data.cache->removeIfReleasable(/* remove_persistent_files */false);
+                cache_data->cache->removeIfReleasable(/* remove_persistent_files */false);
         }
         else
         {
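This hunk makes the same mechanical edit as the FilesystemCache metrics hunk at the top of this section: cache_data.cache becomes cache_data->cache. Together they suggest that FileCacheFactory::getAll() now returns pointer-like handles to its per-cache data rather than values, roughly:

    // Assumed change, inferred from the '.'-to-'->' edits; not stated explicitly in this diff.
    // before: std::unordered_map<String, FileCacheData>    getAll();
    // after:  std::unordered_map<String, FileCacheDataPtr> getAll();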
src/Parsers/ASTDescribeCacheQuery.h (new file)
@@ -0,0 +1,28 @@
+#pragma once
+#include <Parsers/ASTQueryWithOutput.h>
+
+namespace DB
+{
+
+class ASTDescribeCacheQuery : public ASTQueryWithOutput
+{
+public:
+    String cache_name;
+
+    String getID(char) const override { return "DescribeCacheQuery"; }
+
+    ASTPtr clone() const override
+    {
+        auto res = std::make_shared<ASTDescribeCacheQuery>(*this);
+        cloneOutputOptions(*res);
+        return res;
+    }
+
+protected:
+    void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override
+    {
+        settings.ostr << (settings.hilite ? hilite_keyword : "") << "DESCRIBE CACHE" << (settings.hilite ? hilite_none : "") << " " << cache_name;
+    }
+};
+
+}
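One asymmetry is worth flagging: the parser added later in this diff consumes the cache name as a quoted string literal, while formatQueryImpl() above prints it back without quotes. Formatting and re-parsing therefore does not round-trip: DESCRIBE CACHE 'local_cache' formats to DESCRIBE CACHE local_cache, which the parser as written rejects. A hedged sketch of a check that would expose this (helper names assumed from common ClickHouse parser-test code, cache name hypothetical):

    ParserDescribeCacheQuery parser;
    ASTPtr ast = parseQuery(parser, "DESCRIBE CACHE 'local_cache'", 0, 0);
    String text = queryToString(ast);   // "DESCRIBE CACHE local_cache" -- unquoted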
@@ -55,6 +55,12 @@ void ASTShowTablesQuery::formatQueryImpl(const FormatSettings & settings, Format
         settings.ostr << (settings.hilite ? hilite_keyword : "") << "SHOW CLUSTER" << (settings.hilite ? hilite_none : "");
         settings.ostr << " " << backQuoteIfNeed(cluster_str);
     }
+    else if (caches)
+    {
+        settings.ostr << (settings.hilite ? hilite_keyword : "") << "SHOW CACHES" << (settings.hilite ? hilite_none : "");
+        formatLike(settings);
+        formatLimit(settings, state, frame);
+    }
     else if (m_settings)
     {
         settings.ostr << (settings.hilite ? hilite_keyword : "") << "SHOW " << (changed ? "CHANGED " : "") << "SETTINGS" <<
@@ -9,7 +9,7 @@ namespace DB
 {


-/** Query SHOW TABLES or SHOW DATABASES or SHOW CLUSTERS
+/** Query SHOW TABLES or SHOW DATABASES or SHOW CLUSTERS or SHOW CACHES
   */
 class ASTShowTablesQuery : public ASTQueryWithOutput
 {
@@ -21,6 +21,7 @@ public:
     bool m_settings{false};
     bool changed{false};
     bool temporary{false};
+    bool caches{false};

     String cluster_str;
     String from;
src/Parsers/ParserDescribeCacheQuery.cpp (new file)
@@ -0,0 +1,34 @@
+#include <Parsers/ParserDescribeCacheQuery.h>
+#include <Parsers/ASTDescribeCacheQuery.h>
+#include <Parsers/CommonParsers.h>
+#include <Parsers/ASTLiteral.h>
+
+namespace DB
+{
+
+
+bool ParserDescribeCacheQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
+{
+    ParserKeyword p_describe("DESCRIBE");
+    ParserKeyword p_desc("DESC");
+    ParserKeyword p_cache("CACHE");
+    ParserLiteral p_cache_name;
+
+    if ((!p_describe.ignore(pos, expected) && !p_desc.ignore(pos, expected))
+        || !p_cache.ignore(pos, expected))
+        return false;
+
+    auto query = std::make_shared<ASTDescribeCacheQuery>();
+
+    ASTPtr ast;
+    if (!p_cache_name.parse(pos, ast, expected))
+        return false;
+
+    query->cache_name = ast->as<ASTLiteral>()->value.safeGet<String>();
+    node = query;
+
+    return true;
+}
+
+
+}
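The grammar accepted here is (DESCRIBE | DESC) CACHE followed by a single literal, and the safeGet<String>() call constrains that literal to a string in practice:

    // Illustration of the extraction step above (values hypothetical):
    //   DESCRIBE CACHE 'local_cache'  ->  ASTLiteral{Field("local_cache")}  ->  cache_name = "local_cache"
    //   DESCRIBE CACHE 42             ->  parses as a literal, then safeGet<String>() throws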
src/Parsers/ParserDescribeCacheQuery.h (new file)
@@ -0,0 +1,20 @@
+#pragma once
+
+
+#include <Parsers/IParserBase.h>
+#include <Parsers/ExpressionElementParsers.h>
+
+
+namespace DB
+{
+
+/** Query (DESCRIBE | DESC) CACHE 'cache_name'
+  */
+class ParserDescribeCacheQuery : public IParserBase
+{
+protected:
+    const char * getName() const override { return "DESCRIBE CACHE query"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
+};
+
+}
@@ -17,6 +17,7 @@
 #include <Parsers/ParserShowTablesQuery.h>
 #include <Parsers/ParserTablePropertiesQuery.h>
 #include <Parsers/ParserWatchQuery.h>
+#include <Parsers/ParserDescribeCacheQuery.h>
 #include <Parsers/QueryWithOutputSettingsPushDownVisitor.h>
 #include <Parsers/Access/ParserShowAccessEntitiesQuery.h>
 #include <Parsers/Access/ParserShowAccessQuery.h>
@@ -35,6 +36,7 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
     ParserSelectWithUnionQuery select_p;
     ParserTablePropertiesQuery table_p;
     ParserDescribeTableQuery describe_table_p;
+    ParserDescribeCacheQuery describe_cache_p;
     ParserShowProcesslistQuery show_processlist_p;
     ParserCreateQuery create_p;
     ParserAlterQuery alter_p;
@@ -59,6 +61,7 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
         || show_create_access_entity_p.parse(pos, query, expected) /// should be before `show_tables_p`
         || show_tables_p.parse(pos, query, expected)
         || table_p.parse(pos, query, expected)
+        || describe_cache_p.parse(pos, query, expected)
         || describe_table_p.parse(pos, query, expected)
         || show_processlist_p.parse(pos, query, expected)
         || create_p.parse(pos, query, expected)
@@ -24,6 +24,7 @@ bool ParserShowTablesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
     ParserKeyword s_clusters("CLUSTERS");
     ParserKeyword s_cluster("CLUSTER");
     ParserKeyword s_dictionaries("DICTIONARIES");
+    ParserKeyword s_caches("CACHES");
     ParserKeyword s_settings("SETTINGS");
     ParserKeyword s_changed("CHANGED");
     ParserKeyword s_from("FROM");
@@ -91,6 +92,10 @@ bool ParserShowTablesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
             return false;
         }
     }
+    else if (s_caches.ignore(pos, expected))
+    {
+        query->caches = true;
+    }
     else if (s_cluster.ignore(pos, expected))
     {
         query->cluster = true;
@@ -297,6 +297,11 @@ std::string PrewhereInfo::dump() const
     WriteBufferFromOwnString ss;
     ss << "PrewhereDagInfo\n";

+    if (row_level_filter)
+    {
+        ss << "row_level_filter " << row_level_filter->dumpDAG() << "\n";
+    }
+
     if (prewhere_actions)
     {
         ss << "prewhere_actions " << prewhere_actions->dumpDAG() << "\n";
@@ -89,8 +89,6 @@ protected:
     using ColumnPosition = std::optional<size_t>;
     ColumnPosition findColumnForOffsets(const String & column_name) const;

-    friend class MergeTreeRangeReader::DelayedStream;
-
 private:
     /// Alter conversions, which must be applied on fly if required
     MergeTreeData::AlterConversions alter_conversions;
@@ -264,7 +264,7 @@ bool MergeFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrite
     /// Task is not needed
     merge_task.reset();

-    storage.merger_mutator.renameMergedTemporaryPart(part, parts, NO_TRANSACTION_PTR, transaction_ptr.get());
+    storage.merger_mutator.renameMergedTemporaryPart(part, parts, NO_TRANSACTION_PTR, *transaction_ptr);

     try
     {
@@ -115,7 +115,11 @@ void MergePlainMergeTreeTask::prepare()
 void MergePlainMergeTreeTask::finish()
 {
     new_part = merge_task->getFuture().get();
-    storage.merger_mutator.renameMergedTemporaryPart(new_part, future_part->parts, txn, nullptr);
+
+    MergeTreeData::Transaction transaction(storage, txn.get());
+    storage.merger_mutator.renameMergedTemporaryPart(new_part, future_part->parts, txn, transaction);
+    transaction.commit();
+
     write_part_log({});
     storage.incrementMergedPartsProfileEvent(new_part->getType());
 }
@@ -74,14 +74,27 @@ MergeTreeBaseSelectProcessor::MergeTreeBaseSelectProcessor(
         prewhere_actions = std::make_unique<PrewhereExprInfo>();

         if (prewhere_info->row_level_filter)
-            prewhere_actions->row_level_filter = std::make_shared<ExpressionActions>(prewhere_info->row_level_filter, actions_settings);
-
-        prewhere_actions->prewhere_actions = std::make_shared<ExpressionActions>(prewhere_info->prewhere_actions, actions_settings);
-
-        prewhere_actions->row_level_column_name = prewhere_info->row_level_column_name;
-        prewhere_actions->prewhere_column_name = prewhere_info->prewhere_column_name;
-        prewhere_actions->remove_prewhere_column = prewhere_info->remove_prewhere_column;
-        prewhere_actions->need_filter = prewhere_info->need_filter;
+        {
+            PrewhereExprStep row_level_filter_step
+            {
+                .actions = std::make_shared<ExpressionActions>(prewhere_info->row_level_filter, actions_settings),
+                .column_name = prewhere_info->row_level_column_name,
+                .remove_column = true,
+                .need_filter = true
+            };
+
+            prewhere_actions->steps.emplace_back(std::move(row_level_filter_step));
+        }
+
+        PrewhereExprStep prewhere_step
+        {
+            .actions = std::make_shared<ExpressionActions>(prewhere_info->prewhere_actions, actions_settings),
+            .column_name = prewhere_info->prewhere_column_name,
+            .remove_column = prewhere_info->remove_prewhere_column,
+            .need_filter = prewhere_info->need_filter
+        };
+
+        prewhere_actions->steps.emplace_back(std::move(prewhere_step));
     }
 }
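The designated initializers above pin down the per-step structure this commit builds PREWHERE around. For orientation, the field list implied by the usage (inferred, not quoted from the header):

    // Assumed definition of a single PREWHERE step.
    struct PrewhereExprStep
    {
        ExpressionActionsPtr actions;
        String column_name;
        bool remove_column = false;
        bool need_filter = false;
    };
    // PrewhereExprInfo carries std::vector<PrewhereExprStep> steps, executed in order:
    // the row-level security filter first, then the PREWHERE condition itself.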
@@ -204,30 +217,78 @@ Chunk MergeTreeBaseSelectProcessor::generate()

 void MergeTreeBaseSelectProcessor::initializeRangeReaders(MergeTreeReadTask & current_task)
 {
+    MergeTreeRangeReader* prev_reader = nullptr;
+    bool last_reader = false;
+
     if (prewhere_info)
     {
-        if (reader->getColumns().empty())
-        {
-            current_task.range_reader = MergeTreeRangeReader(pre_reader.get(), nullptr, prewhere_actions.get(), true, non_const_virtual_column_names);
-        }
-        else
-        {
-            MergeTreeRangeReader * pre_reader_ptr = nullptr;
-            if (pre_reader != nullptr)
-            {
-                current_task.pre_range_reader = MergeTreeRangeReader(pre_reader.get(), nullptr, prewhere_actions.get(), false, non_const_virtual_column_names);
-                pre_reader_ptr = &current_task.pre_range_reader;
-            }
-            current_task.range_reader = MergeTreeRangeReader(reader.get(), pre_reader_ptr, nullptr, true, non_const_virtual_column_names);
+        if (prewhere_actions->steps.size() != pre_reader_for_step.size())
+            throw Exception(ErrorCodes::LOGICAL_ERROR,
+                "PREWHERE steps count mismatch, actions: {}, readers: {}",
+                prewhere_actions->steps.size(), pre_reader_for_step.size());
+
+        for (size_t i = 0; i < prewhere_actions->steps.size(); ++i)
+        {
+            last_reader = reader->getColumns().empty() && (i + 1 == prewhere_actions->steps.size());
+
+            current_task.pre_range_readers.push_back(
+                MergeTreeRangeReader(pre_reader_for_step[i].get(), prev_reader, &prewhere_actions->steps[i], last_reader, non_const_virtual_column_names));
+
+            prev_reader = &current_task.pre_range_readers.back();
         }
     }
+
+    if (!last_reader)
+    {
+        current_task.range_reader = MergeTreeRangeReader(reader.get(), prev_reader, nullptr, true, non_const_virtual_column_names);
+    }
     else
     {
-        current_task.range_reader = MergeTreeRangeReader(reader.get(), nullptr, nullptr, true, non_const_virtual_column_names);
+        /// If all columns are read by pre_range_readers than move last pre_range_reader into range_reader
+        current_task.range_reader = std::move(current_task.pre_range_readers.back());
+        current_task.pre_range_readers.pop_back();
     }
 }

+static UInt64 estimateNumRows(const MergeTreeReadTask & current_task, UInt64 current_preferred_block_size_bytes,
+    UInt64 current_max_block_size_rows, UInt64 current_preferred_max_column_in_block_size_bytes, double min_filtration_ratio)
+{
+    const MergeTreeRangeReader & current_reader = current_task.range_reader;
+
+    if (!current_task.size_predictor)
+        return static_cast<size_t>(current_max_block_size_rows);
+
+    /// Calculates number of rows will be read using preferred_block_size_bytes.
+    /// Can't be less than avg_index_granularity.
+    size_t rows_to_read = current_task.size_predictor->estimateNumRows(current_preferred_block_size_bytes);
+    if (!rows_to_read)
+        return rows_to_read;
+    auto total_row_in_current_granule = current_reader.numRowsInCurrentGranule();
+    rows_to_read = std::max(total_row_in_current_granule, rows_to_read);
+
+    if (current_preferred_max_column_in_block_size_bytes)
+    {
+        /// Calculates number of rows will be read using preferred_max_column_in_block_size_bytes.
+        auto rows_to_read_for_max_size_column
+            = current_task.size_predictor->estimateNumRowsForMaxSizeColumn(current_preferred_max_column_in_block_size_bytes);
+        double filtration_ratio = std::max(min_filtration_ratio, 1.0 - current_task.size_predictor->filtered_rows_ratio);
+        auto rows_to_read_for_max_size_column_with_filtration
+            = static_cast<size_t>(rows_to_read_for_max_size_column / filtration_ratio);
+
+        /// If preferred_max_column_in_block_size_bytes is used, number of rows to read can be less than current_index_granularity.
+        rows_to_read = std::min(rows_to_read, rows_to_read_for_max_size_column_with_filtration);
+    }
+
+    auto unread_rows_in_current_granule = current_reader.numPendingRowsInCurrentGranule();
+    if (unread_rows_in_current_granule >= rows_to_read)
+        return rows_to_read;
+
+    const MergeTreeIndexGranularity & index_granularity = current_task.data_part->index_granularity;
+
+    return index_granularity.countMarksForRows(current_reader.currentMark(), rows_to_read, current_reader.numReadRowsInCurrentGranule());
+}
+
+
 Chunk MergeTreeBaseSelectProcessor::readFromPartImpl()
 {
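initializeRangeReaders() now builds a chain instead of at most one pre-reader: each PREWHERE step gets its own MergeTreeRangeReader wired to the previous one through prev_reader, and the main reader for the remaining columns is appended last unless the final pre-reader already produced everything. Schematically, for two steps plus leftover columns:

    // pre_range_readers: [ RR(step 0, prev = nullptr), RR(step 1, prev = &RR0) ]
    // range_reader:        RR(main reader, prev = &RR1)
    // If reader->getColumns().empty(), RR1 is instead moved into range_reader.

The deque used for pre_range_readers in the header changes further down exists precisely so these prev pointers stay valid while new readers are appended.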
@@ -237,45 +298,10 @@ Chunk MergeTreeBaseSelectProcessor::readFromPartImpl()
     const UInt64 current_max_block_size_rows = max_block_size_rows;
     const UInt64 current_preferred_block_size_bytes = preferred_block_size_bytes;
     const UInt64 current_preferred_max_column_in_block_size_bytes = preferred_max_column_in_block_size_bytes;
-    const MergeTreeIndexGranularity & index_granularity = task->data_part->index_granularity;
     const double min_filtration_ratio = 0.00001;

-    auto estimate_num_rows = [current_preferred_block_size_bytes, current_max_block_size_rows,
-        &index_granularity, current_preferred_max_column_in_block_size_bytes, min_filtration_ratio](
-        MergeTreeReadTask & current_task, MergeTreeRangeReader & current_reader)
-    {
-        if (!current_task.size_predictor)
-            return static_cast<size_t>(current_max_block_size_rows);
-
-        /// Calculates number of rows will be read using preferred_block_size_bytes.
-        /// Can't be less than avg_index_granularity.
-        size_t rows_to_read = current_task.size_predictor->estimateNumRows(current_preferred_block_size_bytes);
-        if (!rows_to_read)
-            return rows_to_read;
-        auto total_row_in_current_granule = current_reader.numRowsInCurrentGranule();
-        rows_to_read = std::max(total_row_in_current_granule, rows_to_read);
-
-        if (current_preferred_max_column_in_block_size_bytes)
-        {
-            /// Calculates number of rows will be read using preferred_max_column_in_block_size_bytes.
-            auto rows_to_read_for_max_size_column
-                = current_task.size_predictor->estimateNumRowsForMaxSizeColumn(current_preferred_max_column_in_block_size_bytes);
-            double filtration_ratio = std::max(min_filtration_ratio, 1.0 - current_task.size_predictor->filtered_rows_ratio);
-            auto rows_to_read_for_max_size_column_with_filtration
-                = static_cast<size_t>(rows_to_read_for_max_size_column / filtration_ratio);
-
-            /// If preferred_max_column_in_block_size_bytes is used, number of rows to read can be less than current_index_granularity.
-            rows_to_read = std::min(rows_to_read, rows_to_read_for_max_size_column_with_filtration);
-        }
-
-        auto unread_rows_in_current_granule = current_reader.numPendingRowsInCurrentGranule();
-        if (unread_rows_in_current_granule >= rows_to_read)
-            return rows_to_read;
-
-        return index_granularity.countMarksForRows(current_reader.currentMark(), rows_to_read, current_reader.numReadRowsInCurrentGranule());
-    };
-
-    UInt64 recommended_rows = estimate_num_rows(*task, task->range_reader);
+    UInt64 recommended_rows = estimateNumRows(*task, current_preferred_block_size_bytes,
+        current_max_block_size_rows, current_preferred_max_column_in_block_size_bytes, min_filtration_ratio);
     UInt64 rows_to_read = std::max(static_cast<UInt64>(1), std::min(current_max_block_size_rows, recommended_rows));

     auto read_result = task->range_reader.read(rows_to_read, task->mark_ranges);
@@ -602,9 +628,12 @@ std::unique_ptr<MergeTreeBlockSizePredictor> MergeTreeBaseSelectProcessor::getSi
     const Block & sample_block)
 {
     const auto & required_column_names = task_columns.columns.getNames();
-    const auto & required_pre_column_names = task_columns.pre_columns.getNames();
     NameSet complete_column_names(required_column_names.begin(), required_column_names.end());
-    complete_column_names.insert(required_pre_column_names.begin(), required_pre_column_names.end());
+    for (const auto & pre_columns_per_step : task_columns.pre_columns)
+    {
+        const auto & required_pre_column_names = pre_columns_per_step.getNames();
+        complete_column_names.insert(required_pre_column_names.begin(), required_pre_column_names.end());
+    }

     return std::make_unique<MergeTreeBlockSizePredictor>(
         data_part, Names(complete_column_names.begin(), complete_column_names.end()), sample_block);
@@ -115,7 +115,7 @@ protected:

     using MergeTreeReaderPtr = std::unique_ptr<IMergeTreeReader>;
     MergeTreeReaderPtr reader;
-    MergeTreeReaderPtr pre_reader;
+    std::vector<MergeTreeReaderPtr> pre_reader_for_step;

     MergeTreeReadTaskPtr task;

@@ -5,6 +5,9 @@
 #include <Common/checkStackSize.h>
 #include <Common/typeid_cast.h>
 #include <Columns/ColumnConst.h>
+#include <IO/WriteBufferFromString.h>
+#include <IO/Operators.h>
+
 #include <unordered_set>


@@ -131,12 +134,12 @@ NameSet injectRequiredColumns(

 MergeTreeReadTask::MergeTreeReadTask(
     const MergeTreeData::DataPartPtr & data_part_, const MarkRanges & mark_ranges_, size_t part_index_in_query_,
-    const Names & ordered_names_, const NameSet & column_name_set_, const NamesAndTypesList & columns_,
-    const NamesAndTypesList & pre_columns_, bool remove_prewhere_column_, bool should_reorder_,
+    const Names & ordered_names_, const NameSet & column_name_set_, const MergeTreeReadTaskColumns & task_columns_,
+    bool remove_prewhere_column_,
     MergeTreeBlockSizePredictorPtr && size_predictor_)
     : data_part{data_part_}, mark_ranges{mark_ranges_}, part_index_in_query{part_index_in_query_},
-    ordered_names{ordered_names_}, column_name_set{column_name_set_}, columns{columns_}, pre_columns{pre_columns_},
-    remove_prewhere_column{remove_prewhere_column_}, should_reorder{should_reorder_}, size_predictor{std::move(size_predictor_)}
+    ordered_names{ordered_names_}, column_name_set{column_name_set_}, task_columns{task_columns_},
+    remove_prewhere_column{remove_prewhere_column_}, size_predictor{std::move(size_predictor_)}
 {
 }

@@ -276,34 +279,40 @@ MergeTreeReadTaskColumns getReadTaskColumns(
     Names pre_column_names;

     /// inject columns required for defaults evaluation
-    bool should_reorder = !injectRequiredColumns(
-        storage, storage_snapshot, data_part, with_subcolumns, column_names).empty();
+    injectRequiredColumns(
+        storage, storage_snapshot, data_part, with_subcolumns, column_names);
+
+    MergeTreeReadTaskColumns result;
+    auto options = GetColumnsOptions(GetColumnsOptions::All).withExtendedObjects();
+    if (with_subcolumns)
+        options.withSubcolumns();

     if (prewhere_info)
     {
-        pre_column_names = prewhere_info->prewhere_actions->getRequiredColumnsNames();
+        NameSet pre_name_set;
+
+        /// Add column reading steps:
+        /// 1. Columns for row level filter
         if (prewhere_info->row_level_filter)
         {
-            NameSet names(pre_column_names.begin(), pre_column_names.end());
-
-            for (auto & name : prewhere_info->row_level_filter->getRequiredColumnsNames())
-            {
-                if (!names.contains(name))
-                    pre_column_names.push_back(name);
-            }
+            Names row_filter_column_names = prewhere_info->row_level_filter->getRequiredColumnsNames();
+            result.pre_columns.push_back(storage_snapshot->getColumnsByNames(options, row_filter_column_names));
+            pre_name_set.insert(row_filter_column_names.begin(), row_filter_column_names.end());
         }

-        if (pre_column_names.empty())
-            pre_column_names.push_back(column_names[0]);
+        /// 2. Columns for prewhere
+        Names all_pre_column_names = prewhere_info->prewhere_actions->getRequiredColumnsNames();

         const auto injected_pre_columns = injectRequiredColumns(
-            storage, storage_snapshot, data_part, with_subcolumns, pre_column_names);
+            storage, storage_snapshot, data_part, with_subcolumns, all_pre_column_names);

-        if (!injected_pre_columns.empty())
-            should_reorder = true;
-
-        const NameSet pre_name_set(pre_column_names.begin(), pre_column_names.end());
+        for (const auto & name : all_pre_column_names)
+        {
+            if (pre_name_set.contains(name))
+                continue;
+            pre_column_names.push_back(name);
+            pre_name_set.insert(name);
+        }

         Names post_column_names;
         for (const auto & name : column_names)
@@ -313,17 +322,23 @@ MergeTreeReadTaskColumns getReadTaskColumns(
         column_names = post_column_names;
     }

-    MergeTreeReadTaskColumns result;
-    NamesAndTypesList all_columns;
-
-    auto options = GetColumnsOptions(GetColumnsOptions::All).withExtendedObjects();
-    if (with_subcolumns)
-        options.withSubcolumns();
-
-    result.pre_columns = storage_snapshot->getColumnsByNames(options, pre_column_names);
+    result.pre_columns.push_back(storage_snapshot->getColumnsByNames(options, pre_column_names));
+
+    /// 3. Rest of the requested columns
     result.columns = storage_snapshot->getColumnsByNames(options, column_names);
-    result.should_reorder = should_reorder;
     return result;
 }

+
+std::string MergeTreeReadTaskColumns::dump() const
+{
+    WriteBufferFromOwnString s;
+    for (size_t i = 0; i < pre_columns.size(); ++i)
+    {
+        s << "STEP " << i << ": " << pre_columns[i].toString() << "\n";
+    }
+    s << "COLUMNS: " << columns.toString() << "\n";
+    return s.str();
+}
+
 }
@@ -30,6 +30,16 @@ NameSet injectRequiredColumns(
     Names & columns);


+struct MergeTreeReadTaskColumns
+{
+    /// column names to read during WHERE
+    NamesAndTypesList columns;
+    /// column names to read during each PREWHERE step
+    std::vector<NamesAndTypesList> pre_columns;
+
+    std::string dump() const;
+};
+
 /// A batch of work for MergeTreeThreadSelectProcessor
 struct MergeTreeReadTask
 {
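pre_columns turning from a single NamesAndTypesList into a vector is the data-structure half of the multi-step PREWHERE change: entry i lists the columns step i has to read. dump() then emits one STEP line per entry, along the lines of (column names hypothetical):

    // STEP 0: row_level_filter_col UInt8
    // STEP 1: prewhere_col UInt64
    // COLUMNS: x UInt64, y String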
@@ -43,39 +53,27 @@ struct MergeTreeReadTask
     const Names & ordered_names;
     /// used to determine whether column should be filtered during PREWHERE or WHERE
     const NameSet & column_name_set;
-    /// column names to read during WHERE
-    const NamesAndTypesList & columns;
-    /// column names to read during PREWHERE
-    const NamesAndTypesList & pre_columns;
+    /// column names to read during PREWHERE and WHERE
+    const MergeTreeReadTaskColumns & task_columns;
     /// should PREWHERE column be returned to requesting side?
     const bool remove_prewhere_column;
-    /// resulting block may require reordering in accordance with `ordered_names`
-    const bool should_reorder;
     /// Used to satistfy preferred_block_size_bytes limitation
     MergeTreeBlockSizePredictorPtr size_predictor;
     /// Used to save current range processing status
     MergeTreeRangeReader range_reader;
-    MergeTreeRangeReader pre_range_reader;
+    /// Range readers for multiple filtering steps: row level security, PREWHERE etc.
+    /// NOTE: we take references to elements and push_back new elements, that's why it is a deque but noit a vector
+    std::deque<MergeTreeRangeReader> pre_range_readers;

     bool isFinished() const { return mark_ranges.empty() && range_reader.isCurrentRangeFinished(); }

     MergeTreeReadTask(
         const MergeTreeData::DataPartPtr & data_part_, const MarkRanges & mark_ranges_, size_t part_index_in_query_,
-        const Names & ordered_names_, const NameSet & column_name_set_, const NamesAndTypesList & columns_,
-        const NamesAndTypesList & pre_columns_, bool remove_prewhere_column_, bool should_reorder_,
+        const Names & ordered_names_, const NameSet & column_name_set_, const MergeTreeReadTaskColumns & task_columns_,
+        bool remove_prewhere_column_,
         MergeTreeBlockSizePredictorPtr && size_predictor_);
 };

-struct MergeTreeReadTaskColumns
-{
-    /// column names to read during WHERE
-    NamesAndTypesList columns;
-    /// column names to read during PREWHERE
-    NamesAndTypesList pre_columns;
-    /// resulting block may require reordering in accordance with `ordered_names`
-    bool should_reorder = false;
-};
-
 MergeTreeReadTaskColumns getReadTaskColumns(
     const MergeTreeData & storage,
     const StorageSnapshotPtr & storage_snapshot,
|
@ -96,7 +96,6 @@ namespace ProfileEvents
|
|||||||
extern const Event RejectedInserts;
|
extern const Event RejectedInserts;
|
||||||
extern const Event DelayedInserts;
|
extern const Event DelayedInserts;
|
||||||
extern const Event DelayedInsertsMilliseconds;
|
extern const Event DelayedInsertsMilliseconds;
|
||||||
extern const Event DuplicatedInsertedBlocks;
|
|
||||||
extern const Event InsertedWideParts;
|
extern const Event InsertedWideParts;
|
||||||
extern const Event InsertedCompactParts;
|
extern const Event InsertedCompactParts;
|
||||||
extern const Event InsertedInMemoryParts;
|
extern const Event InsertedInMemoryParts;
|
||||||
@@ -2786,22 +2785,14 @@

 bool MergeTreeData::renameTempPartAndAdd(
     MutableDataPartPtr & part,
-    MergeTreeTransaction * txn,
-    SimpleIncrement * increment,
-    Transaction * out_transaction,
-    MergeTreeDeduplicationLog * deduplication_log,
-    std::string_view deduplication_token)
+    Transaction & out_transaction,
+    DataPartsLock & lock)
 {
-    if (out_transaction && &out_transaction->data != this)
-        throw Exception("MergeTreeData::Transaction for one table cannot be used with another. It is a bug.",
-            ErrorCodes::LOGICAL_ERROR);
-
     DataPartsVector covered_parts;
-    {
-        auto lock = lockParts();
-        if (!renameTempPartAndReplace(part, txn, increment, out_transaction, lock, &covered_parts, deduplication_log, deduplication_token))
-            return false;
-    }
+
+    if (!renameTempPartAndReplaceImpl(part, out_transaction, lock, &covered_parts))
+        return false;
+
     if (!covered_parts.empty())
         throw Exception("Added part " + part->name + " covers " + toString(covered_parts.size())
             + " existing part(s) (including " + covered_parts[0]->name + ")", ErrorCodes::LOGICAL_ERROR);
@@ -2809,29 +2800,10 @@ bool MergeTreeData::renameTempPartAndAdd(
     return true;
 }

-bool MergeTreeData::renameTempPartAndReplace(
-    MutableDataPartPtr & part,
-    MergeTreeTransaction * txn,
-    SimpleIncrement * increment,
-    Transaction * out_transaction,
-    std::unique_lock<std::mutex> & lock,
-    DataPartsVector * out_covered_parts,
-    MergeTreeDeduplicationLog * deduplication_log,
-    std::string_view deduplication_token)
+void MergeTreeData::checkPartCanBeAddedToTable(MutableDataPartPtr & part, DataPartsLock & lock) const
 {
-    if (out_transaction && &out_transaction->data != this)
-        throw Exception("MergeTreeData::Transaction for one table cannot be used with another. It is a bug.",
-            ErrorCodes::LOGICAL_ERROR);
-
-    if (txn)
-        transactions_enabled.store(true);
-
     part->assertState({DataPartState::Temporary});

-    MergeTreePartInfo part_info = part->info;
-    String part_name;
-
     if (DataPartPtr existing_part_in_partition = getAnyPartInPartition(part->info.partition_id, lock))
     {
         if (part->partition.value != existing_part_in_partition->partition.value)
@@ -2841,21 +2813,7 @@ bool MergeTreeData::renameTempPartAndReplace(
                 ErrorCodes::CORRUPTED_DATA);
     }

-    /** It is important that obtaining new block number and adding that block to parts set is done atomically.
-      * Otherwise there is race condition - merge of blocks could happen in interval that doesn't yet contain new part.
-      */
-    if (increment)
-    {
-        part_info.min_block = part_info.max_block = increment->get();
-        part_info.mutation = 0; /// it's equal to min_block by default
-        part_name = part->getNewName(part_info);
-    }
-    else /// Parts from ReplicatedMergeTree already have names
-        part_name = part->name;
-
-    LOG_TRACE(log, "Renaming temporary part {} to {}.", part->data_part_storage->getPartDirectory(), part_name);
-
-    if (auto it_duplicate = data_parts_by_info.find(part_info); it_duplicate != data_parts_by_info.end())
+    if (auto it_duplicate = data_parts_by_info.find(part->info); it_duplicate != data_parts_by_info.end())
     {
         String message = "Part " + (*it_duplicate)->getNameWithState() + " already exists";

@@ -2864,93 +2822,51 @@ bool MergeTreeData::renameTempPartAndReplace(

         throw Exception(message, ErrorCodes::DUPLICATE_DATA_PART);
     }
+}
+
+void MergeTreeData::preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, bool need_rename)
+{
+    part->is_temp = false;
+    part->setState(DataPartState::PreActive);
+
+    if (need_rename)
+        part->renameTo(part->name, true);
+
+    data_parts_indexes.insert(part);
+    out_transaction.precommitted_parts.insert(part);
+}
+
+bool MergeTreeData::renameTempPartAndReplaceImpl(
+    MutableDataPartPtr & part,
+    Transaction & out_transaction,
+    DataPartsLock & lock,
+    DataPartsVector * out_covered_parts)
+{
+    LOG_TRACE(log, "Renaming temporary part {} to {}.", part->data_part_storage->getPartDirectory(), part->name);
+
+    if (&out_transaction.data != this)
+        throw Exception("MergeTreeData::Transaction for one table cannot be used with another. It is a bug.",
+            ErrorCodes::LOGICAL_ERROR);
+
+    checkPartCanBeAddedToTable(part, lock);

     DataPartPtr covering_part;
-    DataPartsVector covered_parts = getActivePartsToReplace(part_info, part_name, covering_part, lock);
+    DataPartsVector covered_parts = getActivePartsToReplace(part->info, part->name, covering_part, lock);

     if (covering_part)
     {
-        LOG_WARNING(log, "Tried to add obsolete part {} covered by {}", part_name, covering_part->getNameWithState());
+        LOG_WARNING(log, "Tried to add obsolete part {} covered by {}", part->name, covering_part->getNameWithState());
         return false;
     }

-    /// Deduplication log used only from non-replicated MergeTree. Replicated
-    /// tables have their own mechanism. We try to deduplicate at such deep
-    /// level, because only here we know real part name which is required for
-    /// deduplication.
-    if (deduplication_log)
-    {
-        const String block_id = part->getZeroLevelPartBlockID(deduplication_token);
-        auto res = deduplication_log->addPart(block_id, part_info);
-        if (!res.second)
-        {
-            ProfileEvents::increment(ProfileEvents::DuplicatedInsertedBlocks);
-            LOG_INFO(log, "Block with ID {} already exists as part {}; ignoring it", block_id, res.first.getPartName());
-            return false;
-        }
-    }
-
     /// All checks are passed. Now we can rename the part on disk.
     /// So, we maintain invariant: if a non-temporary part in filesystem then it is in data_parts
-    ///
-    /// If out_transaction is null, we commit the part to the active set immediately, else add it to the transaction.
-
-    part->name = part_name;
-    part->info = part_info;
-    part->is_temp = false;
-    part->setState(DataPartState::PreActive);
-    part->renameTo(part_name, true);
-
-    auto part_it = data_parts_indexes.insert(part).first;
-
-    if (out_transaction)
-    {
-        chassert(out_transaction->txn == txn);
-        out_transaction->precommitted_parts.insert(part);
-    }
-    else
-    {
-        /// FIXME Transactions: it's not the best place for checking and setting removal_tid,
-        /// because it's too optimistic. We should lock removal_tid of covered parts at the beginning of operation.
-        MergeTreeTransaction::addNewPartAndRemoveCovered(shared_from_this(), part, covered_parts, txn);
-
-        size_t reduce_bytes = 0;
-        size_t reduce_rows = 0;
-        size_t reduce_parts = 0;
-        auto current_time = time(nullptr);
-        for (const DataPartPtr & covered_part : covered_parts)
-        {
-            covered_part->remove_time.store(current_time, std::memory_order_relaxed);
-            modifyPartState(covered_part, DataPartState::Outdated);
-            removePartContributionToColumnAndSecondaryIndexSizes(covered_part);
-            reduce_bytes += covered_part->getBytesOnDisk();
-            reduce_rows += covered_part->rows_count;
-            ++reduce_parts;
-        }
-
-        modifyPartState(part_it, DataPartState::Active);
-        addPartContributionToColumnAndSecondaryIndexSizes(part);
-
-        if (covered_parts.empty())
-            updateObjectColumns(*part_it, lock);
-        else
-            resetObjectColumnsFromActiveParts(lock);
-
-        ssize_t diff_bytes = part->getBytesOnDisk() - reduce_bytes;
-        ssize_t diff_rows = part->rows_count - reduce_rows;
-        ssize_t diff_parts = 1 - reduce_parts;
-        increaseDataVolume(diff_bytes, diff_rows, diff_parts);
-    }
-
-    auto part_in_memory = asInMemoryPart(part);
-    if (part_in_memory && getSettings()->in_memory_parts_enable_wal)
-    {
-        auto wal = getWriteAheadLog();
-        wal->addPart(part_in_memory);
-    }
+    preparePartForCommit(part, out_transaction, /* need_rename = */ true);

     if (out_covered_parts)
     {
+        out_covered_parts->reserve(covered_parts.size());
+
         for (DataPartPtr & covered_part : covered_parts)
             out_covered_parts->emplace_back(std::move(covered_part));
     }
@@ -2958,24 +2874,26 @@ bool MergeTreeData::renameTempPartAndReplace(
     return true;
 }

-MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace(
-    MutableDataPartPtr & part, MergeTreeTransaction * txn, SimpleIncrement * increment,
-    Transaction * out_transaction, MergeTreeDeduplicationLog * deduplication_log)
+MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplaceUnlocked(
+    MutableDataPartPtr & part,
+    Transaction & out_transaction,
+    DataPartsLock & lock)
 {
-    if (out_transaction && &out_transaction->data != this)
-        throw Exception("MergeTreeData::Transaction for one table cannot be used with another. It is a bug.",
-            ErrorCodes::LOGICAL_ERROR);
-
     DataPartsVector covered_parts;
-    {
-        auto lock = lockParts();
-        renameTempPartAndReplace(part, txn, increment, out_transaction, lock, &covered_parts, deduplication_log);
-    }
+    renameTempPartAndReplaceImpl(part, out_transaction, lock, &covered_parts);
+
     return covered_parts;
 }

+MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace(
+    MutableDataPartPtr & part,
+    Transaction & out_transaction)
+{
+    auto part_lock = lockParts();
+    return renameTempPartAndReplaceUnlocked(part, out_transaction, part_lock);
+}
+
 void MergeTreeData::removePartsFromWorkingSet(MergeTreeTransaction * txn, const MergeTreeData::DataPartsVector & remove, bool clear_without_timeout, DataPartsLock & acquired_lock)
 {
     if (txn)
         transactions_enabled.store(true);
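After this refactoring, every rename-and-commit goes through an explicit Transaction object; the MergePlainMergeTreeTask::finish() hunk earlier in this diff shows the intended calling pattern. Distilled (a sketch assembled from the hunks above, not a verbatim quote):

    MergeTreeData::Transaction transaction(data, txn.get());
    {
        auto lock = data.lockParts();
        data.renameTempPartAndAdd(part, transaction, lock);  // part becomes PreActive
    }
    transaction.commit();  // part becomes Active; WAL writes for in-memory parts happen here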
@@ -4879,6 +4797,14 @@ MergeTreeData::DataPartPtr MergeTreeData::getAnyPartInPartition(
 }


+MergeTreeData::Transaction::Transaction(MergeTreeData & data_, MergeTreeTransaction * txn_)
+    : data(data_)
+    , txn(txn_)
+{
+    if (txn)
+        data.transactions_enabled.store(true);
+}
+
 void MergeTreeData::Transaction::rollbackPartsToTemporaryState()
 {
     if (!isEmpty())
@@ -4922,9 +4848,12 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData:

     if (!isEmpty())
     {
+        auto settings = data.getSettings();
+        MergeTreeData::WriteAheadLogPtr wal;
         auto parts_lock = acquired_parts_lock ? MergeTreeData::DataPartsLock() : data.lockParts();
         auto * owing_parts_lock = acquired_parts_lock ? acquired_parts_lock : &parts_lock;

+
         if (txn)
         {
             for (const DataPartPtr & part : precommitted_parts)
@@ -4949,6 +4878,15 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData:

         for (const DataPartPtr & part : precommitted_parts)
         {
+            auto part_in_memory = asInMemoryPart(part);
+            if (part_in_memory && settings->in_memory_parts_enable_wal)
+            {
+                if (!wal)
+                    wal = data.getWriteAheadLog();
+
+                wal->addPart(part_in_memory);
+            }
+
             DataPartPtr covering_part;
             DataPartsVector covered_parts = data.getActivePartsToReplace(part->info, part->name, covering_part, *owing_parts_lock);
             if (covering_part)
@@ -252,7 +252,7 @@ public:
     class Transaction : private boost::noncopyable
     {
     public:
-        Transaction(MergeTreeData & data_, MergeTreeTransaction * txn_) : data(data_), txn(txn_) {}
+        Transaction(MergeTreeData & data_, MergeTreeTransaction * txn_);

         DataPartsVector commit(MergeTreeData::DataPartsLock * acquired_parts_lock = nullptr);

@@ -549,37 +549,24 @@ public:

     /// Renames temporary part to a permanent part and adds it to the parts set.
     /// It is assumed that the part does not intersect with existing parts.
-    /// If increment != nullptr, part index is determining using increment. Otherwise part index remains unchanged.
-    /// If out_transaction != nullptr, adds the part in the PreActive state (the part will be added to the
-    /// active set later with out_transaction->commit()).
-    /// Else, commits the part immediately.
+    /// Adds the part in the PreActive state (the part will be added to the active set later with out_transaction->commit()).
     /// Returns true if part was added. Returns false if part is covered by bigger part.
     bool renameTempPartAndAdd(
         MutableDataPartPtr & part,
-        MergeTreeTransaction * txn,
-        SimpleIncrement * increment = nullptr,
-        Transaction * out_transaction = nullptr,
-        MergeTreeDeduplicationLog * deduplication_log = nullptr,
-        std::string_view deduplication_token = std::string_view());
+        Transaction & transaction,
+        DataPartsLock & lock);

     /// The same as renameTempPartAndAdd but the block range of the part can contain existing parts.
     /// Returns all parts covered by the added part (in ascending order).
-    /// If out_transaction == nullptr, marks covered parts as Outdated.
     DataPartsVector renameTempPartAndReplace(
-        MutableDataPartPtr & part, MergeTreeTransaction * txn, SimpleIncrement * increment = nullptr,
-        Transaction * out_transaction = nullptr, MergeTreeDeduplicationLog * deduplication_log = nullptr);
-
-    /// Low-level version of previous one, doesn't lock mutex
-    /// FIXME Transactions: remove add_to_txn flag, maybe merge MergeTreeTransaction and Transaction
-    bool renameTempPartAndReplace(
         MutableDataPartPtr & part,
-        MergeTreeTransaction * txn,
-        SimpleIncrement * increment,
-        Transaction * out_transaction,
-        DataPartsLock & lock,
-        DataPartsVector * out_covered_parts = nullptr,
-        MergeTreeDeduplicationLog * deduplication_log = nullptr,
-        std::string_view deduplication_token = std::string_view());
+        Transaction & out_transaction);
+
+    /// Unlocked version of previous one. Useful when added multiple parts with a single lock.
+    DataPartsVector renameTempPartAndReplaceUnlocked(
+        MutableDataPartPtr & part,
+        Transaction & out_transaction,
+        DataPartsLock & lock);

     /// Remove parts from working set immediately (without wait for background
     /// process). Transfer part state to temporary. Have very limited usage only
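Taken together, these header changes replace the old pointer-and-default-argument overloads with an API where the caller always supplies a Transaction and, for the Add/Unlocked variants, an explicit DataPartsLock. A sketch of the resulting calling convention; the StorageMergeTree hunks later in this diff follow exactly this shape (`storage` and `part` are assumed to exist, error handling omitted):

    {
        auto lock = storage.lockParts();
        MergeTreeData::Transaction transaction(storage, NO_TRANSACTION_RAW);
        storage.fillNewPartName(part, lock);              /// allocate the block number under the parts lock
        storage.renameTempPartAndAdd(part, transaction, lock);
        transaction.commit(&lock);                        /// the part becomes Active only here
    }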
@@ -1251,6 +1238,22 @@ protected:
     static void incrementMergedPartsProfileEvent(MergeTreeDataPartType type);

 private:
+
+    /// Checking that candidate part doesn't break invariants: correct partition and doesn't exist already
+    void checkPartCanBeAddedToTable(MutableDataPartPtr & part, DataPartsLock & lock) const;
+
+    /// Preparing itself to be committed in memory: fill some fields inside part, add it to data_parts_indexes
+    /// in precommitted state and to transasction
+    void preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, bool need_rename);
+
+    /// Low-level method for preparing parts for commit (in-memory).
+    /// FIXME Merge MergeTreeTransaction and Transaction
+    bool renameTempPartAndReplaceImpl(
+        MutableDataPartPtr & part,
+        Transaction & out_transaction,
+        DataPartsLock & lock,
+        DataPartsVector * out_covered_parts);
+
     /// RAII Wrapper for atomic work with currently moving parts
     /// Acquire them in constructor and remove them in destructor
     /// Uses data.currently_moving_parts_mutex
@@ -541,7 +541,7 @@ MergeTreeData::DataPartPtr MergeTreeDataMergerMutator::renameMergedTemporaryPart
     MergeTreeData::MutableDataPartPtr & new_data_part,
     const MergeTreeData::DataPartsVector & parts,
     const MergeTreeTransactionPtr & txn,
-    MergeTreeData::Transaction * out_transaction)
+    MergeTreeData::Transaction & out_transaction)
 {
     /// Some of source parts was possibly created in transaction, so non-transactional merge may break isolation.
     if (data.transactions_enabled.load(std::memory_order_relaxed) && !txn)
@@ -549,7 +549,7 @@ MergeTreeData::DataPartPtr MergeTreeDataMergerMutator::renameMergedTemporaryPart
             "but transactions were enabled for this table");

     /// Rename new part, add to the set and remove original parts.
-    auto replaced_parts = data.renameTempPartAndReplace(new_data_part, txn.get(), nullptr, out_transaction);
+    auto replaced_parts = data.renameTempPartAndReplace(new_data_part, out_transaction);

     /// Let's check that all original parts have been deleted and only them.
     if (replaced_parts.size() != parts.size())
@@ -133,7 +133,7 @@ public:
         MergeTreeData::MutableDataPartPtr & new_data_part,
         const MergeTreeData::DataPartsVector & parts,
         const MergeTreeTransactionPtr & txn,
-        MergeTreeData::Transaction * out_transaction = nullptr);
+        MergeTreeData::Transaction & out_transaction);


     /// The approximate amount of disk space needed for merge or mutation. With a surplus.
@@ -34,9 +34,9 @@ try
         : getSizePredictor(data_part, task_columns, sample_block);

     task = std::make_unique<MergeTreeReadTask>(
-        data_part, mark_ranges_for_task, part_index_in_query, ordered_names, column_name_set, task_columns.columns,
-        task_columns.pre_columns, prewhere_info && prewhere_info->remove_prewhere_column,
-        task_columns.should_reorder, std::move(size_predictor));
+        data_part, mark_ranges_for_task, part_index_in_query, ordered_names, column_name_set, task_columns,
+        prewhere_info && prewhere_info->remove_prewhere_column,
+        std::move(size_predictor));

     return true;
 }
@@ -3,6 +3,8 @@
 #include <Columns/ColumnConst.h>
 #include <Columns/ColumnsCommon.h>
 #include <Common/TargetSpecific.h>
+#include <IO/WriteBufferFromString.h>
+#include <IO/Operators.h>
 #include <base/range.h>
 #include <Interpreters/castColumn.h>
 #include <DataTypes/DataTypeNothing.h>
@@ -64,7 +66,7 @@ static void filterColumns(Columns & columns, const ColumnPtr & filter)
 }


-static size_t getLastMark(const MergeTreeRangeReader::ReadResult::RangesInfo & ranges)
+size_t MergeTreeRangeReader::ReadResult::getLastMark(const MergeTreeRangeReader::ReadResult::RangesInfo & ranges)
 {
     size_t current_task_last_mark = 0;
     for (const auto & mark_range : ranges)
@@ -594,6 +596,7 @@ size_t MergeTreeRangeReader::ReadResult::numZerosInTail(const UInt8 * begin, con
     return count;
 }

+/// Filter size must match total_rows_per_granule
 void MergeTreeRangeReader::ReadResult::setFilter(const ColumnPtr & new_filter)
 {
     if (!new_filter && filter)
|
|||||||
MergeTreeRangeReader::MergeTreeRangeReader(
|
MergeTreeRangeReader::MergeTreeRangeReader(
|
||||||
IMergeTreeReader * merge_tree_reader_,
|
IMergeTreeReader * merge_tree_reader_,
|
||||||
MergeTreeRangeReader * prev_reader_,
|
MergeTreeRangeReader * prev_reader_,
|
||||||
const PrewhereExprInfo * prewhere_info_,
|
const PrewhereExprStep * prewhere_info_,
|
||||||
bool last_reader_in_chain_,
|
bool last_reader_in_chain_,
|
||||||
const Names & non_const_virtual_column_names_)
|
const Names & non_const_virtual_column_names_)
|
||||||
: merge_tree_reader(merge_tree_reader_)
|
: merge_tree_reader(merge_tree_reader_)
|
||||||
@ -672,17 +675,12 @@ MergeTreeRangeReader::MergeTreeRangeReader(
|
|||||||
|
|
||||||
if (prewhere_info)
|
if (prewhere_info)
|
||||||
{
|
{
|
||||||
if (prewhere_info->row_level_filter)
|
const auto & step = *prewhere_info;
|
||||||
{
|
if (step.actions)
|
||||||
prewhere_info->row_level_filter->execute(sample_block, true);
|
step.actions->execute(sample_block, true);
|
||||||
sample_block.erase(prewhere_info->row_level_column_name);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (prewhere_info->prewhere_actions)
|
if (step.remove_column)
|
||||||
prewhere_info->prewhere_actions->execute(sample_block, true);
|
sample_block.erase(step.column_name);
|
||||||
|
|
||||||
if (prewhere_info->remove_prewhere_column)
|
|
||||||
sample_block.erase(prewhere_info->prewhere_column_name);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -983,11 +981,15 @@ void MergeTreeRangeReader::fillPartOffsetColumn(ReadResult & result, UInt64 lead
     result.columns.emplace_back(std::move(column));
 }

-Columns MergeTreeRangeReader::continueReadingChain(ReadResult & result, size_t & num_rows)
+Columns MergeTreeRangeReader::continueReadingChain(const ReadResult & result, size_t & num_rows)
 {
     Columns columns;
     num_rows = 0;

+    /// No columns need to be read at this step? (only more filtering)
+    if (merge_tree_reader->getColumns().empty())
+        return columns;
+
     if (result.rowsPerGranule().empty())
     {
         /// If zero rows were read on prev step, than there is no more rows to read.
|
|||||||
const auto & rows_per_granule = result.rowsPerGranule();
|
const auto & rows_per_granule = result.rowsPerGranule();
|
||||||
const auto & started_ranges = result.startedRanges();
|
const auto & started_ranges = result.startedRanges();
|
||||||
|
|
||||||
size_t current_task_last_mark = getLastMark(started_ranges);
|
size_t current_task_last_mark = ReadResult::getLastMark(started_ranges);
|
||||||
size_t next_range_to_start = 0;
|
size_t next_range_to_start = 0;
|
||||||
|
|
||||||
auto size = rows_per_granule.size();
|
auto size = rows_per_granule.size();
|
||||||
@ -1039,6 +1041,8 @@ static void checkCombinedFiltersSize(size_t bytes_in_first_filter, size_t second
|
|||||||
"does not match second filter size ({})", bytes_in_first_filter, second_filter_size);
|
"does not match second filter size ({})", bytes_in_first_filter, second_filter_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Second filter size must be equal to number of 1s in the first filter.
|
||||||
|
/// The result size is equal to first filter size.
|
||||||
static ColumnPtr combineFilters(ColumnPtr first, ColumnPtr second)
|
static ColumnPtr combineFilters(ColumnPtr first, ColumnPtr second)
|
||||||
{
|
{
|
||||||
ConstantFilterDescription first_const_descr(*first);
|
ConstantFilterDescription first_const_descr(*first);
|
||||||
@ -1099,13 +1103,17 @@ void MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(ReadResult & r
|
|||||||
const auto & header = merge_tree_reader->getColumns();
|
const auto & header = merge_tree_reader->getColumns();
|
||||||
size_t num_columns = header.size();
|
size_t num_columns = header.size();
|
||||||
|
|
||||||
if (result.columns.size() != (num_columns + non_const_virtual_column_names.size()))
|
/// Check that we have columns from previous steps and newly read required columns
|
||||||
throw Exception("Invalid number of columns passed to MergeTreeRangeReader. "
|
if (result.columns.size() < num_columns + non_const_virtual_column_names.size())
|
||||||
"Expected " + toString(num_columns) + ", "
|
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||||
"got " + toString(result.columns.size()), ErrorCodes::LOGICAL_ERROR);
|
"Invalid number of columns passed to MergeTreeRangeReader. Expected {}, got {}",
|
||||||
|
num_columns, result.columns.size());
|
||||||
|
|
||||||
ColumnPtr filter;
|
/// This filter has the size of total_rows_per granule. It is applied after reading contiguous chunks from
|
||||||
ColumnPtr row_level_filter;
|
/// the start of each granule.
|
||||||
|
ColumnPtr combined_filter;
|
||||||
|
/// Filter computed at the current step. Its size is equal to num_rows which is <= total_rows_per_granule
|
||||||
|
ColumnPtr current_step_filter;
|
||||||
size_t prewhere_column_pos;
|
size_t prewhere_column_pos;
|
||||||
|
|
||||||
{
|
{
|
||||||
@ -1122,13 +1130,23 @@ void MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(ReadResult & r
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for (auto name_and_type = header.begin(); pos < num_columns; ++pos, ++name_and_type)
|
for (auto name_and_type = header.begin(); name_and_type != header.end() && pos < result.columns.size(); ++pos, ++name_and_type)
|
||||||
block.insert({result.columns[pos], name_and_type->type, name_and_type->name});
|
block.insert({result.columns[pos], name_and_type->type, name_and_type->name});
|
||||||
|
|
||||||
for (const auto & column_name : non_const_virtual_column_names)
|
for (const auto & column_name : non_const_virtual_column_names)
|
||||||
{
|
{
|
||||||
|
if (block.has(column_name))
|
||||||
|
continue;
|
||||||
|
|
||||||
if (column_name == "_part_offset")
|
if (column_name == "_part_offset")
|
||||||
|
{
|
||||||
|
if (pos >= result.columns.size())
|
||||||
|
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||||
|
"Invalid number of columns passed to MergeTreeRangeReader. Expected {}, got {}",
|
||||||
|
num_columns, result.columns.size());
|
||||||
|
|
||||||
block.insert({result.columns[pos], std::make_shared<DataTypeUInt64>(), column_name});
|
block.insert({result.columns[pos], std::make_shared<DataTypeUInt64>(), column_name});
|
||||||
|
}
|
||||||
else
|
else
|
||||||
throw Exception("Unexpected non-const virtual column: " + column_name, ErrorCodes::LOGICAL_ERROR);
|
throw Exception("Unexpected non-const virtual column: " + column_name, ErrorCodes::LOGICAL_ERROR);
|
||||||
++pos;
|
++pos;
|
||||||
@ -1137,58 +1155,37 @@ void MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(ReadResult & r
|
|||||||
/// Columns might be projected out. We need to store them here so that default columns can be evaluated later.
|
/// Columns might be projected out. We need to store them here so that default columns can be evaluated later.
|
||||||
result.block_before_prewhere = block;
|
result.block_before_prewhere = block;
|
||||||
|
|
||||||
if (prewhere_info->row_level_filter)
|
if (prewhere_info->actions)
|
||||||
{
|
prewhere_info->actions->execute(block);
|
||||||
prewhere_info->row_level_filter->execute(block);
|
|
||||||
auto row_level_filter_pos = block.getPositionByName(prewhere_info->row_level_column_name);
|
|
||||||
row_level_filter = block.getByPosition(row_level_filter_pos).column;
|
|
||||||
block.erase(row_level_filter_pos);
|
|
||||||
|
|
||||||
auto columns = block.getColumns();
|
prewhere_column_pos = block.getPositionByName(prewhere_info->column_name);
|
||||||
filterColumns(columns, row_level_filter);
|
|
||||||
if (columns.empty())
|
|
||||||
block = block.cloneEmpty();
|
|
||||||
else
|
|
||||||
block.setColumns(columns);
|
|
||||||
}
|
|
||||||
|
|
||||||
prewhere_info->prewhere_actions->execute(block);
|
|
||||||
|
|
||||||
prewhere_column_pos = block.getPositionByName(prewhere_info->prewhere_column_name);
|
|
||||||
|
|
||||||
result.columns.clear();
|
result.columns.clear();
|
||||||
result.columns.reserve(block.columns());
|
result.columns.reserve(block.columns());
|
||||||
for (auto & col : block)
|
for (auto & col : block)
|
||||||
result.columns.emplace_back(std::move(col.column));
|
result.columns.emplace_back(std::move(col.column));
|
||||||
|
|
||||||
filter.swap(result.columns[prewhere_column_pos]);
|
current_step_filter.swap(result.columns[prewhere_column_pos]);
|
||||||
|
combined_filter = current_step_filter;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (result.getFilter())
|
if (result.getFilter())
|
||||||
{
|
{
|
||||||
/// TODO: implement for prewhere chain.
|
ColumnPtr prev_filter = result.getFilterHolder();
|
||||||
/// In order to do it we need combine filter and result.filter, where filter filters only '1' in result.filter.
|
combined_filter = combineFilters(prev_filter, std::move(combined_filter));
|
||||||
throw Exception("MergeTreeRangeReader chain with several prewhere actions in not implemented.",
|
|
||||||
ErrorCodes::LOGICAL_ERROR);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (filter && row_level_filter)
|
result.setFilter(combined_filter);
|
||||||
{
|
|
||||||
row_level_filter = combineFilters(std::move(row_level_filter), filter);
|
|
||||||
result.setFilter(row_level_filter);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
result.setFilter(filter);
|
|
||||||
|
|
||||||
/// If there is a WHERE, we filter in there, and only optimize IO and shrink columns here
|
/// If there is a WHERE, we filter in there, and only optimize IO and shrink columns here
|
||||||
if (!last_reader_in_chain)
|
if (!last_reader_in_chain)
|
||||||
result.optimize(merge_tree_reader->canReadIncompleteGranules(), prewhere_info->row_level_filter == nullptr);
|
result.optimize(merge_tree_reader->canReadIncompleteGranules(), true);
|
||||||
|
|
||||||
/// If we read nothing or filter gets optimized to nothing
|
/// If we read nothing or filter gets optimized to nothing
|
||||||
if (result.totalRowsPerGranule() == 0)
|
if (result.totalRowsPerGranule() == 0)
|
||||||
result.setFilterConstFalse();
|
result.setFilterConstFalse();
|
||||||
/// If we need to filter in PREWHERE
|
/// If we need to filter in PREWHERE
|
||||||
else if (prewhere_info->need_filter || result.need_filter || prewhere_info->row_level_filter)
|
else if (prewhere_info->need_filter || result.need_filter)
|
||||||
{
|
{
|
||||||
/// If there is a filter and without optimized
|
/// If there is a filter and without optimized
|
||||||
if (result.getFilter() && last_reader_in_chain)
|
if (result.getFilter() && last_reader_in_chain)
|
||||||
@ -1208,10 +1205,7 @@ void MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(ReadResult & r
|
|||||||
/// filter might be shrunk while columns not
|
/// filter might be shrunk while columns not
|
||||||
const auto * result_filter = result.getFilterOriginal();
|
const auto * result_filter = result.getFilterOriginal();
|
||||||
|
|
||||||
if (row_level_filter)
|
filterColumns(result.columns, current_step_filter);
|
||||||
filterColumns(result.columns, filter);
|
|
||||||
else
|
|
||||||
filterColumns(result.columns, result_filter->getData());
|
|
||||||
|
|
||||||
result.need_filter = true;
|
result.need_filter = true;
|
||||||
|
|
||||||
@ -1234,22 +1228,22 @@ void MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(ReadResult & r
|
|||||||
/// Check if the PREWHERE column is needed
|
/// Check if the PREWHERE column is needed
|
||||||
if (!result.columns.empty())
|
if (!result.columns.empty())
|
||||||
{
|
{
|
||||||
if (prewhere_info->remove_prewhere_column)
|
if (prewhere_info->remove_column)
|
||||||
result.columns.erase(result.columns.begin() + prewhere_column_pos);
|
result.columns.erase(result.columns.begin() + prewhere_column_pos);
|
||||||
else
|
else
|
||||||
result.columns[prewhere_column_pos] =
|
result.columns[prewhere_column_pos] =
|
||||||
getSampleBlock().getByName(prewhere_info->prewhere_column_name).type->
|
getSampleBlock().getByName(prewhere_info->column_name).type->
|
||||||
createColumnConst(result.num_rows, 1u)->convertToFullColumnIfConst();
|
createColumnConst(result.num_rows, 1u)->convertToFullColumnIfConst();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
/// Filter in WHERE instead
|
/// Filter in WHERE instead
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
if (prewhere_info->remove_prewhere_column)
|
if (prewhere_info->remove_column)
|
||||||
result.columns.erase(result.columns.begin() + prewhere_column_pos);
|
result.columns.erase(result.columns.begin() + prewhere_column_pos);
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
auto type = getSampleBlock().getByName(prewhere_info->prewhere_column_name).type;
|
auto type = getSampleBlock().getByName(prewhere_info->column_name).type;
|
||||||
ColumnWithTypeAndName col(result.getFilterHolder()->convertToFullColumnIfConst(), std::make_shared<DataTypeUInt8>(), "");
|
ColumnWithTypeAndName col(result.getFilterHolder()->convertToFullColumnIfConst(), std::make_shared<DataTypeUInt8>(), "");
|
||||||
result.columns[prewhere_column_pos] = castColumn(col, type);
|
result.columns[prewhere_column_pos] = castColumn(col, type);
|
||||||
result.clearFilter(); // Acting as a flag to not filter in PREWHERE
|
result.clearFilter(); // Acting as a flag to not filter in PREWHERE
|
||||||
@ -1257,4 +1251,20 @@ void MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(ReadResult & r
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::string PrewhereExprInfo::dump() const
|
||||||
|
{
|
||||||
|
WriteBufferFromOwnString s;
|
||||||
|
|
||||||
|
for (size_t i = 0; i < steps.size(); ++i)
|
||||||
|
{
|
||||||
|
s << "STEP " << i << ":\n"
|
||||||
|
<< " ACTIONS: " << (steps[i].actions ? steps[i].actions->dumpActions() : "nullptr") << "\n"
|
||||||
|
<< " COLUMN: " << steps[i].column_name << "\n"
|
||||||
|
<< " REMOVE_COLUMN: " << steps[i].remove_column << "\n"
|
||||||
|
<< " NEED_FILTER: " << steps[i].need_filter << "\n";
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.str();
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@@ -18,18 +18,20 @@ using PrewhereInfoPtr = std::shared_ptr<PrewhereInfo>;
 class ExpressionActions;
 using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>;

+struct PrewhereExprStep
+{
+    ExpressionActionsPtr actions;
+    String column_name;
+    bool remove_column = false;
+    bool need_filter = false;
+};
+
 /// The same as PrewhereInfo, but with ExpressionActions instead of ActionsDAG
 struct PrewhereExprInfo
 {
-    /// Actions for row level security filter. Applied separately before prewhere_actions.
-    /// This actions are separate because prewhere condition should not be executed over filtered rows.
-    ExpressionActionsPtr row_level_filter;
-    /// Actions which are executed on block in order to get filter column for prewhere step.
-    ExpressionActionsPtr prewhere_actions;
-    String row_level_column_name;
-    String prewhere_column_name;
-    bool remove_prewhere_column = false;
-    bool need_filter = false;
+    std::vector<PrewhereExprStep> steps;
+
+    std::string dump() const;
 };

 /// MergeTreeReader iterator which allows sequential reading for arbitrary number of rows between pairs of marks in the same part.
|
|||||||
MergeTreeRangeReader(
|
MergeTreeRangeReader(
|
||||||
IMergeTreeReader * merge_tree_reader_,
|
IMergeTreeReader * merge_tree_reader_,
|
||||||
MergeTreeRangeReader * prev_reader_,
|
MergeTreeRangeReader * prev_reader_,
|
||||||
const PrewhereExprInfo * prewhere_info_,
|
const PrewhereExprStep * prewhere_info_,
|
||||||
bool last_reader_in_chain_,
|
bool last_reader_in_chain_,
|
||||||
const Names & non_const_virtual_column_names);
|
const Names & non_const_virtual_column_names);
|
||||||
|
|
||||||
@ -57,6 +59,7 @@ public:
|
|||||||
bool isCurrentRangeFinished() const;
|
bool isCurrentRangeFinished() const;
|
||||||
bool isInitialized() const { return is_initialized; }
|
bool isInitialized() const { return is_initialized; }
|
||||||
|
|
||||||
|
private:
|
||||||
/// Accumulates sequential read() requests to perform a large read instead of multiple small reads
|
/// Accumulates sequential read() requests to perform a large read instead of multiple small reads
|
||||||
class DelayedStream
|
class DelayedStream
|
||||||
{
|
{
|
||||||
@ -144,10 +147,23 @@ public:
|
|||||||
size_t ceilRowsToCompleteGranules(size_t rows_num) const;
|
size_t ceilRowsToCompleteGranules(size_t rows_num) const;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
public:
|
||||||
/// Statistics after next reading step.
|
/// Statistics after next reading step.
|
||||||
class ReadResult
|
class ReadResult
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
|
Columns columns;
|
||||||
|
size_t num_rows = 0;
|
||||||
|
|
||||||
|
/// The number of rows were added to block as a result of reading chain.
|
||||||
|
size_t numReadRows() const { return num_read_rows; }
|
||||||
|
/// The number of bytes read from disk.
|
||||||
|
size_t numBytesRead() const { return num_bytes_read; }
|
||||||
|
|
||||||
|
private:
|
||||||
|
/// Only MergeTreeRangeReader is supposed to access ReadResult internals.
|
||||||
|
friend class MergeTreeRangeReader;
|
||||||
|
|
||||||
using NumRows = std::vector<size_t>;
|
using NumRows = std::vector<size_t>;
|
||||||
|
|
||||||
struct RangeInfo
|
struct RangeInfo
|
||||||
@ -161,13 +177,11 @@ public:
|
|||||||
const RangesInfo & startedRanges() const { return started_ranges; }
|
const RangesInfo & startedRanges() const { return started_ranges; }
|
||||||
const NumRows & rowsPerGranule() const { return rows_per_granule; }
|
const NumRows & rowsPerGranule() const { return rows_per_granule; }
|
||||||
|
|
||||||
|
static size_t getLastMark(const MergeTreeRangeReader::ReadResult::RangesInfo & ranges);
|
||||||
|
|
||||||
/// The number of rows were read at LAST iteration in chain. <= num_added_rows + num_filtered_rows.
|
/// The number of rows were read at LAST iteration in chain. <= num_added_rows + num_filtered_rows.
|
||||||
size_t totalRowsPerGranule() const { return total_rows_per_granule; }
|
size_t totalRowsPerGranule() const { return total_rows_per_granule; }
|
||||||
/// The number of rows were added to block as a result of reading chain.
|
|
||||||
size_t numReadRows() const { return num_read_rows; }
|
|
||||||
size_t numRowsToSkipInLastGranule() const { return num_rows_to_skip_in_last_granule; }
|
size_t numRowsToSkipInLastGranule() const { return num_rows_to_skip_in_last_granule; }
|
||||||
/// The number of bytes read from disk.
|
|
||||||
size_t numBytesRead() const { return num_bytes_read; }
|
|
||||||
/// Filter you need to apply to newly-read columns in order to add them to block.
|
/// Filter you need to apply to newly-read columns in order to add them to block.
|
||||||
const ColumnUInt8 * getFilterOriginal() const { return filter_original ? filter_original : filter; }
|
const ColumnUInt8 * getFilterOriginal() const { return filter_original ? filter_original : filter; }
|
||||||
const ColumnUInt8 * getFilter() const { return filter; }
|
const ColumnUInt8 * getFilter() const { return filter; }
|
||||||
@ -195,13 +209,12 @@ public:
|
|||||||
|
|
||||||
size_t countBytesInResultFilter(const IColumn::Filter & filter);
|
size_t countBytesInResultFilter(const IColumn::Filter & filter);
|
||||||
|
|
||||||
Columns columns;
|
/// If this flag is false than filtering form PREWHERE can be delayed and done in WHERE
|
||||||
size_t num_rows = 0;
|
/// to reduce memory copies and applying heavy filters multiple times
|
||||||
bool need_filter = false;
|
bool need_filter = false;
|
||||||
|
|
||||||
Block block_before_prewhere;
|
Block block_before_prewhere;
|
||||||
|
|
||||||
private:
|
|
||||||
RangesInfo started_ranges;
|
RangesInfo started_ranges;
|
||||||
/// The number of rows read from each granule.
|
/// The number of rows read from each granule.
|
||||||
/// Granule here is not number of rows between two marks
|
/// Granule here is not number of rows between two marks
|
||||||
@ -234,16 +247,15 @@ public:
|
|||||||
const Block & getSampleBlock() const { return sample_block; }
|
const Block & getSampleBlock() const { return sample_block; }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
|
||||||
ReadResult startReadingChain(size_t max_rows, MarkRanges & ranges);
|
ReadResult startReadingChain(size_t max_rows, MarkRanges & ranges);
|
||||||
Columns continueReadingChain(ReadResult & result, size_t & num_rows);
|
Columns continueReadingChain(const ReadResult & result, size_t & num_rows);
|
||||||
void executePrewhereActionsAndFilterColumns(ReadResult & result);
|
void executePrewhereActionsAndFilterColumns(ReadResult & result);
|
||||||
void fillPartOffsetColumn(ReadResult & result, UInt64 leading_begin_part_offset, UInt64 leading_end_part_offset);
|
void fillPartOffsetColumn(ReadResult & result, UInt64 leading_begin_part_offset, UInt64 leading_end_part_offset);
|
||||||
|
|
||||||
IMergeTreeReader * merge_tree_reader = nullptr;
|
IMergeTreeReader * merge_tree_reader = nullptr;
|
||||||
const MergeTreeIndexGranularity * index_granularity = nullptr;
|
const MergeTreeIndexGranularity * index_granularity = nullptr;
|
||||||
MergeTreeRangeReader * prev_reader = nullptr; /// If not nullptr, read from prev_reader firstly.
|
MergeTreeRangeReader * prev_reader = nullptr; /// If not nullptr, read from prev_reader firstly.
|
||||||
const PrewhereExprInfo * prewhere_info;
|
const PrewhereExprStep * prewhere_info;
|
||||||
|
|
||||||
Stream stream;
|
Stream stream;
|
||||||
|
|
||||||
|
@ -135,13 +135,15 @@ MergeTreeReadTaskPtr MergeTreeReadPool::getTask(size_t min_marks_to_read, size_t
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
auto curr_task_size_predictor = !per_part_size_predictor[part_idx] ? nullptr
|
const auto & per_part = per_part_params[part_idx];
|
||||||
: std::make_unique<MergeTreeBlockSizePredictor>(*per_part_size_predictor[part_idx]); /// make a copy
|
|
||||||
|
auto curr_task_size_predictor = !per_part.size_predictor ? nullptr
|
||||||
|
: std::make_unique<MergeTreeBlockSizePredictor>(*per_part.size_predictor); /// make a copy
|
||||||
|
|
||||||
return std::make_unique<MergeTreeReadTask>(
|
return std::make_unique<MergeTreeReadTask>(
|
||||||
part.data_part, ranges_to_get_from_part, part.part_index_in_query, ordered_names,
|
part.data_part, ranges_to_get_from_part, part.part_index_in_query, ordered_names,
|
||||||
per_part_column_name_set[part_idx], per_part_columns[part_idx], per_part_pre_columns[part_idx],
|
per_part.column_name_set, per_part.task_columns,
|
||||||
prewhere_info && prewhere_info->remove_prewhere_column, per_part_should_reorder[part_idx], std::move(curr_task_size_predictor));
|
prewhere_info && prewhere_info->remove_prewhere_column, std::move(curr_task_size_predictor));
|
||||||
}
|
}
|
||||||
|
|
||||||
Block MergeTreeReadPool::getHeader() const
|
Block MergeTreeReadPool::getHeader() const
|
||||||
@ -216,15 +218,14 @@ std::vector<size_t> MergeTreeReadPool::fillPerPartInfo(const RangesInDataParts &
|
|||||||
auto size_predictor = !predict_block_size_bytes ? nullptr
|
auto size_predictor = !predict_block_size_bytes ? nullptr
|
||||||
: MergeTreeBaseSelectProcessor::getSizePredictor(part.data_part, task_columns, sample_block);
|
: MergeTreeBaseSelectProcessor::getSizePredictor(part.data_part, task_columns, sample_block);
|
||||||
|
|
||||||
per_part_size_predictor.emplace_back(std::move(size_predictor));
|
auto & per_part = per_part_params.emplace_back();
|
||||||
|
|
||||||
|
per_part.size_predictor = std::move(size_predictor);
|
||||||
|
|
||||||
/// will be used to distinguish between PREWHERE and WHERE columns when applying filter
|
/// will be used to distinguish between PREWHERE and WHERE columns when applying filter
|
||||||
const auto & required_column_names = task_columns.columns.getNames();
|
const auto & required_column_names = task_columns.columns.getNames();
|
||||||
per_part_column_name_set.emplace_back(required_column_names.begin(), required_column_names.end());
|
per_part.column_name_set = {required_column_names.begin(), required_column_names.end()};
|
||||||
|
per_part.task_columns = std::move(task_columns);
|
||||||
per_part_pre_columns.push_back(std::move(task_columns.pre_columns));
|
|
||||||
per_part_columns.push_back(std::move(task_columns.columns));
|
|
||||||
per_part_should_reorder.push_back(task_columns.should_reorder);
|
|
||||||
|
|
||||||
parts_with_idx.push_back({ part.data_part, part.part_index_in_query });
|
parts_with_idx.push_back({ part.data_part, part.part_index_in_query });
|
||||||
}
|
}
|
||||||
|
@ -99,11 +99,16 @@ private:
|
|||||||
const Names column_names;
|
const Names column_names;
|
||||||
bool do_not_steal_tasks;
|
bool do_not_steal_tasks;
|
||||||
bool predict_block_size_bytes;
|
bool predict_block_size_bytes;
|
||||||
std::vector<NameSet> per_part_column_name_set;
|
|
||||||
std::vector<NamesAndTypesList> per_part_columns;
|
struct PerPartParams
|
||||||
std::vector<NamesAndTypesList> per_part_pre_columns;
|
{
|
||||||
std::vector<char> per_part_should_reorder;
|
MergeTreeReadTaskColumns task_columns;
|
||||||
std::vector<MergeTreeBlockSizePredictorPtr> per_part_size_predictor;
|
NameSet column_name_set;
|
||||||
|
MergeTreeBlockSizePredictorPtr size_predictor;
|
||||||
|
};
|
||||||
|
|
||||||
|
std::vector<PerPartParams> per_part_params;
|
||||||
|
|
||||||
PrewhereInfoPtr prewhere_info;
|
PrewhereInfoPtr prewhere_info;
|
||||||
|
|
||||||
struct Part
|
struct Part
|
||||||
|
@ -67,9 +67,12 @@ size_t MergeTreeReaderWide::readRows(
|
|||||||
size_t read_rows = 0;
|
size_t read_rows = 0;
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
size_t num_columns = columns.size();
|
size_t num_columns = res_columns.size();
|
||||||
checkNumberOfColumns(num_columns);
|
checkNumberOfColumns(num_columns);
|
||||||
|
|
||||||
|
if (num_columns == 0)
|
||||||
|
return max_rows_to_read;
|
||||||
|
|
||||||
std::unordered_map<String, ISerialization::SubstreamsCache> caches;
|
std::unordered_map<String, ISerialization::SubstreamsCache> caches;
|
||||||
|
|
||||||
std::unordered_set<std::string> prefetched_streams;
|
std::unordered_set<std::string> prefetched_streams;
|
||||||
|
@ -31,8 +31,8 @@ try
|
|||||||
|
|
||||||
task = std::make_unique<MergeTreeReadTask>(
|
task = std::make_unique<MergeTreeReadTask>(
|
||||||
data_part, mark_ranges_for_task, part_index_in_query, ordered_names, column_name_set,
|
data_part, mark_ranges_for_task, part_index_in_query, ordered_names, column_name_set,
|
||||||
task_columns.columns, task_columns.pre_columns, prewhere_info && prewhere_info->remove_prewhere_column,
|
task_columns, prewhere_info && prewhere_info->remove_prewhere_column,
|
||||||
task_columns.should_reorder, std::move(size_predictor));
|
std::move(size_predictor));
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -66,10 +66,16 @@ void MergeTreeSelectProcessor::initializeReaders()
|
|||||||
reader = data_part->getReader(task_columns.columns, storage_snapshot->getMetadataForQuery(),
|
reader = data_part->getReader(task_columns.columns, storage_snapshot->getMetadataForQuery(),
|
||||||
all_mark_ranges, owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings, {}, {});
|
all_mark_ranges, owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings, {}, {});
|
||||||
|
|
||||||
if (prewhere_info)
|
pre_reader_for_step.clear();
|
||||||
pre_reader = data_part->getReader(task_columns.pre_columns, storage_snapshot->getMetadataForQuery(),
|
|
||||||
all_mark_ranges, owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings, {}, {});
|
|
||||||
|
|
||||||
|
if (prewhere_info)
|
||||||
|
{
|
||||||
|
for (const auto & pre_columns_for_step : task_columns.pre_columns)
|
||||||
|
{
|
||||||
|
pre_reader_for_step.push_back(data_part->getReader(pre_columns_for_step, storage_snapshot->getMetadataForQuery(),
|
||||||
|
all_mark_ranges, owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings, {}, {}));
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -80,7 +86,7 @@ void MergeTreeSelectProcessor::finish()
|
|||||||
* buffers don't waste memory.
|
* buffers don't waste memory.
|
||||||
*/
|
*/
|
||||||
reader.reset();
|
reader.reset();
|
||||||
pre_reader.reset();
|
pre_reader_for_step.clear();
|
||||||
data_part.reset();
|
data_part.reset();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -3,6 +3,10 @@
 #include <Storages/StorageMergeTree.h>
 #include <Interpreters/PartLog.h>

+namespace ProfileEvents
+{
+    extern const Event DuplicatedInsertedBlocks;
+}
+
 namespace DB
 {
|
|||||||
|
|
||||||
auto & part = partition.temp_part.part;
|
auto & part = partition.temp_part.part;
|
||||||
|
|
||||||
|
bool added = false;
|
||||||
|
|
||||||
|
{
|
||||||
|
auto lock = storage.lockParts();
|
||||||
|
storage.fillNewPartName(part, lock);
|
||||||
|
|
||||||
|
auto * deduplication_log = storage.getDeduplicationLog();
|
||||||
|
if (deduplication_log)
|
||||||
|
{
|
||||||
|
const String block_id = part->getZeroLevelPartBlockID(partition.block_dedup_token);
|
||||||
|
auto res = deduplication_log->addPart(block_id, part->info);
|
||||||
|
if (!res.second)
|
||||||
|
{
|
||||||
|
ProfileEvents::increment(ProfileEvents::DuplicatedInsertedBlocks);
|
||||||
|
LOG_INFO(storage.log, "Block with ID {} already exists as part {}; ignoring it", block_id, res.first.getPartName());
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
MergeTreeData::Transaction transaction(storage, context->getCurrentTransaction().get());
|
||||||
|
added = storage.renameTempPartAndAdd(part, transaction, lock);
|
||||||
|
transaction.commit(&lock);
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
MergeTreeData::Transaction transaction(storage, context->getCurrentTransaction().get());
|
||||||
|
added = storage.renameTempPartAndAdd(part, transaction, lock);
|
||||||
|
transaction.commit(&lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
/// Part can be deduplicated, so increment counters and add to part log only if it's really added
|
/// Part can be deduplicated, so increment counters and add to part log only if it's really added
|
||||||
if (storage.renameTempPartAndAdd(part, context->getCurrentTransaction().get(), &storage.increment, nullptr, storage.getDeduplicationLog(), partition.block_dedup_token))
|
if (added)
|
||||||
{
|
{
|
||||||
PartLog::addNewPart(storage.getContext(), part, partition.elapsed_ns);
|
PartLog::addNewPart(storage.getContext(), part, partition.elapsed_ns);
|
||||||
storage.incrementInsertedPartsProfileEvent(part->getType());
|
storage.incrementInsertedPartsProfileEvent(part->getType());
|
||||||
|
@ -111,14 +111,20 @@ void MergeTreeThreadSelectProcessor::finalizeNewTask()
|
|||||||
owned_uncompressed_cache = storage.getContext()->getUncompressedCache();
|
owned_uncompressed_cache = storage.getContext()->getUncompressedCache();
|
||||||
owned_mark_cache = storage.getContext()->getMarkCache();
|
owned_mark_cache = storage.getContext()->getMarkCache();
|
||||||
|
|
||||||
reader = task->data_part->getReader(task->columns, metadata_snapshot, task->mark_ranges,
|
reader = task->data_part->getReader(task->task_columns.columns, metadata_snapshot, task->mark_ranges,
|
||||||
owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings,
|
owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings,
|
||||||
IMergeTreeReader::ValueSizeMap{}, profile_callback);
|
IMergeTreeReader::ValueSizeMap{}, profile_callback);
|
||||||
|
|
||||||
|
pre_reader_for_step.clear();
|
||||||
if (prewhere_info)
|
if (prewhere_info)
|
||||||
pre_reader = task->data_part->getReader(task->pre_columns, metadata_snapshot, task->mark_ranges,
|
{
|
||||||
owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings,
|
for (const auto & pre_columns_per_step : task->task_columns.pre_columns)
|
||||||
IMergeTreeReader::ValueSizeMap{}, profile_callback);
|
{
|
||||||
|
pre_reader_for_step.push_back(task->data_part->getReader(pre_columns_per_step, metadata_snapshot, task->mark_ranges,
|
||||||
|
owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings,
|
||||||
|
IMergeTreeReader::ValueSizeMap{}, profile_callback));
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
@ -126,14 +132,20 @@ void MergeTreeThreadSelectProcessor::finalizeNewTask()
|
|||||||
if (part_name != last_readed_part_name)
|
if (part_name != last_readed_part_name)
|
||||||
{
|
{
|
||||||
/// retain avg_value_size_hints
|
/// retain avg_value_size_hints
|
||||||
reader = task->data_part->getReader(task->columns, metadata_snapshot, task->mark_ranges,
|
reader = task->data_part->getReader(task->task_columns.columns, metadata_snapshot, task->mark_ranges,
|
||||||
owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings,
|
owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings,
|
||||||
reader->getAvgValueSizeHints(), profile_callback);
|
reader->getAvgValueSizeHints(), profile_callback);
|
||||||
|
|
||||||
|
pre_reader_for_step.clear();
|
||||||
if (prewhere_info)
|
if (prewhere_info)
|
||||||
pre_reader = task->data_part->getReader(task->pre_columns, metadata_snapshot, task->mark_ranges,
|
{
|
||||||
owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings,
|
for (const auto & pre_columns_per_step : task->task_columns.pre_columns)
|
||||||
reader->getAvgValueSizeHints(), profile_callback);
|
{
|
||||||
|
pre_reader_for_step.push_back(task->data_part->getReader(pre_columns_per_step, metadata_snapshot, task->mark_ranges,
|
||||||
|
owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings,
|
||||||
|
reader->getAvgValueSizeHints(), profile_callback));
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -144,7 +156,7 @@ void MergeTreeThreadSelectProcessor::finalizeNewTask()
|
|||||||
void MergeTreeThreadSelectProcessor::finish()
|
void MergeTreeThreadSelectProcessor::finish()
|
||||||
{
|
{
|
||||||
reader.reset();
|
reader.reset();
|
||||||
pre_reader.reset();
|
pre_reader_for_step.clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -171,7 +171,7 @@ bool MutateFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrit
|
|||||||
{
|
{
|
||||||
new_part = mutate_task->getFuture().get();
|
new_part = mutate_task->getFuture().get();
|
||||||
|
|
||||||
storage.renameTempPartAndReplace(new_part, NO_TRANSACTION_RAW, nullptr, transaction_ptr.get());
|
storage.renameTempPartAndReplace(new_part, *transaction_ptr);
|
||||||
|
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
|
@ -83,8 +83,12 @@ bool MutatePlainMergeTreeTask::executeStep()
|
|||||||
|
|
||||||
new_part = mutate_task->getFuture().get();
|
new_part = mutate_task->getFuture().get();
|
||||||
|
|
||||||
|
|
||||||
|
MergeTreeData::Transaction transaction(storage, merge_mutate_entry->txn.get());
|
||||||
/// FIXME Transactions: it's too optimistic, better to lock parts before starting transaction
|
/// FIXME Transactions: it's too optimistic, better to lock parts before starting transaction
|
||||||
storage.renameTempPartAndReplace(new_part, merge_mutate_entry->txn.get());
|
storage.renameTempPartAndReplace(new_part, transaction);
|
||||||
|
transaction.commit();
|
||||||
|
|
||||||
storage.updateMutationEntriesErrors(future_part, true, "");
|
storage.updateMutationEntriesErrors(future_part, true, "");
|
||||||
write_part_log({});
|
write_part_log({});
|
||||||
|
|
||||||
|
@ -476,7 +476,8 @@ void ReplicatedMergeTreeSink::commitPart(
|
|||||||
|
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
renamed = storage.renameTempPartAndAdd(part, NO_TRANSACTION_RAW, nullptr, &transaction);
|
auto lock = storage.lockParts();
|
||||||
|
renamed = storage.renameTempPartAndAdd(part, transaction, lock);
|
||||||
}
|
}
|
||||||
catch (const Exception & e)
|
catch (const Exception & e)
|
||||||
{
|
{
|
||||||
|
@ -271,8 +271,24 @@ void MaterializedPostgreSQLConsumer::readTupleData(
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
std::exception_ptr error;
|
||||||
for (int column_idx = 0; column_idx < num_columns; ++column_idx)
|
for (int column_idx = 0; column_idx < num_columns; ++column_idx)
|
||||||
proccess_column_value(readInt8(message, pos, size), column_idx);
|
{
|
||||||
|
try
|
||||||
|
{
|
||||||
|
proccess_column_value(readInt8(message, pos, size), column_idx);
|
||||||
|
}
|
||||||
|
catch (...)
|
||||||
|
{
|
||||||
|
insertDefaultValue(buffer, column_idx);
|
||||||
|
/// Let's collect only the first exception.
|
||||||
|
/// This delaying of error throw is needed because
|
||||||
|
/// some errors can be ignored and just logged,
|
||||||
|
/// but in this case we need to finish insertion to all columns.
|
||||||
|
if (!error)
|
||||||
|
error = std::current_exception();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
switch (type)
|
switch (type)
|
||||||
{
|
{
|
||||||
@ -303,6 +319,9 @@ void MaterializedPostgreSQLConsumer::readTupleData(
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (error)
|
||||||
|
std::rethrow_exception(error);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -1540,7 +1540,14 @@ PartitionCommandsResultInfo StorageMergeTree::attachPartition(
|
|||||||
loaded_parts[i]->storeVersionMetadata();
|
loaded_parts[i]->storeVersionMetadata();
|
||||||
|
|
||||||
String old_name = renamed_parts.old_and_new_names[i].old_name;
|
String old_name = renamed_parts.old_and_new_names[i].old_name;
|
||||||
renameTempPartAndAdd(loaded_parts[i], local_context->getCurrentTransaction().get(), &increment);
|
{
|
||||||
|
auto lock = lockParts();
|
||||||
|
MergeTreeData::Transaction transaction(*this, local_context->getCurrentTransaction().get());
|
||||||
|
fillNewPartName(loaded_parts[i], lock);
|
||||||
|
renameTempPartAndAdd(loaded_parts[i], transaction, lock);
|
||||||
|
transaction.commit(&lock);
|
||||||
|
}
|
||||||
|
|
||||||
renamed_parts.old_and_new_names[i].old_name.clear();
|
renamed_parts.old_and_new_names[i].old_name.clear();
|
||||||
|
|
||||||
results.push_back(PartitionCommandResultInfo{
|
results.push_back(PartitionCommandResultInfo{
|
||||||
@ -1612,10 +1619,15 @@ void StorageMergeTree::replacePartitionFrom(const StoragePtr & source_table, con
|
|||||||
|
|
||||||
auto data_parts_lock = lockParts();
|
auto data_parts_lock = lockParts();
|
||||||
|
|
||||||
|
/** It is important that obtaining new block number and adding that block to parts set is done atomically.
|
||||||
|
* Otherwise there is race condition - merge of blocks could happen in interval that doesn't yet contain new part.
|
||||||
|
*/
|
||||||
|
for (auto part : dst_parts)
|
||||||
|
{
|
||||||
|
fillNewPartName(part, data_parts_lock);
|
||||||
|
renameTempPartAndReplaceUnlocked(part, transaction, data_parts_lock);
|
||||||
|
}
|
||||||
/// Populate transaction
|
/// Populate transaction
|
||||||
for (MutableDataPartPtr & part : dst_parts)
|
|
||||||
renameTempPartAndReplace(part, local_context->getCurrentTransaction().get(), &increment, &transaction, data_parts_lock);
|
|
||||||
|
|
||||||
transaction.commit(&data_parts_lock);
|
transaction.commit(&data_parts_lock);
|
||||||
|
|
||||||
/// If it is REPLACE (not ATTACH), remove all parts which max_block_number less then min_block_number of the first new block
|
/// If it is REPLACE (not ATTACH), remove all parts which max_block_number less then min_block_number of the first new block
|
||||||
@ -1688,14 +1700,15 @@ void StorageMergeTree::movePartitionToTable(const StoragePtr & dest_table, const
|
|||||||
auto src_data_parts_lock = lockParts();
|
auto src_data_parts_lock = lockParts();
|
||||||
auto dest_data_parts_lock = dest_table_storage->lockParts();
|
auto dest_data_parts_lock = dest_table_storage->lockParts();
|
||||||
|
|
||||||
std::mutex mutex;
|
for (auto & part : dst_parts)
|
||||||
DataPartsLock lock(mutex);
|
{
|
||||||
|
dest_table_storage->fillNewPartName(part, dest_data_parts_lock);
|
||||||
|
dest_table_storage->renameTempPartAndReplaceUnlocked(part, transaction, dest_data_parts_lock);
|
||||||
|
}
|
||||||
|
|
||||||
for (MutableDataPartPtr & part : dst_parts)
|
|
||||||
dest_table_storage->renameTempPartAndReplace(part, local_context->getCurrentTransaction().get(), &dest_table_storage->increment, &transaction, lock);
|
|
||||||
|
|
||||||
removePartsFromWorkingSet(local_context->getCurrentTransaction().get(), src_parts, true, lock);
|
removePartsFromWorkingSet(local_context->getCurrentTransaction().get(), src_parts, true, src_data_parts_lock);
|
||||||
transaction.commit(&lock);
|
transaction.commit(&src_data_parts_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
clearOldPartsFromFilesystem();
|
clearOldPartsFromFilesystem();
|
||||||
@ -1785,7 +1798,13 @@ CheckResults StorageMergeTree::checkData(const ASTPtr & query, ContextPtr local_
|
|||||||
void StorageMergeTree::attachRestoredParts(MutableDataPartsVector && parts)
|
void StorageMergeTree::attachRestoredParts(MutableDataPartsVector && parts)
|
||||||
{
|
{
|
||||||
for (auto part : parts)
|
for (auto part : parts)
|
||||||
renameTempPartAndAdd(part, NO_TRANSACTION_RAW, &increment);
|
{
|
||||||
|
auto lock = lockParts();
|
||||||
|
MergeTreeData::Transaction transaction(*this, NO_TRANSACTION_RAW);
|
||||||
|
fillNewPartName(part, lock);
|
||||||
|
renameTempPartAndAdd(part, transaction, lock);
|
||||||
|
transaction.commit(&lock);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -1810,4 +1829,11 @@ std::unique_ptr<MergeTreeSettings> StorageMergeTree::getDefaultSettings() const
|
|||||||
return std::make_unique<MergeTreeSettings>(getContext()->getMergeTreeSettings());
|
return std::make_unique<MergeTreeSettings>(getContext()->getMergeTreeSettings());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void StorageMergeTree::fillNewPartName(MutableDataPartPtr & part, DataPartsLock &)
|
||||||
|
{
|
||||||
|
part->info.min_block = part->info.max_block = increment.get();
|
||||||
|
part->info.mutation = 0;
|
||||||
|
part->name = part->getNewName(part->info);
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
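Note that fillNewPartName never touches its DataPartsLock parameter; the unnamed reference exists only to prove at compile time that the caller holds the parts lock while a block number is drawn from `increment`. The same lock-witness idiom in miniature (a sketch, with std::unique_lock standing in for DataPartsLock):

    #include <mutex>

    using PartsLock = std::unique_lock<std::mutex>;

    /// The unused parameter cannot be produced without actually locking,
    /// so block numbers are always allocated under the lock.
    int allocateBlockNumber(PartsLock &)
    {
        static int counter = 0;   /// stand-in for SimpleIncrement
        return ++counter;
    }

    /// Usage: auto lock = PartsLock(parts_mutex); int n = allocateBlockNumber(lock);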
@@ -251,6 +251,8 @@ private:
     /// return any ids.
     std::optional<MergeTreeMutationStatus> getIncompleteMutationsStatus(Int64 mutation_version, std::set<String> * mutation_ids = nullptr) const;

+    void fillNewPartName(MutableDataPartPtr & part, DataPartsLock & lock);
+
     void startBackgroundMovesIfNeeded() override;

     /// Attaches restored parts to the storage.
|
|||||||
Transaction transaction(*this, NO_TRANSACTION_RAW);
|
Transaction transaction(*this, NO_TRANSACTION_RAW);
|
||||||
|
|
||||||
part->version.setCreationTID(Tx::PrehistoricTID, nullptr);
|
part->version.setCreationTID(Tx::PrehistoricTID, nullptr);
|
||||||
renameTempPartAndReplace(part, NO_TRANSACTION_RAW, nullptr, &transaction);
|
renameTempPartAndReplace(part, transaction);
|
||||||
checkPartChecksumsAndCommit(transaction, part);
|
checkPartChecksumsAndCommit(transaction, part);
|
||||||
|
|
||||||
writePartLog(PartLogElement::Type::NEW_PART, {}, 0 /** log entry is fake so we don't measure the time */,
|
writePartLog(PartLogElement::Type::NEW_PART, {}, 0 /** log entry is fake so we don't measure the time */,
|
||||||
@ -2342,7 +2342,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry)
|
|||||||
Coordination::Requests ops;
|
Coordination::Requests ops;
|
||||||
for (PartDescriptionPtr & part_desc : final_parts)
|
for (PartDescriptionPtr & part_desc : final_parts)
|
||||||
{
|
{
|
||||||
renameTempPartAndReplace(part_desc->res_part, NO_TRANSACTION_RAW, nullptr, &transaction);
|
renameTempPartAndReplace(part_desc->res_part, transaction);
|
||||||
getCommitPartOps(ops, part_desc->res_part);
|
getCommitPartOps(ops, part_desc->res_part);
|
||||||
|
|
||||||
lockSharedData(*part_desc->res_part, false, part_desc->hardlinked_files);
|
lockSharedData(*part_desc->res_part, false, part_desc->hardlinked_files);
|
||||||
@ -4081,7 +4081,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora
|
|||||||
if (!to_detached)
|
if (!to_detached)
|
||||||
{
|
{
|
||||||
Transaction transaction(*this, NO_TRANSACTION_RAW);
|
Transaction transaction(*this, NO_TRANSACTION_RAW);
|
||||||
renameTempPartAndReplace(part, NO_TRANSACTION_RAW, nullptr, &transaction);
|
renameTempPartAndReplace(part, transaction);
|
||||||
|
|
||||||
replaced_parts = checkPartChecksumsAndCommit(transaction, part, hardlinked_files);
|
replaced_parts = checkPartChecksumsAndCommit(transaction, part, hardlinked_files);
|
||||||
|
|
||||||
@ -6602,9 +6602,8 @@ void StorageReplicatedMergeTree::replacePartitionFrom(
|
|||||||
Transaction transaction(*this, NO_TRANSACTION_RAW);
|
Transaction transaction(*this, NO_TRANSACTION_RAW);
|
||||||
{
|
{
|
||||||
auto data_parts_lock = lockParts();
|
auto data_parts_lock = lockParts();
|
||||||
|
for (auto & part : dst_parts)
|
||||||
for (MutableDataPartPtr & part : dst_parts)
|
renameTempPartAndReplaceUnlocked(part, transaction, data_parts_lock);
|
||||||
renameTempPartAndReplace(part, query_context->getCurrentTransaction().get(), nullptr, &transaction, data_parts_lock);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for (size_t i = 0; i < dst_parts.size(); ++i)
|
for (size_t i = 0; i < dst_parts.size(); ++i)
|
||||||
@ -6837,11 +6836,8 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta
|
|||||||
auto src_data_parts_lock = lockParts();
|
auto src_data_parts_lock = lockParts();
|
||||||
auto dest_data_parts_lock = dest_table_storage->lockParts();
|
auto dest_data_parts_lock = dest_table_storage->lockParts();
|
||||||
|
|
||||||
std::mutex mutex;
|
for (auto & part : dst_parts)
|
||||||
DataPartsLock lock(mutex);
|
dest_table_storage->renameTempPartAndReplaceUnlocked(part, transaction, dest_data_parts_lock);
|
||||||
|
|
||||||
for (MutableDataPartPtr & part : dst_parts)
|
|
||||||
dest_table_storage->renameTempPartAndReplace(part, query_context->getCurrentTransaction().get(), nullptr, &transaction, lock);
|
|
||||||
|
|
||||||
for (size_t i = 0; i < dst_parts.size(); ++i)
|
for (size_t i = 0; i < dst_parts.size(); ++i)
|
||||||
dest_table_storage->lockSharedData(*dst_parts[i], false, hardlinked_files_for_parts[i]);
|
dest_table_storage->lockSharedData(*dst_parts[i], false, hardlinked_files_for_parts[i]);
|
||||||
@ -6852,8 +6848,8 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta
|
|||||||
else
|
else
|
||||||
zkutil::KeeperMultiException::check(code, ops, op_results);
|
zkutil::KeeperMultiException::check(code, ops, op_results);
|
||||||
|
|
||||||
parts_to_remove = removePartsInRangeFromWorkingSetAndGetPartsToRemoveFromZooKeeper(NO_TRANSACTION_RAW, drop_range, lock);
|
parts_to_remove = removePartsInRangeFromWorkingSetAndGetPartsToRemoveFromZooKeeper(NO_TRANSACTION_RAW, drop_range, src_data_parts_lock);
|
||||||
transaction.commit(&lock);
|
transaction.commit(&src_data_parts_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
PartLog::addNewParts(getContext(), dst_parts, watch.elapsed());
|
PartLog::addNewParts(getContext(), dst_parts, watch.elapsed());
|
||||||
@@ -8020,7 +8016,7 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP
         try
         {
             MergeTreeData::Transaction transaction(*this, NO_TRANSACTION_RAW);
-            auto replaced_parts = renameTempPartAndReplace(new_data_part, NO_TRANSACTION_RAW, nullptr, &transaction);
+            auto replaced_parts = renameTempPartAndReplace(new_data_part, transaction);

             if (!replaced_parts.empty())
             {
@@ -38,7 +38,7 @@ void StorageSystemFilesystemCache::fillData(MutableColumns & res_columns, Contex

     for (const auto & [cache_base_path, cache_data] : caches)
     {
-        const auto & cache = cache_data.cache;
+        const auto & cache = cache_data->cache;
         auto file_segments = cache->getSnapshot();

         for (const auto & file_segment : file_segments)
@@ -55,6 +55,9 @@ endif()
 if (TARGET ch_contrib::base64)
     set(USE_BASE64 1)
 endif()
+if (TARGET ch_contrib::base-x)
+    set(USE_BASEX 1)
+endif()
 if (TARGET ch_contrib::yaml_cpp)
     set(USE_YAML_CPP 1)
 endif()
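USE_BASEX follows the same pattern as USE_BASE64 just above it: CMake defines the flag when the contrib target exists, and sources compile base58 support conditionally. A hedged, self-contained illustration of consuming such a flag (in the real build the macro arrives via the generated config header, not a hand-written default):

    #include <iostream>

    #ifndef USE_BASEX
    #    define USE_BASEX 0  // normally injected by CMake's set(USE_BASEX 1)
    #endif

    int main()
    {
    #if USE_BASEX
        std::cout << "built with base-x: base58Encode/base58Decode available\n";
    #else
        std::cout << "built without base-x\n";
    #endif
    }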
@@ -58,6 +58,22 @@ def test_disks_app_func_ls(started_cluster):

     assert files[0] == "store"

+    out = source.exec_in_container(
+        [
+            "/usr/bin/clickhouse",
+            "disks",
+            "--send-logs",
+            "--disk",
+            "test1",
+            "list",
+            ".",
+            "--recursive",
+        ]
+    )
+
+    assert ".:\nstore\n" in out
+    assert "\n./store:\n" in out
+

 def test_disks_app_func_cp(started_cluster):
     source = cluster.instances["disks_app_test"]
@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-# Tags: no-fasttest, long
+# Tags: no-fasttest, long, no-s3-storage
 # Tag no-fasttest: setting use_metadata_cache=true is not supported in fasttest, because clickhouse binary in fasttest is build without RocksDB.
 # To suppress Warning messages from CHECK TABLE
 CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=error
@@ -7,6 +7,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

+set -e
+
 table_engines=(ReplicatedMergeTree)
 database_engines=(Ordinary Atomic)
 use_metadata_caches=(false true)
@@ -3,6 +3,7 @@ SHOW TABLES [] TABLE SHOW
 SHOW COLUMNS [] COLUMN SHOW
 SHOW DICTIONARIES [] DICTIONARY SHOW
 SHOW [] \N ALL
+SHOW CACHES [] \N ALL
 SELECT [] COLUMN ALL
 INSERT [] COLUMN ALL
 ALTER UPDATE ['UPDATE'] COLUMN ALTER TABLE
@@ -277,7 +277,7 @@ CREATE TABLE system.grants
 (
     `user_name` Nullable(String),
     `role_name` Nullable(String),
-    `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'BACKUP' = 61, 'KILL QUERY' = 62, 'KILL TRANSACTION' = 63, 'MOVE PARTITION BETWEEN SHARDS' = 64, 'CREATE USER' = 65, 'ALTER USER' = 66, 'DROP USER' = 67, 'CREATE ROLE' = 68, 'ALTER ROLE' = 69, 'DROP ROLE' = 70, 'ROLE ADMIN' = 71, 'CREATE ROW POLICY' = 72, 'ALTER ROW POLICY' = 73, 'DROP ROW POLICY' = 74, 'CREATE QUOTA' = 75, 'ALTER QUOTA' = 76, 'DROP QUOTA' = 77, 'CREATE SETTINGS PROFILE' = 78, 'ALTER SETTINGS PROFILE' = 79, 'DROP SETTINGS PROFILE' = 80, 'SHOW USERS' = 81, 'SHOW ROLES' = 82, 'SHOW ROW POLICIES' = 83, 'SHOW QUOTAS' = 84, 'SHOW SETTINGS PROFILES' = 85, 'SHOW ACCESS' = 86, 'ACCESS MANAGEMENT' = 87, 'SYSTEM SHUTDOWN' = 88, 'SYSTEM DROP DNS CACHE' = 89, 'SYSTEM DROP MARK CACHE' = 90, 'SYSTEM DROP UNCOMPRESSED CACHE' = 91, 'SYSTEM DROP MMAP CACHE' = 92, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 93, 'SYSTEM DROP FILESYSTEM CACHE' = 94, 'SYSTEM DROP CACHE' = 95, 'SYSTEM RELOAD CONFIG' = 96, 'SYSTEM RELOAD SYMBOLS' = 97, 'SYSTEM RELOAD DICTIONARY' = 98, 'SYSTEM RELOAD MODEL' = 99, 'SYSTEM RELOAD FUNCTION' = 100, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 101, 'SYSTEM RELOAD' = 102, 'SYSTEM RESTART DISK' = 103, 'SYSTEM MERGES' = 104, 'SYSTEM TTL MERGES' = 105, 'SYSTEM FETCHES' = 106, 'SYSTEM MOVES' = 107, 'SYSTEM DISTRIBUTED SENDS' = 108, 'SYSTEM REPLICATED SENDS' = 109, 'SYSTEM SENDS' = 110, 'SYSTEM REPLICATION QUEUES' = 111, 'SYSTEM DROP REPLICA' = 112, 'SYSTEM SYNC REPLICA' = 113, 'SYSTEM RESTART REPLICA' = 114, 'SYSTEM RESTORE REPLICA' = 115, 'SYSTEM SYNC DATABASE REPLICA' = 116, 'SYSTEM SYNC TRANSACTION LOG' = 117, 'SYSTEM FLUSH DISTRIBUTED' = 118, 'SYSTEM FLUSH LOGS' = 119, 'SYSTEM FLUSH' = 120, 'SYSTEM THREAD FUZZER' = 121, 'SYSTEM UNFREEZE' = 122, 'SYSTEM' = 123, 'dictGet' = 124, 'addressToLine' = 125, 'addressToLineWithInlines' = 126, 'addressToSymbol' = 127, 'demangle' = 128, 'INTROSPECTION' = 129, 'FILE' = 130, 'URL' = 131, 'REMOTE' = 132, 'MONGO' = 133, 'MEILISEARCH' = 134, 'MYSQL' = 135, 'POSTGRES' = 136, 'SQLITE' = 137, 'ODBC' = 138, 'JDBC' = 139, 'HDFS' = 140, 'S3' = 141, 'HIVE' = 142, 'SOURCES' = 143, 'CLUSTER' = 144, 'ALL' = 145, 'NONE' = 146),
+    `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER TABLE' = 41, 'ALTER DATABASE' = 42, 'ALTER VIEW REFRESH' = 43, 'ALTER VIEW MODIFY QUERY' = 44, 'ALTER VIEW' = 45, 'ALTER' = 46, 'CREATE DATABASE' = 47, 'CREATE TABLE' = 48, 'CREATE VIEW' = 49, 'CREATE DICTIONARY' = 50, 'CREATE TEMPORARY TABLE' = 51, 'CREATE FUNCTION' = 52, 'CREATE' = 53, 'DROP DATABASE' = 54, 'DROP TABLE' = 55, 'DROP VIEW' = 56, 'DROP DICTIONARY' = 57, 'DROP FUNCTION' = 58, 'DROP' = 59, 'TRUNCATE' = 60, 'OPTIMIZE' = 61, 'BACKUP' = 62, 'KILL QUERY' = 63, 'KILL TRANSACTION' = 64, 'MOVE PARTITION BETWEEN SHARDS' = 65, 'CREATE USER' = 66, 'ALTER USER' = 67, 'DROP USER' = 68, 'CREATE ROLE' = 69, 'ALTER ROLE' = 70, 'DROP ROLE' = 71, 'ROLE ADMIN' = 72, 'CREATE ROW POLICY' = 73, 'ALTER ROW POLICY' = 74, 'DROP ROW POLICY' = 75, 'CREATE QUOTA' = 76, 'ALTER QUOTA' = 77, 'DROP QUOTA' = 78, 'CREATE SETTINGS PROFILE' = 79, 'ALTER SETTINGS PROFILE' = 80, 'DROP SETTINGS PROFILE' = 81, 'SHOW USERS' = 82, 'SHOW ROLES' = 83, 'SHOW ROW POLICIES' = 84, 'SHOW QUOTAS' = 85, 'SHOW SETTINGS PROFILES' = 86, 'SHOW ACCESS' = 87, 'ACCESS MANAGEMENT' = 88, 'SYSTEM SHUTDOWN' = 89, 'SYSTEM DROP DNS CACHE' = 90, 'SYSTEM DROP MARK CACHE' = 91, 'SYSTEM DROP UNCOMPRESSED CACHE' = 92, 'SYSTEM DROP MMAP CACHE' = 93, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 94, 'SYSTEM DROP FILESYSTEM CACHE' = 95, 'SYSTEM DROP CACHE' = 96, 'SYSTEM RELOAD CONFIG' = 97, 'SYSTEM RELOAD SYMBOLS' = 98, 'SYSTEM RELOAD DICTIONARY' = 99, 'SYSTEM RELOAD MODEL' = 100, 'SYSTEM RELOAD FUNCTION' = 101, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 102, 'SYSTEM RELOAD' = 103, 'SYSTEM RESTART DISK' = 104, 'SYSTEM MERGES' = 105, 'SYSTEM TTL MERGES' = 106, 'SYSTEM FETCHES' = 107, 'SYSTEM MOVES' = 108, 'SYSTEM DISTRIBUTED SENDS' = 109, 'SYSTEM REPLICATED SENDS' = 110, 'SYSTEM SENDS' = 111, 'SYSTEM REPLICATION QUEUES' = 112, 'SYSTEM DROP REPLICA' = 113, 'SYSTEM SYNC REPLICA' = 114, 'SYSTEM RESTART REPLICA' = 115, 'SYSTEM RESTORE REPLICA' = 116, 'SYSTEM SYNC DATABASE REPLICA' = 117, 'SYSTEM SYNC TRANSACTION LOG' = 118, 'SYSTEM FLUSH DISTRIBUTED' = 119, 'SYSTEM FLUSH LOGS' = 120, 'SYSTEM FLUSH' = 121, 'SYSTEM THREAD FUZZER' = 122, 'SYSTEM UNFREEZE' = 123, 'SYSTEM' = 124, 'dictGet' = 125, 'addressToLine' = 126, 'addressToLineWithInlines' = 127, 'addressToSymbol' = 128, 'demangle' = 129, 'INTROSPECTION' = 130, 'FILE' = 131, 'URL' = 132, 'REMOTE' = 133, 'MONGO' = 134, 'MEILISEARCH' = 135, 'MYSQL' = 136, 'POSTGRES' = 137, 'SQLITE' = 138, 'ODBC' = 139, 'JDBC' = 140, 'HDFS' = 141, 'S3' = 142, 'HIVE' = 143, 'SOURCES' = 144, 'CLUSTER' = 145, 'ALL' = 146, 'NONE' = 147),
     `database` Nullable(String),
     `table` Nullable(String),
     `column` Nullable(String),
|
|||||||
COMMENT 'SYSTEM TABLE is built on the fly.'
|
COMMENT 'SYSTEM TABLE is built on the fly.'
|
||||||
CREATE TABLE system.privileges
|
CREATE TABLE system.privileges
|
||||||
(
|
(
|
||||||
`privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'BACKUP' = 61, 'KILL QUERY' = 62, 'KILL TRANSACTION' = 63, 'MOVE PARTITION BETWEEN SHARDS' = 64, 'CREATE USER' = 65, 'ALTER USER' = 66, 'DROP USER' = 67, 'CREATE ROLE' = 68, 'ALTER ROLE' = 69, 'DROP ROLE' = 70, 'ROLE ADMIN' = 71, 'CREATE ROW POLICY' = 72, 'ALTER ROW POLICY' = 73, 'DROP ROW POLICY' = 74, 'CREATE QUOTA' = 75, 'ALTER QUOTA' = 76, 'DROP QUOTA' = 77, 'CREATE SETTINGS PROFILE' = 78, 'ALTER SETTINGS PROFILE' = 79, 'DROP SETTINGS PROFILE' = 80, 'SHOW USERS' = 81, 'SHOW ROLES' = 82, 'SHOW ROW POLICIES' = 83, 'SHOW QUOTAS' = 84, 'SHOW SETTINGS PROFILES' = 85, 'SHOW ACCESS' = 86, 'ACCESS MANAGEMENT' = 87, 'SYSTEM SHUTDOWN' = 88, 'SYSTEM DROP DNS CACHE' = 89, 'SYSTEM DROP MARK CACHE' = 90, 'SYSTEM DROP UNCOMPRESSED CACHE' = 91, 'SYSTEM DROP MMAP CACHE' = 92, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 93, 'SYSTEM DROP FILESYSTEM CACHE' = 94, 'SYSTEM DROP CACHE' = 95, 'SYSTEM RELOAD CONFIG' = 96, 'SYSTEM RELOAD SYMBOLS' = 97, 'SYSTEM RELOAD DICTIONARY' = 98, 'SYSTEM RELOAD MODEL' = 99, 'SYSTEM RELOAD FUNCTION' = 100, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 101, 'SYSTEM RELOAD' = 102, 'SYSTEM RESTART DISK' = 103, 'SYSTEM MERGES' = 104, 'SYSTEM TTL MERGES' = 105, 'SYSTEM FETCHES' = 106, 'SYSTEM MOVES' = 107, 'SYSTEM DISTRIBUTED SENDS' = 108, 'SYSTEM REPLICATED SENDS' = 109, 'SYSTEM SENDS' = 110, 'SYSTEM REPLICATION QUEUES' = 111, 'SYSTEM DROP REPLICA' = 112, 'SYSTEM SYNC REPLICA' = 113, 'SYSTEM RESTART REPLICA' = 114, 'SYSTEM RESTORE REPLICA' = 115, 'SYSTEM SYNC DATABASE REPLICA' = 116, 'SYSTEM SYNC TRANSACTION LOG' = 117, 'SYSTEM FLUSH DISTRIBUTED' = 118, 'SYSTEM FLUSH LOGS' = 119, 'SYSTEM FLUSH' = 120, 'SYSTEM THREAD FUZZER' = 121, 'SYSTEM UNFREEZE' = 122, 'SYSTEM' = 123, 'dictGet' = 124, 'addressToLine' = 125, 'addressToLineWithInlines' = 126, 'addressToSymbol' = 127, 'demangle' = 128, 'INTROSPECTION' = 129, 'FILE' = 130, 'URL' = 131, 'REMOTE' = 132, 'MONGO' = 133, 'MEILISEARCH' = 134, 'MYSQL' = 135, 'POSTGRES' = 136, 'SQLITE' = 137, 'ODBC' = 138, 'JDBC' = 139, 'HDFS' = 140, 'S3' = 141, 'HIVE' = 142, 'SOURCES' = 143, 'CLUSTER' = 144, 
'ALL' = 145, 'NONE' = 146),
|
`privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER TABLE' = 41, 'ALTER DATABASE' = 42, 'ALTER VIEW REFRESH' = 43, 'ALTER VIEW MODIFY QUERY' = 44, 'ALTER VIEW' = 45, 'ALTER' = 46, 'CREATE DATABASE' = 47, 'CREATE TABLE' = 48, 'CREATE VIEW' = 49, 'CREATE DICTIONARY' = 50, 'CREATE TEMPORARY TABLE' = 51, 'CREATE FUNCTION' = 52, 'CREATE' = 53, 'DROP DATABASE' = 54, 'DROP TABLE' = 55, 'DROP VIEW' = 56, 'DROP DICTIONARY' = 57, 'DROP FUNCTION' = 58, 'DROP' = 59, 'TRUNCATE' = 60, 'OPTIMIZE' = 61, 'BACKUP' = 62, 'KILL QUERY' = 63, 'KILL TRANSACTION' = 64, 'MOVE PARTITION BETWEEN SHARDS' = 65, 'CREATE USER' = 66, 'ALTER USER' = 67, 'DROP USER' = 68, 'CREATE ROLE' = 69, 'ALTER ROLE' = 70, 'DROP ROLE' = 71, 'ROLE ADMIN' = 72, 'CREATE ROW POLICY' = 73, 'ALTER ROW POLICY' = 74, 'DROP ROW POLICY' = 75, 'CREATE QUOTA' = 76, 'ALTER QUOTA' = 77, 'DROP QUOTA' = 78, 'CREATE SETTINGS PROFILE' = 79, 'ALTER SETTINGS PROFILE' = 80, 'DROP SETTINGS PROFILE' = 81, 'SHOW USERS' = 82, 'SHOW ROLES' = 83, 'SHOW ROW POLICIES' = 84, 'SHOW QUOTAS' = 85, 'SHOW SETTINGS PROFILES' = 86, 'SHOW ACCESS' = 87, 'ACCESS MANAGEMENT' = 88, 'SYSTEM SHUTDOWN' = 89, 'SYSTEM DROP DNS CACHE' = 90, 'SYSTEM DROP MARK CACHE' = 91, 'SYSTEM DROP UNCOMPRESSED CACHE' = 92, 'SYSTEM DROP MMAP CACHE' = 93, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 94, 'SYSTEM DROP FILESYSTEM CACHE' = 95, 'SYSTEM DROP CACHE' = 96, 'SYSTEM RELOAD CONFIG' = 97, 'SYSTEM RELOAD SYMBOLS' = 98, 'SYSTEM RELOAD DICTIONARY' = 99, 'SYSTEM RELOAD MODEL' = 100, 'SYSTEM RELOAD FUNCTION' = 101, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 102, 'SYSTEM RELOAD' = 103, 'SYSTEM RESTART DISK' = 104, 'SYSTEM MERGES' = 105, 'SYSTEM TTL MERGES' = 106, 'SYSTEM FETCHES' = 107, 'SYSTEM MOVES' = 108, 'SYSTEM DISTRIBUTED SENDS' = 109, 'SYSTEM REPLICATED SENDS' = 110, 'SYSTEM SENDS' = 111, 'SYSTEM REPLICATION QUEUES' = 112, 'SYSTEM DROP REPLICA' = 113, 'SYSTEM SYNC REPLICA' = 114, 'SYSTEM RESTART REPLICA' = 115, 'SYSTEM RESTORE REPLICA' = 116, 'SYSTEM SYNC DATABASE REPLICA' = 117, 'SYSTEM SYNC TRANSACTION LOG' = 118, 'SYSTEM FLUSH DISTRIBUTED' = 119, 'SYSTEM FLUSH LOGS' = 120, 'SYSTEM FLUSH' = 121, 'SYSTEM THREAD FUZZER' = 122, 'SYSTEM UNFREEZE' = 123, 'SYSTEM' = 124, 'dictGet' = 125, 'addressToLine' = 126, 'addressToLineWithInlines' = 127, 'addressToSymbol' = 128, 'demangle' = 129, 'INTROSPECTION' = 130, 'FILE' = 131, 'URL' = 132, 'REMOTE' = 133, 'MONGO' = 134, 'MEILISEARCH' = 135, 'MYSQL' = 136, 'POSTGRES' = 137, 'SQLITE' = 138, 'ODBC' = 139, 'JDBC' = 140, 'HDFS' = 141, 'S3' = 142, 'HIVE' = 143, 'SOURCES' = 
144, 'CLUSTER' = 145, 'ALL' = 146, 'NONE' = 147),
|
||||||
`aliases` Array(String),
|
`aliases` Array(String),
|
||||||
`level` Nullable(Enum8('GLOBAL' = 0, 'DATABASE' = 1, 'TABLE' = 2, 'DICTIONARY' = 3, 'VIEW' = 4, 'COLUMN' = 5)),
|
`level` Nullable(Enum8('GLOBAL' = 0, 'DATABASE' = 1, 'TABLE' = 2, 'DICTIONARY' = 3, 'VIEW' = 4, 'COLUMN' = 5)),
|
||||||
`parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'BACKUP' = 61, 'KILL QUERY' = 62, 'KILL TRANSACTION' = 63, 'MOVE PARTITION BETWEEN SHARDS' = 64, 'CREATE USER' = 65, 'ALTER USER' = 66, 'DROP USER' = 67, 'CREATE ROLE' = 68, 'ALTER ROLE' = 69, 'DROP ROLE' = 70, 'ROLE ADMIN' = 71, 'CREATE ROW POLICY' = 72, 'ALTER ROW POLICY' = 73, 'DROP ROW POLICY' = 74, 'CREATE QUOTA' = 75, 'ALTER QUOTA' = 76, 'DROP QUOTA' = 77, 'CREATE SETTINGS PROFILE' = 78, 'ALTER SETTINGS PROFILE' = 79, 'DROP SETTINGS PROFILE' = 80, 'SHOW USERS' = 81, 'SHOW ROLES' = 82, 'SHOW ROW POLICIES' = 83, 'SHOW QUOTAS' = 84, 'SHOW SETTINGS PROFILES' = 85, 'SHOW ACCESS' = 86, 'ACCESS MANAGEMENT' = 87, 'SYSTEM SHUTDOWN' = 88, 'SYSTEM DROP DNS CACHE' = 89, 'SYSTEM DROP MARK CACHE' = 90, 'SYSTEM DROP UNCOMPRESSED CACHE' = 91, 'SYSTEM DROP MMAP CACHE' = 92, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 93, 'SYSTEM DROP FILESYSTEM CACHE' = 94, 'SYSTEM DROP CACHE' = 95, 'SYSTEM RELOAD CONFIG' = 96, 'SYSTEM RELOAD SYMBOLS' = 97, 'SYSTEM RELOAD DICTIONARY' = 98, 'SYSTEM RELOAD MODEL' = 99, 'SYSTEM RELOAD FUNCTION' = 100, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 101, 'SYSTEM RELOAD' = 102, 'SYSTEM RESTART DISK' = 103, 'SYSTEM MERGES' = 104, 'SYSTEM TTL MERGES' = 105, 'SYSTEM FETCHES' = 106, 'SYSTEM MOVES' = 107, 'SYSTEM DISTRIBUTED SENDS' = 108, 'SYSTEM REPLICATED SENDS' = 109, 'SYSTEM SENDS' = 110, 'SYSTEM REPLICATION QUEUES' = 111, 'SYSTEM DROP REPLICA' = 112, 'SYSTEM SYNC REPLICA' = 113, 'SYSTEM RESTART REPLICA' = 114, 'SYSTEM RESTORE REPLICA' = 115, 'SYSTEM SYNC DATABASE REPLICA' = 116, 'SYSTEM SYNC TRANSACTION LOG' = 117, 'SYSTEM FLUSH DISTRIBUTED' = 118, 'SYSTEM FLUSH LOGS' = 119, 'SYSTEM FLUSH' = 120, 'SYSTEM THREAD FUZZER' = 121, 'SYSTEM UNFREEZE' = 122, 'SYSTEM' = 123, 'dictGet' = 124, 'addressToLine' = 125, 'addressToLineWithInlines' = 126, 'addressToSymbol' = 127, 'demangle' = 128, 'INTROSPECTION' = 129, 'FILE' = 130, 'URL' = 131, 'REMOTE' = 132, 'MONGO' = 133, 'MEILISEARCH' = 134, 'MYSQL' = 135, 'POSTGRES' = 136, 'SQLITE' = 137, 'ODBC' = 138, 'JDBC' = 139, 'HDFS' = 140, 'S3' = 141, 'HIVE' = 142, 'SOURCES' = 143, 
'CLUSTER' = 144, 'ALL' = 145, 'NONE' = 146))
|
`parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER TABLE' = 41, 'ALTER DATABASE' = 42, 'ALTER VIEW REFRESH' = 43, 'ALTER VIEW MODIFY QUERY' = 44, 'ALTER VIEW' = 45, 'ALTER' = 46, 'CREATE DATABASE' = 47, 'CREATE TABLE' = 48, 'CREATE VIEW' = 49, 'CREATE DICTIONARY' = 50, 'CREATE TEMPORARY TABLE' = 51, 'CREATE FUNCTION' = 52, 'CREATE' = 53, 'DROP DATABASE' = 54, 'DROP TABLE' = 55, 'DROP VIEW' = 56, 'DROP DICTIONARY' = 57, 'DROP FUNCTION' = 58, 'DROP' = 59, 'TRUNCATE' = 60, 'OPTIMIZE' = 61, 'BACKUP' = 62, 'KILL QUERY' = 63, 'KILL TRANSACTION' = 64, 'MOVE PARTITION BETWEEN SHARDS' = 65, 'CREATE USER' = 66, 'ALTER USER' = 67, 'DROP USER' = 68, 'CREATE ROLE' = 69, 'ALTER ROLE' = 70, 'DROP ROLE' = 71, 'ROLE ADMIN' = 72, 'CREATE ROW POLICY' = 73, 'ALTER ROW POLICY' = 74, 'DROP ROW POLICY' = 75, 'CREATE QUOTA' = 76, 'ALTER QUOTA' = 77, 'DROP QUOTA' = 78, 'CREATE SETTINGS PROFILE' = 79, 'ALTER SETTINGS PROFILE' = 80, 'DROP SETTINGS PROFILE' = 81, 'SHOW USERS' = 82, 'SHOW ROLES' = 83, 'SHOW ROW POLICIES' = 84, 'SHOW QUOTAS' = 85, 'SHOW SETTINGS PROFILES' = 86, 'SHOW ACCESS' = 87, 'ACCESS MANAGEMENT' = 88, 'SYSTEM SHUTDOWN' = 89, 'SYSTEM DROP DNS CACHE' = 90, 'SYSTEM DROP MARK CACHE' = 91, 'SYSTEM DROP UNCOMPRESSED CACHE' = 92, 'SYSTEM DROP MMAP CACHE' = 93, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 94, 'SYSTEM DROP FILESYSTEM CACHE' = 95, 'SYSTEM DROP CACHE' = 96, 'SYSTEM RELOAD CONFIG' = 97, 'SYSTEM RELOAD SYMBOLS' = 98, 'SYSTEM RELOAD DICTIONARY' = 99, 'SYSTEM RELOAD MODEL' = 100, 'SYSTEM RELOAD FUNCTION' = 101, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 102, 'SYSTEM RELOAD' = 103, 'SYSTEM RESTART DISK' = 104, 'SYSTEM MERGES' = 105, 'SYSTEM TTL MERGES' = 106, 'SYSTEM FETCHES' = 107, 'SYSTEM MOVES' = 108, 'SYSTEM DISTRIBUTED SENDS' = 109, 'SYSTEM REPLICATED SENDS' = 110, 'SYSTEM SENDS' = 111, 'SYSTEM REPLICATION QUEUES' = 112, 'SYSTEM DROP REPLICA' = 113, 'SYSTEM SYNC REPLICA' = 114, 'SYSTEM RESTART REPLICA' = 115, 'SYSTEM RESTORE REPLICA' = 116, 'SYSTEM SYNC DATABASE REPLICA' = 117, 'SYSTEM SYNC TRANSACTION LOG' = 118, 'SYSTEM FLUSH DISTRIBUTED' = 119, 'SYSTEM FLUSH LOGS' = 120, 'SYSTEM FLUSH' = 121, 'SYSTEM THREAD FUZZER' = 122, 'SYSTEM UNFREEZE' = 123, 'SYSTEM' = 124, 'dictGet' = 125, 'addressToLine' = 126, 'addressToLineWithInlines' = 127, 'addressToSymbol' = 128, 'demangle' = 129, 'INTROSPECTION' = 130, 'FILE' = 131, 'URL' = 132, 'REMOTE' = 133, 'MONGO' = 134, 'MEILISEARCH' = 135, 'MYSQL' = 136, 'POSTGRES' = 137, 'SQLITE' = 138, 'ODBC' = 139, 'JDBC' = 140, 'HDFS' = 141, 'S3' = 142, 'HIVE' = 143, 
'SOURCES' = 144, 'CLUSTER' = 145, 'ALL' = 146, 'NONE' = 147))
|
||||||
)
|
)
|
||||||
ENGINE = SystemPrivileges
|
ENGINE = SystemPrivileges
|
||||||
COMMENT 'SYSTEM TABLE is built on the fly.'
|
COMMENT 'SYSTEM TABLE is built on the fly.'
|
||||||
|
@@ -0,0 +1,11 @@
+1000000
+0
+0
+0
+400000
+195431
+195431
+5923
+200000
+200000
+6061
@@ -0,0 +1,36 @@
+DROP ROW POLICY IF EXISTS test_filter_policy ON test_table;
+DROP ROW POLICY IF EXISTS test_filter_policy_2 ON test_table;
+DROP TABLE IF EXISTS test_table;
+
+CREATE TABLE test_table (`n` UInt64, `s` String)
+ENGINE = MergeTree
+PRIMARY KEY n ORDER BY n;
+
+INSERT INTO test_table SELECT number, concat('some string ', CAST(number, 'String')) FROM numbers(1000000);
+
+-- Create row policy that doesn't use any column
+CREATE ROW POLICY test_filter_policy ON test_table USING False TO ALL;
+
+-- Run query under default user so that always false row_level_filter is added that doesn't require any columns
+SELECT count(1) FROM test_table;
+SELECT count(1) FROM test_table PREWHERE (n % 8192) < 4000;
+SELECT count(1) FROM test_table WHERE (n % 8192) < 4000;
+SELECT count(1) FROM test_table PREWHERE (n % 8192) < 4000 WHERE (n % 33) == 0;
+
+-- Add policy for default user that will read a column
+CREATE ROW POLICY test_filter_policy_2 ON test_table USING (n % 5) >= 3 TO default;
+
+-- Run query under default user that needs the same column as PREWHERE and WHERE
+SELECT count(1) FROM test_table;
+SELECT count(1) FROM test_table PREWHERE (n % 8192) < 4000;
+SELECT count(1) FROM test_table WHERE (n % 8192) < 4000;
+SELECT count(1) FROM test_table PREWHERE (n % 8192) < 4000 WHERE (n % 33) == 0;
+
+-- Run queries that have division by zero if row level filter isn't applied before prewhere
+SELECT count(1) FROM test_table PREWHERE 7 / (n % 5) > 2;
+SELECT count(1) FROM test_table WHERE 7 / (n % 5) > 2;
+SELECT count(1) FROM test_table PREWHERE 7 / (n % 5) > 2 WHERE (n % 33) == 0;
+
+DROP TABLE test_table;
+DROP ROW POLICY test_filter_policy ON test_table;
+DROP ROW POLICY test_filter_policy_2 ON test_table;
@@ -0,0 +1,20 @@
+1
+1
+0
+0
+0
+1
+1
+0
+0
+0
+0
+1
+1
+1
+1
+0
+0
+0
+0
+0
@@ -0,0 +1,3 @@
+select number >= 0 and if(number != 0, intDiv(1, number), 1) from numbers(5);
+select if(number >= 0, if(number != 0, intDiv(1, number), 1), 1) from numbers(5);
+select number >= 0 and if(number = 0, 0, if(number == 1, intDiv(1, number), if(number == 2, intDiv(1, number - 1), if(number == 3, intDiv(1, number - 2), intDiv(1, number - 3))))) from numbers(10);
47 tests/queries/0_stateless/02337_base58.reference Normal file
@@ -0,0 +1,47 @@
+32YCBjgZhV4AdCWHaCDNu
+
+f
+fo
+foo
+foob
+fooba
+foobar
+Hello world!
+
+f
+fo
+foo
+foob
+fooba
+foobar
+Hello world!
+
+f
+fo
+foo
+foob
+fooba
+foobar
+Hello world!
+
+f
+fo
+foo
+foob
+fooba
+foobar
+Hello world!
+
+2m
+8o8
+bQbp
+3csAg9
+CZJRhmz
+t1Zv2yaZ
+
+f
+fo
+foo
+foob
+fooba
+foobar
17 tests/queries/0_stateless/02337_base58.sql Normal file
@@ -0,0 +1,17 @@
+-- Tags: no-fasttest
+
+SET send_logs_level = 'fatal';
+
+SELECT base58Encode('Hold my beer...');
+SELECT base58Encode('Hold my beer...', ''); -- { serverError 44 }
+SELECT base58Encode('Hold my beer...', 'gmp', 'third'); -- { serverError 36 }
+
+SELECT base58Decode(encoded, 'gmp') FROM (SELECT base58Encode(val, 'gmp') as encoded FROM (select arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar', 'Hello world!']) val));
+SELECT base58Decode(encoded, 'ripple') FROM (SELECT base58Encode(val, 'ripple') as encoded FROM (select arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar', 'Hello world!']) val));
+SELECT base58Decode(encoded, 'flickr') FROM (SELECT base58Encode(val, 'flickr') as encoded FROM (select arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar', 'Hello world!']) val));
+SELECT base58Decode(encoded, 'bitcoin') FROM (SELECT base58Encode(val, 'bitcoin') as encoded FROM (select arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar', 'Hello world!']) val));
+
+SELECT base58Encode(val) FROM (select arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar']) val);
+SELECT base58Decode(val) FROM (select arrayJoin(['', '2m', '8o8', 'bQbp', '3csAg9', 'CZJRhmz', 't1Zv2yaZ']) val);
+
+SELECT base58Decode('Why_not?'); -- { serverError 36 }
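The default alphabet exercised by the last two queries is the classic bitcoin one, as the expected outputs in the reference file confirm ('f' encodes to 2m, 'fo' to 8o8). For reference, a minimal, independent base58 encoder over that alphabet — a sketch of the algorithm, not ClickHouse's implementation, which comes from the ClickHouse/base-x submodule:

    #include <iostream>
    #include <string>
    #include <vector>

    // Bitcoin base58 alphabet: no 0, O, I, l.
    static const char alphabet[] = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";

    // Treat the input bytes as one big-endian integer and repeatedly divide
    // by 58; each leading zero byte is emitted as '1'.
    std::string base58Encode(const std::string & input)
    {
        std::vector<unsigned char> digits;  // base-58 digits, least significant first
        for (unsigned char byte : input)
        {
            unsigned carry = byte;
            for (auto & digit : digits)
            {
                carry += static_cast<unsigned>(digit) * 256;
                digit = carry % 58;
                carry /= 58;
            }
            while (carry)
            {
                digits.push_back(carry % 58);
                carry /= 58;
            }
        }
        std::string result;
        for (char c : input)
        {
            if (c != '\0')
                break;
            result += '1';  // leading zero bytes
        }
        for (auto it = digits.rbegin(); it != digits.rend(); ++it)
            result += alphabet[*it];
        return result;
    }

    int main()
    {
        // Expected per the reference file above: "2m" and "8o8".
        std::cout << base58Encode("f") << '\n' << base58Encode("fo") << '\n';
    }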
1 tests/queries/0_stateless/02344_describe_cache.reference Normal file
@@ -0,0 +1 @@
+22548578304 1048576 104857600 1 0 0 0 ./s3_cache/
3 tests/queries/0_stateless/02344_describe_cache.sql Normal file
@@ -0,0 +1,3 @@
+-- Tags: no-fasttest
+
+DESCRIBE CACHE 's3_cache';