Merge branch 'master' into log_queries_probability

Nikolay Degterinsky 2021-08-26 16:32:49 +03:00
commit 75575505cf
125 changed files with 2205 additions and 602 deletions

View File

@ -127,12 +127,13 @@ if (USE_STATIC_LIBRARIES)
list(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)
endif ()
# Implies ${WITH_COVERAGE}
option (ENABLE_FUZZING "Fuzzy testing using libfuzzer" OFF)
if (ENABLE_FUZZING)
# Also set WITH_COVERAGE=1 for a better fuzzing process.
# By default this is disabled, because fuzzers are built in CI together with ClickHouse itself,
# and we don't want to enable coverage for it.
message (STATUS "Fuzzing instrumentation enabled")
set (WITH_COVERAGE ON)
set (FUZZER "libfuzzer")
endif()

View File

@ -13,6 +13,3 @@ ClickHouse® is an open-source column-oriented database management system that a
* [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
* [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
* You can also [fill this form](https://clickhouse.tech/#meet) to meet the Yandex ClickHouse team in person.
## Upcoming Events
* [SF Bay Area ClickHouse August Community Meetup (online)](https://www.meetup.com/San-Francisco-Bay-Area-ClickHouse-Meetup/events/279109379/) on 25 August 2021.

View File

@ -1,4 +1,5 @@
#include <sys/auxv.h>
#include "atomic.h"
#include <unistd.h> // __environ
#include <errno.h>
@ -17,18 +18,7 @@ static size_t __find_auxv(unsigned long type)
return (size_t) -1;
}
__attribute__((constructor)) static void __auxv_init()
{
size_t i;
for (i = 0; __environ[i]; i++);
__auxv = (unsigned long *) (__environ + i + 1);
size_t secure_idx = __find_auxv(AT_SECURE);
if (secure_idx != ((size_t) -1))
__auxv_secure = __auxv[secure_idx];
}
unsigned long getauxval(unsigned long type)
unsigned long __getauxval(unsigned long type)
{
if (type == AT_SECURE)
return __auxv_secure;
@ -43,3 +33,38 @@ unsigned long getauxval(unsigned long type)
errno = ENOENT;
return 0;
}
static void * volatile getauxval_func;
static unsigned long __auxv_init(unsigned long type)
{
if (!__environ)
{
// __environ is not initialized yet so we can't initialize __auxv right now.
// That normally occurs only when getauxval() is called from a sanitizer's internal code.
errno = ENOENT;
return 0;
}
// Initialize __auxv and __auxv_secure.
size_t i;
for (i = 0; __environ[i]; i++);
__auxv = (unsigned long *) (__environ + i + 1);
size_t secure_idx = __find_auxv(AT_SECURE);
if (secure_idx != ((size_t) -1))
__auxv_secure = __auxv[secure_idx];
// Now we've initialized __auxv; from now on, getauxval() will only call __getauxval().
a_cas_p(&getauxval_func, (void *)__auxv_init, (void *)__getauxval);
return __getauxval(type);
}
// First time getauxval() will call __auxv_init().
static void * volatile getauxval_func = (void *)__auxv_init;
unsigned long getauxval(unsigned long type)
{
return ((unsigned long (*)(unsigned long))getauxval_func)(type);
}
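For reference, the same lazy-initialization trampoline can be sketched in portable C++, with `std::atomic` standing in for musl's `a_cas_p` (all names below are hypothetical, not part of this patch):

```cpp
#include <atomic>

using Handler = unsigned long (*)(unsigned long);

static unsigned long slow_init(unsigned long type);
static unsigned long fast_lookup(unsigned long type) { return type; /* placeholder */ }

// Starts out pointing at the initializer, exactly like getauxval_func above.
static std::atomic<Handler> handler{slow_init};

static unsigned long slow_init(unsigned long type)
{
    // ... one-time setup would go here ...
    // Swap the pointer so that subsequent calls skip initialization.
    Handler expected = slow_init;
    handler.compare_exchange_strong(expected, fast_lookup);
    return fast_lookup(type);
}

unsigned long lookup(unsigned long type)
{
    return handler.load()(type);
}
```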

View File

@ -22,6 +22,7 @@ set (SRCS
"${LIBRARY_DIR}/src/transaction.cxx"
"${LIBRARY_DIR}/src/transaction_base.cxx"
"${LIBRARY_DIR}/src/row.cxx"
"${LIBRARY_DIR}/src/params.cxx"
"${LIBRARY_DIR}/src/util.cxx"
"${LIBRARY_DIR}/src/version.cxx"
)
@ -31,6 +32,7 @@ set (SRCS
# conflicts with all includes of <array>.
set (HDRS
"${LIBRARY_DIR}/include/pqxx/array.hxx"
"${LIBRARY_DIR}/include/pqxx/params.hxx"
"${LIBRARY_DIR}/include/pqxx/binarystring.hxx"
"${LIBRARY_DIR}/include/pqxx/composite.hxx"
"${LIBRARY_DIR}/include/pqxx/connection.hxx"
@ -75,4 +77,3 @@ set(CM_CONFIG_PQ "${LIBRARY_DIR}/include/pqxx/config-internal-libpq.h")
configure_file("${CM_CONFIG_H_IN}" "${CM_CONFIG_INT}" @ONLY)
configure_file("${CM_CONFIG_H_IN}" "${CM_CONFIG_PUB}" @ONLY)
configure_file("${CM_CONFIG_H_IN}" "${CM_CONFIG_PQ}" @ONLY)

View File

@ -83,6 +83,16 @@ then
mv "$COMBINED_OUTPUT.tgz" /output
fi
# Also build fuzzers if any sanitizer specified
if [ -n "$SANITIZER" ]
then
# Currently we are in the build/build_docker directory
../docker/packager/other/fuzzer.sh
fi
ccache --show-config ||:
ccache --show-stats ||:
if [ "${CCACHE_DEBUG:-}" == "1" ]
then
find . -name '*.ccache-*' -print0 \
@ -95,4 +105,3 @@ then
# files in place, and will fail because this directory is not writable.
tar -cv -I pixz -f /output/ccache.log.txz "$CCACHE_LOGFILE"
fi

View File

@ -23,12 +23,24 @@ then
echo "Place $BINARY_OUTPUT to output"
mkdir /output/binary ||: # may already exist
mv /build/obj-*/programs/clickhouse* /output/binary
if [ "$BINARY_OUTPUT" = "tests" ]
then
mv /build/obj-*/src/unit_tests_dbms /output/binary
fi
fi
# Also build fuzzers if any sanitizer specified
if [ -n "$SANITIZER" ]
then
# The script assumes that we are in the build directory.
mkdir -p build/build_docker
cd build/build_docker
# Launching build script
../docker/packager/other/fuzzer.sh
cd
fi
ccache --show-config ||:
ccache --show-stats ||:

docker/packager/other/fuzzer.sh Executable file
View File

@ -0,0 +1,35 @@
#!/usr/bin/env bash
# This script is responsible for building all fuzzers and copying them to the output
# directory as an archive.
# The script assumes that we are in the build directory.
set -x -e
printenv
# Delete the previous cache, because we add new flags: -DENABLE_FUZZING=1 and -DFUZZER=libfuzzer
rm -f CMakeCache.txt
read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}"
# Hopefully most of the files are already in the cache, so we only need to link the new executables
cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_C_COMPILER="$CC" -DCMAKE_CXX_COMPILER="$CXX" -DENABLE_CLICKHOUSE_ODBC_BRIDGE=OFF \
-DENABLE_LIBRARIES=0 -DENABLE_SSL=1 -DUSE_INTERNAL_SSL_LIBRARY=1 -DUSE_UNWIND=ON -DENABLE_EMBEDDED_COMPILER=0 \
-DENABLE_EXAMPLES=0 -DENABLE_UTILS=0 -DENABLE_THINLTO=0 "-DSANITIZE=$SANITIZER" \
-DENABLE_FUZZING=1 -DFUZZER='libfuzzer' -DENABLE_TCMALLOC=0 -DENABLE_JEMALLOC=0 \
-DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" ..
FUZZER_TARGETS=$(find ../src -name '*_fuzzer.cpp' -execdir basename {} .cpp ';' | tr '\n' ' ')
mkdir -p /output/fuzzers
for FUZZER_TARGET in $FUZZER_TARGETS
do
# shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty.
ninja $NINJA_FLAGS $FUZZER_TARGET
# Find this binary in the build directory and strip it
FUZZER_PATH=$(find ./src -name "$FUZZER_TARGET")
strip --strip-unneeded "$FUZZER_PATH"
mv "$FUZZER_PATH" /output/fuzzers
done
tar -zcvf /output/fuzzers.tar.gz /output/fuzzers
rm -rf /output/fuzzers
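Each `*_fuzzer.cpp` file discovered above is a libFuzzer target. A minimal harness of the shape this build expects looks like the sketch below (the concrete harnesses added later in this commit follow the same pattern):

```cpp
#include <cstdint>
#include <cstddef>

// libFuzzer repeatedly calls this entry point with mutated inputs;
// returning 0 means the input was processed normally.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
{
    if (size == 0)
        return 0;
    // Feed `data`/`size` into the code under test here.
    return 0;
}
```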

View File

@ -105,6 +105,9 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
if image_type == "deb" or image_type == "unbundled":
result.append("DEB_CC={}".format(cc))
result.append("DEB_CXX={}".format(cxx))
# For building fuzzers
result.append("CC={}".format(cc))
result.append("CXX={}".format(cxx))
elif image_type == "binary":
result.append("CC={}".format(cc))
result.append("CXX={}".format(cxx))

View File

@ -16,6 +16,8 @@ RUN apt-get update \
p7zip-full \
parallel \
psmisc \
python3 \
python3-pip \
rsync \
tree \
tzdata \
@ -25,6 +27,8 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
RUN pip3 install Jinja2
COPY * /
SHELL ["/bin/bash", "-c"]

View File

@ -0,0 +1,62 @@
#!/usr/bin/env python3
from argparse import ArgumentParser
import os
import jinja2
def removesuffix(text, suffix):
"""
Added in Python 3.9
https://www.python.org/dev/peps/pep-0616/
"""
if suffix and text.endswith(suffix):
return text[:-len(suffix)]
else:
return text[:]
def render_test_template(j2env, suite_dir, test_name):
"""
Render template for test and reference file if needed
"""
test_base_name = removesuffix(test_name, ".sql.j2")
reference_file_name = test_base_name + ".reference.j2"
reference_file_path = os.path.join(suite_dir, reference_file_name)
if os.path.isfile(reference_file_path):
tpl = j2env.get_template(reference_file_name)
tpl.stream().dump(os.path.join(suite_dir, test_base_name) + ".gen.reference")
if test_name.endswith(".sql.j2"):
tpl = j2env.get_template(test_name)
generated_test_name = test_base_name + ".gen.sql"
tpl.stream().dump(os.path.join(suite_dir, generated_test_name))
return generated_test_name
return test_name
def main(args):
suite_dir = args.path
print(f"Scanning {suite_dir} directory...")
j2env = jinja2.Environment(
loader=jinja2.FileSystemLoader(suite_dir),
keep_trailing_newline=True,
)
test_names = os.listdir(suite_dir)
for test_name in test_names:
if not test_name.endswith(".sql.j2"):
continue
new_name = render_test_template(j2env, suite_dir, test_name)
print(f"File {new_name} generated")
if __name__ == "__main__":
parser = ArgumentParser(description="Jinja2 test generator")
parser.add_argument("-p", "--path", help="Path to test dir", required=True)
main(parser.parse_args())

View File

@ -71,12 +71,12 @@ function watchdog
kill -9 -- $fuzzer_pid ||:
}
function filter_exists
function filter_exists_and_template
{
local path
for path in "$@"; do
if [ -e "$path" ]; then
echo "$path"
echo "$path" | sed -n 's/\.sql\.j2$/.gen.sql/'
else
echo "'$path' does not exists" >&2
fi
@ -85,11 +85,13 @@ function filter_exists
function fuzz
{
/generate-test-j2.py --path ch/tests/queries/0_stateless
# Obtain the list of newly added tests. They will be fuzzed in a more extreme way than other tests.
# Don't overwrite the NEW_TESTS_OPT so that it can be set from the environment.
NEW_TESTS="$(sed -n 's!\(^tests/queries/0_stateless/.*\.sql\)$!ch/\1!p' ci-changed-files.txt | sort -R)"
NEW_TESTS="$(sed -n 's!\(^tests/queries/0_stateless/.*\.sql\(\.j2\)\?\)$!ch/\1!p' ci-changed-files.txt | sort -R)"
# ci-changed-files.txt also contains files that have been deleted/renamed; filter them out.
NEW_TESTS="$(filter_exists $NEW_TESTS)"
NEW_TESTS="$(filter_exists_and_template $NEW_TESTS)"
if [[ -n "$NEW_TESTS" ]]
then
NEW_TESTS_OPT="${NEW_TESTS_OPT:---interleave-queries-file ${NEW_TESTS}}"

View File

@ -34,6 +34,7 @@ The table structure can differ from the original PostgreSQL table structure:
- `user` — PostgreSQL user.
- `password` — User password.
- `schema` — Non-default table schema. Optional.
- `on conflict ...` — example: `ON CONFLICT DO NOTHING`. Optional. Note: adding this option will make insertion less efficient.
## Implementation Details {#implementation-details}

View File

@ -390,20 +390,27 @@ Functions with a constant argument that is less than ngram size can't be used
- `s != 1`
- `NOT startsWith(s, 'test')`
### Projections {#projections}
Projections are like materialized views but defined in part-level. It provides consistency guarantees along with automatic usage in queries.
## Projections {#projections}
Projections are like [materialized views](../../../sql-reference/statements/create/view.md#materialized) but defined at the part level. They provide consistency guarantees along with automatic usage in queries.
#### Query {#projection-query}
A projection query is what defines a projection. It has the following grammar:
Projections are an experimental feature. To enable them, set the [allow_experimental_projection_optimization](../../../operations/settings/settings.md#allow-experimental-projection-optimization) setting to `1`. See also the [force_optimize_projection](../../../operations/settings/settings.md#force-optimize-projection) setting.
`SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY]`
Projections are not supported in `SELECT` statements with the [FINAL](../../../sql-reference/statements/select/from.md#select-from-final) modifier.
It implicitly selects data from the parent table.
### Projection Query {#projection-query}
A projection query is what defines a projection. It implicitly selects data from the parent table.
**Syntax**
#### Storage {#projection-storage}
Projections are stored inside the part directory. It's similar to an index but contains a subdirectory that stores an anonymous MergeTree table's part. The table is induced by the definition query of the projection. If there is a GROUP BY clause, the underlying storage engine becomes AggregatedMergeTree, and all aggregate functions are converted to AggregateFunction. If there is an ORDER BY clause, the MergeTree table will use it as its primary key expression. During the merge process, the projection part will be merged via its storage's merge routine. The checksum of the parent table's part will combine the projection's part. Other maintenance jobs are similar to skip indices.
```sql
SELECT <column list expr> [GROUP BY] <group keys expr> [ORDER BY] <expr>
```
#### Query Analysis {#projection-query-analysis}
Projections can be modified or dropped with the [ALTER](../../../sql-reference/statements/alter/projection.md) statement.
### Projection Storage {#projection-storage}
Projections are stored inside the part directory. It's similar to an index but contains a subdirectory that stores an anonymous `MergeTree` table's part. The table is induced by the definition query of the projection. If there is a `GROUP BY` clause, the underlying storage engine becomes [AggregatingMergeTree](aggregatingmergetree.md), and all aggregate functions are converted to `AggregateFunction`. If there is an `ORDER BY` clause, the `MergeTree` table uses it as its primary key expression. During the merge process the projection part is merged via its storage's merge routine. The checksum of the parent table's part is combined with the projection's part. Other maintenance jobs are similar to skip indices.
### Query Analysis {#projection-query-analysis}
1. Check if the projection can be used to answer the given query, that is, it generates the same answer as querying the base table.
2. Select the best feasible match, which contains the least granules to read.
3. The query pipeline that uses projections differs from the one that uses the original parts. If the projection is absent in some parts, the pipeline can be extended to "project" it on the fly.

View File

@ -141,7 +141,7 @@ Since version 20.5, `clickhouse-client` has automatic syntax highlighting (alway
Example of a config file:
``` xml
```xml
<config>
<user>username</user>
<password>password</password>
@ -149,4 +149,30 @@ Example of a config file:
</config>
```
[Original article](https://clickhouse.tech/docs/en/interfaces/cli/) <!--hide-->
### Query ID Format {#query-id-format}
In interactive mode `clickhouse-client` shows the query ID for every query. By default, the ID is formatted like this:
```sql
Query id: 927f137d-00f1-4175-8914-0dd066365e96
```
A custom format may be specified in a configuration file inside a `query_id_formats` tag. The `{query_id}` placeholder in the format string is replaced with the query ID. Several format strings are allowed inside the tag.
This feature can be used to generate URLs to facilitate profiling of queries.
**Example**
```xml
<config>
<query_id_formats>
<speedscope>http://speedscope-host/#profileURL=qp%3Fid%3D{query_id}</speedscope>
</query_id_formats>
</config>
```
If the configuration above is applied, the ID of a query is shown in the following format:
``` text
speedscope:http://speedscope-host/#profileURL=qp%3Fid%3Dc8ecc783-e753-4b38-97f1-42cddfb98b7d
```

View File

@ -3435,3 +3435,25 @@ Possible values:
- 1 — The table is automatically updated in the background, when schema changes are detected.
Default value: `0`.
## allow_experimental_projection_optimization {#allow-experimental-projection-optimization}
Enables or disables [projection](../../engines/table-engines/mergetree-family/mergetree.md#projections) optimization when processing `SELECT` queries.
Possible values:
- 0 — Projection optimization disabled.
- 1 — Projection optimization enabled.
Default value: `0`.
## force_optimize_projection {#force-optimize-projection}
Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries, when projection optimization is enabled (see [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting).
Possible values:
- 0 — Projection optimization is not obligatory.
- 1 — Projection optimization is obligatory.
Default value: `0`.

View File

@ -2236,3 +2236,74 @@ defaultRoles()
Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).
## queryID {#query-id}
Returns the ID of the current query. Other parameters of a query can be extracted from the [system.query_log](../../operations/system-tables/query_log.md) table via `query_id`.
In contrast to the [initialQueryID](#initial-query-id) function, `queryID` can return different results on different shards (see the example).
**Syntax**
``` sql
queryID()
```
**Returned value**
- The ID of the current query.
Type: [String](../../sql-reference/data-types/string.md)
**Example**
Query:
``` sql
CREATE TABLE tmp (str String) ENGINE = Log;
INSERT INTO tmp (*) VALUES ('a');
SELECT count(DISTINCT t) FROM (SELECT queryID() AS t FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID());
```
Result:
``` text
┌─count()─┐
│ 3 │
└─────────┘
```
## initialQueryID {#initial-query-id}
Returns the ID of the initial query. Other parameters of a query can be extracted from the [system.query_log](../../operations/system-tables/query_log.md) table via `initial_query_id`.
In contrast to the [queryID](#query-id) function, `initialQueryID` returns the same result on different shards (see the example).
**Syntax**
``` sql
initialQueryID()
```
**Returned value**
- The ID of the initial query.
Type: [String](../../sql-reference/data-types/string.md)
**Example**
Query:
``` sql
CREATE TABLE tmp (str String) ENGINE = Log;
INSERT INTO tmp (*) VALUES ('a');
SELECT count(DISTINCT t) FROM (SELECT initialQueryID() AS t FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID());
```
Result:
``` text
┌─count()─┐
│ 1 │
└─────────┘
```

View File

@ -5,7 +5,7 @@ toc_title: PROJECTION
# Manipulating Projections {#manipulations-with-projections}
The following operations are available:
The following operations with [projections](../../../engines/table-engines/mergetree-family/mergetree.md#projections) are available:
- `ALTER TABLE [db].name ADD PROJECTION name AS SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY]` - Adds a projection description to the table's metadata.
@ -15,7 +15,7 @@ The following operations are available:
- `ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` - Deletes projection files from disk without removing description.
The commands ADD, DROP and CLEAR are lightweight in a sense that they only change metadata or remove files.
The commands `ADD`, `DROP` and `CLEAR` are lightweight in the sense that they only change metadata or remove files.
They are also replicated, syncing projection metadata via ZooKeeper.

View File

@ -254,6 +254,7 @@ CREATE TABLE codec_example
ENGINE = MergeTree()
```
<!--
### Encryption Codecs {#create-query-encryption-codecs}
These codecs don't actually compress data, but instead encrypt data on disk. These are only available when an encryption key is specified by [encryption](../../../operations/server-configuration-parameters/settings.md#server-settings-encryption) settings. Note that encryption only makes sense at the end of codec pipelines, because encrypted data usually can't be compressed in any meaningful way.
@ -267,7 +268,7 @@ Encryption codecs:
!!! attention "Attention"
If you perform a SELECT query mentioning a specific value in an encrypted column (such as in its WHERE clause), the value may appear in [system.query_log](../../../operations/system-tables/query_log.md). You may want to disable the logging.
-->
## Temporary Tables {#temporary-tables}
ClickHouse supports temporary tables which have the following characteristics:

View File

@ -377,23 +377,33 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT
- `s != 1`
- `NOT startsWith(s, 'test')`
### Projections {#projections}
Projections are similar to materialized views, but are defined at the part level. This provides consistency guarantees along with automatic use in queries.
## Projections {#projections}
Projections are similar to [materialized views](../../../sql-reference/statements/create/view.md#materialized), but are defined at the level of data parts. This provides data consistency guarantees along with automatic use in queries.
#### Query {#projection-query}
A projection query is what defines a projection. It has the following grammar:
Projections are an experimental feature. To enable support for projections, set the [allow_experimental_projection_optimization](../../../operations/settings/settings.md#allow-experimental-projection-optimization) setting to `1`. See also the [force_optimize_projection](../../../operations/settings/settings.md#force-optimize-projection) setting.
`SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY]`
Projections are not supported in `SELECT` queries with the [FINAL](../../../sql-reference/statements/select/from.md#select-from-final) modifier.
It implicitly selects data from the parent table.
### Projection Query {#projection-query}
A projection query is what defines a projection. Such a query implicitly selects data from the parent table.
**Syntax**
#### Storage {#projection-storage}
Projections are stored in the part directory. This is similar to storing indexes, but a subdirectory is used that stores an anonymous MergeTree table part. The table is induced by the projection's definition query. If there is a GROUP BY clause, the underlying storage engine becomes AggregatedMergeTree, and all aggregate functions are converted to AggregateFunction. If there is an ORDER BY clause, the MergeTree table uses it as the primary key expression. During the merge process, the projection part is merged via the merge routine of its storage. The checksum of the parent table's part includes the projection part. Other procedures are similar to data skipping indexes.
```sql
SELECT <column list expr> [GROUP BY] <group keys expr> [ORDER BY] <expr>
```
#### Query Analysis {#projection-query-analysis}
1. Check whether the projection can be used to answer the given query, that is, it yields the same result as querying the base table.
2. Select the best feasible match, which contains the fewest granules to read.
3. The query plan that uses projections differs from the one that uses the original parts. If the projection is absent in some parts, the plan can be extended to "project" it on the fly.
Projections can be modified or dropped with the [ALTER](../../../sql-reference/statements/alter/projection.md) statement.
### Projection Storage {#projection-storage}
Projections are stored in the part directory. This is similar to storing indexes, but a subdirectory is used that stores an anonymous `MergeTree` table part. The table is induced by the projection's definition query.
If there is a `GROUP BY` clause, the [AggregatingMergeTree](aggregatingmergetree.md) engine is used, and all aggregate functions are converted to `AggregateFunction`.
If there is an `ORDER BY` clause, the `MergeTree` table uses it as the expression for the primary key.
During the merge process, the projection data part is merged via the merge routine of its storage. The checksum of the parent table's data part includes the projection data part. Other procedures are similar to data skipping indexes.
### Query Analysis {#projection-query-analysis}
1. Check whether the projection can be used to answer the given query, that is, it yields the same result as querying the base table.
2. Select the best feasible match, which contains the fewest granules to read.
3. The query plan that uses projections differs from the one that uses the original data parts. If the projection is absent in some parts, the plan can be extended to "project" it on the fly.
## Concurrent Data Access {#concurrent-data-access}

View File

@ -26,7 +26,7 @@ Connected to ClickHouse server version 20.13.1 revision 54442.
The client can be used in interactive and non-interactive (batch) mode.
To use batch mode, specify the `query` parameter, or send data to stdin (it verifies that stdin is not a terminal), or both.
Similar to the HTTP interface, when using the `query` parameter and sending data to stdin at the same time, the request is the concatenation of the `query` parameter, a line feed, and the data in stdin. This is convenient for large INSERT queries.
Similar to the HTTP interface, when using the `query` parameter and sending data to stdin at the same time, the request is the concatenation of the `query` parameter, a line feed, and the data in stdin. This is convenient for large `INSERT` queries.
Examples of using the client to insert data:
@ -41,17 +41,17 @@ _EOF
$ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
```
In batch mode, the default data format is TabSeparated. The format can be specified in the FORMAT clause of the query.
In batch mode, the default data format is `TabSeparated`. The format can be specified in the query, in the `FORMAT` clause.
By default, you can only run a single query in batch mode. To run multiple queries from a "script", use the multiquery parameter. This works for all queries except INSERT. Query results are printed consecutively without additional separators.
Also, if you need to run many queries, you can launch clickhouse-client for each query. Note that starting the clickhouse-client program may take tens of milliseconds.
By default, you can only run a single query in batch mode. To run multiple queries from a "script", use the `-multiquery` parameter. This works for all queries except `INSERT`. Query results are printed consecutively without additional separators.
If you need to run many queries, you can launch clickhouse-client separately for each query. Note that starting the clickhouse-client program may take tens of milliseconds.
In interactive mode, you get a command line where you can enter queries.
In interactive mode you get a command line where you can enter queries.
If multiline mode is not specified (the default):
To run a query, press Enter. The semicolon at the end of the query is optional. To enter a query that spans multiple lines, type a backslash `\` before the line feed; after you press Enter, you will be asked to enter the next line of the query.
To run a query, press Enter. The semicolon at the end of the query is optional. To enter a query that spans multiple lines, put a backslash `\` at the end of the line; after you press Enter, you will be able to enter the next line of the query.
If multiline mode is specified:
If the `--multiline` parameter is specified (multiline mode):
To run a query, end it with a semicolon and press Enter. If the semicolon was omitted at the end of the entered line, you will be asked to enter the next line of the query.
Only a single query is executed, so everything entered after the semicolon is ignored.
@ -61,20 +61,20 @@ $ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FOR
The command line is based on readline (and history) (or libedit, or even no library, depending on the build), which means it supports the familiar keyboard shortcuts and keeps a history.
The history is written to `~/.clickhouse-client-history`.
By default, the PrettyCompact format is used (pretty tables). You can change the format via the FORMAT clause of the query, by specifying `\G` at the end of the query, with the `--format` or `--vertical` command-line arguments, or via the client configuration file.
By default, the `PrettyCompact` output format is used (it produces pretty tables). You can change the output format of query results in the following ways: with the `FORMAT` clause in the query, by specifying `\G` at the end of the query, with the `--format` or `--vertical` command-line arguments, or via the client configuration file.
To exit the client, press Ctrl+D, or enter one of the following instead of a query: «exit», «quit», «logout», «учше», «йгше», «дщпщге», «exit;», «quit;», «logout;», «учшеж», «йгшеж», «дщпщгеж», «q», «й», «q», «Q», «:q», «й», «Й», «Жй»
To exit the client, press Ctrl+D or enter one of the following instead of a query: «exit», «quit», «logout», «учше», «йгше», «дщпщге», «exit;», «quit;», «logout;», «учшеж», «йгшеж», «дщпщгеж», «q», «й», «q», «Q», «:q», «й», «Й», «Жй».
When running a query, the client shows:
When running a query, the client shows:
1. Progress of query execution, which is updated no more than 10 times per second (by default). For fast queries, the progress may not have time to be displayed.
1. Progress of query execution, which is updated no more than 10 times per second (by default). For fast queries the progress may not have time to be displayed.
2. The formatted query after parsing, for debugging.
3. The result in the specified format.
4. The number of rows in the result, the elapsed time, and the average query processing speed.
You can cancel a long query by pressing Ctrl+C. You will still have to wait a little while the server aborts the request. It is not possible to cancel a query at certain stages of execution. If you do not wait and press Ctrl+C a second time, the client will exit.
You can cancel a long query by pressing Ctrl+C. You will still have to wait a little while the server aborts the request. It is not possible to cancel a query at certain stages of execution. If you do not wait and press Ctrl+C a second time, the client will exit.
The command-line client allows passing external data (external temporary tables) for use in a query. For more information, see the section "External data for query processing"
The command-line client allows passing external data (external temporary tables) for query execution. For more information, see the section "External data for query processing".
### Queries with Parameters {#cli-queries-with-parameters}
@ -84,7 +84,7 @@ $ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FOR
clickhouse-client --param_parName="[1, 2]" -q "SELECT * FROM table WHERE a = {parName:Array(UInt16)}"
```
#### Query Syntax {#cli-queries-with-parameters-syntax}
#### Query Syntax {#cli-queries-with-parameters-syntax}
Format the query as usual. Values that you want to pass from the application parameters into the query should be represented in the following format:
@ -155,3 +155,29 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe
</config>
```
### Query ID Format {#query-id-format}
In interactive mode `clickhouse-client` shows the ID for every query. By default, the ID is formatted like this:
```sql
Query id: 927f137d-00f1-4175-8914-0dd066365e96
```
A custom ID format can be specified in a configuration file inside a `query_id_formats` tag. The ID is substituted in place of `{query_id}` in the format string. Several format strings can be listed inside the tag.
This feature can be used to generate URLs that facilitate query profiling.
**Example**
```xml
<config>
<query_id_formats>
<speedscope>http://speedscope-host/#profileURL=qp%3Fid%3D{query_id}</speedscope>
</query_id_formats>
</config>
```
If the configuration above is applied, the query ID is shown in the following format:
``` text
speedscope:http://speedscope-host/#profileURL=qp%3Fid%3Dc8ecc783-e753-4b38-97f1-42cddfb98b7d
```

View File

@ -3252,3 +3252,25 @@ SETTINGS index_granularity = 8192 │
- 1 — The table is automatically updated in the background when schema changes are detected.
Default value: `0`.
## allow_experimental_projection_optimization {#allow-experimental-projection-optimization}
Enables or disables [projection](../../engines/table-engines/mergetree-family/mergetree.md#projections) support when processing `SELECT` queries.
Possible values:
- 0 — Projections are not supported.
- 1 — Projections are supported.
Default value: `0`.
## force_optimize_projection {#force-optimize-projection}
Enables or disables the mandatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries when projection support is enabled (see the [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting).
Possible values:
- 0 — Projections are used optionally.
- 1 — Projections are obligatory.
Default value: `0`.

View File

@ -2185,3 +2185,75 @@ defaultRoles()
- The list of default roles.
Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).
## queryID {#query-id}
Returns the ID of the current query. Other parameters of the query can be extracted from the [system.query_log](../../operations/system-tables/query_log.md) system table via `query_id`.
In contrast to [initialQueryID](#initial-query-id), the `queryID` function can return different values on different shards (see the example).
**Syntax**
``` sql
queryID()
```
**Returned value**
- The ID of the current query.
Type: [String](../../sql-reference/data-types/string.md)
**Example**
Query:
``` sql
CREATE TABLE tmp (str String) ENGINE = Log;
INSERT INTO tmp (*) VALUES ('a');
SELECT count(DISTINCT t) FROM (SELECT queryID() AS t FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID());
```
Result:
``` text
┌─count()─┐
│ 3 │
└─────────┘
```
## initialQueryID {#initial-query-id}
Returns the ID of the initial query. Other parameters of the query can be extracted from the [system.query_log](../../operations/system-tables/query_log.md) system table via `initial_query_id`.
In contrast to [queryID](#query-id), the `initialQueryID` function returns the same value on different shards (see the example).
**Syntax**
``` sql
initialQueryID()
```
**Returned value**
- The ID of the initial query.
Type: [String](../../sql-reference/data-types/string.md)
**Example**
Query:
``` sql
CREATE TABLE tmp (str String) ENGINE = Log;
INSERT INTO tmp (*) VALUES ('a');
SELECT count(DISTINCT t) FROM (SELECT initialQueryID() AS t FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID());
```
Result:
``` text
┌─count()─┐
│ 1 │
└─────────┘
```

View File

@ -5,7 +5,7 @@ toc_title: PROJECTION
# Manipulating Projections {#manipulations-with-projections}
The following operations are available:
The following operations with [projections](../../../engines/table-engines/mergetree-family/mergetree.md#projections) are available:
- `ALTER TABLE [db].name ADD PROJECTION name AS SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY]` — adds a projection description to the table's metadata.
@ -15,7 +15,7 @@ toc_title: PROJECTION
- `ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` — deletes projection files from disk without removing the description.
The ADD, DROP and CLEAR commands are lightweight in the sense that they only change metadata or remove files.
The `ADD`, `DROP` and `CLEAR` commands are lightweight in the sense that they only change metadata or remove files.
The commands are also replicated, syncing projection descriptions in metadata via ZooKeeper.

View File

@ -62,7 +62,6 @@
#include <IO/Operators.h>
#include <IO/UseSSL.h>
#include <IO/WriteBufferFromOStream.h>
#include <IO/ReadBufferFromFile.h>
#include <Processors/Transforms/AddingDefaultsTransform.h>
#include <DataStreams/InternalTextLogsRowOutputStream.h>
#include <DataStreams/NullBlockOutputStream.h>

View File

@ -300,9 +300,9 @@ int Keeper::main(const std::vector<std::string> & /*args*/)
if (config().has("keeper_server.storage_path"))
path = config().getString("keeper_server.storage_path");
else if (config().has("keeper_server.log_storage_path"))
path = config().getString("keeper_server.log_storage_path");
path = std::filesystem::path(config().getString("keeper_server.log_storage_path")).parent_path();
else if (config().has("keeper_server.snapshot_storage_path"))
path = config().getString("keeper_server.snapshot_storage_path");
path = std::filesystem::path(config().getString("keeper_server.snapshot_storage_path")).parent_path();
else
path = std::filesystem::path{KEEPER_DEFAULT_PATH};
@ -359,7 +359,7 @@ int Keeper::main(const std::vector<std::string> & /*args*/)
auto servers = std::make_shared<std::vector<ProtocolServerAdapter>>();
/// Initialize test keeper RAFT. Do nothing if no nu_keeper_server in config.
global_context->initializeKeeperStorageDispatcher();
global_context->initializeKeeperDispatcher();
for (const auto & listen_host : listen_hosts)
{
/// TCP Keeper
@ -428,7 +428,7 @@ int Keeper::main(const std::vector<std::string> & /*args*/)
else
LOG_INFO(log, "Closed connections to Keeper.");
global_context->shutdownKeeperStorageDispatcher();
global_context->shutdownKeeperDispatcher();
/// Wait server pool to avoid use-after-free of destroyed context in the handlers
server_pool.joinAll();

View File

@ -5,40 +5,16 @@
#include <Core/Field.h>
#include <common/LocalDate.h>
#include <common/LocalDateTime.h>
#include <Parsers/ASTInsertQuery.h>
#include <Parsers/ASTExpressionList.h>
#include <Parsers/ASTIdentifier.h>
#include "getIdentifierQuote.h"
#include <IO/WriteHelpers.h>
#include <IO/Operators.h>
#include <Formats/FormatFactory.h>
#include <Parsers/getInsertQuery.h>
namespace DB
{
namespace
{
using ValueType = ExternalResultDescription::ValueType;
std::string getInsertQuery(const std::string & db_name, const std::string & table_name, const ColumnsWithTypeAndName & columns, IdentifierQuotingStyle quoting)
{
ASTInsertQuery query;
query.table_id.database_name = db_name;
query.table_id.table_name = table_name;
query.columns = std::make_shared<ASTExpressionList>(',');
query.children.push_back(query.columns);
for (const auto & column : columns)
query.columns->children.emplace_back(std::make_shared<ASTIdentifier>(column.name));
WriteBufferFromOwnString buf;
IAST::FormatSettings settings(buf, true);
settings.always_quote_identifiers = true;
settings.identifier_quoting_style = quoting;
query.IAST::format(settings);
return buf.str();
}
}
ODBCBlockOutputStream::ODBCBlockOutputStream(nanodbc::ConnectionHolderPtr connection_holder_,
const std::string & remote_database_name_,

View File

@ -13,6 +13,7 @@ namespace DB
class ODBCBlockOutputStream : public IBlockOutputStream
{
using ValueType = ExternalResultDescription::ValueType;
public:
ODBCBlockOutputStream(

View File

@ -996,7 +996,7 @@ if (ThreadFuzzer::instance().isEffective())
{
#if USE_NURAFT
/// Initialize test keeper RAFT. Do nothing if no nu_keeper_server in config.
global_context->initializeKeeperStorageDispatcher();
global_context->initializeKeeperDispatcher();
for (const auto & listen_host : listen_hosts)
{
/// TCP Keeper
@ -1079,7 +1079,7 @@ if (ThreadFuzzer::instance().isEffective())
else
LOG_INFO(log, "Closed connections to servers for tables.");
global_context->shutdownKeeperStorageDispatcher();
global_context->shutdownKeeperDispatcher();
}
/// Wait server pool to avoid use-after-free of destroyed context in the handlers

View File

@ -60,9 +60,6 @@ then
elif [[ "$SANITIZER" == "thread" ]]; then VERSION_POSTFIX+="+tsan"
elif [[ "$SANITIZER" == "memory" ]]; then VERSION_POSTFIX+="+msan"
elif [[ "$SANITIZER" == "undefined" ]]; then VERSION_POSTFIX+="+ubsan"
elif [[ "$SANITIZER" == "libfuzzer" ]]; then
VERSION_POSTFIX+="+libfuzzer"
MALLOC_OPTS="-DENABLE_TCMALLOC=0 -DENABLE_JEMALLOC=0"
else
echo "Unknown value of SANITIZER variable: $SANITIZER"
exit 3

View File

@ -74,17 +74,24 @@ void TimerDescriptor::drain() const
}
}
void TimerDescriptor::setRelative(Poco::Timespan timespan) const
void TimerDescriptor::setRelative(uint64_t usec) const
{
static constexpr uint32_t TIMER_PRECISION = 1e6;
itimerspec spec;
spec.it_interval.tv_nsec = 0;
spec.it_interval.tv_sec = 0;
spec.it_value.tv_sec = timespan.totalSeconds();
spec.it_value.tv_nsec = timespan.useconds() * 1000;
spec.it_value.tv_sec = usec / TIMER_PRECISION;
spec.it_value.tv_nsec = (usec % TIMER_PRECISION) * 1'000;
if (-1 == timerfd_settime(timer_fd, 0 /*relative timer */, &spec, nullptr))
throwFromErrno("Cannot set time for timer_fd", ErrorCodes::CANNOT_SET_TIMER_PERIOD);
}
void TimerDescriptor::setRelative(Poco::Timespan timespan) const
{
setRelative(timespan.totalMicroseconds());
}
}
#endif
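The microsecond-to-`itimerspec` split in `setRelative(uint64_t)` can be verified with a small worked example (a standalone sketch, not part of the patch):

```cpp
#include <cassert>
#include <cstdint>

int main()
{
    constexpr uint64_t TIMER_PRECISION = 1'000'000; /// microseconds per second
    uint64_t usec = 1'500'000;                      /// 1.5 seconds

    /// Same arithmetic as setRelative(uint64_t) above.
    uint64_t tv_sec = usec / TIMER_PRECISION;            /// -> 1 second
    uint64_t tv_nsec = (usec % TIMER_PRECISION) * 1'000; /// -> 500'000'000 ns

    assert(tv_sec == 1 && tv_nsec == 500'000'000);
    return 0;
}
```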

View File

@ -24,6 +24,7 @@ public:
void reset() const;
void drain() const;
void setRelative(uint64_t usec) const;
void setRelative(Poco::Timespan timespan) const;
};

View File

@ -253,4 +253,3 @@ CompressedReadBufferBase::~CompressedReadBufferBase() = default; /// Proper d
}

View File

@ -82,8 +82,10 @@ void compressDataForType(const char * source, UInt32 source_size, char * dest)
}
template <typename T>
void decompressDataForType(const char * source, UInt32 source_size, char * dest)
void decompressDataForType(const char * source, UInt32 source_size, char * dest, UInt32 output_size)
{
const char * output_end = dest + output_size;
if (source_size % sizeof(T) != 0)
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot delta decompress, data size {} is not aligned to {}", source_size, sizeof(T));
@ -92,6 +94,8 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
while (source < source_end)
{
accumulator += unalignedLoad<T>(source);
if (dest + sizeof(accumulator) > output_end)
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
unalignedStore<T>(dest, accumulator);
source += sizeof(T);
@ -137,6 +141,7 @@ void CompressionCodecDelta::doDecompressData(const char * source, UInt32 source_
throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);
UInt8 bytes_to_skip = uncompressed_size % bytes_size;
UInt32 output_size = uncompressed_size - bytes_to_skip;
if (UInt32(2 + bytes_to_skip) > source_size)
throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);
@ -146,16 +151,16 @@ void CompressionCodecDelta::doDecompressData(const char * source, UInt32 source_
switch (bytes_size)
{
case 1:
decompressDataForType<UInt8>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
decompressDataForType<UInt8>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
break;
case 2:
decompressDataForType<UInt16>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
decompressDataForType<UInt16>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
break;
case 4:
decompressDataForType<UInt32>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
decompressDataForType<UInt32>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
break;
case 8:
decompressDataForType<UInt64>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
decompressDataForType<UInt64>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
break;
}
}
@ -209,4 +214,10 @@ void registerCodecDelta(CompressionCodecFactory & factory)
return std::make_shared<CompressionCodecDelta>(delta_bytes_size);
});
}
CompressionCodecPtr getCompressionCodecDelta(UInt8 delta_bytes_size)
{
return std::make_shared<CompressionCodecDelta>(delta_bytes_size);
}
}
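Putting the hunks above together: every store in the decompression loop is now bounds-checked against the caller-supplied output size. A simplified, self-contained sketch of the hardened loop (standard library calls in place of ClickHouse helpers):

```cpp
#include <cstring>
#include <stdexcept>

template <typename T>
void delta_decompress(const char * source, size_t source_size, char * dest, size_t output_size)
{
    const char * source_end = source + source_size;
    const char * output_end = dest + output_size;
    T accumulator{};
    while (source < source_end)
    {
        T delta;
        std::memcpy(&delta, source, sizeof(T)); /// unalignedLoad
        accumulator += delta;
        /// The check added by this commit: never write past the declared output size.
        if (dest + sizeof(accumulator) > output_end)
            throw std::runtime_error("Cannot decompress the data");
        std::memcpy(dest, &accumulator, sizeof(T)); /// unalignedStore
        source += sizeof(T);
        dest += sizeof(T);
    }
}
```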

View File

@ -353,12 +353,13 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
}
template <typename ValueType>
void decompressDataForType(const char * source, UInt32 source_size, char * dest)
void decompressDataForType(const char * source, UInt32 source_size, char * dest, UInt32 output_size)
{
static_assert(is_unsigned_v<ValueType>, "ValueType must be unsigned.");
using UnsignedDeltaType = ValueType;
const char * source_end = source + source_size;
const char * output_end = dest + output_size;
if (source + sizeof(UInt32) > source_end)
return;
@ -374,6 +375,8 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
return;
prev_value = unalignedLoad<ValueType>(source);
if (dest + sizeof(prev_value) > output_end)
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
unalignedStore<ValueType>(dest, prev_value);
source += sizeof(prev_value);
@ -385,6 +388,8 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
prev_delta = unalignedLoad<UnsignedDeltaType>(source);
prev_value = prev_value + static_cast<ValueType>(prev_delta);
if (dest + sizeof(prev_value) > output_end)
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
unalignedStore<ValueType>(dest, prev_value);
source += sizeof(prev_delta);
@ -416,6 +421,8 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
const UnsignedDeltaType delta = double_delta + prev_delta;
const ValueType curr_value = prev_value + delta;
if (dest + sizeof(curr_value) > output_end)
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
unalignedStore<ValueType>(dest, curr_value);
dest += sizeof(curr_value);
@ -507,6 +514,7 @@ void CompressionCodecDoubleDelta::doDecompressData(const char * source, UInt32 s
throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);
UInt8 bytes_to_skip = uncompressed_size % bytes_size;
UInt32 output_size = uncompressed_size - bytes_to_skip;
if (UInt32(2 + bytes_to_skip) > source_size)
throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);
@ -516,16 +524,16 @@ void CompressionCodecDoubleDelta::doDecompressData(const char * source, UInt32 s
switch (bytes_size)
{
case 1:
decompressDataForType<UInt8>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
decompressDataForType<UInt8>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
break;
case 2:
decompressDataForType<UInt16>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
decompressDataForType<UInt16>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
break;
case 4:
decompressDataForType<UInt32>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
decompressDataForType<UInt32>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
break;
case 8:
decompressDataForType<UInt64>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip]);
decompressDataForType<UInt64>(&source[2 + bytes_to_skip], source_size_no_header, &dest[bytes_to_skip], output_size);
break;
}
}
@ -543,4 +551,10 @@ void registerCodecDoubleDelta(CompressionCodecFactory & factory)
return std::make_shared<CompressionCodecDoubleDelta>(data_bytes_size);
});
}
CompressionCodecPtr getCompressionCodecDoubleDelta(UInt8 data_bytes_size)
{
return std::make_shared<CompressionCodecDoubleDelta>(data_bytes_size);
}
}

View File

@ -51,7 +51,7 @@ namespace DB
*/
static void setMasterKey(const std::string_view & master_key);
CompressionCodecEncrypted(const std::string_view & cipher);
explicit CompressionCodecEncrypted(const std::string_view & cipher);
uint8_t getMethodByte() const override;
void updateHash(SipHash & hash) const override;
@ -88,7 +88,7 @@ namespace DB
*/
struct KeyHolder : private boost::noncopyable
{
KeyHolder(const std::string_view & master_key);
explicit KeyHolder(const std::string_view & master_key);
~KeyHolder();
std::string keygen_key;
@ -99,6 +99,11 @@ namespace DB
static inline std::optional<KeyHolder> keys;
};
inline CompressionCodecPtr getCompressionCodecEncrypted(const std::string_view & master_key)
{
return std::make_shared<CompressionCodecEncrypted>(master_key);
}
}
#endif /* USE_SSL && USE_INTERNAL_SSL_LIBRARY */

View File

@ -147,4 +147,10 @@ CompressionCodecLZ4HC::CompressionCodecLZ4HC(int level_)
setCodecDescription("LZ4HC", {std::make_shared<ASTLiteral>(static_cast<UInt64>(level))});
}
CompressionCodecPtr getCompressionCodecLZ4(int level)
{
return std::make_shared<CompressionCodecLZ4HC>(level);
}
}

View File

@ -9,7 +9,7 @@ class CompressionCodecMultiple final : public ICompressionCodec
{
public:
CompressionCodecMultiple() = default; /// Need for CompressionFactory to register codec by method byte.
CompressionCodecMultiple(Codecs codecs_);
explicit CompressionCodecMultiple(Codecs codecs_);
uint8_t getMethodByte() const override;

View File

@ -156,4 +156,9 @@ void registerCodecZSTD(CompressionCodecFactory & factory)
});
}
CompressionCodecPtr getCompressionCodecZSTD(int level)
{
return std::make_shared<CompressionCodecZSTD>(level);
}
}

View File

@ -18,6 +18,8 @@ using Codecs = std::vector<CompressionCodecPtr>;
class IDataType;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size);
/**
* Represents interface for compression codecs like LZ4, ZSTD, etc.
*/
@ -84,6 +86,8 @@ public:
virtual bool isNone() const { return false; }
protected:
/// This is used for fuzz testing
friend int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size);
/// Return size of compressed data without header
virtual UInt32 getMaxCompressedDataSize(UInt32 uncompressed_size) const { return uncompressed_size; }
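The `friend` declaration is what lets the external fuzzer entry point reach the protected decompression methods. In miniature (hypothetical names, not ClickHouse code):

```cpp
class Codec
{
protected:
    int decompress(int x) const { return x; }
    /// Without this declaration the free function below could not call decompress().
    friend int fuzz_entry(int);
};

int fuzz_entry(int x)
{
    Codec codec;
    return codec.decompress(x); /// OK: fuzz_entry is a friend of Codec
}
```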

View File

@ -450,7 +450,11 @@ bool NO_INLINE decompressImpl(
const unsigned token = *ip++;
length = token >> 4;
if (length == 0x0F)
{
if (unlikely(ip + 1 >= input_end))
return false;
continue_read_length();
}
/// Copy literals.
@ -470,6 +474,20 @@ bool NO_INLINE decompressImpl(
if (unlikely(copy_end > output_end))
return false;
// Due to implementation specifics the copy length is always a multiple of copy_amount
size_t real_length = 0;
static_assert(copy_amount == 8 || copy_amount == 16 || copy_amount == 32);
if constexpr (copy_amount == 8)
real_length = (((length >> 3) + 1) * 8);
else if constexpr (copy_amount == 16)
real_length = (((length >> 4) + 1) * 16);
else if constexpr (copy_amount == 32)
real_length = (((length >> 5) + 1) * 32);
if (unlikely(ip + real_length >= input_end + ADDITIONAL_BYTES_AT_END_OF_BUFFER))
return false;
wildCopy<copy_amount>(op, ip, copy_end); /// Here we can write up to copy_amount - 1 bytes after buffer.
if (copy_end == output_end)
@ -494,7 +512,11 @@ bool NO_INLINE decompressImpl(
length = token & 0x0F;
if (length == 0x0F)
{
if (unlikely(ip + 1 >= input_end))
return false;
continue_read_length();
}
length += 4;
/// Copy match within block, that produce overlapping pattern. Match may replicate itself.
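The `real_length` computation above rounds the literal length up to whole `copy_amount`-byte chunks, because `wildCopy` always advances a full chunk at a time. A worked example (standalone sketch):

```cpp
#include <cassert>
#include <cstddef>

int main()
{
    /// wildCopy<16> reads (length / 16 + 1) * 16 bytes from the input,
    /// which is what ((length >> 4) + 1) * 16 computes with shifts.
    size_t length = 20;
    size_t real_length = ((length >> 4) + 1) * 16;
    assert(real_length == 32); /// 20 literal bytes trigger a 32-byte wild copy
    return 0;
}
```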

View File

@ -1,2 +1,20 @@
# Our code has strong cohesion, and the target associated with `Compression` also depends on `DataTypes`.
# But we can exclude some files which have dependencies in case of a
# fuzzer-related build (we are interested in fuzzing only a particular part of our code),
# so some symbols will be declared but not defined. Unfortunately, this trick doesn't work with UBSan.
# If you want a really small resulting binary, just link with fuzz_compression and clickhouse_common_io.
add_executable (compressed_buffer_fuzzer compressed_buffer_fuzzer.cpp)
target_link_libraries (compressed_buffer_fuzzer PRIVATE fuzz_compression clickhouse_common_io ${LIB_FUZZING_ENGINE})
target_link_libraries (compressed_buffer_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE})
add_executable (lz4_decompress_fuzzer lz4_decompress_fuzzer.cpp)
target_link_libraries (lz4_decompress_fuzzer PUBLIC dbms lz4 ${LIB_FUZZING_ENGINE})
add_executable (delta_decompress_fuzzer delta_decompress_fuzzer.cpp)
target_link_libraries (delta_decompress_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE})
add_executable (double_delta_decompress_fuzzer double_delta_decompress_fuzzer.cpp)
target_link_libraries (double_delta_decompress_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE})
add_executable (encrypted_decompress_fuzzer encrypted_decompress_fuzzer.cpp)
target_link_libraries (encrypted_decompress_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE})

View File

@ -17,6 +17,5 @@ try
}
catch (...)
{
std::cerr << DB::getCurrentExceptionMessage(true) << std::endl;
return 1;
}

View File

@ -0,0 +1,44 @@
#include <iostream>
#include <string>
#include <Compression/ICompressionCodec.h>
#include <IO/BufferWithOwnMemory.h>
namespace DB
{
CompressionCodecPtr getCompressionCodecDelta(UInt8 delta_bytes_size);
}
struct AuxiliaryRandomData
{
UInt8 delta_size_bytes;
size_t decompressed_size;
};
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
try
{
if (size < sizeof(AuxiliaryRandomData))
return 0;
const auto * p = reinterpret_cast<const AuxiliaryRandomData *>(data);
auto codec = DB::getCompressionCodecDelta(p->delta_size_bytes);
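// The modulo below caps the fuzzer-chosen output size at 64 KiB to keep allocations bounded.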
size_t output_buffer_size = p->decompressed_size % 65536;
size -= sizeof(AuxiliaryRandomData);
data += sizeof(AuxiliaryRandomData) / sizeof(uint8_t);
// std::string input = std::string(reinterpret_cast<const char*>(data), size);
// fmt::print(stderr, "Using input {} of size {}, output size is {}. \n", input, size, output_buffer_size);
DB::Memory<> memory;
memory.resize(output_buffer_size + codec->getAdditionalSizeAtTheEndOfBuffer());
codec->doDecompressData(reinterpret_cast<const char *>(data), size, memory.data(), output_buffer_size);
return 0;
}
catch (...)
{
return 1;
}

View File

@ -0,0 +1,44 @@
#include <iostream>
#include <string>
#include <Compression/ICompressionCodec.h>
#include <IO/BufferWithOwnMemory.h>
namespace DB
{
CompressionCodecPtr getCompressionCodecDoubleDelta(UInt8 data_bytes_size);
}
struct AuxiliaryRandomData
{
UInt8 data_bytes_size;
size_t decompressed_size;
};
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
try
{
if (size < sizeof(AuxiliaryRandomData))
return 0;
const auto * p = reinterpret_cast<const AuxiliaryRandomData *>(data);
auto codec = DB::getCompressionCodecDoubleDelta(p->data_bytes_size);
size_t output_buffer_size = p->decompressed_size % 65536;
size -= sizeof(AuxiliaryRandomData);
data += sizeof(AuxiliaryRandomData) / sizeof(uint8_t);
// std::string input = std::string(reinterpret_cast<const char*>(data), size);
// fmt::print(stderr, "Using input {} of size {}, output size is {}. \n", input, size, output_buffer_size);
DB::Memory<> memory;
memory.resize(output_buffer_size + codec->getAdditionalSizeAtTheEndOfBuffer());
codec->doDecompressData(reinterpret_cast<const char *>(data), size, memory.data(), output_buffer_size);
return 0;
}
catch (...)
{
return 1;
}

View File

@ -0,0 +1,52 @@
#include <iostream>
#include <string>
#include <Compression/ICompressionCodec.h>
#include <Compression/CompressionCodecEncrypted.h>
#include <IO/BufferWithOwnMemory.h>
namespace DB
{
CompressionCodecPtr getCompressionCodecEncrypted(const std::string_view & master_key);
}
constexpr size_t key_size = 20;
struct AuxiliaryRandomData
{
char key[key_size];
size_t decompressed_size;
};
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
try
{
if (size < sizeof(AuxiliaryRandomData))
return 0;
const auto * p = reinterpret_cast<const AuxiliaryRandomData *>(data);
std::string key = std::string(p->key, key_size);
auto codec = DB::getCompressionCodecEncrypted(key);
size_t output_buffer_size = p->decompressed_size % 65536;
size -= sizeof(AuxiliaryRandomData);
data += sizeof(AuxiliaryRandomData) / sizeof(uint8_t);
std::string input = std::string(reinterpret_cast<const char*>(data), size);
fmt::print(stderr, "Using input {} of size {}, output size is {}. \n", input, size, output_buffer_size);
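// Skip inputs whose declared output is smaller than the input itself.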
if (output_buffer_size < size)
return 0;
DB::Memory<> memory;
memory.resize(output_buffer_size + codec->getAdditionalSizeAtTheEndOfBuffer());
codec->doDecompressData(reinterpret_cast<const char *>(data), size, memory.data(), output_buffer_size);
return 0;
}
catch (...)
{
return 1;
}

View File

@ -0,0 +1,47 @@
#include <iostream>
#include <string>
#include <Compression/ICompressionCodec.h>
#include <IO/BufferWithOwnMemory.h>
#include <Compression/LZ4_decompress_faster.h>
namespace DB
{
CompressionCodecPtr getCompressionCodecLZ4(int level);
}
struct AuxiliaryRandomData
{
size_t level;
size_t decompressed_size;
};
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
try
{
if (size < sizeof(AuxiliaryRandomData) + LZ4::ADDITIONAL_BYTES_AT_END_OF_BUFFER)
return 0;
const auto * p = reinterpret_cast<const AuxiliaryRandomData *>(data);
auto codec = DB::getCompressionCodecLZ4(p->level);
size_t output_buffer_size = p->decompressed_size % 65536;
size -= sizeof(AuxiliaryRandomData);
size -= LZ4::ADDITIONAL_BYTES_AT_END_OF_BUFFER;
data += sizeof(AuxiliaryRandomData) / sizeof(uint8_t);
// std::string input = std::string(reinterpret_cast<const char*>(data), size);
// fmt::print(stderr, "Using input {} of size {}, output size is {}. \n", input, size, output_buffer_size);
DB::Memory<> memory;
memory.resize(output_buffer_size + LZ4::ADDITIONAL_BYTES_AT_END_OF_BUFFER);
codec->doDecompressData(reinterpret_cast<const char *>(data), size, memory.data(), output_buffer_size);
return 0;
}
catch (...)
{
return 1;
}

View File

@ -165,10 +165,11 @@ public:
while (!read_buf.eof())
{
result.last_position = read_buf.count();
/// Read checksum
Checksum record_checksum;
readIntBinary(record_checksum, read_buf);
/// Initialization is required, otherwise checksums may fail
/// Read header
ChangelogRecord record;
readIntBinary(record.header.version, read_buf);
readIntBinary(record.header.index, read_buf);
@ -179,6 +180,7 @@ public:
if (record.header.version > CURRENT_CHANGELOG_VERSION)
throw Exception(ErrorCodes::UNKNOWN_FORMAT_VERSION, "Unsupported changelog version {} on path {}", record.header.version, filepath);
/// Read data
if (record.header.blob_size != 0)
{
auto buffer = nuraft::buffer::alloc(record.header.blob_size);
@ -189,11 +191,13 @@ public:
else
record.blob = nullptr;
/// Check changelog integrity
if (previous_index != 0 && previous_index + 1 != record.header.index)
throw Exception(ErrorCodes::CORRUPTED_DATA, "Previous log entry {}, next log entry {}, seems like some entries skipped", previous_index, record.header.index);
previous_index = record.header.index;
/// Compare checksums
Checksum checksum = computeRecordChecksum(record);
if (checksum != record_checksum)
{
@ -202,22 +206,25 @@ public:
filepath, record.header.version, record.header.index, record.header.blob_size);
}
/// Check for duplicated changelog ids
if (logs.count(record.header.index) != 0)
throw Exception(ErrorCodes::CORRUPTED_DATA, "Duplicated index id {} in log {}", record.header.index, filepath);
result.entries_read += 1;
/// Read but skip this entry because our state is already fresher
if (record.header.index < start_log_index)
{
continue;
}
/// Create log entry for read data
auto log_entry = nuraft::cs_new<nuraft::log_entry>(record.header.term, record.blob, record.header.value_type);
if (result.first_read_index == 0)
result.first_read_index = record.header.index;
/// Put it into in memory structure
logs.emplace(record.header.index, log_entry);
index_to_offset[record.header.index] = result.last_position;
if (result.entries_read % 50000 == 0)
LOG_TRACE(log, "Reading changelog from path {}, entries {}", filepath, result.entries_read);
}
@ -235,6 +242,7 @@ public:
result.error = true;
tryLogCurrentException(log);
}
LOG_TRACE(log, "Totally read from changelog {} {} entries", filepath, result.entries_read);
return result;
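
For orientation, the read loop above implies the on-disk layout of a single changelog record: a checksum, a fixed header (version, index, term, value_type, blob_size), then blob_size bytes of payload. A rough sketch of that record follows; the exact field widths and the checksum type are assumptions here, not the actual definitions.

// Rough, hypothetical picture of one on-disk changelog record as consumed above.
struct OnDiskChangelogRecord
{
    uint64_t checksum;     /// Read first, later compared with computeRecordChecksum(record)
    uint8_t  version;      /// Must not exceed CURRENT_CHANGELOG_VERSION
    uint64_t index;        /// Must equal previous_index + 1 and be unique within the log
    uint64_t term;
    uint8_t  value_type;
    uint64_t blob_size;    /// 0 means record.blob stays nullptr
    /// ... followed by blob_size bytes of payload ...
};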
@ -255,6 +263,7 @@ Changelog::Changelog(
, force_sync(force_sync_)
, log(log_)
{
/// Load all files in changelog directory
namespace fs = std::filesystem;
if (!fs::exists(changelogs_dir))
fs::create_directories(changelogs_dir);
@ -264,23 +273,35 @@ Changelog::Changelog(
auto file_description = getChangelogFileDescription(p.path());
existing_changelogs[file_description.from_log_index] = file_description;
}
if (existing_changelogs.empty())
LOG_WARNING(log, "No logs exists in {}. It's Ok if it's the first run of clickhouse-keeper.", changelogs_dir);
}
void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uint64_t logs_to_keep)
{
uint64_t total_read = 0;
/// Expected number of entries in the last changelog file
uint64_t entries_in_last = 0;
uint64_t incomplete_log_index = 0;
/// Log idx of the first incomplete log (key in existing_changelogs)
uint64_t first_incomplete_log_start_index = 0;
ChangelogReadResult result{};
/// First log index which was read from all changelogs
uint64_t first_read_index = 0;
uint64_t start_to_read_from = last_commited_log_index;
if (start_to_read_from > logs_to_keep)
start_to_read_from -= logs_to_keep;
else
start_to_read_from = 1;
/// Whether we have read at least one changelog
bool started = false;
for (const auto & [changelog_start_index, changelog_description] : existing_changelogs)
{
entries_in_last = changelog_description.to_log_index - changelog_description.from_log_index + 1;
@ -292,7 +313,7 @@ void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uin
if (changelog_description.from_log_index > last_commited_log_index && (changelog_description.from_log_index - last_commited_log_index) > 1)
{
LOG_ERROR(log, "Some records was lost, last committed log index {}, smallest available log index on disk {}. Hopefully will receive missing records from leader.", last_commited_log_index, changelog_description.from_log_index);
incomplete_log_index = changelog_start_index;
first_incomplete_log_start_index = changelog_start_index;
break;
}
else if (changelog_description.from_log_index > start_to_read_from)
@ -311,7 +332,7 @@ void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uin
/// May happen after truncate, crash or simply unfinished log
if (result.entries_read < entries_in_last)
{
incomplete_log_index = changelog_start_index;
first_incomplete_log_start_index = changelog_start_index;
break;
}
}
@ -322,11 +343,13 @@ void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uin
else
start_index = last_commited_log_index;
if (incomplete_log_index != 0)
/// Found some broken or unfinished logs
/// We have to remove the broken data and continue to write into the incomplete log.
if (first_incomplete_log_start_index != 0)
{
auto start_remove_from = existing_changelogs.begin();
if (started)
start_remove_from = existing_changelogs.upper_bound(incomplete_log_index);
start_remove_from = existing_changelogs.upper_bound(first_incomplete_log_start_index);
/// All subsequent logs shouldn't exist. But they may exist if we crashed after writeAt started. Remove them.
for (auto itr = start_remove_from; itr != existing_changelogs.end();)
@ -363,6 +386,7 @@ void Changelog::rotate(uint64_t new_start_log_index)
/// Flush previous log
flush();
/// Start new one
ChangelogFileDescription new_description;
new_description.prefix = DEFAULT_PREFIX;
new_description.from_log_index = new_start_log_index;
@ -378,7 +402,7 @@ void Changelog::rotate(uint64_t new_start_log_index)
ChangelogRecord Changelog::buildRecord(uint64_t index, const LogEntryPtr & log_entry)
{
ChangelogRecord record;
record.header.version = ChangelogVersion::V0;
record.header.version = ChangelogVersion::V1;
record.header.index = index;
record.header.term = log_entry->get_term();
record.header.value_type = log_entry->get_val_type();
@ -416,7 +440,9 @@ void Changelog::writeAt(uint64_t index, const LogEntryPtr & log_entry)
if (index_to_start_pos.count(index) == 0)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot write at index {} because changelog doesn't contain it", index);
/// Complex case when we need to overwrite data from an already rotated log
bool go_to_previous_file = index < current_writer->getStartIndex();
if (go_to_previous_file)
{
auto index_changelog = existing_changelogs.lower_bound(index);
@ -450,6 +476,7 @@ void Changelog::writeAt(uint64_t index, const LogEntryPtr & log_entry)
auto log_itr = logs.find(i);
if (log_itr == logs.end())
break;
logs.erase(log_itr);
index_to_start_pos.erase(i);
entries_written--;
@ -467,7 +494,6 @@ void Changelog::compact(uint64_t up_to_log_index)
/// Remove all completely outdated changelog files
if (itr->second.to_log_index <= up_to_log_index)
{
LOG_INFO(log, "Removing changelog {} because of compaction", itr->second.path);
std::erase_if(index_to_start_pos, [right_index = itr->second.to_log_index] (const auto & item) { return item.first <= right_index; });
std::filesystem::remove(itr->second.path);
@ -482,6 +508,7 @@ void Changelog::compact(uint64_t up_to_log_index)
LogEntryPtr Changelog::getLastEntry() const
{
/// This entry is treated in a special way by NuRaft
static LogEntryPtr fake_entry = nuraft::cs_new<nuraft::log_entry>(0, nuraft::buffer::alloc(sizeof(uint64_t)));
uint64_t next_index = getNextEntryIndex() - 1;

View File

@ -58,8 +58,8 @@ struct ChangelogFileDescription
class ChangelogWriter;
/// Simplest changelog with files rotation.
/// No compression, no metadata, just entries with headers one by one
/// Able to read broken files/entries and discard them.
/// No compression, no metadata, just entries with headers one by one.
/// Able to read broken files/entries and discard them. Not thread safe.
class Changelog
{

View File

@ -1,4 +1,4 @@
#include <Coordination/KeeperStorageDispatcher.h>
#include <Coordination/KeeperDispatcher.h>
#include <Common/setThreadName.h>
#include <Common/ZooKeeper/KeeperException.h>
#include <future>
@ -9,19 +9,18 @@ namespace DB
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int TIMEOUT_EXCEEDED;
}
KeeperStorageDispatcher::KeeperStorageDispatcher()
KeeperDispatcher::KeeperDispatcher()
: coordination_settings(std::make_shared<CoordinationSettings>())
, log(&Poco::Logger::get("KeeperDispatcher"))
{
}
void KeeperStorageDispatcher::requestThread()
void KeeperDispatcher::requestThread()
{
setThreadName("KeeperReqT");
@ -133,7 +132,7 @@ void KeeperStorageDispatcher::requestThread()
}
}
void KeeperStorageDispatcher::responseThread()
void KeeperDispatcher::responseThread()
{
setThreadName("KeeperRspT");
while (!shutdown_called)
@ -159,7 +158,7 @@ void KeeperStorageDispatcher::responseThread()
}
}
void KeeperStorageDispatcher::snapshotThread()
void KeeperDispatcher::snapshotThread()
{
setThreadName("KeeperSnpT");
while (!shutdown_called)
@ -181,9 +180,11 @@ void KeeperStorageDispatcher::snapshotThread()
}
}
void KeeperStorageDispatcher::setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response)
void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response)
{
std::lock_guard lock(session_to_response_callback_mutex);
/// Special new session response.
if (response->xid != Coordination::WATCH_XID && response->getOpNum() == Coordination::OpNum::SessionID)
{
const Coordination::ZooKeeperSessionIDResponse & session_id_resp = dynamic_cast<const Coordination::ZooKeeperSessionIDResponse &>(*response);
@ -196,25 +197,28 @@ void KeeperStorageDispatcher::setResponse(int64_t session_id, const Coordination
callback(response);
new_session_id_response_callback.erase(session_id_resp.internal_id);
}
else
else /// Normal response, just write to client
{
auto session_writer = session_to_response_callback.find(session_id);
if (session_writer == session_to_response_callback.end())
auto session_response_callback = session_to_response_callback.find(session_id);
/// Session was disconnected, just skip this response
if (session_response_callback == session_to_response_callback.end())
return;
session_writer->second(response);
session_response_callback->second(response);
/// Session closed, no more writes
if (response->xid != Coordination::WATCH_XID && response->getOpNum() == Coordination::OpNum::Close)
{
session_to_response_callback.erase(session_writer);
session_to_response_callback.erase(session_response_callback);
}
}
}
bool KeeperStorageDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id)
bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id)
{
{
/// If the session was already disconnected, then we will ignore requests
std::lock_guard lock(session_to_response_callback_mutex);
if (session_to_response_callback.count(session_id) == 0)
return false;
@ -237,7 +241,7 @@ bool KeeperStorageDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr
return true;
}
void KeeperStorageDispatcher::initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper)
void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper)
{
LOG_DEBUG(log, "Initializing storage dispatcher");
int myid = config.getInt("keeper_server.server_id");
@ -251,6 +255,7 @@ void KeeperStorageDispatcher::initialize(const Poco::Util::AbstractConfiguration
server = std::make_unique<KeeperServer>(
myid, coordination_settings, config, responses_queue, snapshots_queue, standalone_keeper);
try
{
LOG_DEBUG(log, "Waiting server to initialize");
@ -266,13 +271,13 @@ void KeeperStorageDispatcher::initialize(const Poco::Util::AbstractConfiguration
throw;
}
/// Start the session cleaner thread only after the keeper server has started
session_cleaner_thread = ThreadFromGlobalPool([this] { sessionCleanerTask(); });
LOG_DEBUG(log, "Dispatcher initialized");
}
void KeeperStorageDispatcher::shutdown()
void KeeperDispatcher::shutdown()
{
try
{
@ -306,6 +311,8 @@ void KeeperStorageDispatcher::shutdown()
server->shutdown();
KeeperStorage::RequestForSession request_for_session;
/// Set session expired for all pending requests
while (requests_queue->tryPop(request_for_session))
{
if (request_for_session.request)
@ -320,6 +327,7 @@ void KeeperStorageDispatcher::shutdown()
}
}
/// Clear all registered sessions
std::lock_guard lock(session_to_response_callback_mutex);
session_to_response_callback.clear();
}
@ -331,19 +339,19 @@ void KeeperStorageDispatcher::shutdown()
LOG_DEBUG(log, "Dispatcher shut down");
}
KeeperStorageDispatcher::~KeeperStorageDispatcher()
KeeperDispatcher::~KeeperDispatcher()
{
shutdown();
}
void KeeperStorageDispatcher::registerSession(int64_t session_id, ZooKeeperResponseCallback callback)
void KeeperDispatcher::registerSession(int64_t session_id, ZooKeeperResponseCallback callback)
{
std::lock_guard lock(session_to_response_callback_mutex);
if (!session_to_response_callback.try_emplace(session_id, callback).second)
throw Exception(DB::ErrorCodes::LOGICAL_ERROR, "Session with id {} already registered in dispatcher", session_id);
}
void KeeperStorageDispatcher::sessionCleanerTask()
void KeeperDispatcher::sessionCleanerTask()
{
while (true)
{
@ -352,12 +360,16 @@ void KeeperStorageDispatcher::sessionCleanerTask()
try
{
/// Only leader node must check dead sessions
if (isLeader())
{
auto dead_sessions = server->getDeadSessions();
for (int64_t dead_session : dead_sessions)
{
LOG_INFO(log, "Found dead session {}, will try to close it", dead_session);
/// Close session == send close request to raft server
Coordination::ZooKeeperRequestPtr request = Coordination::ZooKeeperRequestFactory::instance().get(Coordination::OpNum::Close);
request->xid = Coordination::CLOSE_XID;
KeeperStorage::RequestForSession request_info;
@ -367,6 +379,8 @@ void KeeperStorageDispatcher::sessionCleanerTask()
std::lock_guard lock(push_request_mutex);
requests_queue->push(std::move(request_info));
}
/// Remove session from registered sessions
finishSession(dead_session);
LOG_INFO(log, "Dead session close request pushed");
}
@ -381,7 +395,7 @@ void KeeperStorageDispatcher::sessionCleanerTask()
}
}
void KeeperStorageDispatcher::finishSession(int64_t session_id)
void KeeperDispatcher::finishSession(int64_t session_id)
{
std::lock_guard lock(session_to_response_callback_mutex);
auto session_it = session_to_response_callback.find(session_id);
@ -389,7 +403,7 @@ void KeeperStorageDispatcher::finishSession(int64_t session_id)
session_to_response_callback.erase(session_it);
}
void KeeperStorageDispatcher::addErrorResponses(const KeeperStorage::RequestsForSessions & requests_for_sessions, Coordination::Error error)
void KeeperDispatcher::addErrorResponses(const KeeperStorage::RequestsForSessions & requests_for_sessions, Coordination::Error error)
{
for (const auto & [session_id, request] : requests_for_sessions)
{
@ -402,7 +416,7 @@ void KeeperStorageDispatcher::addErrorResponses(const KeeperStorage::RequestsFor
}
}
void KeeperStorageDispatcher::forceWaitAndProcessResult(RaftAppendResult & result, KeeperStorage::RequestsForSessions & requests_for_sessions)
void KeeperDispatcher::forceWaitAndProcessResult(RaftAppendResult & result, KeeperStorage::RequestsForSessions & requests_for_sessions)
{
if (!result->has_result())
result->get();
@ -417,10 +431,14 @@ void KeeperStorageDispatcher::forceWaitAndProcessResult(RaftAppendResult & resul
requests_for_sessions.clear();
}
int64_t KeeperStorageDispatcher::getSessionID(int64_t session_timeout_ms)
int64_t KeeperDispatcher::getSessionID(int64_t session_timeout_ms)
{
/// New session id allocation is a special request, because we cannot process it in the normal
/// way: get request -> put to raft -> set response for registered callback.
KeeperStorage::RequestForSession request_info;
std::shared_ptr<Coordination::ZooKeeperSessionIDRequest> request = std::make_shared<Coordination::ZooKeeperSessionIDRequest>();
/// Internal session id. It's a temporary number which is unique for each client on this server
/// but can be the same on different servers.
request->internal_id = internal_session_id_counter.fetch_add(1);
request->session_timeout_ms = session_timeout_ms;
request->server_id = server->getServerID();
@ -430,6 +448,7 @@ int64_t KeeperStorageDispatcher::getSessionID(int64_t session_timeout_ms)
auto promise = std::make_shared<std::promise<int64_t>>();
auto future = promise->get_future();
{
std::lock_guard lock(session_to_response_callback_mutex);
new_session_id_response_callback[request->internal_id] = [promise, internal_id = request->internal_id] (const Coordination::ZooKeeperResponsePtr & response)
@ -452,6 +471,7 @@ int64_t KeeperStorageDispatcher::getSessionID(int64_t session_timeout_ms)
};
}
/// Push new session request to queue
{
std::lock_guard lock(push_request_mutex);
if (!requests_queue->tryPush(std::move(request_info), session_timeout_ms))
@ -461,6 +481,8 @@ int64_t KeeperStorageDispatcher::getSessionID(int64_t session_timeout_ms)
if (future.wait_for(std::chrono::milliseconds(session_timeout_ms)) != std::future_status::ready)
throw Exception("Cannot receive session id within session timeout", ErrorCodes::TIMEOUT_EXCEEDED);
/// Forcefully wait for request execution because we cannot process any other
/// requests for this client until it gets a new session id.
return future.get();
}
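
getSessionID() above is a one-shot request/response bridge: a promise is stashed in a callback map under a temporary internal id, the commit path later fires the callback, and the caller blocks on the future. Below is a stripped-down sketch of the same pattern; the names are illustrative and are not the dispatcher's actual members.

#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <unordered_map>

/// One-shot bridge: stash a promise under an internal id, fulfill it later
/// from another thread (here that would be the raft commit path).
class OneShotRegistry
{
public:
    std::future<int64_t> expect(int64_t internal_id)
    {
        auto promise = std::make_shared<std::promise<int64_t>>();
        std::lock_guard lock(mutex);
        callbacks[internal_id] = [promise](int64_t value) { promise->set_value(value); };
        return promise->get_future();
    }

    void fulfill(int64_t internal_id, int64_t value)
    {
        std::lock_guard lock(mutex);
        auto it = callbacks.find(internal_id);
        if (it == callbacks.end())
            return;
        it->second(value);
        callbacks.erase(it);
    }

private:
    std::mutex mutex;
    std::unordered_map<int64_t, std::function<void(int64_t)>> callbacks;
};

The caller then blocks with future.wait_for() on the session timeout and throws TIMEOUT_EXCEEDED on expiry, exactly as getSessionID() does above.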

View File

@ -22,7 +22,9 @@ namespace DB
using ZooKeeperResponseCallback = std::function<void(const Coordination::ZooKeeperResponsePtr & response)>;
class KeeperStorageDispatcher
/// High-level wrapper for ClickHouse Keeper.
/// Process user requests via consensus and return responses.
class KeeperDispatcher
{
private:
@ -45,6 +47,7 @@ private:
/// (get, set, list, etc.). Dispatcher determines callback for each response
/// using session id from this map.
SessionToResponseCallback session_to_response_callback;
/// But when a client connects to the server for the first time it doesn't
/// have a session_id yet. It requests it from the server. We give a temporary
/// internal id to such requests just to match the client with its response.
@ -60,7 +63,7 @@ private:
/// Dumping new snapshots to disk
ThreadFromGlobalPool snapshot_thread;
/// RAFT wrapper. Most important class.
/// RAFT wrapper.
std::unique_ptr<KeeperServer> server;
Poco::Logger * log;
@ -69,10 +72,15 @@ private:
std::atomic<int64_t> internal_session_id_counter{0};
private:
/// Thread that puts requests to raft
void requestThread();
/// Thread that puts responses for subscribed sessions
void responseThread();
/// Thread that cleans disconnected sessions from memory
void sessionCleanerTask();
/// Thread that creates snapshots in the background
void snapshotThread();
void setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response);
/// Add error responses for requests to responses queue.
@ -84,16 +92,23 @@ private:
void forceWaitAndProcessResult(RaftAppendResult & result, KeeperStorage::RequestsForSessions & requests_for_sessions);
public:
KeeperStorageDispatcher();
/// Just allocates some objects; real initialization is done by the `initialize` method
KeeperDispatcher();
/// Call shutdown
~KeeperDispatcher();
/// Initialization from config.
/// standalone_keeper -- we are standalone keeper application (not inside clickhouse server)
void initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper);
/// Shutdown internal keeper parts (server, state machine, log storage, etc)
void shutdown();
~KeeperStorageDispatcher();
/// Put request to ClickHouse Keeper
bool putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id);
/// Are we leader
bool isLeader() const
{
return server->isLeader();
@ -104,9 +119,12 @@ public:
return server->isLeaderAlive();
}
/// Get new session ID
int64_t getSessionID(int64_t session_timeout_ms);
/// Register session and subscribe for responses with callback
void registerSession(int64_t session_id, ZooKeeperResponseCallback callback);
/// Call when we no longer need any responses for this session (session expired)
void finishSession(int64_t session_id);
};
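
Taken together, the public methods above define the dispatcher's lifecycle from a connection handler's point of view. A sketch of that flow follows; the config and request objects are assumed to exist, and error handling is omitted.

/// Hypothetical per-connection flow over KeeperDispatcher's public API.
DB::KeeperDispatcher dispatcher;
dispatcher.initialize(config, /* standalone_keeper */ true);

int64_t session_id = dispatcher.getSessionID(/* session_timeout_ms */ 30000);
dispatcher.registerSession(session_id, [](const Coordination::ZooKeeperResponsePtr & /*response*/)
{
    /// Write the response back to the client socket here.
});

if (!dispatcher.putRequest(request, session_id))
{
    /// Session was already disconnected, the request is ignored.
}

dispatcher.finishSession(session_id); /// Client went away, drop its callback.
dispatcher.shutdown();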

View File

@ -9,39 +9,53 @@
namespace DB
{
/// Wrapper around Changelog class. Implements RAFT log storage.
class KeeperLogStore : public nuraft::log_store
{
public:
KeeperLogStore(const std::string & changelogs_path, uint64_t rotate_interval_, bool force_sync_);
/// Read log storage from filesystem starting from last_commited_log_index
void init(uint64_t last_commited_log_index, uint64_t logs_to_keep);
uint64_t start_index() const override;
uint64_t next_slot() const override;
/// Return the last entry from the log
nuraft::ptr<nuraft::log_entry> last_entry() const override;
/// Append new entry to log
uint64_t append(nuraft::ptr<nuraft::log_entry> & entry) override;
/// Remove all entries starting from index and write entry into index position
void write_at(uint64_t index, nuraft::ptr<nuraft::log_entry> & entry) override;
/// Return entries between [start, end)
nuraft::ptr<std::vector<nuraft::ptr<nuraft::log_entry>>> log_entries(uint64_t start, uint64_t end) override;
/// Return entry at index
nuraft::ptr<nuraft::log_entry> entry_at(uint64_t index) override;
/// Term of the entry at the given index
uint64_t term_at(uint64_t index) override;
/// Serialize entries in interval [index, index + cnt)
nuraft::ptr<nuraft::buffer> pack(uint64_t index, int32_t cnt) override;
/// Apply serialized entries starting from index
void apply_pack(uint64_t index, nuraft::buffer & pack) override;
/// Entries from last_log_index can be removed from memory and from disk
bool compact(uint64_t last_log_index) override;
/// Call fsync on the stored data
bool flush() override;
/// Current log storage size
uint64_t size() const;
/// Flush batch of appended entries
void end_of_append_batch(uint64_t start_index, uint64_t count) override;
private:

View File

@ -38,6 +38,8 @@ private:
Poco::Logger * log;
/// Callback func which is called by NuRaft on all internal events.
/// Used to determine the moment when raft is ready to serve new requests
nuraft::cb_func::ReturnCode callbackFunc(nuraft::cb_func::Type type, nuraft::cb_func::Param * param);
/// Almost copy-paste from nuraft::launcher, but with separated server init and start
@ -57,18 +59,25 @@ public:
SnapshotsQueue & snapshots_queue_,
bool standalone_keeper);
/// Load state machine from the latest snapshot and load log storage. Start NuRaft with required settings.
void startup();
/// Put a local read request, execute it in the state machine directly and push the response
/// into the responses queue
void putLocalReadRequest(const KeeperStorage::RequestForSession & request);
/// Put batch of requests into Raft and get result of put. Responses will be set separately into
/// responses_queue.
RaftAppendResult putRequestBatch(const KeeperStorage::RequestsForSessions & requests);
/// Return the set of inactive sessions
std::unordered_set<int64_t> getDeadSessions();
bool isLeader() const;
bool isLeaderAlive() const;
/// Wait for server initialization (see callbackFunc)
void waitInit();
void shutdown();

View File

@ -14,29 +14,32 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
}
KeeperStorage::RequestForSession parseRequest(nuraft::buffer & data)
namespace
{
ReadBufferFromNuraftBuffer buffer(data);
KeeperStorage::RequestForSession request_for_session;
readIntBinary(request_for_session.session_id, buffer);
KeeperStorage::RequestForSession parseRequest(nuraft::buffer & data)
{
ReadBufferFromNuraftBuffer buffer(data);
KeeperStorage::RequestForSession request_for_session;
readIntBinary(request_for_session.session_id, buffer);
int32_t length;
Coordination::read(length, buffer);
int32_t length;
Coordination::read(length, buffer);
int32_t xid;
Coordination::read(xid, buffer);
int32_t xid;
Coordination::read(xid, buffer);
Coordination::OpNum opnum;
Coordination::OpNum opnum;
Coordination::read(opnum, buffer);
Coordination::read(opnum, buffer);
request_for_session.request = Coordination::ZooKeeperRequestFactory::instance().get(opnum);
request_for_session.request->xid = xid;
request_for_session.request->readImpl(buffer);
return request_for_session;
request_for_session.request = Coordination::ZooKeeperRequestFactory::instance().get(opnum);
request_for_session.request->xid = xid;
request_for_session.request->readImpl(buffer);
return request_for_session;
}
}
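
parseRequest() above fixes the wire format of a committed entry: session_id first, then the ZooKeeper-framed request (length, xid, opnum, body). For symmetry, here is a sketch of the matching writer; WriteBufferFromNuraftBuffer and the write helpers are assumed to mirror the readers and may not match the exact API.

/// Hypothetical inverse of parseRequest(): serialize a request in the order the reader consumes it.
nuraft::ptr<nuraft::buffer> serializeRequest(const KeeperStorage::RequestForSession & request_for_session)
{
    DB::WriteBufferFromNuraftBuffer buffer;
    DB::writeIntBinary(request_for_session.session_id, buffer);
    /// Writes length, xid, opnum and the request body, framed the ZooKeeper way.
    request_for_session.request->write(buffer);
    return buffer.getBuffer();
}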
KeeperStateMachine::KeeperStateMachine(
KeeperStateMachine::KeeperStateMachine(
ResponsesQueue & responses_queue_,
SnapshotsQueue & snapshots_queue_,
const std::string & snapshots_path_,
@ -58,6 +61,7 @@ void KeeperStateMachine::init()
LOG_DEBUG(log, "Totally have {} snapshots", snapshot_manager.totalSnapshots());
bool loaded = false;
bool has_snapshots = snapshot_manager.totalSnapshots() != 0;
/// Deserialize latest snapshot from disk
while (snapshot_manager.totalSnapshots() != 0)
{
uint64_t latest_log_index = snapshot_manager.getLatestSnapshotIndex();
@ -97,6 +101,7 @@ void KeeperStateMachine::init()
nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, nuraft::buffer & data)
{
auto request_for_session = parseRequest(data);
/// Special processing of session_id request
if (request_for_session.request->getOpNum() == Coordination::OpNum::SessionID)
{
const Coordination::ZooKeeperSessionIDRequest & session_id_request = dynamic_cast<const Coordination::ZooKeeperSessionIDRequest &>(*request_for_session.request);
@ -136,7 +141,7 @@ bool KeeperStateMachine::apply_snapshot(nuraft::snapshot & s)
{
LOG_DEBUG(log, "Applying snapshot {}", s.get_last_log_idx());
nuraft::ptr<nuraft::buffer> latest_snapshot_ptr;
{
{ /// save snapshot into memory
std::lock_guard lock(snapshots_lock);
if (s.get_last_log_idx() != latest_snapshot_meta->get_last_log_idx())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Required to apply snapshot with last log index {}, but our last log index is {}",
@ -144,10 +149,11 @@ bool KeeperStateMachine::apply_snapshot(nuraft::snapshot & s)
latest_snapshot_ptr = latest_snapshot_buf;
}
{
{ /// deserialize and apply snapshot to storage
std::lock_guard lock(storage_lock);
std::tie(latest_snapshot_meta, storage) = snapshot_manager.deserializeSnapshotFromBuffer(latest_snapshot_ptr);
}
last_committed_idx = s.get_last_log_idx();
return true;
}
@ -168,18 +174,19 @@ void KeeperStateMachine::create_snapshot(
nuraft::ptr<nuraft::buffer> snp_buf = s.serialize();
auto snapshot_meta_copy = nuraft::snapshot::deserialize(*snp_buf);
CreateSnapshotTask snapshot_task;
{
{ /// lock storage for a short period of time to turn on "snapshot mode". After that we can read a consistent storage state without locking.
std::lock_guard lock(storage_lock);
snapshot_task.snapshot = std::make_shared<KeeperStorageSnapshot>(storage.get(), snapshot_meta_copy);
}
/// create snapshot task for background execution (in snapshot thread)
snapshot_task.create_snapshot = [this, when_done] (KeeperStorageSnapshotPtr && snapshot)
{
nuraft::ptr<std::exception> exception(nullptr);
bool ret = true;
try
{
{
{ /// Read storage data without locks and create snapshot
std::lock_guard lock(snapshots_lock);
auto snapshot_buf = snapshot_manager.serializeSnapshotToBuffer(*snapshot);
auto result_path = snapshot_manager.serializeSnapshotBufferToDisk(*snapshot_buf, snapshot->snapshot_meta->get_last_log_idx());
@ -192,6 +199,7 @@ void KeeperStateMachine::create_snapshot(
{
/// Must do it with lock (clearing elements from list)
std::lock_guard lock(storage_lock);
/// Turn off "snapshot mode" and clear the outdated part of the storage state
storage->clearGarbageAfterSnapshot();
/// Destroy snapshot with lock
snapshot.reset();
@ -209,7 +217,9 @@ void KeeperStateMachine::create_snapshot(
when_done(ret, exception);
};
LOG_DEBUG(log, "In memory snapshot {} created, queueing task to flash to disk", s.get_last_log_idx());
/// Flush snapshot to disk in a separate thread.
snapshots_queue.push(std::move(snapshot_task));
}
@ -224,7 +234,7 @@ void KeeperStateMachine::save_logical_snp_obj(
nuraft::ptr<nuraft::buffer> cloned_buffer;
nuraft::ptr<nuraft::snapshot> cloned_meta;
if (obj_id == 0)
if (obj_id == 0) /// Fake snapshot required by NuRaft at startup
{
std::lock_guard lock(storage_lock);
KeeperStorageSnapshot snapshot(storage.get(), s.get_last_log_idx());
@ -232,15 +242,18 @@ void KeeperStateMachine::save_logical_snp_obj(
}
else
{
/// copy snapshot into memory
cloned_buffer = nuraft::buffer::clone(data);
}
/// copy snapshot meta into memory
nuraft::ptr<nuraft::buffer> snp_buf = s.serialize();
cloned_meta = nuraft::snapshot::deserialize(*snp_buf);
try
{
std::lock_guard lock(snapshots_lock);
/// Serialize snapshot to disk and switch in-memory pointers.
auto result_path = snapshot_manager.serializeSnapshotBufferToDisk(*cloned_buffer, s.get_last_log_idx());
latest_snapshot_buf = cloned_buffer;
latest_snapshot_meta = cloned_meta;
@ -262,7 +275,7 @@ int KeeperStateMachine::read_logical_snp_obj(
{
LOG_DEBUG(log, "Reading snapshot {} obj_id {}", s.get_last_log_idx(), obj_id);
if (obj_id == 0)
if (obj_id == 0) /// Fake snapshot required by NuRaft at startup
{
data_out = nuraft::buffer::alloc(sizeof(int32_t));
nuraft::buffer_serializer bs(data_out);
@ -272,6 +285,8 @@ int KeeperStateMachine::read_logical_snp_obj(
else
{
std::lock_guard lock(snapshots_lock);
/// Our snapshot is not equal to the required one. Maybe we are still creating it in the background.
/// Let's wait and NuRaft will retry this call.
if (s.get_last_log_idx() != latest_snapshot_meta->get_last_log_idx())
{
LOG_WARNING(log, "Required to apply snapshot with last log index {}, but our last log index is {}. Will ignore this one and retry",
@ -281,11 +296,13 @@ int KeeperStateMachine::read_logical_snp_obj(
data_out = nuraft::buffer::clone(*latest_snapshot_buf);
is_last_obj = true;
}
return 1;
}
void KeeperStateMachine::processReadRequest(const KeeperStorage::RequestForSession & request_for_session)
{
/// Pure local request, just process it with storage
KeeperStorage::ResponsesForSessions responses;
{
std::lock_guard lock(storage_lock);

View File

@ -13,6 +13,8 @@ namespace DB
using ResponsesQueue = ThreadSafeQueue<KeeperStorage::ResponseForSession>;
using SnapshotsQueue = ConcurrentBoundedQueue<CreateSnapshotTask>;
/// ClickHouse Keeper state machine. Wrapper for KeeperStorage.
/// Responsible for entry commits, snapshot creation and so on.
class KeeperStateMachine : public nuraft::state_machine
{
public:
@ -21,24 +23,30 @@ public:
const std::string & snapshots_path_, const CoordinationSettingsPtr & coordination_settings_,
const std::string & superdigest_ = "");
/// Read state from the latest snapshot
void init();
/// Currently not supported
nuraft::ptr<nuraft::buffer> pre_commit(const uint64_t /*log_idx*/, nuraft::buffer & /*data*/) override { return nullptr; }
nuraft::ptr<nuraft::buffer> commit(const uint64_t log_idx, nuraft::buffer & data) override;
/// Currently not supported
void rollback(const uint64_t /*log_idx*/, nuraft::buffer & /*data*/) override {}
uint64_t last_commit_index() override { return last_committed_idx; }
/// Apply preliminarily saved (save_logical_snp_obj) snapshot to our state.
bool apply_snapshot(nuraft::snapshot & s) override;
nuraft::ptr<nuraft::snapshot> last_snapshot() override;
/// Create new snapshot from current state.
void create_snapshot(
nuraft::snapshot & s,
nuraft::async_result<bool>::handler_type & when_done) override;
/// Save the snapshot which was sent to us by the leader. After that we will apply it in apply_snapshot.
void save_logical_snp_obj(
nuraft::snapshot & s,
uint64_t & obj_id,
@ -46,6 +54,8 @@ public:
bool is_first_obj,
bool is_last_obj) override;
/// A better name would be `serialize_snapshot` -- save the existing snapshot (created by create_snapshot) into
/// the in-memory buffer data_out.
int read_logical_snp_obj(
nuraft::snapshot & s,
void* & user_snp_ctx,
@ -58,6 +68,7 @@ public:
return *storage;
}
/// Process local read request
void processReadRequest(const KeeperStorage::RequestForSession & request_for_session);
std::unordered_set<int64_t> getDeadSessions();
@ -66,18 +77,25 @@ public:
private:
/// In our state machine we always have a single snapshot which is stored
/// in memory in compressed (serialized) format.
SnapshotMetadataPtr latest_snapshot_meta = nullptr;
nuraft::ptr<nuraft::buffer> latest_snapshot_buf = nullptr;
CoordinationSettingsPtr coordination_settings;
/// Main state machine logic
KeeperStoragePtr storage;
/// Save/Load and Serialize/Deserialize logic for snapshots.
KeeperSnapshotManager snapshot_manager;
/// Put processed responses into this queue
ResponsesQueue & responses_queue;
/// Snapshots to be created by the snapshot thread
SnapshotsQueue & snapshots_queue;
/// Mutex for snapshots
std::mutex snapshots_lock;
@ -88,6 +106,7 @@ private:
std::atomic<uint64_t> last_committed_idx;
Poco::Logger * log;
/// Special part of ACL system -- superdigest specified in server config.
const std::string superdigest;
};

View File

@ -177,32 +177,32 @@ KeeperStorage::KeeperStorage(int64_t tick_time_ms, const String & superdigest_)
using Undo = std::function<void()>;
struct KeeperStorageRequest
struct KeeperStorageRequestProcessor
{
Coordination::ZooKeeperRequestPtr zk_request;
explicit KeeperStorageRequest(const Coordination::ZooKeeperRequestPtr & zk_request_)
explicit KeeperStorageRequestProcessor(const Coordination::ZooKeeperRequestPtr & zk_request_)
: zk_request(zk_request_)
{}
virtual std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage & storage, int64_t zxid, int64_t session_id) const = 0;
virtual KeeperStorage::ResponsesForSessions processWatches(KeeperStorage::Watches & /*watches*/, KeeperStorage::Watches & /*list_watches*/) const { return {}; }
virtual bool checkAuth(KeeperStorage & /*storage*/, int64_t /*session_id*/) const { return true; }
virtual ~KeeperStorageRequest() = default;
virtual ~KeeperStorageRequestProcessor() = default;
};
struct KeeperStorageHeartbeatRequest final : public KeeperStorageRequest
struct KeeperStorageHeartbeatRequestProcessor final : public KeeperStorageRequestProcessor
{
using KeeperStorageRequest::KeeperStorageRequest;
using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage & /* storage */, int64_t /* zxid */, int64_t /* session_id */) const override
{
return {zk_request->makeResponse(), {}};
}
};
struct KeeperStorageSyncRequest final : public KeeperStorageRequest
struct KeeperStorageSyncRequestProcessor final : public KeeperStorageRequestProcessor
{
using KeeperStorageRequest::KeeperStorageRequest;
using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage & /* storage */, int64_t /* zxid */, int64_t /* session_id */) const override
{
auto response = zk_request->makeResponse();
@ -212,9 +212,9 @@ struct KeeperStorageSyncRequest final : public KeeperStorageRequest
}
};
struct KeeperStorageCreateRequest final : public KeeperStorageRequest
struct KeeperStorageCreateRequestProcessor final : public KeeperStorageRequestProcessor
{
using KeeperStorageRequest::KeeperStorageRequest;
using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor;
KeeperStorage::ResponsesForSessions processWatches(KeeperStorage::Watches & watches, KeeperStorage::Watches & list_watches) const override
{
@ -363,7 +363,7 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest
}
};
struct KeeperStorageGetRequest final : public KeeperStorageRequest
struct KeeperStorageGetRequestProcessor final : public KeeperStorageRequestProcessor
{
bool checkAuth(KeeperStorage & storage, int64_t session_id) const override
@ -381,7 +381,7 @@ struct KeeperStorageGetRequest final : public KeeperStorageRequest
return checkACL(Coordination::ACL::Read, node_acls, session_auths);
}
using KeeperStorageRequest::KeeperStorageRequest;
using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage & storage, int64_t /* zxid */, int64_t /* session_id */) const override
{
auto & container = storage.container;
@ -423,7 +423,7 @@ namespace
}
}
struct KeeperStorageRemoveRequest final : public KeeperStorageRequest
struct KeeperStorageRemoveRequestProcessor final : public KeeperStorageRequestProcessor
{
bool checkAuth(KeeperStorage & storage, int64_t session_id) const override
{
@ -440,7 +440,7 @@ struct KeeperStorageRemoveRequest final : public KeeperStorageRequest
return checkACL(Coordination::ACL::Delete, node_acls, session_auths);
}
using KeeperStorageRequest::KeeperStorageRequest;
using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage & storage, int64_t zxid, int64_t /*session_id*/) const override
{
auto & container = storage.container;
@ -520,9 +520,9 @@ struct KeeperStorageRemoveRequest final : public KeeperStorageRequest
}
};
struct KeeperStorageExistsRequest final : public KeeperStorageRequest
struct KeeperStorageExistsRequestProcessor final : public KeeperStorageRequestProcessor
{
using KeeperStorageRequest::KeeperStorageRequest;
using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage & storage, int64_t /*zxid*/, int64_t /* session_id */) const override
{
auto & container = storage.container;
@ -546,7 +546,7 @@ struct KeeperStorageExistsRequest final : public KeeperStorageRequest
}
};
struct KeeperStorageSetRequest final : public KeeperStorageRequest
struct KeeperStorageSetRequestProcessor final : public KeeperStorageRequestProcessor
{
bool checkAuth(KeeperStorage & storage, int64_t session_id) const override
{
@ -563,7 +563,7 @@ struct KeeperStorageSetRequest final : public KeeperStorageRequest
return checkACL(Coordination::ACL::Write, node_acls, session_auths);
}
using KeeperStorageRequest::KeeperStorageRequest;
using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage & storage, int64_t zxid, int64_t /* session_id */) const override
{
auto & container = storage.container;
@ -624,7 +624,7 @@ struct KeeperStorageSetRequest final : public KeeperStorageRequest
}
};
struct KeeperStorageListRequest final : public KeeperStorageRequest
struct KeeperStorageListRequestProcessor final : public KeeperStorageRequestProcessor
{
bool checkAuth(KeeperStorage & storage, int64_t session_id) const override
{
@ -641,7 +641,7 @@ struct KeeperStorageListRequest final : public KeeperStorageRequest
return checkACL(Coordination::ACL::Read, node_acls, session_auths);
}
using KeeperStorageRequest::KeeperStorageRequest;
using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage & storage, int64_t /*zxid*/, int64_t /*session_id*/) const override
{
auto & container = storage.container;
@ -669,7 +669,7 @@ struct KeeperStorageListRequest final : public KeeperStorageRequest
}
};
struct KeeperStorageCheckRequest final : public KeeperStorageRequest
struct KeeperStorageCheckRequestProcessor final : public KeeperStorageRequestProcessor
{
bool checkAuth(KeeperStorage & storage, int64_t session_id) const override
{
@ -686,7 +686,7 @@ struct KeeperStorageCheckRequest final : public KeeperStorageRequest
return checkACL(Coordination::ACL::Read, node_acls, session_auths);
}
using KeeperStorageRequest::KeeperStorageRequest;
using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage & storage, int64_t /*zxid*/, int64_t /*session_id*/) const override
{
auto & container = storage.container;
@ -713,7 +713,7 @@ struct KeeperStorageCheckRequest final : public KeeperStorageRequest
};
struct KeeperStorageSetACLRequest final : public KeeperStorageRequest
struct KeeperStorageSetACLRequestProcessor final : public KeeperStorageRequestProcessor
{
bool checkAuth(KeeperStorage & storage, int64_t session_id) const override
{
@ -730,7 +730,7 @@ struct KeeperStorageSetACLRequest final : public KeeperStorageRequest
return checkACL(Coordination::ACL::Admin, node_acls, session_auths);
}
using KeeperStorageRequest::KeeperStorageRequest;
using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage & storage, int64_t /*zxid*/, int64_t session_id) const override
{
@ -777,7 +777,7 @@ struct KeeperStorageSetACLRequest final : public KeeperStorageRequest
}
};
struct KeeperStorageGetACLRequest final : public KeeperStorageRequest
struct KeeperStorageGetACLRequestProcessor final : public KeeperStorageRequestProcessor
{
bool checkAuth(KeeperStorage & storage, int64_t session_id) const override
{
@ -794,7 +794,7 @@ struct KeeperStorageGetACLRequest final : public KeeperStorageRequest
/// LOL, GetACL requires more permissions than SetACL...
return checkACL(Coordination::ACL::Admin | Coordination::ACL::Read, node_acls, session_auths);
}
using KeeperStorageRequest::KeeperStorageRequest;
using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage & storage, int64_t /*zxid*/, int64_t /*session_id*/) const override
{
@ -817,7 +817,7 @@ struct KeeperStorageGetACLRequest final : public KeeperStorageRequest
}
};
struct KeeperStorageMultiRequest final : public KeeperStorageRequest
struct KeeperStorageMultiRequestProcessor final : public KeeperStorageRequestProcessor
{
bool checkAuth(KeeperStorage & storage, int64_t session_id) const override
{
@ -827,9 +827,9 @@ struct KeeperStorageMultiRequest final : public KeeperStorageRequest
return true;
}
std::vector<KeeperStorageRequestPtr> concrete_requests;
explicit KeeperStorageMultiRequest(const Coordination::ZooKeeperRequestPtr & zk_request_)
: KeeperStorageRequest(zk_request_)
std::vector<KeeperStorageRequestProcessorPtr> concrete_requests;
explicit KeeperStorageMultiRequestProcessor(const Coordination::ZooKeeperRequestPtr & zk_request_)
: KeeperStorageRequestProcessor(zk_request_)
{
Coordination::ZooKeeperMultiRequest & request = dynamic_cast<Coordination::ZooKeeperMultiRequest &>(*zk_request);
concrete_requests.reserve(request.requests.size());
@ -839,19 +839,19 @@ struct KeeperStorageMultiRequest final : public KeeperStorageRequest
auto sub_zk_request = std::dynamic_pointer_cast<Coordination::ZooKeeperRequest>(sub_request);
if (sub_zk_request->getOpNum() == Coordination::OpNum::Create)
{
concrete_requests.push_back(std::make_shared<KeeperStorageCreateRequest>(sub_zk_request));
concrete_requests.push_back(std::make_shared<KeeperStorageCreateRequestProcessor>(sub_zk_request));
}
else if (sub_zk_request->getOpNum() == Coordination::OpNum::Remove)
{
concrete_requests.push_back(std::make_shared<KeeperStorageRemoveRequest>(sub_zk_request));
concrete_requests.push_back(std::make_shared<KeeperStorageRemoveRequestProcessor>(sub_zk_request));
}
else if (sub_zk_request->getOpNum() == Coordination::OpNum::Set)
{
concrete_requests.push_back(std::make_shared<KeeperStorageSetRequest>(sub_zk_request));
concrete_requests.push_back(std::make_shared<KeeperStorageSetRequestProcessor>(sub_zk_request));
}
else if (sub_zk_request->getOpNum() == Coordination::OpNum::Check)
{
concrete_requests.push_back(std::make_shared<KeeperStorageCheckRequest>(sub_zk_request));
concrete_requests.push_back(std::make_shared<KeeperStorageCheckRequestProcessor>(sub_zk_request));
}
else
throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Illegal command as part of multi ZooKeeper request {}", sub_zk_request->getOpNum());
@ -923,18 +923,18 @@ struct KeeperStorageMultiRequest final : public KeeperStorageRequest
}
};
struct KeeperStorageCloseRequest final : public KeeperStorageRequest
struct KeeperStorageCloseRequestProcessor final : public KeeperStorageRequestProcessor
{
using KeeperStorageRequest::KeeperStorageRequest;
using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage &, int64_t, int64_t) const override
{
throw DB::Exception("Called process on close request", ErrorCodes::LOGICAL_ERROR);
}
};
struct KeeperStorageAuthRequest final : public KeeperStorageRequest
struct KeeperStorageAuthRequestProcessor final : public KeeperStorageRequestProcessor
{
using KeeperStorageRequest::KeeperStorageRequest;
using KeeperStorageRequestProcessor::KeeperStorageRequestProcessor;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage & storage, int64_t /*zxid*/, int64_t session_id) const override
{
Coordination::ZooKeeperAuthRequest & auth_request = dynamic_cast<Coordination::ZooKeeperAuthRequest &>(*zk_request);
@ -988,20 +988,20 @@ void KeeperStorage::finalize()
}
class KeeperWrapperFactory final : private boost::noncopyable
class KeeperStorageRequestProcessorsFactory final : private boost::noncopyable
{
public:
using Creator = std::function<KeeperStorageRequestPtr(const Coordination::ZooKeeperRequestPtr &)>;
using Creator = std::function<KeeperStorageRequestProcessorPtr(const Coordination::ZooKeeperRequestPtr &)>;
using OpNumToRequest = std::unordered_map<Coordination::OpNum, Creator>;
static KeeperWrapperFactory & instance()
static KeeperStorageRequestProcessorsFactory & instance()
{
static KeeperWrapperFactory factory;
static KeeperStorageRequestProcessorsFactory factory;
return factory;
}
KeeperStorageRequestPtr get(const Coordination::ZooKeeperRequestPtr & zk_request) const
KeeperStorageRequestProcessorPtr get(const Coordination::ZooKeeperRequestPtr & zk_request) const
{
auto it = op_num_to_request.find(zk_request->getOpNum());
if (it == op_num_to_request.end())
@ -1018,33 +1018,33 @@ public:
private:
OpNumToRequest op_num_to_request;
KeeperWrapperFactory();
KeeperStorageRequestProcessorsFactory();
};
template<Coordination::OpNum num, typename RequestT>
void registerKeeperRequestWrapper(KeeperWrapperFactory & factory)
void registerKeeperRequestProcessor(KeeperStorageRequestProcessorsFactory & factory)
{
factory.registerRequest(num, [] (const Coordination::ZooKeeperRequestPtr & zk_request) { return std::make_shared<RequestT>(zk_request); });
}
KeeperWrapperFactory::KeeperWrapperFactory()
KeeperStorageRequestProcessorsFactory::KeeperStorageRequestProcessorsFactory()
{
registerKeeperRequestWrapper<Coordination::OpNum::Heartbeat, KeeperStorageHeartbeatRequest>(*this);
registerKeeperRequestWrapper<Coordination::OpNum::Sync, KeeperStorageSyncRequest>(*this);
registerKeeperRequestWrapper<Coordination::OpNum::Auth, KeeperStorageAuthRequest>(*this);
registerKeeperRequestWrapper<Coordination::OpNum::Close, KeeperStorageCloseRequest>(*this);
registerKeeperRequestWrapper<Coordination::OpNum::Create, KeeperStorageCreateRequest>(*this);
registerKeeperRequestWrapper<Coordination::OpNum::Remove, KeeperStorageRemoveRequest>(*this);
registerKeeperRequestWrapper<Coordination::OpNum::Exists, KeeperStorageExistsRequest>(*this);
registerKeeperRequestWrapper<Coordination::OpNum::Get, KeeperStorageGetRequest>(*this);
registerKeeperRequestWrapper<Coordination::OpNum::Set, KeeperStorageSetRequest>(*this);
registerKeeperRequestWrapper<Coordination::OpNum::List, KeeperStorageListRequest>(*this);
registerKeeperRequestWrapper<Coordination::OpNum::SimpleList, KeeperStorageListRequest>(*this);
registerKeeperRequestWrapper<Coordination::OpNum::Check, KeeperStorageCheckRequest>(*this);
registerKeeperRequestWrapper<Coordination::OpNum::Multi, KeeperStorageMultiRequest>(*this);
registerKeeperRequestWrapper<Coordination::OpNum::SetACL, KeeperStorageSetACLRequest>(*this);
registerKeeperRequestWrapper<Coordination::OpNum::GetACL, KeeperStorageGetACLRequest>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::Heartbeat, KeeperStorageHeartbeatRequestProcessor>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::Sync, KeeperStorageSyncRequestProcessor>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::Auth, KeeperStorageAuthRequestProcessor>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::Close, KeeperStorageCloseRequestProcessor>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::Create, KeeperStorageCreateRequestProcessor>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::Remove, KeeperStorageRemoveRequestProcessor>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::Exists, KeeperStorageExistsRequestProcessor>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::Get, KeeperStorageGetRequestProcessor>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::Set, KeeperStorageSetRequestProcessor>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::List, KeeperStorageListRequestProcessor>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::SimpleList, KeeperStorageListRequestProcessor>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::Check, KeeperStorageCheckRequestProcessor>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::Multi, KeeperStorageMultiRequestProcessor>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::SetACL, KeeperStorageSetACLRequestProcessor>(*this);
registerKeeperRequestProcessor<Coordination::OpNum::GetACL, KeeperStorageGetACLRequestProcessor>(*this);
}
@ -1059,7 +1059,8 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordina
}
session_expiry_queue.update(session_id, session_and_timeout[session_id]);
if (zk_request->getOpNum() == Coordination::OpNum::Close)
if (zk_request->getOpNum() == Coordination::OpNum::Close) /// Close request is special
{
auto it = ephemerals.find(session_id);
if (it != ephemerals.end())
@ -1092,21 +1093,21 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordina
session_and_timeout.erase(session_id);
results.push_back(ResponseForSession{session_id, response});
}
else if (zk_request->getOpNum() == Coordination::OpNum::Heartbeat)
else if (zk_request->getOpNum() == Coordination::OpNum::Heartbeat) /// Heartbeat request is also special
{
KeeperStorageRequestPtr storage_request = KeeperWrapperFactory::instance().get(zk_request);
KeeperStorageRequestProcessorPtr storage_request = KeeperStorageRequestProcessorsFactory::instance().get(zk_request);
auto [response, _] = storage_request->process(*this, zxid, session_id);
response->xid = zk_request->xid;
response->zxid = getZXID();
results.push_back(ResponseForSession{session_id, response});
}
else
else /// normal request processing
{
KeeperStorageRequestPtr storage_request = KeeperWrapperFactory::instance().get(zk_request);
KeeperStorageRequestProcessorPtr request_processor = KeeperStorageRequestProcessorsFactory::instance().get(zk_request);
Coordination::ZooKeeperResponsePtr response;
if (check_acl && !storage_request->checkAuth(*this, session_id))
if (check_acl && !request_processor->checkAuth(*this, session_id))
{
response = zk_request->makeResponse();
/// Original ZooKeeper always throws no auth, even when user provided some credentials
@ -1114,9 +1115,10 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordina
}
else
{
std::tie(response, std::ignore) = storage_request->process(*this, zxid, session_id);
std::tie(response, std::ignore) = request_processor->process(*this, zxid, session_id);
}
/// Watches for this request are added to the watches lists
if (zk_request->has_watch)
{
if (response->error == Coordination::Error::ZOK)
@ -1135,9 +1137,10 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordina
}
}
/// If this request was processed successfully, we need to check watches
if (response->error == Coordination::Error::ZOK)
{
auto watch_responses = storage_request->processWatches(watches, list_watches);
auto watch_responses = request_processor->processWatches(watches, list_watches);
results.insert(results.end(), watch_responses.begin(), watch_responses.end());
}
@ -1153,11 +1156,13 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordina
void KeeperStorage::clearDeadWatches(int64_t session_id)
{
/// Clear all watches for this session
auto watches_it = sessions_and_watchers.find(session_id);
if (watches_it != sessions_and_watchers.end())
{
for (const auto & watch_path : watches_it->second)
{
/// Maybe it's a normal watch
auto watch = watches.find(watch_path);
if (watch != watches.end())
{
@ -1173,6 +1178,7 @@ void KeeperStorage::clearDeadWatches(int64_t session_id)
watches.erase(watch);
}
/// Maybe it's a list watch
auto list_watch = list_watches.find(watch_path);
if (list_watch != list_watches.end())
{
@ -1188,6 +1194,7 @@ void KeeperStorage::clearDeadWatches(int64_t session_id)
list_watches.erase(list_watch);
}
}
sessions_and_watchers.erase(watches_it);
}
}

View File

@ -15,14 +15,17 @@ namespace DB
{
using namespace DB;
struct KeeperStorageRequest;
using KeeperStorageRequestPtr = std::shared_ptr<KeeperStorageRequest>;
struct KeeperStorageRequestProcessor;
using KeeperStorageRequestProcessorPtr = std::shared_ptr<KeeperStorageRequestProcessor>;
using ResponseCallback = std::function<void(const Coordination::ZooKeeperResponsePtr &)>;
using ChildrenSet = std::unordered_set<std::string>;
using SessionAndTimeout = std::unordered_map<int64_t, int64_t>;
struct KeeperStorageSnapshot;
/// Keeper state machine, almost equal to ZooKeeper's state machine.
/// Implements all logic of operations, data changes, sessions allocation.
/// In-memory and not thread safe.
class KeeperStorage
{
public:
@ -77,21 +80,34 @@ public:
using Watches = std::map<String /* path, relative of root_path */, SessionIDs>;
/// Main hashtable with nodes. Contains all information about the data.
/// All other structures except session_and_timeout can be restored from
/// the container.
Container container;
/// Mapping session_id -> set of ephemeral nodes paths
Ephemerals ephemerals;
/// Mapping session_id -> set of watched nodes paths
SessionAndWatcher sessions_and_watchers;
/// Expiration queue for sessions; allows getting dead sessions at some point in time
SessionExpiryQueue session_expiry_queue;
/// All active sessions with timeout
SessionAndTimeout session_and_timeout;
/// ACLMap for more compact ACLs storage inside nodes.
ACLMap acl_map;
/// Global id of all requests applied to storage
int64_t zxid{0};
bool finalized{false};
/// Currently active watches (node_path -> subscribed sessions)
Watches watches;
Watches list_watches; /// Watches for 'list' request (watches on children).
void clearDeadWatches(int64_t session_id);
/// Get current zxid
int64_t getZXID() const
{
return zxid;
@ -102,6 +118,7 @@ public:
public:
KeeperStorage(int64_t tick_time_ms, const String & superdigest_);
/// Allocate a new session id with the specified timeout
int64_t getSessionID(int64_t session_timeout_ms)
{
auto result = session_id_counter++;
@ -110,21 +127,28 @@ public:
return result;
}
/// Add session id. Used when restoring KeeperStorage from snapshot.
void addSessionID(int64_t session_id, int64_t session_timeout_ms)
{
session_and_timeout.emplace(session_id, session_timeout_ms);
session_expiry_queue.update(session_id, session_timeout_ms);
}
/// Process user request and return response.
/// check_acl = false only when converting data from ZooKeeper.
ResponsesForSessions processRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, std::optional<int64_t> new_last_zxid, bool check_acl = true);
void finalize();
/// Set of methods for creating snapshots
/// Turn on snapshot mode, so data inside Container is not deleted, but replaced with new version.
void enableSnapshotMode()
{
container.enableSnapshotMode();
}
/// Turn off snapshot mode.
void disableSnapshotMode()
{
container.disableSnapshotMode();
@ -135,16 +159,19 @@ public:
return container.begin();
}
/// Clear outdated data from internal container.
void clearGarbageAfterSnapshot()
{
container.clearOutdatedNodes();
}
/// Get all active sessions
const SessionAndTimeout & getActiveSessions() const
{
return session_and_timeout;
}
/// Get all dead sessions
std::unordered_set<int64_t> getDeadSessions()
{
return session_expiry_queue.getExpiredSessions();

View File

@ -526,7 +526,18 @@ void RemoteQueryExecutor::tryCancel(const char * reason, std::unique_ptr<ReadCon
was_cancelled = true;
if (read_context && *read_context)
{
/// The timer should be set for query cancellation to avoid the cancellation hanging.
///
/// If the remote server terminates abnormally, neither a FIN nor an RST
/// packet will be sent, and the initiator will not know that the
/// connection died (unless tcp_keep_alive_timeout > 0).
///
/// Also note that it is possible to get into this situation even when
/// enough data has already been read.
(*read_context)->setTimer();
(*read_context)->cancel();
}
connections->sendCancel();
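
The comment above describes the classic half-open connection problem: a dead peer that sent neither FIN nor RST leaves a blocking read waiting forever. The generic fix, sketched below with a Linux timerfd in an epoll loop, mirrors the idea behind setTimer() but is not the executor's actual implementation; socket_fd is assumed to be registered in epoll_fd already.

#include <sys/epoll.h>
#include <sys/timerfd.h>
#include <unistd.h>
#include <cstdint>

/// Returns true if socket_fd became readable before the deadline, false if the
/// timer fired first (peer presumed dead, the read is abandoned).
bool waitReadableWithDeadline(int epoll_fd, int socket_fd, uint64_t timeout_usec)
{
    int timer_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK | TFD_CLOEXEC);

    itimerspec spec{};
    spec.it_value.tv_sec = timeout_usec / 1000000;
    spec.it_value.tv_nsec = (timeout_usec % 1000000) * 1000;
    timerfd_settime(timer_fd, 0, &spec, nullptr);

    epoll_event ev{};
    ev.events = EPOLLIN;
    ev.data.fd = timer_fd;
    epoll_ctl(epoll_fd, EPOLL_CTL_ADD, timer_fd, &ev);

    epoll_event out{};
    int n = epoll_wait(epoll_fd, &out, 1, -1);

    epoll_ctl(epoll_fd, EPOLL_CTL_DEL, timer_fd, nullptr);
    close(timer_fd);

    return n > 0 && out.data.fd == socket_fd;
}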

View File

@ -100,7 +100,7 @@ void RemoteQueryExecutorReadContext::setConnectionFD(int fd, Poco::Timespan time
connection_fd = fd;
epoll.add(connection_fd);
receive_timeout = timeout;
receive_timeout_usec = timeout.totalMicroseconds();
connection_fd_description = fd_description;
}
@ -157,8 +157,8 @@ void RemoteQueryExecutorReadContext::setTimer() const
/// Did not get packet yet. Init timeout for the next async reading.
timer.reset();
if (receive_timeout.totalMicroseconds())
timer.setRelative(receive_timeout);
if (receive_timeout_usec)
timer.setRelative(receive_timeout_usec);
}
bool RemoteQueryExecutorReadContext::resumeRoutine()

View File

@ -34,7 +34,8 @@ public:
/// This mutex for the fiber is needed because the fiber could be destroyed by the cancel() method from another thread.
std::mutex fiber_lock;
Poco::Timespan receive_timeout;
/// atomic is required due to a data race between setConnectionFD() and setTimer() on the cancellation path.
std::atomic<uint64_t> receive_timeout_usec = 0;
IConnections & connections;
Poco::Net::Socket * last_used_socket = nullptr;
@ -75,6 +76,7 @@ class RemoteQueryExecutorReadContext
{
public:
void cancel() {}
void setTimer() {}
};
}

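A reduced illustration (assumed names) of the race the new atomic closes: the timeout is written by the query thread in setConnectionFD() but read from the cancellation path in setTimer(); reading a plain multi-word Poco::Timespan there would be a data race, while a single atomic 64-bit load is well defined.

#include <atomic>
#include <cstdint>

struct TimeoutHolderSketch
{
    std::atomic<uint64_t> receive_timeout_usec{0};

    // Called on the query thread.
    void setConnectionFD(uint64_t usec) { receive_timeout_usec.store(usec); }

    // Called from the cancellation path, possibly on another thread.
    uint64_t loadForTimer() const { return receive_timeout_usec.load(); }
};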
View File

@ -76,17 +76,17 @@ TTLBlockInputStream::TTLBlockInputStream(
algorithms.emplace_back(std::make_unique<TTLColumnAlgorithm>(
description, old_ttl_infos.columns_ttl[name], current_time_,
force_, name, default_expression, default_column_name));
force_, name, default_expression, default_column_name, isCompactPart(data_part)));
}
}
for (const auto & move_ttl : metadata_snapshot_->getMoveTTLs())
algorithms.emplace_back(std::make_unique<TTLMoveAlgorithm>(
move_ttl, old_ttl_infos.moves_ttl[move_ttl.result_column], current_time_, force_));
algorithms.emplace_back(std::make_unique<TTLUpdateInfoAlgorithm>(
move_ttl, TTLUpdateField::MOVES_TTL, move_ttl.result_column, old_ttl_infos.moves_ttl[move_ttl.result_column], current_time_, force_));
for (const auto & recompression_ttl : metadata_snapshot_->getRecompressionTTLs())
algorithms.emplace_back(std::make_unique<TTLRecompressionAlgorithm>(
recompression_ttl, old_ttl_infos.recompression_ttl[recompression_ttl.result_column], current_time_, force_));
algorithms.emplace_back(std::make_unique<TTLUpdateInfoAlgorithm>(
recompression_ttl, TTLUpdateField::RECOMPRESSION_TTL, recompression_ttl.result_column, old_ttl_infos.recompression_ttl[recompression_ttl.result_column], current_time_, force_));
}
Block reorderColumns(Block block, const Block & header)

View File

@ -0,0 +1,77 @@
#include <DataStreams/TTLCalcInputStream.h>
#include <DataStreams/TTLUpdateInfoAlgorithm.h>
namespace DB
{
TTLCalcInputStream::TTLCalcInputStream(
const BlockInputStreamPtr & input_,
const MergeTreeData & storage_,
const StorageMetadataPtr & metadata_snapshot_,
const MergeTreeData::MutableDataPartPtr & data_part_,
time_t current_time_,
bool force_)
: data_part(data_part_)
, log(&Poco::Logger::get(storage_.getLogName() + " (TTLCalcInputStream)"))
{
children.push_back(input_);
header = children.at(0)->getHeader();
auto old_ttl_infos = data_part->ttl_infos;
if (metadata_snapshot_->hasRowsTTL())
{
const auto & rows_ttl = metadata_snapshot_->getRowsTTL();
algorithms.emplace_back(std::make_unique<TTLUpdateInfoAlgorithm>(
rows_ttl, TTLUpdateField::TABLE_TTL, rows_ttl.result_column, old_ttl_infos.table_ttl, current_time_, force_));
}
for (const auto & where_ttl : metadata_snapshot_->getRowsWhereTTLs())
algorithms.emplace_back(std::make_unique<TTLUpdateInfoAlgorithm>(
where_ttl, TTLUpdateField::ROWS_WHERE_TTL, where_ttl.result_column, old_ttl_infos.rows_where_ttl[where_ttl.result_column], current_time_, force_));
for (const auto & group_by_ttl : metadata_snapshot_->getGroupByTTLs())
algorithms.emplace_back(std::make_unique<TTLUpdateInfoAlgorithm>(
group_by_ttl, TTLUpdateField::GROUP_BY_TTL, group_by_ttl.result_column, old_ttl_infos.group_by_ttl[group_by_ttl.result_column], current_time_, force_));
if (metadata_snapshot_->hasAnyColumnTTL())
{
for (const auto & [name, description] : metadata_snapshot_->getColumnTTLs())
{
algorithms.emplace_back(std::make_unique<TTLUpdateInfoAlgorithm>(
description, TTLUpdateField::COLUMNS_TTL, name, old_ttl_infos.columns_ttl[name], current_time_, force_));
}
}
for (const auto & move_ttl : metadata_snapshot_->getMoveTTLs())
algorithms.emplace_back(std::make_unique<TTLUpdateInfoAlgorithm>(
move_ttl, TTLUpdateField::MOVES_TTL, move_ttl.result_column, old_ttl_infos.moves_ttl[move_ttl.result_column], current_time_, force_));
for (const auto & recompression_ttl : metadata_snapshot_->getRecompressionTTLs())
algorithms.emplace_back(std::make_unique<TTLUpdateInfoAlgorithm>(
recompression_ttl, TTLUpdateField::RECOMPRESSION_TTL, recompression_ttl.result_column, old_ttl_infos.recompression_ttl[recompression_ttl.result_column], current_time_, force_));
}
Block TTLCalcInputStream::readImpl()
{
auto block = children.at(0)->read();
for (const auto & algorithm : algorithms)
algorithm->execute(block);
if (!block)
return block;
Block res;
for (const auto & col : header)
res.insert(block.getByName(col.name));
return res;
}
void TTLCalcInputStream::readSuffixImpl()
{
data_part->ttl_infos = {};
for (const auto & algorithm : algorithms)
algorithm->finalize(data_part);
}
}

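A brief orientation note: unlike TTLBlockInputStream, which actually removes expired rows and columns, TTLCalcInputStream only recomputes TTL metadata. Each algorithm's execute() updates its min/max info while the block passes through unchanged, and readSuffixImpl() resets and refills data_part->ttl_infos. This is what enables the cheap RECALCULATE mutation path in MergeTreeDataMergerMutator below.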
View File

@ -0,0 +1,44 @@
#pragma once
#include <DataStreams/IBlockInputStream.h>
#include <Storages/MergeTree/MergeTreeData.h>
#include <Storages/MergeTree/IMergeTreeDataPart.h>
#include <Core/Block.h>
#include <Storages/MergeTree/MergeTreeDataPartTTLInfo.h>
#include <DataStreams/ITTLAlgorithm.h>
#include <common/DateLUT.h>
namespace DB
{
class TTLCalcInputStream : public IBlockInputStream
{
public:
TTLCalcInputStream(
const BlockInputStreamPtr & input_,
const MergeTreeData & storage_,
const StorageMetadataPtr & metadata_snapshot_,
const MergeTreeData::MutableDataPartPtr & data_part_,
time_t current_time,
bool force_
);
String getName() const override { return "TTL_CALC"; }
Block getHeader() const override { return header; }
protected:
Block readImpl() override;
/// Finalizes ttl infos and updates data part
void readSuffixImpl() override;
private:
std::vector<TTLAlgorithmPtr> algorithms;
/// ttl_infos and empty_columns are updated while reading
const MergeTreeData::MutableDataPartPtr & data_part;
Poco::Logger * log;
Block header;
};
}

View File

@ -10,11 +10,13 @@ TTLColumnAlgorithm::TTLColumnAlgorithm(
bool force_,
const String & column_name_,
const ExpressionActionsPtr & default_expression_,
const String & default_column_name_)
const String & default_column_name_,
bool is_compact_part_)
: ITTLAlgorithm(description_, old_ttl_info_, current_time_, force_)
, column_name(column_name_)
, default_expression(default_expression_)
, default_column_name(default_column_name_)
, is_compact_part(is_compact_part_)
{
if (!isMinTTLExpired())
{
@ -40,7 +42,7 @@ void TTLColumnAlgorithm::execute(Block & block)
return;
/// The full column will be dropped later
if (isMaxTTLExpired())
if (isMaxTTLExpired() && !is_compact_part)
return;
auto default_column = executeExpressionAndGetColumn(default_expression, block, default_column_name);

View File

@ -17,7 +17,9 @@ public:
bool force_,
const String & column_name_,
const ExpressionActionsPtr & default_expression_,
const String & default_column_name_);
const String & default_column_name_,
bool is_compact_part_
);
void execute(Block & block) override;
void finalize(const MutableDataPartPtr & data_part) const override;
@ -28,6 +30,7 @@ private:
const String default_column_name;
bool is_fully_empty = true;
bool is_compact_part;
};
}

View File

@ -4,8 +4,15 @@ namespace DB
{
TTLUpdateInfoAlgorithm::TTLUpdateInfoAlgorithm(
const TTLDescription & description_, const TTLInfo & old_ttl_info_, time_t current_time_, bool force_)
const TTLDescription & description_,
const TTLUpdateField ttl_update_field_,
const String ttl_update_key_,
const TTLInfo & old_ttl_info_,
time_t current_time_,
bool force_)
: ITTLAlgorithm(description_, old_ttl_info_, current_time_, force_)
, ttl_update_field(ttl_update_field_)
, ttl_update_key(ttl_update_key_)
{
}
@ -22,26 +29,37 @@ void TTLUpdateInfoAlgorithm::execute(Block & block)
}
}
TTLMoveAlgorithm::TTLMoveAlgorithm(
const TTLDescription & description_, const TTLInfo & old_ttl_info_, time_t current_time_, bool force_)
: TTLUpdateInfoAlgorithm(description_, old_ttl_info_, current_time_, force_)
void TTLUpdateInfoAlgorithm::finalize(const MutableDataPartPtr & data_part) const
{
}
if (ttl_update_field == TTLUpdateField::RECOMPRESSION_TTL)
{
data_part->ttl_infos.recompression_ttl[ttl_update_key] = new_ttl_info;
}
else if (ttl_update_field == TTLUpdateField::MOVES_TTL)
{
data_part->ttl_infos.moves_ttl[ttl_update_key] = new_ttl_info;
}
else if (ttl_update_field == TTLUpdateField::GROUP_BY_TTL)
{
data_part->ttl_infos.group_by_ttl[ttl_update_key] = new_ttl_info;
data_part->ttl_infos.updatePartMinMaxTTL(new_ttl_info.min, new_ttl_info.max);
}
else if (ttl_update_field == TTLUpdateField::ROWS_WHERE_TTL)
{
data_part->ttl_infos.rows_where_ttl[ttl_update_key] = new_ttl_info;
data_part->ttl_infos.updatePartMinMaxTTL(new_ttl_info.min, new_ttl_info.max);
}
else if (ttl_update_field == TTLUpdateField::TABLE_TTL)
{
data_part->ttl_infos.table_ttl = new_ttl_info;
data_part->ttl_infos.updatePartMinMaxTTL(new_ttl_info.min, new_ttl_info.max);
}
else if (ttl_update_field == TTLUpdateField::COLUMNS_TTL)
{
data_part->ttl_infos.columns_ttl[ttl_update_key] = new_ttl_info;
data_part->ttl_infos.updatePartMinMaxTTL(new_ttl_info.min, new_ttl_info.max);
}
void TTLMoveAlgorithm::finalize(const MutableDataPartPtr & data_part) const
{
data_part->ttl_infos.moves_ttl[description.result_column] = new_ttl_info;
}
TTLRecompressionAlgorithm::TTLRecompressionAlgorithm(
const TTLDescription & description_, const TTLInfo & old_ttl_info_, time_t current_time_, bool force_)
: TTLUpdateInfoAlgorithm(description_, old_ttl_info_, current_time_, force_)
{
}
void TTLRecompressionAlgorithm::finalize(const MutableDataPartPtr & data_part) const
{
data_part->ttl_infos.recompression_ttl[description.result_column] = new_ttl_info;
}
}

View File

@ -5,28 +5,35 @@
namespace DB
{
enum class TTLUpdateField
{
COLUMNS_TTL,
TABLE_TTL,
ROWS_WHERE_TTL,
MOVES_TTL,
RECOMPRESSION_TTL,
GROUP_BY_TTL,
};
/// Calculates new ttl_info and does not modify the data.
class TTLUpdateInfoAlgorithm : public ITTLAlgorithm
{
public:
TTLUpdateInfoAlgorithm(const TTLDescription & description_, const TTLInfo & old_ttl_info_, time_t current_time_, bool force_);
TTLUpdateInfoAlgorithm(
const TTLDescription & description_,
const TTLUpdateField ttl_update_field_,
const String ttl_update_key_,
const TTLInfo & old_ttl_info_,
time_t current_time_, bool force_
);
void execute(Block & block) override;
void finalize(const MutableDataPartPtr & data_part) const override = 0;
void finalize(const MutableDataPartPtr & data_part) const override;
private:
const TTLUpdateField ttl_update_field;
const String ttl_update_key;
};
class TTLMoveAlgorithm final : public TTLUpdateInfoAlgorithm
{
public:
TTLMoveAlgorithm(const TTLDescription & description_, const TTLInfo & old_ttl_info_, time_t current_time_, bool force_);
void finalize(const MutableDataPartPtr & data_part) const override;
};
class TTLRecompressionAlgorithm final : public TTLUpdateInfoAlgorithm
{
public:
TTLRecompressionAlgorithm(const TTLDescription & description_, const TTLInfo & old_ttl_info_, time_t current_time_, bool force_);
void finalize(const MutableDataPartPtr & data_part) const override;
};
}

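With this refactoring, every TTL kind whose info merely needs recalculation goes through a single class; construction differs only in the TTLUpdateField tag and the map key, for example (taken from the streams above):

algorithms.emplace_back(std::make_unique<TTLUpdateInfoAlgorithm>(
    move_ttl, TTLUpdateField::MOVES_TTL, move_ttl.result_column,
    old_ttl_infos.moves_ttl[move_ttl.result_column], current_time_, force_));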
View File

@ -27,6 +27,8 @@ public:
bool isCategorial() const override { return true; }
bool canBeInsideNullable() const override { return true; }
bool isComparable() const override { return true; }
virtual bool contains(const IDataType & rhs) const = 0;
};
@ -76,7 +78,7 @@ public:
/// Example:
/// Enum('a' = 1, 'b' = 2) -> Enum('c' = 1, 'b' = 2, 'd' = 3) OK
/// Enum('a' = 1, 'b' = 2) -> Enum('a' = 2, 'b' = 1) NOT OK
bool contains(const IDataType & rhs) const;
bool contains(const IDataType & rhs) const override;
SerializationPtr doGetDefaultSerialization() const override;
};

View File

@ -1,4 +1,5 @@
#include <DataTypes/Serializations/ISerialization.h>
#include <Compression/CompressionFactory.h>
#include <Columns/IColumn.h>
#include <IO/WriteHelpers.h>
#include <IO/Operators.h>

View File

@ -325,7 +325,7 @@ struct StringSource
};
/// Differs to StringSource by having 'offest' and 'length' in code points instead of bytes in getSlice* methods.
/// Differs to StringSource by having 'offset' and 'length' in code points instead of bytes in getSlice* methods.
/** NOTE: The behaviour of substring and substringUTF8 is inconsistent when negative offset is greater than string size:
* substring:
* hello

View File

@ -1,6 +1,7 @@
#include <Functions/IFunction.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/castTypeToEither.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeTuple.h>
@ -95,32 +96,30 @@ private:
using Offsets = ColumnArray::Offsets;
static bool matchKeyToIndex(const IColumn & data, const Offsets & offsets,
const ColumnsWithTypeAndName & arguments, PaddedPODArray<UInt64> & matched_idxs);
static bool matchKeyToIndexNumber(
const IColumn & data, const Offsets & offsets, bool is_key_const,
const IColumn & index, PaddedPODArray<UInt64> & matched_idxs);
static bool matchKeyToIndexConst(const IColumn & data, const Offsets & offsets,
static bool matchKeyToIndexNumberConst(
const IColumn & data, const Offsets & offsets,
const Field & index, PaddedPODArray<UInt64> & matched_idxs);
template <typename DataType>
static bool matchKeyToIndexNumber(const IColumn & data, const Offsets & offsets,
const ColumnsWithTypeAndName & arguments, PaddedPODArray<UInt64> & matched_idxs);
static bool matchKeyToIndexString(
const IColumn & data, const Offsets & offsets, bool is_key_const,
const IColumn & index, PaddedPODArray<UInt64> & matched_idxs);
template <typename DataType>
static bool matchKeyToIndexNumberConst(const IColumn & data, const Offsets & offsets,
const Field & index, PaddedPODArray<UInt64> & matched_idxs);
static bool matchKeyToIndexString(const IColumn & data, const Offsets & offsets,
const ColumnsWithTypeAndName & arguments, PaddedPODArray<UInt64> & matched_idxs);
static bool matchKeyToIndexFixedString(const IColumn & data, const Offsets & offsets,
const ColumnsWithTypeAndName & arguments, PaddedPODArray<UInt64> & matched_idxs);
static bool matchKeyToIndexStringConst(const IColumn & data, const Offsets & offsets,
static bool matchKeyToIndexStringConst(
const IColumn & data, const Offsets & offsets,
const Field & index, PaddedPODArray<UInt64> & matched_idxs);
template <typename Matcher>
static void executeMatchKeyToIndex(const Offsets & offsets,
PaddedPODArray<UInt64> & matched_idxs, const Matcher & matcher);
template <typename Matcher>
static void executeMatchConstKeyToIndex(
size_t num_rows, size_t num_values,
PaddedPODArray<UInt64> & matched_idxs, const Matcher & matcher);
};
@ -759,23 +758,11 @@ ColumnPtr FunctionArrayElement::executeTuple(const ColumnsWithTypeAndName & argu
namespace
{
template<typename DataColumn, typename IndexColumn>
struct MatcherString
{
const ColumnString & data;
const ColumnString & index;
bool match(size_t row_data, size_t row_index) const
{
auto data_ref = data.getDataAt(row_data);
auto index_ref = index.getDataAt(row_index);
return memequalSmallAllowOverflow15(index_ref.data, index_ref.size, data_ref.data, data_ref.size);
}
};
struct MatcherFixedString
{
const ColumnFixedString & data;
const ColumnFixedString & index;
const DataColumn & data;
const IndexColumn & index;
bool match(size_t row_data, size_t row_index) const
{
@ -785,9 +772,10 @@ struct MatcherFixedString
}
};
template<typename DataColumn>
struct MatcherStringConst
{
const ColumnString & data;
const DataColumn & data;
const String & index;
bool match(size_t row_data, size_t /* row_index */) const
@ -797,23 +785,23 @@ struct MatcherStringConst
}
};
template <typename T>
template <typename DataType, typename IndexType>
struct MatcherNumber
{
const PaddedPODArray<T> & data;
const PaddedPODArray<T> & index;
const PaddedPODArray<DataType> & data;
const PaddedPODArray<IndexType> & index;
bool match(size_t row_data, size_t row_index) const
{
return data[row_data] == index[row_index];
return data[row_data] == static_cast<DataType>(index[row_index]);
}
};
template <typename T>
template <typename DataType>
struct MatcherNumberConst
{
const PaddedPODArray<T> & data;
T index;
const PaddedPODArray<DataType> & data;
DataType index;
bool match(size_t row_data, size_t /* row_index */) const
{
@ -848,147 +836,158 @@ void FunctionArrayElement::executeMatchKeyToIndex(
}
}
template <typename Matcher>
void FunctionArrayElement::executeMatchConstKeyToIndex(
size_t num_rows, size_t num_values,
PaddedPODArray<UInt64> & matched_idxs, const Matcher & matcher)
{
for (size_t i = 0; i < num_rows; ++i)
{
bool matched = false;
for (size_t j = 0; j < num_values; ++j)
{
if (matcher.match(j, i))
{
matched_idxs.push_back(j + 1);
matched = true;
break;
}
}
if (!matched)
matched_idxs.push_back(0);
}
}
template <typename F>
static bool castColumnString(const IColumn * column, F && f)
{
return castTypeToEither<ColumnString, ColumnFixedString>(column, std::forward<F>(f));
}
bool FunctionArrayElement::matchKeyToIndexStringConst(
const IColumn & data, const Offsets & offsets,
const Field & index, PaddedPODArray<UInt64> & matched_idxs)
{
const auto * data_string = checkAndGetColumn<ColumnString>(&data);
if (!data_string)
return false;
return castColumnString(&data, [&](const auto & data_column)
{
using DataColumn = std::decay_t<decltype(data_column)>;
if (index.getType() != Field::Types::String)
return false;
MatcherStringConst matcher{*data_string, get<const String &>(index)};
executeMatchKeyToIndex(offsets, matched_idxs, matcher);
return true;
MatcherStringConst<DataColumn> matcher{data_column, get<const String &>(index)};
executeMatchKeyToIndex(offsets, matched_idxs, matcher);
return true;
});
}
bool FunctionArrayElement::matchKeyToIndexString(
const IColumn & data, const Offsets & offsets,
const ColumnsWithTypeAndName & arguments, PaddedPODArray<UInt64> & matched_idxs)
const IColumn & data, const Offsets & offsets, bool is_key_const,
const IColumn & index, PaddedPODArray<UInt64> & matched_idxs)
{
const auto * index_string = checkAndGetColumn<ColumnString>(arguments[1].column.get());
if (!index_string)
return false;
return castColumnString(&data, [&](const auto & data_column)
{
return castColumnString(&index, [&](const auto & index_column)
{
using DataColumn = std::decay_t<decltype(data_column)>;
using IndexColumn = std::decay_t<decltype(index_column)>;
const auto * data_string = checkAndGetColumn<ColumnString>(&data);
if (!data_string)
return false;
MatcherString<DataColumn, IndexColumn> matcher{data_column, index_column};
if (is_key_const)
executeMatchConstKeyToIndex(index.size(), data.size(), matched_idxs, matcher);
else
executeMatchKeyToIndex(offsets, matched_idxs, matcher);
MatcherString matcher{*data_string, *index_string};
executeMatchKeyToIndex(offsets, matched_idxs, matcher);
return true;
return true;
});
});
}
bool FunctionArrayElement::matchKeyToIndexFixedString(
const IColumn & data, const Offsets & offsets,
const ColumnsWithTypeAndName & arguments, PaddedPODArray<UInt64> & matched_idxs)
template <typename FromType, typename ToType>
static constexpr bool areConvertibleTypes =
std::is_same_v<FromType, ToType>
|| (is_integer_v<FromType> && is_integer_v<ToType>
&& std::is_convertible_v<FromType, ToType>);
template <typename F>
static bool castColumnNumeric(const IColumn * column, F && f)
{
const auto * index_string = checkAndGetColumn<ColumnFixedString>(arguments[1].column.get());
if (!index_string)
return false;
const auto * data_string = checkAndGetColumn<ColumnFixedString>(&data);
if (!data_string)
return false;
MatcherFixedString matcher{*data_string, *index_string};
executeMatchKeyToIndex(offsets, matched_idxs, matcher);
return true;
return castTypeToEither<
ColumnVector<UInt8>,
ColumnVector<UInt16>,
ColumnVector<UInt32>,
ColumnVector<UInt64>,
ColumnVector<UInt128>,
ColumnVector<UInt256>,
ColumnVector<Int8>,
ColumnVector<Int16>,
ColumnVector<Int32>,
ColumnVector<Int64>,
ColumnVector<Int128>,
ColumnVector<Int256>,
ColumnVector<UUID>
>(column, std::forward<F>(f));
}
template <typename DataType>
bool FunctionArrayElement::matchKeyToIndexNumberConst(
const IColumn & data, const Offsets & offsets,
const Field & index, PaddedPODArray<UInt64> & matched_idxs)
{
const auto * data_numeric = checkAndGetColumn<ColumnVector<DataType>>(&data);
if (!data_numeric)
return false;
std::optional<DataType> index_as_integer;
Field::dispatch([&](const auto & value)
return castColumnNumeric(&data, [&](const auto & data_column)
{
using FieldType = std::decay_t<decltype(value)>;
if constexpr (std::is_same_v<FieldType, DataType> || (is_integer_v<FieldType> && std::is_convertible_v<FieldType, DataType>))
index_as_integer = static_cast<DataType>(value);
}, index);
using DataType = typename std::decay_t<decltype(data_column)>::ValueType;
std::optional<DataType> index_as_integer;
if (!index_as_integer)
return false;
Field::dispatch([&](const auto & value)
{
using FieldType = std::decay_t<decltype(value)>;
if constexpr (areConvertibleTypes<FieldType, DataType>)
index_as_integer = static_cast<DataType>(value);
}, index);
MatcherNumberConst<DataType> matcher{data_numeric->getData(), *index_as_integer};
executeMatchKeyToIndex(offsets, matched_idxs, matcher);
return true;
if (!index_as_integer)
return false;
MatcherNumberConst<DataType> matcher{data_column.getData(), *index_as_integer};
executeMatchKeyToIndex(offsets, matched_idxs, matcher);
return true;
});
}
template <typename DataType>
bool FunctionArrayElement::matchKeyToIndexNumber(
const IColumn & data, const Offsets & offsets,
const ColumnsWithTypeAndName & arguments, PaddedPODArray<UInt64> & matched_idxs)
const IColumn & data, const Offsets & offsets, bool is_key_const,
const IColumn & index, PaddedPODArray<UInt64> & matched_idxs)
{
const auto * index_numeric = checkAndGetColumn<ColumnVector<DataType>>(arguments[1].column.get());
if (!index_numeric)
return false;
return castColumnNumeric(&data, [&](const auto & data_column)
{
return castColumnNumeric(&index, [&](const auto & index_column)
{
using DataType = typename std::decay_t<decltype(data_column)>::ValueType;
using IndexType = typename std::decay_t<decltype(index_column)>::ValueType;
const auto * data_numeric = checkAndGetColumn<ColumnVector<DataType>>(&data);
if (!data_numeric)
return false;
if constexpr (areConvertibleTypes<IndexType, DataType>)
{
MatcherNumber<DataType, IndexType> matcher{data_column.getData(), index_column.getData()};
if (is_key_const)
executeMatchConstKeyToIndex(index_column.size(), data_column.size(), matched_idxs, matcher);
else
executeMatchKeyToIndex(offsets, matched_idxs, matcher);
MatcherNumber<DataType> matcher{data_numeric->getData(), index_numeric->getData()};
executeMatchKeyToIndex(offsets, matched_idxs, matcher);
return true;
}
return true;
}
bool FunctionArrayElement::matchKeyToIndex(
const IColumn & data, const Offsets & offsets,
const ColumnsWithTypeAndName & arguments, PaddedPODArray<UInt64> & matched_idxs)
{
return matchKeyToIndexNumber<UInt8>(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexNumber<UInt16>(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexNumber<UInt32>(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexNumber<UInt64>(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexNumber<UInt128>(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexNumber<UInt256>(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexNumber<Int8>(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexNumber<Int16>(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexNumber<Int32>(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexNumber<Int64>(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexNumber<Int128>(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexNumber<Int256>(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexNumber<UInt256>(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexNumber<UUID>(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexString(data, offsets, arguments, matched_idxs)
|| matchKeyToIndexFixedString(data, offsets, arguments, matched_idxs);
}
bool FunctionArrayElement::matchKeyToIndexConst(
const IColumn & data, const Offsets & offsets,
const Field & index, PaddedPODArray<UInt64> & matched_idxs)
{
return matchKeyToIndexNumberConst<UInt8>(data, offsets, index, matched_idxs)
|| matchKeyToIndexNumberConst<UInt16>(data, offsets, index, matched_idxs)
|| matchKeyToIndexNumberConst<UInt32>(data, offsets, index, matched_idxs)
|| matchKeyToIndexNumberConst<UInt64>(data, offsets, index, matched_idxs)
|| matchKeyToIndexNumberConst<UInt128>(data, offsets, index, matched_idxs)
|| matchKeyToIndexNumberConst<UInt256>(data, offsets, index, matched_idxs)
|| matchKeyToIndexNumberConst<Int8>(data, offsets, index, matched_idxs)
|| matchKeyToIndexNumberConst<Int16>(data, offsets, index, matched_idxs)
|| matchKeyToIndexNumberConst<Int32>(data, offsets, index, matched_idxs)
|| matchKeyToIndexNumberConst<Int64>(data, offsets, index, matched_idxs)
|| matchKeyToIndexNumberConst<Int128>(data, offsets, index, matched_idxs)
|| matchKeyToIndexNumberConst<Int256>(data, offsets, index, matched_idxs)
|| matchKeyToIndexNumberConst<UUID>(data, offsets, index, matched_idxs)
|| matchKeyToIndexStringConst(data, offsets, index, matched_idxs);
return false;
});
});
}
ColumnPtr FunctionArrayElement::executeMap(
const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const
{
const ColumnMap * col_map = typeid_cast<const ColumnMap *>(arguments[0].column.get());
if (!col_map)
return nullptr;
const auto * col_map = checkAndGetColumn<ColumnMap>(arguments[0].column.get());
const auto * col_const_map = checkAndGetColumnConst<ColumnMap>(arguments[0].column.get());
assert(col_map || col_const_map);
if (col_const_map)
col_map = typeid_cast<const ColumnMap *>(&col_const_map->getDataColumn());
const auto & nested_column = col_map->getNestedColumn();
const auto & keys_data = col_map->getNestedData().getColumn(0);
@ -1000,29 +999,33 @@ ColumnPtr FunctionArrayElement::executeMap(
indices_column->reserve(input_rows_count);
auto & indices_data = assert_cast<ColumnVector<UInt64> &>(*indices_column).getData();
bool executed = false;
if (!isColumnConst(*arguments[1].column))
{
if (input_rows_count > 0 && !matchKeyToIndex(keys_data, offsets, arguments, indices_data))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal types of arguments: {}, {} for function {}",
arguments[0].type->getName(), arguments[1].type->getName(), getName());
executed = matchKeyToIndexNumber(keys_data, offsets, !!col_const_map, *arguments[1].column, indices_data)
|| matchKeyToIndexString(keys_data, offsets, !!col_const_map, *arguments[1].column, indices_data);
}
else
{
Field index = (*arguments[1].column)[0];
// Get the matched key's value
if (input_rows_count > 0 && !matchKeyToIndexConst(keys_data, offsets, index, indices_data))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal types of arguments: {}, {} for function {}",
arguments[0].type->getName(), arguments[1].type->getName(), getName());
executed = matchKeyToIndexNumberConst(keys_data, offsets, index, indices_data)
|| matchKeyToIndexStringConst(keys_data, offsets, index, indices_data);
}
if (!executed)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal types of arguments: {}, {} for function {}",
arguments[0].type->getName(), arguments[1].type->getName(), getName());
ColumnPtr values_array = ColumnArray::create(values_data.getPtr(), nested_column.getOffsetsPtr());
if (col_const_map)
values_array = ColumnConst::create(values_array, input_rows_count);
/// Prepare arguments to call arrayElement for array with values and calculated indices at previous step.
ColumnsWithTypeAndName new_arguments =
{
{
ColumnArray::create(values_data.getPtr(), nested_column.getOffsetsPtr()),
values_array,
std::make_shared<DataTypeArray>(result_type),
""
},
@ -1066,13 +1069,14 @@ DataTypePtr FunctionArrayElement::getReturnTypeImpl(const DataTypes & arguments)
ColumnPtr FunctionArrayElement::executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const
{
/// Check nullability.
bool is_array_of_nullable = false;
const auto * col_map = checkAndGetColumn<ColumnMap>(arguments[0].column.get());
const auto * col_const_map = checkAndGetColumnConst<ColumnMap>(arguments[0].column.get());
const ColumnMap * col_map = checkAndGetColumn<ColumnMap>(arguments[0].column.get());
if (col_map)
if (col_map || col_const_map)
return executeMap(arguments, result_type, input_rows_count);
/// Check nullability.
bool is_array_of_nullable = false;
const ColumnArray * col_array = nullptr;
const ColumnArray * col_const_array = nullptr;

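The refactoring above hinges on castTypeToEither-style dispatch. A standalone sketch of the technique (simplified semantics, with dynamic_cast standing in for ClickHouse's typeid_cast):

#include <type_traits>

// Try a typed downcast against a fixed list of types; invoke the callback on
// the first match and return its result, otherwise return false.
template <typename... Ts, typename Base, typename F>
bool castToEitherSketch(Base * base, F && f)
{
    return ((dynamic_cast<Ts *>(base) ? f(*dynamic_cast<Ts *>(base)) : false) || ...);
}

struct Column { virtual ~Column() = default; };
struct ColumnA : Column {};
struct ColumnB : Column {};

bool demo(Column * col)
{
    // Nesting two such calls, as matchKeyToIndexNumber() does for the data and
    // index columns, instantiates the matcher for every (DataType, IndexType)
    // pair without spelling the combinations out by hand.
    return castToEitherSketch<ColumnA, ColumnB>(col, [](auto & concrete)
    {
        using Concrete = std::decay_t<decltype(concrete)>;
        static_assert(std::is_base_of_v<Column, Concrete>);
        return true;
    });
}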
View File

@ -14,7 +14,7 @@
#include <Common/Throttler.h>
#include <Common/thread_local_rng.h>
#include <Common/FieldVisitorToString.h>
#include <Coordination/KeeperStorageDispatcher.h>
#include <Coordination/KeeperDispatcher.h>
#include <Compression/ICompressionCodec.h>
#include <Core/BackgroundSchedulePool.h>
#include <Formats/FormatFactory.h>
@ -146,7 +146,7 @@ struct ContextSharedPart
#if USE_NURAFT
mutable std::mutex keeper_storage_dispatcher_mutex;
mutable std::shared_ptr<KeeperStorageDispatcher> keeper_storage_dispatcher;
mutable std::shared_ptr<KeeperDispatcher> keeper_storage_dispatcher;
#endif
mutable std::mutex auxiliary_zookeepers_mutex;
mutable std::map<String, zkutil::ZooKeeperPtr> auxiliary_zookeepers; /// Map for auxiliary ZooKeeper clients.
@ -1649,7 +1649,7 @@ void Context::setSystemZooKeeperLogAfterInitializationIfNeeded()
zk.second->setZooKeeperLog(shared->system_logs->zookeeper_log);
}
void Context::initializeKeeperStorageDispatcher() const
void Context::initializeKeeperDispatcher() const
{
#if USE_NURAFT
std::lock_guard lock(shared->keeper_storage_dispatcher_mutex);
@ -1660,14 +1660,14 @@ void Context::initializeKeeperStorageDispatcher() const
const auto & config = getConfigRef();
if (config.has("keeper_server"))
{
shared->keeper_storage_dispatcher = std::make_shared<KeeperStorageDispatcher>();
shared->keeper_storage_dispatcher = std::make_shared<KeeperDispatcher>();
shared->keeper_storage_dispatcher->initialize(config, getApplicationType() == ApplicationType::KEEPER);
}
#endif
}
#if USE_NURAFT
std::shared_ptr<KeeperStorageDispatcher> & Context::getKeeperStorageDispatcher() const
std::shared_ptr<KeeperDispatcher> & Context::getKeeperDispatcher() const
{
std::lock_guard lock(shared->keeper_storage_dispatcher_mutex);
if (!shared->keeper_storage_dispatcher)
@ -1677,7 +1677,7 @@ std::shared_ptr<KeeperStorageDispatcher> & Context::getKeeperStorageDispatcher()
}
#endif
void Context::shutdownKeeperStorageDispatcher() const
void Context::shutdownKeeperDispatcher() const
{
#if USE_NURAFT
std::lock_guard lock(shared->keeper_storage_dispatcher_mutex);

View File

@ -102,7 +102,7 @@ class StoragePolicySelector;
using StoragePolicySelectorPtr = std::shared_ptr<const StoragePolicySelector>;
struct PartUUIDs;
using PartUUIDsPtr = std::shared_ptr<PartUUIDs>;
class KeeperStorageDispatcher;
class KeeperDispatcher;
class Session;
class IOutputFormat;
@ -647,10 +647,10 @@ public:
std::shared_ptr<zkutil::ZooKeeper> getAuxiliaryZooKeeper(const String & name) const;
#if USE_NURAFT
std::shared_ptr<KeeperStorageDispatcher> & getKeeperStorageDispatcher() const;
std::shared_ptr<KeeperDispatcher> & getKeeperDispatcher() const;
#endif
void initializeKeeperStorageDispatcher() const;
void shutdownKeeperStorageDispatcher() const;
void initializeKeeperDispatcher() const;
void shutdownKeeperDispatcher() const;
/// Set auxiliary zookeepers configuration at server starting or configuration reloading.
void reloadAuxiliaryZooKeepersConfigIfChanged(const ConfigurationPtr & config);

View File

@ -156,7 +156,7 @@ ColumnDependencies getAllColumnDependencies(const StorageMetadataPtr & metadata_
ColumnDependencies dependencies;
while (!new_updated_columns.empty())
{
auto new_dependencies = metadata_snapshot->getColumnDependencies(new_updated_columns);
auto new_dependencies = metadata_snapshot->getColumnDependencies(new_updated_columns, true);
new_updated_columns.clear();
for (const auto & dependency : new_dependencies)
{
@ -303,6 +303,15 @@ static NameSet getKeyColumns(const StoragePtr & storage, const StorageMetadataPt
return key_columns;
}
static bool materializeTTLRecalculateOnly(const StoragePtr & storage)
{
auto storage_from_merge_tree_data_part = std::dynamic_pointer_cast<StorageFromMergeTreeDataPart>(storage);
if (!storage_from_merge_tree_data_part)
return false;
return storage_from_merge_tree_data_part->materializeTTLRecalculateOnly();
}
static void validateUpdateColumns(
const StoragePtr & storage,
const StorageMetadataPtr & metadata_snapshot, const NameSet & updated_columns,
@ -394,8 +403,13 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run)
NamesAndTypesList all_columns = columns_desc.getAllPhysical();
NameSet updated_columns;
bool materialize_ttl_recalculate_only = materializeTTLRecalculateOnly(storage);
for (const MutationCommand & command : commands)
{
if (command.type == MutationCommand::Type::UPDATE
|| command.type == MutationCommand::Type::DELETE)
materialize_ttl_recalculate_only = false;
for (const auto & kv : command.column_to_update_expression)
{
updated_columns.insert(kv.first);
@ -569,7 +583,18 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run)
else if (command.type == MutationCommand::MATERIALIZE_TTL)
{
mutation_kind.set(MutationKind::MUTATE_OTHER);
if (metadata_snapshot->hasRowsTTL())
if (materialize_ttl_recalculate_only)
{
// Just recalculate ttl_infos without removing expired data
auto all_columns_vec = all_columns.getNames();
auto new_dependencies = metadata_snapshot->getColumnDependencies(NameSet(all_columns_vec.begin(), all_columns_vec.end()), false);
for (const auto & dependency : new_dependencies)
{
if (dependency.kind == ColumnDependency::TTL_EXPRESSION)
dependencies.insert(dependency);
}
}
else if (metadata_snapshot->hasRowsTTL())
{
for (const auto & column : all_columns)
dependencies.emplace(column.name, ColumnDependency::TTL_TARGET);
@ -594,19 +619,19 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run)
}
/// Recalc only skip indices and projections of columns which could be updated by TTL.
auto new_dependencies = metadata_snapshot->getColumnDependencies(new_updated_columns);
auto new_dependencies = metadata_snapshot->getColumnDependencies(new_updated_columns, true);
for (const auto & dependency : new_dependencies)
{
if (dependency.kind == ColumnDependency::SKIP_INDEX || dependency.kind == ColumnDependency::PROJECTION)
dependencies.insert(dependency);
}
}
if (dependencies.empty())
{
/// Very rare case. It can happen if we have only one MOVE TTL with constant expression.
/// But we still have to read at least one column.
dependencies.emplace(all_columns.front().name, ColumnDependency::TTL_EXPRESSION);
}
if (dependencies.empty())
{
/// Very rare case. It can happen if we have only one MOVE TTL with constant expression.
/// But we still have to read at least one column.
dependencies.emplace(all_columns.front().name, ColumnDependency::TTL_EXPRESSION);
}
}
else if (command.type == MutationCommand::READ_COLUMN)

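In effect, when the MergeTree setting materialize_ttl_recalculate_only is enabled and the mutation carries no UPDATE or DELETE commands, MATERIALIZE TTL collects only TTL_EXPRESSION dependencies, so the mutation recalculates TTL metadata for the part instead of rewriting its data.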
View File

@ -111,11 +111,6 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
data = pos->begin;
}
else if (s_from_infile.ignore(pos, expected))
{
if (!infile_name_p.parse(pos, infile, expected))
return false;
}
else if (s_format.ignore(pos, expected))
{
if (!name_p.parse(pos, format, expected))

View File

@ -0,0 +1,28 @@
#include <Parsers/getInsertQuery.h>
#include <Parsers/ASTInsertQuery.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTExpressionList.h>
#include <IO/WriteBufferFromString.h>
namespace DB
{
std::string getInsertQuery(const std::string & db_name, const std::string & table_name, const ColumnsWithTypeAndName & columns, IdentifierQuotingStyle quoting)
{
ASTInsertQuery query;
query.table_id.database_name = db_name;
query.table_id.table_name = table_name;
query.columns = std::make_shared<ASTExpressionList>(',');
query.children.push_back(query.columns);
for (const auto & column : columns)
query.columns->children.emplace_back(std::make_shared<ASTIdentifier>(column.name));
WriteBufferFromOwnString buf;
IAST::FormatSettings settings(buf, true);
settings.always_quote_identifiers = true;
settings.identifier_quoting_style = quoting;
query.IAST::format(settings);
return buf.str();
}
}

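As a usage illustration (table and column names hypothetical), the helper is expected to produce something like:

// getInsertQuery("public", "points", columns_x_y, IdentifierQuotingStyle::DoubleQuotes)
//   -> INSERT INTO "public"."points" ("x", "y") VALUES
// PreparedInsert in StoragePostgreSQL.cpp then appends the ($1, ..., $n)
// placeholder list and an optional ON CONFLICT clause to this string.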
View File

@ -0,0 +1,8 @@
#pragma once
#include <Core/ColumnsWithTypeAndName.h>
#include <Parsers/IdentifierQuotingStyle.h>
namespace DB
{
std::string getInsertQuery(const std::string & db_name, const std::string & table_name, const ColumnsWithTypeAndName & columns, IdentifierQuotingStyle quoting);
}

View File

@ -194,7 +194,7 @@ KeeperTCPHandler::KeeperTCPHandler(IServer & server_, const Poco::Net::StreamSoc
, server(server_)
, log(&Poco::Logger::get("NuKeeperTCPHandler"))
, global_context(Context::createCopy(server.context()))
, keeper_dispatcher(global_context->getKeeperStorageDispatcher())
, keeper_dispatcher(global_context->getKeeperDispatcher())
, operation_timeout(0, global_context->getConfigRef().getUInt("keeper_server.operation_timeout_ms", Coordination::DEFAULT_OPERATION_TIMEOUT_MS) * 1000)
, session_timeout(0, global_context->getConfigRef().getUInt("keeper_server.session_timeout_ms", Coordination::DEFAULT_SESSION_TIMEOUT_MS) * 1000)
, poll_wrapper(std::make_unique<SocketInterruptablePollWrapper>(socket_))

View File

@ -13,7 +13,7 @@
#include <Interpreters/Context.h>
#include <Common/ZooKeeper/ZooKeeperCommon.h>
#include <Common/ZooKeeper/ZooKeeperConstants.h>
#include <Coordination/KeeperStorageDispatcher.h>
#include <Coordination/KeeperDispatcher.h>
#include <IO/WriteBufferFromPocoSocket.h>
#include <IO/ReadBufferFromPocoSocket.h>
#include <Coordination/ThreadSafeQueue.h>
@ -38,7 +38,7 @@ private:
IServer & server;
Poco::Logger * log;
ContextPtr global_context;
std::shared_ptr<KeeperStorageDispatcher> keeper_dispatcher;
std::shared_ptr<KeeperDispatcher> keeper_dispatcher;
Poco::Timespan operation_timeout;
Poco::Timespan session_timeout;
int64_t session_id{-1};

View File

@ -10,6 +10,7 @@
#include <Storages/MergeTree/MergeTreeDataWriter.h>
#include <Storages/MergeTree/StorageFromMergeTreeDataPart.h>
#include <DataStreams/TTLBlockInputStream.h>
#include <DataStreams/TTLCalcInputStream.h>
#include <DataStreams/DistinctSortedBlockInputStream.h>
#include <DataStreams/ExpressionBlockInputStream.h>
#include <DataStreams/MaterializingBlockInputStream.h>
@ -493,7 +494,6 @@ static void extractMergingAndGatheringColumns(
const NamesAndTypesList & storage_columns,
const ExpressionActionsPtr & sorting_key_expr,
const IndicesDescription & indexes,
const ProjectionsDescription & projections,
const MergeTreeData::MergingParams & merging_params,
NamesAndTypesList & gathering_columns, Names & gathering_column_names,
NamesAndTypesList & merging_columns, Names & merging_column_names)
@ -507,13 +507,6 @@ static void extractMergingAndGatheringColumns(
std::inserter(key_columns, key_columns.end()));
}
for (const auto & projection : projections)
{
Names projection_columns_vec = projection.required_columns;
std::copy(projection_columns_vec.cbegin(), projection_columns_vec.cend(),
std::inserter(key_columns, key_columns.end()));
}
/// Force sign column for Collapsing mode
if (merging_params.mode == MergeTreeData::MergingParams::Collapsing)
key_columns.emplace(merging_params.sign_column);
@ -727,7 +720,6 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor
storage_columns,
metadata_snapshot->getSortingKey().expression,
metadata_snapshot->getSecondaryIndices(),
metadata_snapshot->getProjections(),
merging_params,
gathering_columns,
gathering_column_names,
@ -1288,10 +1280,10 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
auto mrk_extension = source_part->index_granularity_info.is_adaptive ? getAdaptiveMrkExtension(new_data_part->getType())
: getNonAdaptiveMrkExtension();
bool need_sync = needSyncPart(source_part->rows_count, source_part->getBytesOnDisk(), *data_settings);
bool need_remove_expired_values = false;
auto execute_ttl_type = ExecuteTTLType::NONE;
if (in && shouldExecuteTTL(metadata_snapshot, interpreter->getColumnDependencies(), commands_for_part))
need_remove_expired_values = true;
if (in)
execute_ttl_type = shouldExecuteTTL(metadata_snapshot, interpreter->getColumnDependencies());
/// All columns from part are changed and may be some more that were missing before in part
/// TODO We can materialize compact part without copying data
@ -1319,7 +1311,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
time_of_mutation,
compression_codec,
merge_entry,
need_remove_expired_values,
execute_ttl_type,
need_sync,
space_reservation,
holder,
@ -1356,7 +1348,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
return data.cloneAndLoadDataPartOnSameDisk(source_part, "tmp_clone_", future_part.part_info, metadata_snapshot);
}
if (need_remove_expired_values)
if (execute_ttl_type != ExecuteTTLType::NONE)
files_to_skip.insert("ttl.txt");
disk->createDirectories(new_part_tmp_path);
@ -1416,7 +1408,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
time_of_mutation,
compression_codec,
merge_entry,
need_remove_expired_values,
execute_ttl_type,
need_sync,
space_reservation,
holder,
@ -1437,7 +1429,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
}
}
finalizeMutatedPart(source_part, new_data_part, need_remove_expired_values, compression_codec);
finalizeMutatedPart(source_part, new_data_part, execute_ttl_type, compression_codec);
}
return new_data_part;
@ -1984,21 +1976,22 @@ std::set<MergeTreeProjectionPtr> MergeTreeDataMergerMutator::getProjectionsToRec
return projections_to_recalc;
}
bool MergeTreeDataMergerMutator::shouldExecuteTTL(
const StorageMetadataPtr & metadata_snapshot, const ColumnDependencies & dependencies, const MutationCommands & commands)
ExecuteTTLType MergeTreeDataMergerMutator::shouldExecuteTTL(const StorageMetadataPtr & metadata_snapshot, const ColumnDependencies & dependencies)
{
if (!metadata_snapshot->hasAnyTTL())
return false;
return ExecuteTTLType::NONE;
for (const auto & command : commands)
if (command.type == MutationCommand::MATERIALIZE_TTL)
return true;
bool has_ttl_expression = false;
for (const auto & dependency : dependencies)
if (dependency.kind == ColumnDependency::TTL_EXPRESSION || dependency.kind == ColumnDependency::TTL_TARGET)
return true;
{
if (dependency.kind == ColumnDependency::TTL_EXPRESSION)
has_ttl_expression = true;
return false;
if (dependency.kind == ColumnDependency::TTL_TARGET)
return ExecuteTTLType::NORMAL;
}
return has_ttl_expression ? ExecuteTTLType::RECALCULATE : ExecuteTTLType::NONE;
}
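In other words: any TTL_TARGET dependency forces the full NORMAL pass that drops expired data, a TTL_EXPRESSION dependency alone yields RECALCULATE, and no TTL dependencies at all yield NONE.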
// 1. get projection pipeline and a sink to write parts
@ -2172,7 +2165,7 @@ void MergeTreeDataMergerMutator::mutateAllPartColumns(
time_t time_of_mutation,
const CompressionCodecPtr & compression_codec,
MergeListEntry & merge_entry,
bool need_remove_expired_values,
ExecuteTTLType execute_ttl_type,
bool need_sync,
const ReservationPtr & space_reservation,
TableLockHolder & holder,
@ -2185,9 +2178,12 @@ void MergeTreeDataMergerMutator::mutateAllPartColumns(
mutating_stream = std::make_shared<MaterializingBlockInputStream>(
std::make_shared<ExpressionBlockInputStream>(mutating_stream, data.getPrimaryKeyAndSkipIndicesExpression(metadata_snapshot)));
if (need_remove_expired_values)
if (execute_ttl_type == ExecuteTTLType::NORMAL)
mutating_stream = std::make_shared<TTLBlockInputStream>(mutating_stream, data, metadata_snapshot, new_data_part, time_of_mutation, true);
if (execute_ttl_type == ExecuteTTLType::RECALCULATE)
mutating_stream = std::make_shared<TTLCalcInputStream>(mutating_stream, data, metadata_snapshot, new_data_part, time_of_mutation, true);
IMergeTreeDataPart::MinMaxIndex minmax_idx;
MergedBlockOutputStream out{
@ -2229,7 +2225,7 @@ void MergeTreeDataMergerMutator::mutateSomePartColumns(
time_t time_of_mutation,
const CompressionCodecPtr & compression_codec,
MergeListEntry & merge_entry,
bool need_remove_expired_values,
ExecuteTTLType execute_ttl_type,
bool need_sync,
const ReservationPtr & space_reservation,
TableLockHolder & holder,
@ -2238,9 +2234,12 @@ void MergeTreeDataMergerMutator::mutateSomePartColumns(
if (mutating_stream == nullptr)
throw Exception("Cannot mutate part columns with uninitialized mutations stream. It's a bug", ErrorCodes::LOGICAL_ERROR);
if (need_remove_expired_values)
if (execute_ttl_type == ExecuteTTLType::NORMAL)
mutating_stream = std::make_shared<TTLBlockInputStream>(mutating_stream, data, metadata_snapshot, new_data_part, time_of_mutation, true);
if (execute_ttl_type == ExecuteTTLType::RECALCULATE)
mutating_stream = std::make_shared<TTLCalcInputStream>(mutating_stream, data, metadata_snapshot, new_data_part, time_of_mutation, true);
IMergedBlockOutputStream::WrittenOffsetColumns unused_written_offsets;
MergedColumnOnlyOutputStream out(
new_data_part,
@ -2279,7 +2278,7 @@ void MergeTreeDataMergerMutator::mutateSomePartColumns(
void MergeTreeDataMergerMutator::finalizeMutatedPart(
const MergeTreeDataPartPtr & source_part,
MergeTreeData::MutableDataPartPtr new_data_part,
bool need_remove_expired_values,
ExecuteTTLType execute_ttl_type,
const CompressionCodecPtr & codec)
{
auto disk = new_data_part->volume->getDisk();
@ -2293,7 +2292,7 @@ void MergeTreeDataMergerMutator::finalizeMutatedPart(
new_data_part->checksums.files[IMergeTreeDataPart::UUID_FILE_NAME].file_hash = out_hashing.getHash();
}
if (need_remove_expired_values)
if (execute_ttl_type != ExecuteTTLType::NONE)
{
/// Write a file with ttl infos in json format.
auto out_ttl = disk->writeFile(fs::path(new_data_part->getFullRelativePath()) / "ttl.txt", 4096);

View File

@ -23,6 +23,13 @@ enum class SelectPartsDecision
NOTHING_TO_MERGE = 2,
};
enum class ExecuteTTLType
{
NONE = 0,
NORMAL = 1,
RECALCULATE = 2,
};
/// Auxiliary struct holding metainformation for the future merged or mutated part.
struct FutureMergedMutatedPart
{
@ -200,8 +207,7 @@ private:
const ProjectionsDescription & all_projections,
const MutationCommands & commands_for_removes);
static bool shouldExecuteTTL(
const StorageMetadataPtr & metadata_snapshot, const ColumnDependencies & dependencies, const MutationCommands & commands);
static ExecuteTTLType shouldExecuteTTL(const StorageMetadataPtr & metadata_snapshot, const ColumnDependencies & dependencies);
/// Return set of indices which should be recalculated during mutation also
/// wraps input stream into additional expression stream
@ -242,7 +248,7 @@ private:
time_t time_of_mutation,
const CompressionCodecPtr & compression_codec,
MergeListEntry & merge_entry,
bool need_remove_expired_values,
ExecuteTTLType execute_ttl_type,
bool need_sync,
const ReservationPtr & space_reservation,
TableLockHolder & holder,
@ -260,7 +266,7 @@ private:
time_t time_of_mutation,
const CompressionCodecPtr & compression_codec,
MergeListEntry & merge_entry,
bool need_remove_expired_values,
ExecuteTTLType execute_ttl_type,
bool need_sync,
const ReservationPtr & space_reservation,
TableLockHolder & holder,
@ -271,7 +277,7 @@ private:
static void finalizeMutatedPart(
const MergeTreeDataPartPtr & source_part,
MergeTreeData::MutableDataPartPtr new_data_part,
bool need_remove_expired_values,
ExecuteTTLType execute_ttl_type,
const CompressionCodecPtr & codec);
public :

View File

@ -117,6 +117,7 @@ struct Settings;
M(Int64, merge_with_ttl_timeout, 3600 * 4, "Minimal time in seconds, when merge with delete TTL can be repeated.", 0) \
M(Int64, merge_with_recompression_ttl_timeout, 3600 * 4, "Minimal time in seconds, when merge with recompression TTL can be repeated.", 0) \
M(Bool, ttl_only_drop_parts, false, "Only drop altogether the expired parts and not partially prune them.", 0) \
M(Bool, materialize_ttl_recalculate_only, false, "Only recalculate TTL info when executing MATERIALIZE TTL", 0) \
M(Bool, write_final_mark, true, "Write final mark after end of column (0 - disabled, do nothing if index_granularity_bytes=0)", 0) \
M(Bool, enable_mixed_granularity_parts, true, "Enable parts with adaptive and non adaptive granularity", 0) \
M(MaxThreads, max_part_loading_threads, 0, "The number of threads to load data parts at startup.", 0) \

View File

@ -47,8 +47,12 @@ MergeTreeWhereOptimizer::MergeTreeWhereOptimizer(
if (!primary_key.column_names.empty())
first_primary_key_column = primary_key.column_names[0];
for (const auto & [_, size] : column_sizes)
total_size_of_queried_columns += size;
for (const auto & name : queried_columns)
{
auto it = column_sizes.find(name);
if (it != column_sizes.end())
total_size_of_queried_columns += it->second;
}
determineArrayJoinedNames(query_info.query->as<ASTSelectQuery &>());
optimize(query_info.query->as<ASTSelectQuery &>());

View File

@ -73,6 +73,11 @@ public:
return storage.getPartitionIDFromQuery(ast, context);
}
bool materializeTTLRecalculateOnly() const
{
return parts.front()->storage.getSettings()->materialize_ttl_recalculate_only;
}
protected:
/// Used in part mutation.
StorageFromMergeTreeDataPart(const MergeTreeData::DataPartPtr & part_)

View File

@ -5,6 +5,7 @@
#include <Common/quoteString.h>
#include <Common/StringUtils/StringUtils.h>
#include <Core/ColumnWithTypeAndName.h>
#include <DataTypes/DataTypeEnum.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <IO/Operators.h>
@ -214,7 +215,7 @@ bool StorageInMemoryMetadata::hasAnyGroupByTTL() const
return !table_ttl.group_by_ttl.empty();
}
ColumnDependencies StorageInMemoryMetadata::getColumnDependencies(const NameSet & updated_columns) const
ColumnDependencies StorageInMemoryMetadata::getColumnDependencies(const NameSet & updated_columns, bool include_ttl_target) const
{
if (updated_columns.empty())
return {};
@ -250,7 +251,7 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies(const NameSet
if (hasRowsTTL())
{
auto rows_expression = getRowsTTL().expression;
if (add_dependent_columns(rows_expression, required_ttl_columns))
if (add_dependent_columns(rows_expression, required_ttl_columns) && include_ttl_target)
{
/// Filter all columns, if rows TTL expression have to be recalculated.
for (const auto & column : getColumns().getAllPhysical())
@ -263,13 +264,15 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies(const NameSet
for (const auto & [name, entry] : getColumnTTLs())
{
if (add_dependent_columns(entry.expression, required_ttl_columns))
if (add_dependent_columns(entry.expression, required_ttl_columns) && include_ttl_target)
updated_ttl_columns.insert(name);
}
for (const auto & entry : getMoveTTLs())
add_dependent_columns(entry.expression, required_ttl_columns);
//TODO what about rows_where_ttl and group_by_ttl ??
for (const auto & column : indices_columns)
res.emplace(column, ColumnDependency::SKIP_INDEX);
for (const auto & column : projections_columns)
@ -493,6 +496,23 @@ namespace
return res;
}
/*
* This function checks compatibility of enums. It returns true if:
* 1. Both types are enums.
* 2. The first type can represent all possible values of the second one.
* 3. Both types require the same amount of memory.
*/
bool isCompatibleEnumTypes(const IDataType * lhs, const IDataType * rhs)
{
if (IDataTypeEnum const * enum_type = dynamic_cast<IDataTypeEnum const *>(lhs))
{
if (!enum_type->contains(*rhs))
return false;
return enum_type->getMaximumSizeOfValueInMemory() == rhs->getMaximumSizeOfValueInMemory();
}
return false;
}
}
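A short illustration of how rule 3 interacts with the containment rule shown in DataTypeEnum above (values hypothetical):

// available Enum8('a' = 1, 'b' = 2, 'c' = 3) accepts provided Enum8('a' = 1, 'b' = 2):
//     contains() holds and both use 1-byte storage
// available Enum8(...) never accepts a provided Enum16(...):
//     even with matching values, 1-byte and 2-byte storage differ in memory size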
void StorageInMemoryMetadata::check(const Names & column_names, const NamesAndTypesList & virtuals, const StorageID & storage_id) const
@ -544,12 +564,13 @@ void StorageInMemoryMetadata::check(const NamesAndTypesList & provided_columns)
column.name,
listOfColumns(available_columns));
if (!column.type->equals(*it->getMapped()))
const auto * available_type = it->getMapped();
if (!column.type->equals(*available_type) && !isCompatibleEnumTypes(available_type, column.type.get()))
throw Exception(
ErrorCodes::TYPE_MISMATCH,
"Type mismatch for column {}. Column has type {}, got type {}",
column.name,
it->getMapped()->getName(),
available_type->getName(),
column.type->getName());
if (unique_names.end() != unique_names.find(column.name))
@ -588,16 +609,16 @@ void StorageInMemoryMetadata::check(const NamesAndTypesList & provided_columns,
name,
listOfColumns(available_columns));
const auto & provided_column_type = *it->getMapped();
const auto & available_column_type = *jt->getMapped();
const auto * provided_column_type = it->getMapped();
const auto * available_column_type = jt->getMapped();
if (!provided_column_type.equals(available_column_type))
if (!provided_column_type->equals(*available_column_type) && !isCompatibleEnumTypes(available_column_type, provided_column_type))
throw Exception(
ErrorCodes::TYPE_MISMATCH,
"Type mismatch for column {}. Column has type {}, got type {}",
name,
provided_column_type.getName(),
available_column_type.getName());
available_column_type->getName(),
provided_column_type->getName());
if (unique_names.end() != unique_names.find(name))
throw Exception(ErrorCodes::COLUMN_QUERIED_MORE_THAN_ONCE,
@ -632,12 +653,13 @@ void StorageInMemoryMetadata::check(const Block & block, bool need_all) const
column.name,
listOfColumns(available_columns));
if (!column.type->equals(*it->getMapped()))
const auto * available_type = it->getMapped();
if (!column.type->equals(*available_type) && !isCompatibleEnumTypes(available_type, column.type.get()))
throw Exception(
ErrorCodes::TYPE_MISMATCH,
"Type mismatch for column {}. Column has type {}, got type {}",
column.name,
it->getMapped()->getName(),
available_type->getName(),
column.type->getName());
}

View File

@ -143,7 +143,7 @@ struct StorageInMemoryMetadata
/// Returns columns, which will be needed to calculate dependencies (skip
/// indices, TTL expressions) if we update @updated_columns set of columns.
ColumnDependencies getColumnDependencies(const NameSet & updated_columns) const;
ColumnDependencies getColumnDependencies(const NameSet & updated_columns, bool include_ttl_target) const;
/// Block with ordinary + materialized columns.
Block getSampleBlock() const;

View File

@ -29,6 +29,8 @@
#include <Processors/Pipe.h>
#include <Processors/Sinks/SinkToStorage.h>
#include <IO/WriteHelpers.h>
#include <Parsers/getInsertQuery.h>
#include <IO/Operators.h>
namespace DB
@ -47,10 +49,12 @@ StoragePostgreSQL::StoragePostgreSQL(
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment,
const String & remote_table_schema_)
const String & remote_table_schema_,
const String & on_conflict_)
: IStorage(table_id_)
, remote_table_name(remote_table_name_)
, remote_table_schema(remote_table_schema_)
, on_conflict(on_conflict_)
, pool(std::move(pool_))
{
StorageInMemoryMetadata storage_metadata;
@ -94,17 +98,22 @@ Pipe StoragePostgreSQL::read(
class PostgreSQLSink : public SinkToStorage
{
using Row = std::vector<std::optional<std::string>>;
public:
explicit PostgreSQLSink(
const StorageMetadataPtr & metadata_snapshot_,
postgres::ConnectionHolderPtr connection_holder_,
const String & remote_table_name_,
const String & remote_table_schema_)
const String & remote_table_schema_,
const String & on_conflict_)
: SinkToStorage(metadata_snapshot_->getSampleBlock())
, metadata_snapshot(metadata_snapshot_)
, connection_holder(std::move(connection_holder_))
, remote_table_name(remote_table_name_)
, remote_table_schema(remote_table_schema_)
, on_conflict(on_conflict_)
{
}
@ -113,11 +122,21 @@ public:
void consume(Chunk chunk) override
{
auto block = getPort().getHeader().cloneWithColumns(chunk.detachColumns());
if (!inserter)
inserter = std::make_unique<StreamTo>(connection_holder->get(),
remote_table_schema.empty() ? pqxx::table_path({remote_table_name})
: pqxx::table_path({remote_table_schema, remote_table_name}),
block.getNames());
{
if (on_conflict.empty())
{
inserter = std::make_unique<StreamTo>(connection_holder->get(),
remote_table_schema.empty() ? pqxx::table_path({remote_table_name})
: pqxx::table_path({remote_table_schema, remote_table_name}), block.getNames());
}
else
{
inserter = std::make_unique<PreparedInsert>(connection_holder->get(), remote_table_name,
remote_table_schema, block.getColumnsWithTypeAndName(), on_conflict);
}
}
const auto columns = block.getColumns();
const size_t num_rows = block.rows(), num_cols = block.columns();
@ -151,7 +170,7 @@ public:
}
}
inserter->stream.write_values(row);
inserter->insert(row);
}
}
@ -268,37 +287,92 @@ public:
}
private:
struct StreamTo
struct Inserter
{
pqxx::connection & connection;
pqxx::work tx;
explicit Inserter(pqxx::connection & connection_)
: connection(connection_)
, tx(connection) {}
virtual ~Inserter() = default;
virtual void insert(const Row & row) = 0;
virtual void complete() = 0;
};
struct StreamTo : Inserter
{
Names columns;
pqxx::stream_to stream;
StreamTo(pqxx::connection & connection, pqxx::table_path table_, Names columns_)
: tx(connection)
StreamTo(pqxx::connection & connection_, pqxx::table_path table_, Names columns_)
: Inserter(connection_)
, columns(std::move(columns_))
, stream(pqxx::stream_to::raw_table(tx, connection.quote_table(table_), connection.quote_columns(columns)))
{
}
void complete()
void complete() override
{
stream.complete();
tx.commit();
}
void insert(const Row & row) override
{
stream.write_values(row);
}
};
struct PreparedInsert : Inserter
{
PreparedInsert(pqxx::connection & connection_, const String & table, const String & schema,
const ColumnsWithTypeAndName & columns, const String & on_conflict_)
: Inserter(connection_)
{
WriteBufferFromOwnString buf;
buf << getInsertQuery(schema, table, columns, IdentifierQuotingStyle::DoubleQuotes);
buf << " (";
for (size_t i = 1; i <= columns.size(); ++i)
{
if (i > 1)
buf << ", ";
buf << "$" << i;
}
buf << ") ";
buf << on_conflict_;
connection.prepare("insert", buf.str());
}
void complete() override
{
connection.unprepare("insert");
tx.commit();
}
void insert(const Row & row) override
{
pqxx::params params;
params.reserve(row.size());
params.append_multi(row);
tx.exec_prepared("insert", params);
}
};
StorageMetadataPtr metadata_snapshot;
postgres::ConnectionHolderPtr connection_holder;
const String remote_table_name, remote_table_schema;
std::unique_ptr<StreamTo> inserter;
const String remote_db_name, remote_table_name, remote_table_schema, on_conflict;
std::unique_ptr<Inserter> inserter;
};
SinkToStoragePtr StoragePostgreSQL::write(
const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, ContextPtr /* context */)
{
return std::make_shared<PostgreSQLSink>(metadata_snapshot, pool->get(), remote_table_name, remote_table_schema);
return std::make_shared<PostgreSQLSink>(metadata_snapshot, pool->get(), remote_table_name, remote_table_schema, on_conflict);
}
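
For reference, with columns (a, b, c), a non-empty schema s, table t, and on_conflict = 'ON CONFLICT DO NOTHING', the statement that PreparedInsert registers under the name "insert" should come out roughly as below; this is a sketch, since the exact INSERT prefix and identifier quoting come from getInsertQuery with IdentifierQuotingStyle::DoubleQuotes, which is outside this diff:

INSERT INTO "s"."t" ("a", "b", "c") VALUES ($1, $2, $3) ON CONFLICT DO NOTHING

Each consume() call then binds one row to the $n placeholders via pqxx::params and runs tx.exec_prepared.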
@ -308,9 +382,9 @@ void registerStoragePostgreSQL(StorageFactory & factory)
{
ASTs & engine_args = args.engine_args;
if (engine_args.size() < 5 || engine_args.size() > 6)
throw Exception("Storage PostgreSQL requires from 5 to 6 parameters: "
"PostgreSQL('host:port', 'database', 'table', 'username', 'password' [, 'schema']",
if (engine_args.size() < 5 || engine_args.size() > 7)
throw Exception("Storage PostgreSQL requires from 5 to 7 parameters: "
"PostgreSQL('host:port', 'database', 'table', 'username', 'password' [, 'schema', 'ON CONFLICT ...']",
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
for (auto & engine_arg : engine_args)
@ -326,9 +400,11 @@ void registerStoragePostgreSQL(StorageFactory & factory)
const String & username = engine_args[3]->as<ASTLiteral &>().value.safeGet<String>();
const String & password = engine_args[4]->as<ASTLiteral &>().value.safeGet<String>();
String remote_table_schema;
if (engine_args.size() == 6)
String remote_table_schema, on_conflict;
if (engine_args.size() >= 6)
remote_table_schema = engine_args[5]->as<ASTLiteral &>().value.safeGet<String>();
if (engine_args.size() >= 7)
on_conflict = engine_args[6]->as<ASTLiteral &>().value.safeGet<String>();
auto pool = std::make_shared<postgres::PoolWithFailover>(
remote_database,
@ -345,7 +421,8 @@ void registerStoragePostgreSQL(StorageFactory & factory)
args.columns,
args.constraints,
args.comment,
remote_table_schema);
remote_table_schema,
on_conflict);
},
{
.source_access_type = AccessType::POSTGRES,
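
With the widened argument list, the new optional seventh parameter is passed straight through as the conflict clause; a usage sketch mirroring the integration test further down (host and credentials are the test fixtures, not real defaults):

CREATE TABLE test_conflict (a UInt32, b String, c Int32)
ENGINE = PostgreSQL('postgres1:5432', 'postgres', 'test_conflict', 'postgres', 'mysecretpassword', '', 'ON CONFLICT DO NOTHING');

Repeated inserts of the same primary keys then leave the remote row count unchanged instead of aborting the whole INSERT.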

View File

@ -27,7 +27,8 @@ public:
const ColumnsDescription & columns_,
const ConstraintsDescription & constraints_,
const String & comment,
const std::string & remote_table_schema_ = "");
const String & remote_table_schema_ = "",
const String & on_conflict = "");
String getName() const override { return "PostgreSQL"; }
@ -47,6 +48,7 @@ private:
String remote_table_name;
String remote_table_schema;
String on_conflict;
postgres::PoolWithFailoverPtr pool;
};

View File

@ -22,4 +22,3 @@ target_link_libraries (transform_part_zk_nodes
dbms
string_utils
)

View File

@ -1,11 +1,7 @@
add_executable (mergetree_checksum_fuzzer
mergetree_checksum_fuzzer.cpp
"${ClickHouse_SOURCE_DIR}/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp"
"${ClickHouse_SOURCE_DIR}/src/Compression/CompressedReadBuffer.cpp"
"${ClickHouse_SOURCE_DIR}/src/Compression/CompressedWriteBuffer.cpp"
)
target_link_libraries (mergetree_checksum_fuzzer PRIVATE clickhouse_common_io fuzz_compression ${LIB_FUZZING_ENGINE})
add_executable (mergetree_checksum_fuzzer mergetree_checksum_fuzzer.cpp)
# Look at comment around fuzz_compression target declaration
target_link_libraries (mergetree_checksum_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE})
add_executable (columns_description_fuzzer columns_description_fuzzer.cpp)
target_link_libraries (columns_description_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE})

View File

@ -37,7 +37,8 @@ StoragePtr TableFunctionPostgreSQL::executeImpl(const ASTPtr & /*ast_function*/,
columns,
ConstraintsDescription{},
String{},
remote_table_schema);
remote_table_schema,
on_conflict);
result->startup();
return result;
@ -67,9 +68,9 @@ void TableFunctionPostgreSQL::parseArguments(const ASTPtr & ast_function, Contex
ASTs & args = func_args.arguments->children;
if (args.size() < 5 || args.size() > 6)
throw Exception("Table function 'PostgreSQL' requires from 5 to 6 parameters: "
"PostgreSQL('host:port', 'database', 'table', 'user', 'password', [, 'schema']).",
if (args.size() < 5 || args.size() > 7)
throw Exception("Table function 'PostgreSQL' requires from 5 to 7 parameters: "
"PostgreSQL('host:port', 'database', 'table', 'user', 'password', [, 'schema', 'ON CONFLICT ...']).",
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
for (auto & arg : args)
@ -82,8 +83,10 @@ void TableFunctionPostgreSQL::parseArguments(const ASTPtr & ast_function, Contex
remote_table_name = args[2]->as<ASTLiteral &>().value.safeGet<String>();
if (args.size() == 6)
if (args.size() >= 6)
remote_table_schema = args[5]->as<ASTLiteral &>().value.safeGet<String>();
if (args.size() >= 7)
on_conflict = args[6]->as<ASTLiteral &>().value.safeGet<String>();
connection_pool = std::make_shared<postgres::PoolWithFailover>(
args[1]->as<ASTLiteral &>().value.safeGet<String>(),

View File

@ -28,7 +28,7 @@ private:
void parseArguments(const ASTPtr & ast_function, ContextPtr context) override;
String connection_str;
String remote_table_name, remote_table_schema;
String remote_table_name, remote_table_schema, on_conflict;
postgres::PoolWithFailoverPtr connection_pool;
};
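
The table function accepts the same trailing argument; a sketch matching the integration test below (connection details are the test fixtures):

INSERT INTO TABLE FUNCTION postgresql('postgres1:5432', 'postgres', 'test_conflict', 'postgres', 'mysecretpassword', '', 'ON CONFLICT DO NOTHING')
SELECT number, concat('name_', toString(number)), 3 FROM numbers(100);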

View File

@ -634,6 +634,7 @@ def run_tests_array(all_tests_with_params):
open(stdout_file).read().split('\n')[:100])
status += '\n'
status += "\nstdout:\n{}\n".format(stdout)
status += 'Database: ' + testcase_args.testcase_database
elif stderr:
@ -643,6 +644,7 @@ def run_tests_array(all_tests_with_params):
status += print_test_time(total_time)
status += " - having stderror:\n{}\n".format(
'\n'.join(stderr.split('\n')[:100]))
status += "\nstdout:\n{}\n".format(stdout)
status += 'Database: ' + testcase_args.testcase_database
elif 'Exception' in stdout:
failures += 1

View File

@ -0,0 +1,5 @@
<yandex>
<merge_tree>
<number_of_free_entries_in_pool_to_execute_mutation>8</number_of_free_entries_in_pool_to_execute_mutation>
</merge_tree>
</yandex>

View File

@ -31,6 +31,7 @@ ln -sf $SRC_PATH/config.d/max_concurrent_queries.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/test_cluster_with_incorrect_pw.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/keeper_port.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/logging_no_rotate.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/merge_tree.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/tcp_with_proxy.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/top_level_domains_lists.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/top_level_domains_path.xml $DEST_SERVER_PATH/config.d/

View File

@ -291,7 +291,7 @@ def test_postgres_distributed(started_cluster):
node2.query('DROP TABLE test_shards')
node2.query('DROP TABLE test_replicas')
def test_datetime_with_timezone(started_cluster):
cursor = started_cluster.postgres_conn.cursor()
cursor.execute("DROP TABLE IF EXISTS test_timezone")
@ -328,6 +328,32 @@ def test_postgres_ndim(started_cluster):
cursor.execute("DROP TABLE arr1, arr2")
def test_postgres_on_conflict(started_cluster):
cursor = started_cluster.postgres_conn.cursor()
table = 'test_conflict'
cursor.execute(f'DROP TABLE IF EXISTS {table}')
cursor.execute(f'CREATE TABLE {table} (a integer PRIMARY KEY, b text, c integer)')
node1.query('''
CREATE TABLE test_conflict (a UInt32, b String, c Int32)
ENGINE PostgreSQL('postgres1:5432', 'postgres', 'test_conflict', 'postgres', 'mysecretpassword', '', 'ON CONFLICT DO NOTHING');
''')
node1.query(f''' INSERT INTO {table} SELECT number, concat('name_', toString(number)), 3 from numbers(100)''')
node1.query(f''' INSERT INTO {table} SELECT number, concat('name_', toString(number)), 4 from numbers(100)''')
check1 = f"SELECT count() FROM {table}"
assert (node1.query(check1)).rstrip() == '100'
table_func = f'''postgresql('{started_cluster.postgres_ip}:{started_cluster.postgres_port}', 'postgres', '{table}', 'postgres', 'mysecretpassword', '', 'ON CONFLICT DO NOTHING')'''
node1.query(f'''INSERT INTO TABLE FUNCTION {table_func} SELECT number, concat('name_', toString(number)), 3 from numbers(100)''')
node1.query(f'''INSERT INTO TABLE FUNCTION {table_func} SELECT number, concat('name_', toString(number)), 3 from numbers(100)''')
check1 = f"SELECT count() FROM {table}"
assert (node1.query(check1)).rstrip() == '100'
cursor.execute(f'DROP TABLE {table} ')
if __name__ == '__main__':
cluster.start()
input("Cluster created, press any key to destroy...")

View File

@ -68,6 +68,7 @@
(do
(c/exec :mkdir :-p common-prefix)
(c/exec :mkdir :-p data-dir)
(c/exec :mkdir :-p coordination-data-dir)
(c/exec :mkdir :-p logs-dir)
(c/exec :mkdir :-p configs-dir)
(c/exec :mkdir :-p sub-configs-dir)

View File

@ -0,0 +1,2 @@
Replication did not hang: synced all replicas of alter_table
Consistency: 1

View File

@ -3,15 +3,17 @@
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
# shellcheck source=./replication.lib
. "$CURDIR"/replication.lib
set -e
$CLICKHOUSE_CLIENT -n -q "
DROP TABLE IF EXISTS alter_table;
DROP TABLE IF EXISTS alter_table2;
DROP TABLE IF EXISTS alter_table0;
DROP TABLE IF EXISTS alter_table1;
CREATE TABLE alter_table (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0;
CREATE TABLE alter_table2 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0
CREATE TABLE alter_table0 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0;
CREATE TABLE alter_table1 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0
"
function thread1()
@ -22,22 +24,22 @@ function thread1()
function thread2()
{
while true; do $CLICKHOUSE_CLIENT -n --query "ALTER TABLE alter_table ADD COLUMN h String DEFAULT '0'; ALTER TABLE alter_table MODIFY COLUMN h UInt64; ALTER TABLE alter_table DROP COLUMN h;"; done
while true; do $CLICKHOUSE_CLIENT -n --query "ALTER TABLE alter_table0 ADD COLUMN h String DEFAULT '0'; ALTER TABLE alter_table0 MODIFY COLUMN h UInt64; ALTER TABLE alter_table0 DROP COLUMN h;"; done
}
function thread3()
{
while true; do $CLICKHOUSE_CLIENT -q "INSERT INTO alter_table SELECT rand(1), rand(2), 1 / rand(3), toString(rand(4)), [rand(5), rand(6)], rand(7) % 2 ? NULL : generateUUIDv4(), (rand(8), rand(9)) FROM numbers(100000)"; done
while true; do $CLICKHOUSE_CLIENT -q "INSERT INTO alter_table0 SELECT rand(1), rand(2), 1 / rand(3), toString(rand(4)), [rand(5), rand(6)], rand(7) % 2 ? NULL : generateUUIDv4(), (rand(8), rand(9)) FROM numbers(100000)"; done
}
function thread4()
{
while true; do $CLICKHOUSE_CLIENT -q "OPTIMIZE TABLE alter_table FINAL"; done
while true; do $CLICKHOUSE_CLIENT -q "OPTIMIZE TABLE alter_table0 FINAL"; done
}
function thread5()
{
while true; do $CLICKHOUSE_CLIENT -q "ALTER TABLE alter_table DELETE WHERE cityHash64(a,b,c,d,e,g) % 1048576 < 524288"; done
while true; do $CLICKHOUSE_CLIENT -q "ALTER TABLE alter_table0 DELETE WHERE cityHash64(a,b,c,d,e,g) % 1048576 < 524288"; done
}
# https://stackoverflow.com/questions/9954794/execute-a-shell-function-with-timeout
@ -74,8 +76,9 @@ timeout $TIMEOUT bash -c thread4 2> /dev/null &
timeout $TIMEOUT bash -c thread5 2> /dev/null &
wait
check_replication_consistency "alter_table" "count(), sum(a), sum(b), round(sum(c))"
$CLICKHOUSE_CLIENT -n -q "DROP TABLE alter_table;" &
$CLICKHOUSE_CLIENT -n -q "DROP TABLE alter_table2;" &
$CLICKHOUSE_CLIENT -n -q "DROP TABLE alter_table0;" &
$CLICKHOUSE_CLIENT -n -q "DROP TABLE alter_table1;" &
wait

View File

@ -0,0 +1,2 @@
Replication did not hang: synced all replicas of alter_table_
Consistency: 1

View File

@ -3,6 +3,8 @@
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
# shellcheck source=./replication.lib
. "$CURDIR"/replication.lib
set -e
@ -99,6 +101,8 @@ timeout $TIMEOUT bash -c thread6 2>&1 | grep "was not completely removed from Zo
wait
check_replication_consistency "alter_table_" "count(), sum(a), sum(b), round(sum(c))"
for i in {0..9}; do
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS alter_table_$i" 2>&1 | grep "was not completely removed from ZooKeeper" &
done

Some files were not shown because too many files have changed in this diff.