Merge branch 'master' into Fix_parameterized_view_with_cte_multiple_usage

Smita Kulkarni 2023-07-25 12:08:38 +02:00
commit 94ff9a4696
509 changed files with 9568 additions and 5138 deletions

.gitignore vendored (3 changed lines)

@ -69,6 +69,7 @@ cmake-build-*
*.pyc
__pycache__
*.pytest_cache
.mypy_cache
test.cpp
CPackConfig.cmake
@ -161,8 +162,10 @@ tests/queries/0_stateless/test_*
tests/queries/0_stateless/*.binary
tests/queries/0_stateless/*.generated-expect
tests/queries/0_stateless/*.expect.history
tests/integration/**/_gen
# rust
/rust/**/target
# It is autogenerated from *.in
/rust/**/.cargo/config.toml
/rust/**/vendor

.gitmodules vendored (6 changed lines)

@ -258,9 +258,6 @@
[submodule "contrib/wyhash"]
path = contrib/wyhash
url = https://github.com/wangyi-fudan/wyhash
[submodule "contrib/hashidsxx"]
path = contrib/hashidsxx
url = https://github.com/schoentoon/hashidsxx
[submodule "contrib/nats-io"]
path = contrib/nats-io
url = https://github.com/ClickHouse/nats.c
@ -343,3 +340,6 @@
[submodule "contrib/c-ares"]
path = contrib/c-ares
url = https://github.com/c-ares/c-ares.git
[submodule "contrib/incbin"]
path = contrib/incbin
url = https://github.com/graphitemaster/incbin.git


@ -448,7 +448,7 @@ inline char * find_last_not_symbols_or_null(char * begin, char * end)
/// See https://github.com/boostorg/algorithm/issues/63
/// And https://bugs.llvm.org/show_bug.cgi?id=41141
template <char... symbols, typename To>
inline void splitInto(To & to, const std::string & what, bool token_compress = false)
inline To & splitInto(To & to, std::string_view what, bool token_compress = false)
{
const char * pos = what.data();
const char * end = pos + what.size();
@ -464,4 +464,6 @@ inline void splitInto(To & to, const std::string & what, bool token_compress = f
else
pos = delimiter_or_end;
}
return to;
}
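For context, `splitInto` now accepts a `std::string_view` and returns the destination container, so a call can be used directly as an expression. A minimal standalone sketch of that shape (single delimiter instead of the real variadic `symbols` pack; this is not ClickHouse's implementation):

```cpp
#include <iostream>
#include <string>
#include <string_view>
#include <vector>

// Simplified sketch of the new signature: take a std::string_view and return
// the destination container by reference so the call composes as an expression.
template <char delimiter, typename To>
To & splitInto(To & to, std::string_view what)
{
    const char * pos = what.data();
    const char * end = pos + what.size();
    while (pos < end)
    {
        const char * next = pos;
        while (next < end && *next != delimiter)
            ++next;
        to.emplace_back(pos, next - pos);
        pos = (next < end) ? next + 1 : end;
    }
    return to;
}

int main()
{
    std::vector<std::string> parts;
    // Returning `to` lets the result be consumed directly, e.g. for a size check.
    std::cout << splitInto<','>(parts, "a,b,c").size() << "\n"; // prints 3
}
```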

base/base/move_extend.h Normal file (9 changed lines)

@ -0,0 +1,9 @@
#pragma once
/// Extend @p to by moving elements from @p from to the end of @p to.
/// @return Iterator in @p to pointing to the first of the moved elements.
template <class To, class From>
typename To::iterator moveExtend(To & to, From && from)
{
return to.insert(to.end(), std::make_move_iterator(from.begin()), std::make_move_iterator(from.end()));
}
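A minimal usage sketch of the new helper; the template is copied verbatim from the header above so the snippet is self-contained:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Copy of the helper from base/base/move_extend.h for a self-contained example.
template <class To, class From>
typename To::iterator moveExtend(To & to, From && from)
{
    return to.insert(to.end(), std::make_move_iterator(from.begin()), std::make_move_iterator(from.end()));
}

int main()
{
    std::vector<std::string> to{"a", "b"};
    std::vector<std::string> from{"c", "d"};

    // Elements of `from` are moved, not copied; `from` keeps its size but the
    // strings inside it are left in a moved-from state.
    auto first_moved = moveExtend(to, std::move(from));

    std::cout << to.size() << " " << *first_moved << "\n"; // prints: 4 c
}
```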


@ -57,7 +57,7 @@ public:
URI();
/// Creates an empty URI.
explicit URI(const std::string & uri);
explicit URI(const std::string & uri, bool disable_url_encoding = false);
/// Parses an URI from the given string. Throws a
/// SyntaxException if the uri is not valid.
@ -350,6 +350,10 @@ protected:
static const std::string ILLEGAL;
private:
void encodePath(std::string & encodedStr) const;
void decodePath(const std::string & encodedStr);
std::string _scheme;
std::string _userInfo;
std::string _host;
@ -357,6 +361,8 @@ private:
std::string _path;
std::string _query;
std::string _fragment;
bool _disable_url_encoding = false;
};


@ -36,8 +36,8 @@ URI::URI():
}
URI::URI(const std::string& uri):
_port(0)
URI::URI(const std::string& uri, bool decode_and_encode_path):
_port(0), _disable_url_encoding(decode_and_encode_path)
{
parse(uri);
}
@ -107,7 +107,8 @@ URI::URI(const URI& uri):
_port(uri._port),
_path(uri._path),
_query(uri._query),
_fragment(uri._fragment)
_fragment(uri._fragment),
_disable_url_encoding(uri._disable_url_encoding)
{
}
@ -119,7 +120,8 @@ URI::URI(const URI& baseURI, const std::string& relativeURI):
_port(baseURI._port),
_path(baseURI._path),
_query(baseURI._query),
_fragment(baseURI._fragment)
_fragment(baseURI._fragment),
_disable_url_encoding(baseURI._disable_url_encoding)
{
resolve(relativeURI);
}
@ -151,6 +153,7 @@ URI& URI::operator = (const URI& uri)
_path = uri._path;
_query = uri._query;
_fragment = uri._fragment;
_disable_url_encoding = uri._disable_url_encoding;
}
return *this;
}
@ -181,6 +184,7 @@ void URI::swap(URI& uri)
std::swap(_path, uri._path);
std::swap(_query, uri._query);
std::swap(_fragment, uri._fragment);
std::swap(_disable_url_encoding, uri._disable_url_encoding);
}
@ -201,7 +205,7 @@ std::string URI::toString() const
std::string uri;
if (isRelative())
{
encode(_path, RESERVED_PATH, uri);
encodePath(uri);
}
else
{
@ -217,7 +221,7 @@ std::string URI::toString() const
{
if (!auth.empty() && _path[0] != '/')
uri += '/';
encode(_path, RESERVED_PATH, uri);
encodePath(uri);
}
else if (!_query.empty() || !_fragment.empty())
{
@ -313,7 +317,7 @@ void URI::setAuthority(const std::string& authority)
void URI::setPath(const std::string& path)
{
_path.clear();
decode(path, _path);
decodePath(path);
}
@ -418,7 +422,7 @@ void URI::setPathEtc(const std::string& pathEtc)
std::string URI::getPathEtc() const
{
std::string pathEtc;
encode(_path, RESERVED_PATH, pathEtc);
encodePath(pathEtc);
if (!_query.empty())
{
pathEtc += '?';
@ -436,7 +440,7 @@ std::string URI::getPathEtc() const
std::string URI::getPathAndQuery() const
{
std::string pathAndQuery;
encode(_path, RESERVED_PATH, pathAndQuery);
encodePath(pathAndQuery);
if (!_query.empty())
{
pathAndQuery += '?';
@ -681,6 +685,21 @@ void URI::decode(const std::string& str, std::string& decodedStr, bool plusAsSpa
}
}
void URI::encodePath(std::string & encodedStr) const
{
if (_disable_url_encoding)
encodedStr = _path;
else
encode(_path, RESERVED_PATH, encodedStr);
}
void URI::decodePath(const std::string & encodedStr)
{
if (_disable_url_encoding)
_path = encodedStr;
else
decode(encodedStr, _path);
}
bool URI::isWellKnownPort() const
{
@ -820,7 +839,7 @@ void URI::parsePath(std::string::const_iterator& it, const std::string::const_it
{
std::string path;
while (it != end && *it != '?' && *it != '#') path += *it++;
decode(path, _path);
decodePath(path);
}
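The net effect of the patch is a pass-through mode for the path. Below is a hypothetical standalone sketch of that toggle (`encodePathMaybe` is an illustrative name and the percent-encoding rule is deliberately simplified; it is not the patched Poco implementation):

```cpp
#include <iostream>
#include <string>

// When disable_url_encoding is set, the path is emitted verbatim; otherwise
// reserved characters are percent-encoded (simplified character set here).
std::string encodePathMaybe(const std::string & path, bool disable_url_encoding)
{
    if (disable_url_encoding)
        return path;

    static const std::string reserved = "?#[]@!$&'()*+,;= \"<>%{}|\\^`";
    std::string encoded;
    for (unsigned char c : path)
    {
        if (reserved.find(c) != std::string::npos)
        {
            static const char * hex = "0123456789ABCDEF";
            encoded += '%';
            encoded += hex[c >> 4];
            encoded += hex[c & 0x0F];
        }
        else
            encoded += c;
    }
    return encoded;
}

int main()
{
    std::cout << encodePathMaybe("/a b/%41", false) << "\n"; // "/a%20b/%2541"
    std::cout << encodePathMaybe("/a b/%41", true) << "\n";  // "/a b/%41" (verbatim)
}
```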


@ -1,58 +0,0 @@
# Embed a set of resource files into a resulting object file.
#
# Signature: `clickhouse_embed_binaries(TARGET <target> RESOURCE_DIR <dir> RESOURCES <resource> ...)
#
# This will generate a static library target named `<target>`, which contains the contents of
# each `<resource>` file. The files should be located in `<dir>`. <dir> defaults to
# ${CMAKE_CURRENT_SOURCE_DIR}, and the resources may not be empty.
#
# Each resource will result in three symbols in the final archive, based on the name `<resource>`.
# These are:
# 1. `_binary_<name>_start`: Points to the start of the binary data from `<resource>`.
# 2. `_binary_<name>_end`: Points to the end of the binary data from `<resource>`.
# 3. `_binary_<name>_size`: Points to the size of the binary data from `<resource>`.
#
# `<name>` is a normalized name derived from `<resource>`, by replacing the characters "./-" with
# the character "_", and the character "+" with "_PLUS_". This scheme is similar to those generated
# by `ld -r -b binary`, and matches the expectations in `./base/common/getResource.cpp`.
macro(clickhouse_embed_binaries)
set(one_value_args TARGET RESOURCE_DIR)
set(resources RESOURCES)
cmake_parse_arguments(EMBED "" "${one_value_args}" ${resources} ${ARGN})
if (NOT DEFINED EMBED_TARGET)
message(FATAL_ERROR "A target name must be provided for embedding binary resources into")
endif()
if (NOT DEFINED EMBED_RESOURCE_DIR)
set(EMBED_RESOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
endif()
list(LENGTH EMBED_RESOURCES N_RESOURCES)
if (N_RESOURCES LESS 1)
message(FATAL_ERROR "The list of binary resources to embed may not be empty")
endif()
add_library("${EMBED_TARGET}" STATIC)
set_target_properties("${EMBED_TARGET}" PROPERTIES LINKER_LANGUAGE C)
set(EMBED_TEMPLATE_FILE "${PROJECT_SOURCE_DIR}/programs/embed_binary.S.in")
foreach(RESOURCE_FILE ${EMBED_RESOURCES})
set(ASSEMBLY_FILE_NAME "${RESOURCE_FILE}.S")
set(BINARY_FILE_NAME "${RESOURCE_FILE}")
# Normalize the name of the resource.
string(REGEX REPLACE "[\./-]" "_" SYMBOL_NAME "${RESOURCE_FILE}") # - must be last in regex
string(REPLACE "+" "_PLUS_" SYMBOL_NAME "${SYMBOL_NAME}")
# Generate the configured assembly file in the output directory.
configure_file("${EMBED_TEMPLATE_FILE}" "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" @ONLY)
# Set the include directory for relative paths specified for `.incbin` directive.
set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY INCLUDE_DIRECTORIES "${EMBED_RESOURCE_DIR}")
target_sources("${EMBED_TARGET}" PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}")
set_target_properties("${EMBED_TARGET}" PROPERTIES OBJECT_DEPENDS "${RESOURCE_FILE}")
endforeach()
endmacro()


@ -164,14 +164,13 @@ add_contrib (libpq-cmake libpq)
add_contrib (nuraft-cmake NuRaft)
add_contrib (fast_float-cmake fast_float)
add_contrib (datasketches-cpp-cmake datasketches-cpp)
add_contrib (hashidsxx-cmake hashidsxx)
add_contrib (incbin-cmake incbin)
option(ENABLE_NLP "Enable NLP functions support" ${ENABLE_LIBRARIES})
if (ENABLE_NLP)
add_contrib (libstemmer-c-cmake libstemmer_c)
add_contrib (wordnet-blast-cmake wordnet-blast)
add_contrib (lemmagen-c-cmake lemmagen-c)
add_contrib (nlp-data-cmake nlp-data)
add_contrib (cld2-cmake cld2)
endif()

contrib/NuRaft vendored (2 changed lines)

@ -1 +1 @@
Subproject commit 491eaf592d950e0e37accbe8b3f217e068c9fecf
Subproject commit eb1572129c71beb2156dcdaadc3fb136954aed96


@ -1,4 +1,3 @@
include(${ClickHouse_SOURCE_DIR}/cmake/embed_binary.cmake)
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/cctz")
set (SRCS
@ -23,12 +22,10 @@ if (OS_FREEBSD)
endif ()
# Related to time_zones table:
# StorageSystemTimeZones.generated.cpp is autogenerated each time during a build
# data in this file will be used to populate the system.time_zones table; this is specific to OS_LINUX
# as the library that's built using embedded tzdata is also specific to OS_LINUX
set(SYSTEM_STORAGE_TZ_FILE "${PROJECT_BINARY_DIR}/src/Storages/System/StorageSystemTimeZones.generated.cpp")
# TimeZones.generated.cpp is autogenerated each time during a build
set(TIMEZONES_FILE "${CMAKE_CURRENT_BINARY_DIR}/TimeZones.generated.cpp")
# remove existing copies so that they are generated fresh on each build.
file(REMOVE ${SYSTEM_STORAGE_TZ_FILE})
file(REMOVE ${TIMEZONES_FILE})
# get the list of timezones from tzdata shipped with cctz
set(TZDIR "${LIBRARY_DIR}/testdata/zoneinfo")
@ -36,28 +33,44 @@ file(STRINGS "${LIBRARY_DIR}/testdata/version" TZDATA_VERSION)
set_property(GLOBAL PROPERTY TZDATA_VERSION_PROP "${TZDATA_VERSION}")
message(STATUS "Packaging with tzdata version: ${TZDATA_VERSION}")
set(TIMEZONE_RESOURCE_FILES)
# each file in that dir (except the *.tab files and localtime) stores the info about a timezone
execute_process(COMMAND
bash -c "cd ${TZDIR} && find * -type f -and ! -name '*.tab' -and ! -name 'localtime' | LC_ALL=C sort | paste -sd ';' -"
OUTPUT_STRIP_TRAILING_WHITESPACE
OUTPUT_VARIABLE TIMEZONES)
file(APPEND ${SYSTEM_STORAGE_TZ_FILE} "// autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt\n")
file(APPEND ${SYSTEM_STORAGE_TZ_FILE} "const char * auto_time_zones[] {\n" )
file(APPEND ${TIMEZONES_FILE} "// autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt\n")
file(APPEND ${TIMEZONES_FILE} "#include <incbin.h>\n")
set (COUNTER 1)
foreach(TIMEZONE ${TIMEZONES})
file(APPEND ${TIMEZONES_FILE} "INCBIN(resource_timezone${COUNTER}, \"${TZDIR}/${TIMEZONE}\");\n")
MATH(EXPR COUNTER "${COUNTER}+1")
endforeach(TIMEZONE)
file(APPEND ${TIMEZONES_FILE} "const char * auto_time_zones[] {\n" )
foreach(TIMEZONE ${TIMEZONES})
file(APPEND ${SYSTEM_STORAGE_TZ_FILE} " \"${TIMEZONE}\",\n")
list(APPEND TIMEZONE_RESOURCE_FILES "${TIMEZONE}")
file(APPEND ${TIMEZONES_FILE} " \"${TIMEZONE}\",\n")
MATH(EXPR COUNTER "${COUNTER}+1")
endforeach(TIMEZONE)
file(APPEND ${SYSTEM_STORAGE_TZ_FILE} " nullptr};\n")
clickhouse_embed_binaries(
TARGET tzdata
RESOURCE_DIR "${TZDIR}"
RESOURCES ${TIMEZONE_RESOURCE_FILES}
)
add_dependencies(_cctz tzdata)
target_link_libraries(_cctz INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:tzdata> -Wl,${NO_WHOLE_ARCHIVE}")
file(APPEND ${TIMEZONES_FILE} " nullptr\n};\n\n")
file(APPEND ${TIMEZONES_FILE} "#include <string_view>\n\n")
file(APPEND ${TIMEZONES_FILE} "std::string_view getTimeZone(const char * name)\n{\n" )
set (COUNTER 1)
foreach(TIMEZONE ${TIMEZONES})
file(APPEND ${TIMEZONES_FILE} " if (std::string_view(\"${TIMEZONE}\") == name) return { reinterpret_cast<const char *>(gresource_timezone${COUNTER}Data), gresource_timezone${COUNTER}Size };\n")
MATH(EXPR COUNTER "${COUNTER}+1")
endforeach(TIMEZONE)
file(APPEND ${TIMEZONES_FILE} " return {};\n")
file(APPEND ${TIMEZONES_FILE} "}\n")
add_library (tzdata ${TIMEZONES_FILE})
target_link_libraries(tzdata ch_contrib::incbin)
target_link_libraries(_cctz tzdata)
add_library(ch_contrib::cctz ALIAS _cctz)
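For readability, this is the approximate shape of the `TimeZones.generated.cpp` file produced by the `file(APPEND ...)` calls above, reconstructed for two example zones; the zone paths stand in for `${TZDIR}` and the real file lists every zone found there:

```cpp
// Reconstructed shape of the generated file (illustrative paths and zones).
// Needs <incbin.h> from contrib/incbin to compile.
#include <incbin.h>

INCBIN(resource_timezone1, "/path/to/zoneinfo/Europe/Amsterdam");
INCBIN(resource_timezone2, "/path/to/zoneinfo/UTC");

const char * auto_time_zones[] {
    "Europe/Amsterdam",
    "UTC",
    nullptr
};

#include <string_view>

std::string_view getTimeZone(const char * name)
{
    if (std::string_view("Europe/Amsterdam") == name) return { reinterpret_cast<const char *>(gresource_timezone1Data), gresource_timezone1Size };
    if (std::string_view("UTC") == name) return { reinterpret_cast<const char *>(gresource_timezone2Data), gresource_timezone2Size };
    return {};
}
```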

contrib/hashidsxx vendored (1 changed line)

@ -1 +0,0 @@
Subproject commit 783f6911ccfdaca83e3cfac084c4aad888a80cee


@ -1,14 +0,0 @@
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/hashidsxx")
set (SRCS
"${LIBRARY_DIR}/hashids.cpp"
)
set (HDRS
"${LIBRARY_DIR}/hashids.h"
)
add_library(_hashidsxx ${SRCS} ${HDRS})
target_include_directories(_hashidsxx SYSTEM PUBLIC "${LIBRARY_DIR}")
add_library(ch_contrib::hashidsxx ALIAS _hashidsxx)

contrib/incbin vendored Submodule (1 changed line)

@ -0,0 +1 @@
Subproject commit 6e576cae5ab5810f25e2631f2e0b80cbe7dc8cbf


@ -0,0 +1,8 @@
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/incbin")
add_library(_incbin INTERFACE)
target_include_directories(_incbin SYSTEM INTERFACE ${LIBRARY_DIR})
add_library(ch_contrib::incbin ALIAS _incbin)
# Warning "incbin is incompatible with bitcode. Using the library will break upload to App Store if you have bitcode enabled.
# Add `#define INCBIN_SILENCE_BITCODE_WARNING` before including this header to silence this warning."
target_compile_definitions(_incbin INTERFACE INCBIN_SILENCE_BITCODE_WARNING)
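The usage pattern this library enables is visible later in this diff (`programs/install/Install.cpp`, `programs/server/Server.cpp`). A small self-contained sketch, assuming `incbin.h` is on the include path and `example.txt` is a placeholder file that exists at build time:

```cpp
#include <iostream>
#include <string_view>

#include <incbin.h>

// Embeds the file contents into the binary at compile time.
// "example.txt" is a placeholder path for this sketch.
INCBIN(resource_example_txt, "example.txt");

int main()
{
    std::string_view contents(reinterpret_cast<const char *>(gresource_example_txtData),
                              gresource_example_txtSize);
    std::cout << contents.size() << " bytes embedded\n";
}
```

`INCBIN(name, file)` generates `g<name>Data`, `g<name>End` and `g<name>Size` symbols, which is why the changes below reference `gresource_config_xmlData` and `gresource_config_xmlSize`.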


@ -1,15 +0,0 @@
include(${ClickHouse_SOURCE_DIR}/cmake/embed_binary.cmake)
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/nlp-data")
add_library (_nlp_data INTERFACE)
clickhouse_embed_binaries(
TARGET nlp_dictionaries
RESOURCE_DIR "${LIBRARY_DIR}"
RESOURCES charset.zst tonality_ru.zst programming.zst
)
add_dependencies(_nlp_data nlp_dictionaries)
target_link_libraries(_nlp_data INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:nlp_dictionaries> -Wl,${NO_WHOLE_ARCHIVE}")
add_library(ch_contrib::nlp_data ALIAS _nlp_data)


@ -58,6 +58,33 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
rustup target add aarch64-apple-darwin && \
rustup target add powerpc64le-unknown-linux-gnu
# Create vendor cache for cargo.
#
# Note that the config.toml for the root is used; you will not be able to
# install any other crates, except those which have been vendored (since if
# there is "replace-with" for some source, then cargo will not look at other
# remotes).
#
# Notes for the command itself:
# - --chown is required to preserve the rights
# - unstable-options for -C
# - chmod is required to fix the permissions, since builds are running from a different user
# - a copy of the Cargo.lock is required for proper dependency versions
# - cargo vendor --sync is required to overcome the [1] bug.
#
# [1]: https://github.com/rust-lang/wg-cargo-std-aware/issues/23
COPY --chown=root:root /rust /rust/packages
RUN cargo -Z unstable-options -C /rust/packages vendor > $CARGO_HOME/config.toml && \
cp "$(rustc --print=sysroot)"/lib/rustlib/src/rust/Cargo.lock "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/ && \
cargo -Z unstable-options -C /rust/packages vendor --sync "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/Cargo.toml && \
rm "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/Cargo.lock && \
sed -i "s#\"vendor\"#\"/rust/vendor\"#" $CARGO_HOME/config.toml && \
cat $CARGO_HOME/config.toml && \
mv /rust/packages/vendor /rust/vendor && \
chmod -R o=r+X /rust/vendor && \
ls -R -l /rust/packages && \
rm -r /rust/packages
# NOTE: Seems like gcc-11 is too new for ubuntu20 repository
# A cross-linker for RISC-V 64 (we need it, because LLVM's LLD does not work):
RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \

docker/packager/binary/rust Symbolic link (1 changed line)

@ -0,0 +1 @@
../../../rust


@ -141,13 +141,13 @@ function clone_submodules
contrib/jemalloc
contrib/replxx
contrib/wyhash
contrib/hashidsxx
contrib/c-ares
contrib/morton-nd
contrib/xxHash
contrib/simdjson
contrib/liburing
contrib/libfiu
contrib/incbin
)
git submodule sync


@ -135,4 +135,5 @@ ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'
EXPOSE 2375
ENTRYPOINT ["dockerd-entrypoint.sh"]
CMD ["sh", "-c", "pytest $PYTEST_OPTS"]
# To pass additional arguments (i.e. list of tests) use PYTEST_ADDOPTS
CMD ["sh", "-c", "pytest"]


@ -4,6 +4,9 @@
set -e -x -a
# Choose random timezone for this test run.
#
# NOTE that clickhouse-test will randomize session_timezone by itself as well
# (it will choose between the default server timezone and something specific).
TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
echo "Chosen random timezone $TZ"
ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone


@ -106,3 +106,4 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
## Storage Settings {#storage-settings}
- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
- [disable_url_encoding](/docs/en/operations/settings/settings.md#disable_url_encoding) - allows to disable decoding/encoding path in uri. Disabled by default.


@ -537,6 +537,8 @@ Possible values:
The first phase of a grace join reads the right table and splits it into N buckets depending on the hash value of key columns (initially, N is `grace_hash_join_initial_buckets`). This is done in a way to ensure that each bucket can be processed independently. Rows from the first bucket are added to an in-memory hash table while the others are saved to disk. If the hash table grows beyond the memory limit (e.g., as set by [`max_bytes_in_join`](/docs/en/operations/settings/query-complexity.md/#settings-max_bytes_in_join)), the number of buckets is increased and each row is re-assigned to a bucket. Any rows which don't belong to the current bucket are flushed and reassigned.
Supports `INNER/LEFT/RIGHT/FULL ALL/ANY JOIN`.
- hash
[Hash join algorithm](https://en.wikipedia.org/wiki/Hash_join) is used. The most generic implementation that supports all combinations of kind and strictness and multiple join keys that are combined with `OR` in the `JOIN ON` section.
@ -3466,6 +3468,12 @@ Possible values:
Default value: `0`.
## disable_url_encoding {#disable_url_encoding}
Allows to disable decoding/encoding path in uri in [URL](../../engines/table-engines/special/url.md) engine tables.
Disabled by default.
## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}
Adds a modifier `SYNC` to all `DROP` and `DETACH` queries.


@ -7,11 +7,17 @@ Contains information about settings for `MergeTree` tables.
Columns:
- `name` (String) — Setting name.
- `value` (String) — Setting value.
- `description` (String) — Setting description.
- `type` (String) — Setting type (implementation specific string value).
- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
- `name` ([String](../../sql-reference/data-types/string.md)) — Setting name.
- `value` ([String](../../sql-reference/data-types/string.md)) — Setting value.
- `changed` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Whether the setting was explicitly defined in the config or explicitly changed.
- `description` ([String](../../sql-reference/data-types/string.md)) — Setting description.
- `min` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Minimum value of the setting, if any is set via [constraints](../../operations/settings/constraints-on-settings.md#constraints-on-settings). If the setting has no minimum value, contains [NULL](../../sql-reference/syntax.md#null-literal).
- `max` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Maximum value of the setting, if any is set via [constraints](../../operations/settings/constraints-on-settings.md#constraints-on-settings). If the setting has no maximum value, contains [NULL](../../sql-reference/syntax.md#null-literal).
- `readonly` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether the current user can change the setting:
- `0` — Current user can change the setting.
- `1` — Current user can't change the setting.
- `type` ([String](../../sql-reference/data-types/string.md)) — Setting type (implementation specific string value).
- `is_obsolete` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting is obsolete.
**Example**
```sql
@ -21,35 +27,51 @@ SELECT * FROM system.merge_tree_settings LIMIT 4 FORMAT Vertical;
```response
Row 1:
──────
name: min_compress_block_size
value: 0
changed: 0
description: When granule is written, compress the data in buffer if the size of pending uncompressed data is larger or equal than the specified threshold. If this setting is not set, the corresponding global setting is used.
min: ᴺᵁᴸᴸ
max: ᴺᵁᴸᴸ
readonly: 0
type: UInt64
is_obsolete: 0
Row 2:
──────
name: max_compress_block_size
value: 0
changed: 0
description: Compress the pending uncompressed data in buffer if its size is larger or equal than the specified threshold. Block of data will be compressed even if the current granule is not finished. If this setting is not set, the corresponding global setting is used.
min: ᴺᵁᴸᴸ
max: ᴺᵁᴸᴸ
readonly: 0
type: UInt64
is_obsolete: 0
Row 3:
──────
name: index_granularity
value: 8192
changed: 0
description: How many rows correspond to one primary key value.
type: SettingUInt64
Row 2:
──────
name: min_bytes_for_wide_part
value: 0
changed: 0
description: Minimal uncompressed size in bytes to create part in wide format instead of compact
type: SettingUInt64
Row 3:
──────
name: min_rows_for_wide_part
value: 0
changed: 0
description: Minimal number of rows to create part in wide format instead of compact
type: SettingUInt64
min: ᴺᵁᴸᴸ
max: ᴺᵁᴸᴸ
readonly: 0
type: UInt64
is_obsolete: 0
Row 4:
──────
name: merge_max_block_size
value: 8192
name: max_digestion_size_per_segment
value: 268435456
changed: 0
description: How many rows in blocks should be formed for merge operations.
type: SettingUInt64
description: Max number of bytes to digest per segment to build GIN index.
min: ᴺᵁᴸᴸ
max: ᴺᵁᴸᴸ
readonly: 0
type: UInt64
is_obsolete: 0
4 rows in set. Elapsed: 0.001 sec.
4 rows in set. Elapsed: 0.009 sec.
```


@ -14,6 +14,7 @@ Columns:
- `changed` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting was specified in `config.xml`
- `description` ([String](../../sql-reference/data-types/string.md)) — Short server setting description.
- `type` ([String](../../sql-reference/data-types/string.md)) — Server setting value type.
- `is_obsolete` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting is obsolete.
**Example**
@ -26,14 +27,22 @@ WHERE name LIKE '%thread_pool%'
```
``` text
┌─name─────────────────────────┬─value─┬─default─┬─changed─┬─description─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─type───┐
│ max_thread_pool_size │ 5000 │ 10000 │ 1 │ The maximum number of threads that could be allocated from the OS and used for query execution and background operations. │ UInt64 │
│ max_thread_pool_free_size │ 1000 │ 1000 │ 0 │ The maximum number of threads that will always stay in a global thread pool once allocated and remain idle in case of insufficient number of tasks. │ UInt64 │
│ thread_pool_queue_size │ 10000 │ 10000 │ 0 │ The maximum number of tasks that will be placed in a queue and wait for execution. │ UInt64 │
│ max_io_thread_pool_size │ 100 │ 100 │ 0 │ The maximum number of threads that would be used for IO operations │ UInt64 │
│ max_io_thread_pool_free_size │ 0 │ 0 │ 0 │ Max free size for IO thread pool. │ UInt64 │
│ io_thread_pool_queue_size │ 10000 │ 10000 │ 0 │ Queue size for IO thread pool. │ UInt64 │
└──────────────────────────────┴───────┴─────────┴─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────┘
┌─name────────────────────────────────────────┬─value─┬─default─┬─changed─┬─description─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─type───┬─is_obsolete─┐
│ max_thread_pool_size │ 10000 │ 10000 │ 1 │ The maximum number of threads that could be allocated from the OS and used for query execution and background operations. │ UInt64 │ 0 │
│ max_thread_pool_free_size │ 1000 │ 1000 │ 0 │ The maximum number of threads that will always stay in a global thread pool once allocated and remain idle in case of insufficient number of tasks. │ UInt64 │ 0 │
│ thread_pool_queue_size │ 10000 │ 10000 │ 0 │ The maximum number of tasks that will be placed in a queue and wait for execution. │ UInt64 │ 0 │
│ max_io_thread_pool_size │ 100 │ 100 │ 0 │ The maximum number of threads that would be used for IO operations │ UInt64 │ 0 │
│ max_io_thread_pool_free_size │ 0 │ 0 │ 0 │ Max free size for IO thread pool. │ UInt64 │ 0 │
│ io_thread_pool_queue_size │ 10000 │ 10000 │ 0 │ Queue size for IO thread pool. │ UInt64 │ 0 │
│ max_active_parts_loading_thread_pool_size │ 64 │ 64 │ 0 │ The number of threads to load active set of data parts (Active ones) at startup. │ UInt64 │ 0 │
│ max_outdated_parts_loading_thread_pool_size │ 32 │ 32 │ 0 │ The number of threads to load inactive set of data parts (Outdated ones) at startup. │ UInt64 │ 0 │
│ max_parts_cleaning_thread_pool_size │ 128 │ 128 │ 0 │ The number of threads for concurrent removal of inactive data parts. │ UInt64 │ 0 │
│ max_backups_io_thread_pool_size │ 1000 │ 1000 │ 0 │ The maximum number of threads that would be used for IO operations for BACKUP queries │ UInt64 │ 0 │
│ max_backups_io_thread_pool_free_size │ 0 │ 0 │ 0 │ Max free size for backups IO thread pool. │ UInt64 │ 0 │
│ backups_io_thread_pool_queue_size │ 0 │ 0 │ 0 │ Queue size for backups IO thread pool. │ UInt64 │ 0 │
└─────────────────────────────────────────────┴───────┴─────────┴─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────┴─────────────┘
```
Using of `WHERE changed` can be useful, for example, when you want to check


@ -17,6 +17,7 @@ Columns:
- `0` — Current user can change the setting.
- `1` — Current user can't change the setting.
- `default` ([String](../../sql-reference/data-types/string.md)) — Setting default value.
- `is_obsolete` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting is obsolete.
**Example**
@ -29,11 +30,14 @@ WHERE name LIKE '%min_i%'
```
``` text
┌─name────────────────────────────────────────┬─value─────┬─changed─┬─description───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┐
│ min_insert_block_size_rows │ 1048576 │ 0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │
│ min_insert_block_size_bytes │ 268435456 │ 0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │
│ read_backoff_min_interval_between_events_ms │ 1000 │ 0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │
└─────────────────────────────────────────────┴───────────┴─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────┴──────┴──────────┘
┌─name───────────────────────────────────────────────┬─value─────┬─changed─┬─description───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┬─type─────────┬─default───┬─alias_for─┬─is_obsolete─┐
│ min_insert_block_size_rows │ 1048449 │ 0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ UInt64 │ 1048449 │ │ 0 │
│ min_insert_block_size_bytes │ 268402944 │ 0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ UInt64 │ 268402944 │ │ 0 │
│ min_insert_block_size_rows_for_materialized_views │ 0 │ 0 │ Like min_insert_block_size_rows, but applied only during pushing to MATERIALIZED VIEW (default: min_insert_block_size_rows) │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ UInt64 │ 0 │ │ 0 │
│ min_insert_block_size_bytes_for_materialized_views │ 0 │ 0 │ Like min_insert_block_size_bytes, but applied only during pushing to MATERIALIZED VIEW (default: min_insert_block_size_bytes) │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ UInt64 │ 0 │ │ 0 │
│ read_backoff_min_interval_between_events_ms │ 1000 │ 0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ Milliseconds │ 1000 │ │ 0 │
└────────────────────────────────────────────────────┴───────────┴─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────┴──────┴──────────┴──────────────┴───────────┴───────────┴─────────────┘
```
Using of `WHERE changed` can be useful, for example, when you want to check:


@ -1449,7 +1449,7 @@ Using replacement fields, you can define a pattern for the resulting string. “
| %n | new-line character ('\n') | |
| %p | AM or PM designation | PM |
| %Q | Quarter (1-4) | 1 |
| %r | 12-hour HH:MM AM/PM time, equivalent to %H:%i %p | 10:30 PM |
| %r | 12-hour HH:MM AM/PM time, equivalent to %h:%i %p | 10:30 PM |
| %R | 24-hour HH:MM time, equivalent to %H:%i | 22:33 |
| %s | second (00-59) | 44 |
| %S | second (00-59) | 44 |


@ -51,7 +51,7 @@ Calculates the MD5 from a string and returns the resulting set of bytes as Fixed
If you do not need MD5 in particular, but you need a decent cryptographic 128-bit hash, use the sipHash128 function instead.
If you want to get the same result as output by the md5sum utility, use lower(hex(MD5(s))).
## sipHash64 {#hash_functions-siphash64}
## sipHash64 (#hash_functions-siphash64)
Produces a 64-bit [SipHash](https://en.wikipedia.org/wiki/SipHash) hash value.
@ -63,9 +63,9 @@ This is a cryptographic hash function. It works at least three times faster than
The function [interprets](/docs/en/sql-reference/functions/type-conversion-functions.md/#type_conversion_functions-reinterpretAsString) all the input parameters as strings and calculates the hash value for each of them. It then combines the hashes by the following algorithm:
1. The first and the second hash value are concatenated to an array which is hashed.
2. The previously calculated hash value and the hash of the third input parameter are hashed in a similar way.
3. This calculation is repeated for all remaining hash values of the original input.
1. The first and the second hash value are concatenated to an array which is hashed.
2. The previously calculated hash value and the hash of the third input parameter are hashed in a similar way.
3. This calculation is repeated for all remaining hash values of the original input.
**Arguments**


@ -631,3 +631,53 @@ Result:
│ 100 │ 200 │ 100-200 │ 100 │
└──────────────────────────────────────────────┴──────────────────────────────────────────────┴──────────────────────────────────────────────┴───────────────────────────────────────────┘
```
## hasSubsequence
Returns 1 if needle is a subsequence of haystack, or 0 otherwise.
A subsequence of a string is a sequence that can be derived from the given string by deleting zero or more elements without changing the order of the remaining elements.
**Syntax**
``` sql
hasSubsequence(haystack, needle)
```
**Arguments**
- `haystack` — String in which the search is performed. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `needle` — Subsequence to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal).
**Returned values**
- 1, if needle is a subsequence of haystack.
- 0, otherwise.
Type: `UInt8`.
**Examples**
``` sql
SELECT hasSubsequence('garbage', 'arg') ;
```
Result:
``` text
┌─hasSubsequence('garbage', 'arg')─┐
│ 1 │
└──────────────────────────────────┘
```
## hasSubsequenceCaseInsensitive
Like [hasSubsequence](#hasSubsequence) but searches case-insensitively.
## hasSubsequenceUTF8
Like [hasSubsequence](#hasSubsequence) but assumes `haystack` and `needle` are UTF-8 encoded strings.
## hasSubsequenceCaseInsensitiveUTF8
Like [hasSubsequenceUTF8](#hasSubsequenceUTF8) but searches case-insensitively.


@ -945,44 +945,6 @@ Result:
└────────────┴───────┘
```
## toDecimalString
Converts a numeric value to String with the number of fractional digits in the output specified by the user.
**Syntax**
``` sql
toDecimalString(number, scale)
```
**Parameters**
- `number` — Value to be represented as String, [Int, UInt](/docs/en/sql-reference/data-types/int-uint.md), [Float](/docs/en/sql-reference/data-types/float.md), [Decimal](/docs/en/sql-reference/data-types/decimal.md),
- `scale` — Number of fractional digits, [UInt8](/docs/en/sql-reference/data-types/int-uint.md).
* Maximum scale for [Decimal](/docs/en/sql-reference/data-types/decimal.md) and [Int, UInt](/docs/en/sql-reference/data-types/int-uint.md) types is 77 (it is the maximum possible number of significant digits for Decimal),
* Maximum scale for [Float](/docs/en/sql-reference/data-types/float.md) is 60.
**Returned value**
- Input value represented as [String](/docs/en/sql-reference/data-types/string.md) with given number of fractional digits (scale).
The number is rounded up or down according to common arithmetic in case requested scale is smaller than original number's scale.
**Example**
Query:
``` sql
SELECT toDecimalString(CAST('64.32', 'Float64'), 5);
```
Result:
```response
┌toDecimalString(CAST('64.32', 'Float64'), 5)─┐
│ 64.32000 │
└─────────────────────────────────────────────┘
```
## reinterpretAsUInt(8\|16\|32\|64)
## reinterpretAsInt(8\|16\|32\|64)


@ -205,7 +205,7 @@ The optional keyword `EXTENDED` currently has no effect, it only exists for MySQ
The optional keyword `FULL` causes the output to include the collation, comment and privilege columns.
`SHOW COLUMNS` produces a result table with the following structure:
The statement produces a result table with the following structure:
- field - The name of the column (String)
- type - The column data type (String)
- null - If the column data type is Nullable (UInt8)
@ -272,6 +272,10 @@ SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2
Displays a list of primary and data skipping indexes of a table.
This statement mostly exists for compatibility with MySQL. System tables [system.tables](../../operations/system-tables/tables.md) (for
primary keys) and [system.data_skipping_indices](../../operations/system-tables/data_skipping_indices.md) (for data skipping indices)
provide equivalent information but in a fashion more native to ClickHouse.
```sql
SHOW [EXTENDED] {INDEX | INDEXES | INDICES | KEYS } {FROM | IN} <table> [{FROM | IN} <db>] [WHERE <expr>] [INTO OUTFILE <filename>] [FORMAT <format>]
```
@ -281,22 +285,22 @@ equivalent. If no database is specified, the query assumes the current database
The optional keyword `EXTENDED` currently has no effect, it only exists for MySQL compatibility.
`SHOW INDEX` produces a result table with the following structure:
- table - The name of the table (String)
- non_unique - 0 if the index cannot contain duplicates, 1 otherwise (UInt8)
- key_name - The name of the index, `PRIMARY` if the index is a primary key index (String)
- seq_in_index - Currently unused
- column_name - Currently unused
- collation - The sorting of the column in the index, `A` if ascending, `D` if descending, `NULL` if unsorted (Nullable(String))
- cardinality - Currently unused
- sub_part - Currently unused
- packed - Currently unused
The statement produces a result table with the following structure:
- table - The name of the table. (String)
- non_unique - Always `1` as ClickHouse does not support uniqueness constraints. (UInt8)
- key_name - The name of the index, `PRIMARY` if the index is a primary key index. (String)
- seq_in_index - For a primary key index, the position of the column starting from `1`. For a data skipping index: always `1`. (UInt8)
- column_name - For a primary key index, the name of the column. For a data skipping index: `''` (empty string), see field "expression". (String)
- collation - The sorting of the column in the index: `A` if ascending, `D` if descending, `NULL` if unsorted. (Nullable(String))
- cardinality - An estimation of the index cardinality (number of unique values in the index). Currently always 0. (UInt64)
- sub_part - Always `NULL` because ClickHouse does not support index prefixes like MySQL. (Nullable(String))
- packed - Always `NULL` because ClickHouse does not support packed indexes (like MySQL). (Nullable(String))
- null - Currently unused
- index_type - The index type, e.g. `primary`, `minmax`, `bloom_filter` etc. (String)
- comment - Currently unused
- index_comment - Currently unused
- visible - If the index is visible to the optimizer, always `YES` (String)
- expression - The index expression (String)
- index_type - The index type, e.g. `PRIMARY`, `MINMAX`, `BLOOM_FILTER` etc. (String)
- comment - Additional information about the index, currently always `''` (empty string). (String)
- index_comment - `''` (empty string) because indexes in ClickHouse cannot have a `COMMENT` field (like in MySQL). (String)
- visible - If the index is visible to the optimizer, always `YES`. (String)
- expression - For a data skipping index, the index expression. For a primary key index: `''` (empty string). (String)
**Examples**
@ -310,11 +314,12 @@ Result:
``` text
┌─table─┬─non_unique─┬─key_name─┬─seq_in_index─┬─column_name─┬─collation─┬─cardinality─┬─sub_part─┬─packed─┬─null─┬─index_type───┬─comment─┬─index_comment─┬─visible─┬─expression─┐
│ tbl │ 0 │ blf_idx │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ bloom_filter │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ d, b │
│ tbl │ 0 │ mm1_idx │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ minmax │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ a, c, d │
│ tbl │ 0 │ mm2_idx │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ minmax │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ c, d, e │
│ tbl │ 0 │ PRIMARY │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ A │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ primary │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ c, a │
│ tbl │ 0 │ set_idx │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ set │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ e │
│ tbl │ 1 │ blf_idx │ 1 │ 1 │ ᴺᵁᴸᴸ │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ BLOOM_FILTER │ │ │ YES │ d, b │
│ tbl │ 1 │ mm1_idx │ 1 │ 1 │ ᴺᵁᴸᴸ │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ MINMAX │ │ │ YES │ a, c, d │
│ tbl │ 1 │ mm2_idx │ 1 │ 1 │ ᴺᵁᴸᴸ │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ MINMAX │ │ │ YES │ c, d, e │
│ tbl │ 1 │ PRIMARY │ 1 │ c │ A │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ PRIMARY │ │ │ YES │ │
│ tbl │ 1 │ PRIMARY │ 2 │ a │ A │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ PRIMARY │ │ │ YES │ │
│ tbl │ 1 │ set_idx │ 1 │ 1 │ ᴺᵁᴸᴸ │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ SET │ │ │ YES │ e │
└───────┴────────────┴──────────┴──────────────┴─────────────┴───────────┴─────────────┴──────────┴────────┴──────┴──────────────┴─────────┴───────────────┴─────────┴────────────┘
```


@ -56,6 +56,7 @@ Character `|` inside patterns is used to specify failover addresses. They are it
## Storage Settings {#storage-settings}
- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
- [disable_url_encoding](/docs/en/operations/settings/settings.md#disable_url_encoding) - allows to disable decoding/encoding path in uri. Disabled by default.
**See Also**


@ -801,3 +801,55 @@ SELECT countSubstringsCaseInsensitiveUTF8('аБв__АбВ__абв', 'Абв');
│ 3 │
└────────────────────────────────────────────────────────────┘
```
## hasSubsequence(haystack, needle) {#hasSubsequence}
Returns 1 if needle is a subsequence of haystack, otherwise 0.
**Syntax**
``` sql
hasSubsequence(haystack, needle)
```
**Arguments**
- `haystack` — String in which the search is performed. [String](../syntax.md#syntax-string-literal).
- `needle` — Subsequence to be searched for. [String](../syntax.md#syntax-string-literal).
**Returned values**
- 1, if needle is a subsequence of haystack.
- 0, if the substring was not found.
Type: `UInt8`.
**Examples**
Query:
``` sql
SELECT hasSubsequence('garbage', 'arg') ;
```
Result:
``` text
┌─hasSubsequence('garbage', 'arg')─┐
│ 1 │
└──────────────────────────────────┘
```
## hasSubsequenceCaseInsensitive
Same as [hasSubsequence](#hasSubsequence), but case-insensitive.
## hasSubsequenceUTF8
Same as [hasSubsequence](#hasSubsequence), assuming that `haystack` and `needle` contain a set of code points representing UTF-8 encoded text.
## hasSubsequenceCaseInsensitiveUTF8
Same as [hasSubsequenceUTF8](#hasSubsequenceUTF8), but case-insensitive.


@ -762,44 +762,6 @@ SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut;
└────────────┴───────┘
```
## toDecimalString
Takes any numeric type as the first argument and returns a decimal string representation of the number with the precision given by the second argument.
**Syntax**
``` sql
toDecimalString(number, scale)
```
**Parameters**
- `number` — A value of any numeric type: [Int, UInt](/docs/ru/sql-reference/data-types/int-uint.md), [Float](/docs/ru/sql-reference/data-types/float.md), [Decimal](/docs/ru/sql-reference/data-types/decimal.md),
- `scale` — The required number of decimal places after the point, [UInt8](/docs/ru/sql-reference/data-types/int-uint.md).
* The `scale` value for [Decimal](/docs/ru/sql-reference/data-types/decimal.md) and [Int, UInt](/docs/ru/sql-reference/data-types/int-uint.md) types must not exceed 77 (the maximum number of significant digits for these types),
* The `scale` value for [Float](/docs/ru/sql-reference/data-types/float.md) must not exceed 60.
**Returned value**
- A string ([String](/docs/en/sql-reference/data-types/string.md)) containing the decimal representation of the input number with the given number of fractional digits.
If necessary, the number is rounded according to standard arithmetic rules.
**Usage example**
Query:
``` sql
SELECT toDecimalString(CAST('64.32', 'Float64'), 5);
```
Result:
```response
┌─toDecimalString(CAST('64.32', 'Float64'), 5)┐
│ 64.32000 │
└─────────────────────────────────────────────┘
```
## reinterpretAsUInt(8\|16\|32\|64) {#reinterpretasuint8163264}
## reinterpretAsInt(8\|16\|32\|64) {#reinterpretasint8163264}


@ -29,6 +29,7 @@ EnvironmentFile=-/etc/default/clickhouse
LimitCORE=infinity
LimitNOFILE=500000
CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE
AmbientCapabilities=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE
[Install]
# ClickHouse should not start from the rescue shell (rescue.target).


@ -20,10 +20,7 @@
#include <Common/formatReadable.h>
#include <Common/Config/ConfigProcessor.h>
#include <Common/OpenSSLHelpers.h>
#include <base/hex.h>
#include <Common/getResource.h>
#include <base/sleep.h>
#include <IO/ReadBufferFromFileDescriptor.h>
#include <IO/WriteBufferFromFileDescriptor.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/WriteBufferFromFile.h>
@ -35,6 +32,14 @@
#include <Poco/Util/XMLConfiguration.h>
#include <incbin.h>
#include "config.h"
/// Embedded configuration files used inside the install program
INCBIN(resource_config_xml, SOURCE_DIR "/programs/server/config.xml");
INCBIN(resource_users_xml, SOURCE_DIR "/programs/server/users.xml");
/** This tool can be used to install ClickHouse without a deb/rpm/tgz package, having only "clickhouse" binary.
* It also allows to avoid dependency on systemd, upstart, SysV init.
@ -560,7 +565,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
if (!fs::exists(main_config_file))
{
std::string_view main_config_content = getResource("config.xml");
std::string_view main_config_content(reinterpret_cast<const char *>(gresource_config_xmlData), gresource_config_xmlSize);
if (main_config_content.empty())
{
fmt::print("There is no default config.xml, you have to download it and place to {}.\n", main_config_file.string());
@ -672,7 +677,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
if (!fs::exists(users_config_file))
{
std::string_view users_config_content = getResource("users.xml");
std::string_view users_config_content(reinterpret_cast<const char *>(gresource_users_xmlData), gresource_users_xmlSize);
if (users_config_content.empty())
{
fmt::print("There is no default users.xml, you have to download it and place to {}.\n", users_config_file.string());


@ -1,16 +1,3 @@
include(${ClickHouse_SOURCE_DIR}/cmake/embed_binary.cmake)
if (OS_LINUX)
set (LINK_RESOURCE_LIB INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:clickhouse_keeper_configs> -Wl,${NO_WHOLE_ARCHIVE}")
# for some reason INTERFACE linkage doesn't work for standalone binary
set (LINK_RESOURCE_LIB_STANDALONE_KEEPER "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:clickhouse_keeper_configs> -Wl,${NO_WHOLE_ARCHIVE}")
endif ()
clickhouse_embed_binaries(
TARGET clickhouse_keeper_configs
RESOURCES keeper_config.xml keeper_embedded.xml
)
set(CLICKHOUSE_KEEPER_SOURCES
Keeper.cpp
)
@ -29,11 +16,12 @@ set (CLICKHOUSE_KEEPER_LINK
clickhouse_program_add(keeper)
install(FILES keeper_config.xml DESTINATION "${CLICKHOUSE_ETC_DIR}/clickhouse-keeper" COMPONENT clickhouse-keeper)
add_dependencies(clickhouse-keeper-lib clickhouse_keeper_configs)
if (BUILD_STANDALONE_KEEPER)
# Straight list of all required sources
set(CLICKHOUSE_KEEPER_STANDALONE_SOURCES
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperReconfiguration.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/RaftServerConfig.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/ACLMap.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/Changelog.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/CoordinationSettings.cpp
@ -213,7 +201,6 @@ if (BUILD_STANDALONE_KEEPER)
${LINK_RESOURCE_LIB_STANDALONE_KEEPER}
)
add_dependencies(clickhouse-keeper clickhouse_keeper_configs)
set_target_properties(clickhouse-keeper PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../)
if (SPLIT_DEBUG_SYMBOLS)


@ -457,8 +457,10 @@ try
const std::string key_path = config().getString("openSSL.server.privateKeyFile", "");
std::vector<std::string> extra_paths = {include_from_path};
if (!cert_path.empty()) extra_paths.emplace_back(cert_path);
if (!key_path.empty()) extra_paths.emplace_back(key_path);
if (!cert_path.empty())
extra_paths.emplace_back(cert_path);
if (!key_path.empty())
extra_paths.emplace_back(key_path);
/// ConfigReloader have to strict parameters which are redundant in our case
auto main_config_reloader = std::make_unique<ConfigReloader>(


@ -1,12 +1,8 @@
include(${ClickHouse_SOURCE_DIR}/cmake/embed_binary.cmake)
set(CLICKHOUSE_SERVER_SOURCES
MetricsTransmitter.cpp
Server.cpp
)
set (LINK_RESOURCE_LIB INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:clickhouse_server_configs> -Wl,${NO_WHOLE_ARCHIVE}")
set (CLICKHOUSE_SERVER_LINK
PRIVATE
clickhouse_aggregate_functions
@ -34,9 +30,3 @@ endif()
clickhouse_program_add(server)
install(FILES config.xml users.xml DESTINATION "${CLICKHOUSE_ETC_DIR}/clickhouse-server" COMPONENT clickhouse)
clickhouse_embed_binaries(
TARGET clickhouse_server_configs
RESOURCES config.xml users.xml embedded.xml play.html dashboard.html js/uplot.js
)
add_dependencies(clickhouse-server-lib clickhouse_server_configs)


@ -128,6 +128,10 @@
# include <azure/storage/common/internal/xml_wrapper.hpp>
#endif
#include <incbin.h>
/// A minimal file used when the server is run without installation
INCBIN(resource_embedded_xml, SOURCE_DIR "/programs/server/embedded.xml");
namespace CurrentMetrics
{
extern const Metric Revision;
@ -393,6 +397,7 @@ int Server::run()
void Server::initialize(Poco::Util::Application & self)
{
ConfigProcessor::registerEmbeddedConfig("config.xml", std::string_view(reinterpret_cast<const char *>(gresource_embedded_xmlData), gresource_embedded_xmlSize));
BaseDaemon::initialize(self);
logger().information("starting up");
@ -739,11 +744,12 @@ try
[&]() -> std::vector<ProtocolServerMetrics>
{
std::vector<ProtocolServerMetrics> metrics;
metrics.reserve(servers_to_start_before_tables.size());
std::lock_guard lock(servers_lock);
metrics.reserve(servers_to_start_before_tables.size() + servers.size());
for (const auto & server : servers_to_start_before_tables)
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads()});
std::lock_guard lock(servers_lock);
for (const auto & server : servers)
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads()});
return metrics;
@ -1105,8 +1111,10 @@ try
const std::string key_path = config().getString("openSSL.server.privateKeyFile", "");
std::vector<std::string> extra_paths = {include_from_path};
if (!cert_path.empty()) extra_paths.emplace_back(cert_path);
if (!key_path.empty()) extra_paths.emplace_back(key_path);
if (!cert_path.empty())
extra_paths.emplace_back(cert_path);
if (!key_path.empty())
extra_paths.emplace_back(key_path);
auto main_config_reloader = std::make_unique<ConfigReloader>(
config_path,
@ -1304,7 +1312,7 @@ try
global_context->reloadAuxiliaryZooKeepersConfigIfChanged(config);
std::lock_guard lock(servers_lock);
updateServers(*config, server_pool, async_metrics, servers);
updateServers(*config, server_pool, async_metrics, servers, servers_to_start_before_tables);
}
global_context->updateStorageConfiguration(*config);
@ -1406,10 +1414,27 @@ try
}
for (auto & server : servers_to_start_before_tables)
{
server.start();
LOG_INFO(log, "Listening for {}", server.getDescription());
std::lock_guard lock(servers_lock);
/// We should start interserver communications before (and, more importantly, shut down after) tables.
/// Because server can wait for a long-running queries (for example in tcp_handler) after interserver handler was already shut down.
/// In this case we will have replicated tables which are unable to send any parts to other replicas, but still can
/// communicate with zookeeper, execute merges, etc.
createInterserverServers(
config(),
interserver_listen_hosts,
listen_try,
server_pool,
async_metrics,
servers_to_start_before_tables,
/* start_servers= */ false);
for (auto & server : servers_to_start_before_tables)
{
server.start();
LOG_INFO(log, "Listening for {}", server.getDescription());
}
}
/// Initialize access storages.
@ -1529,10 +1554,13 @@ try
{
LOG_DEBUG(log, "Waiting for current connections to servers for tables to finish.");
size_t current_connections = 0;
for (auto & server : servers_to_start_before_tables)
{
server.stop();
current_connections += server.currentConnections();
std::lock_guard lock(servers_lock);
for (auto & server : servers_to_start_before_tables)
{
server.stop();
current_connections += server.currentConnections();
}
}
if (current_connections)
@ -1601,13 +1629,7 @@ try
global_context->setSystemZooKeeperLogAfterInitializationIfNeeded();
/// Build loggers before tables startup to make log messages from tables
/// attach available in system.text_log
{
String level_str = config().getString("text_log.level", "");
int level = level_str.empty() ? INT_MAX : Poco::Logger::parseLevel(level_str);
setTextLog(global_context->getTextLog(), level);
buildLoggers(config(), logger());
}
buildLoggers(config(), logger());
/// After the system database is created, attach virtual system tables (in addition to query_log and part_log)
attachSystemTablesServer(global_context, *database_catalog.getSystemDatabase(), has_zookeeper);
attachInformationSchema(global_context, *database_catalog.getDatabase(DatabaseCatalog::INFORMATION_SCHEMA));
@ -1711,7 +1733,7 @@ try
{
std::lock_guard lock(servers_lock);
createServers(config(), listen_hosts, interserver_listen_hosts, listen_try, server_pool, async_metrics, servers);
createServers(config(), listen_hosts, listen_try, server_pool, async_metrics, servers);
if (servers.empty())
throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG,
"No servers started (add valid listen_host and 'tcp_port' or 'http_port' "
@ -1969,7 +1991,6 @@ HTTPContextPtr Server::httpContext() const
void Server::createServers(
Poco::Util::AbstractConfiguration & config,
const Strings & listen_hosts,
const Strings & interserver_listen_hosts,
bool listen_try,
Poco::ThreadPool & server_pool,
AsynchronousMetrics & async_metrics,
@ -2191,6 +2212,23 @@ void Server::createServers(
httpContext(), createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params));
});
}
}
void Server::createInterserverServers(
Poco::Util::AbstractConfiguration & config,
const Strings & interserver_listen_hosts,
bool listen_try,
Poco::ThreadPool & server_pool,
AsynchronousMetrics & async_metrics,
std::vector<ProtocolServerAdapter> & servers,
bool start_servers)
{
const Settings & settings = global_context->getSettingsRef();
Poco::Timespan keep_alive_timeout(config.getUInt("keep_alive_timeout", 10), 0);
Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
http_params->setTimeout(settings.http_receive_timeout);
http_params->setKeepAliveTimeout(keep_alive_timeout);
/// Now iterate over interserver_listen_hosts
for (const auto & interserver_listen_host : interserver_listen_hosts)
@ -2239,14 +2277,14 @@ void Server::createServers(
#endif
});
}
}
void Server::updateServers(
Poco::Util::AbstractConfiguration & config,
Poco::ThreadPool & server_pool,
AsynchronousMetrics & async_metrics,
std::vector<ProtocolServerAdapter> & servers)
std::vector<ProtocolServerAdapter> & servers,
std::vector<ProtocolServerAdapter> & servers_to_start_before_tables)
{
Poco::Logger * log = &logger();
@ -2272,11 +2310,19 @@ void Server::updateServers(
Poco::Util::AbstractConfiguration & previous_config = latest_config ? *latest_config : this->config();
std::vector<ProtocolServerAdapter *> all_servers;
all_servers.reserve(servers.size() + servers_to_start_before_tables.size());
for (auto & server : servers)
all_servers.push_back(&server);
for (auto & server : servers_to_start_before_tables)
all_servers.push_back(&server);
for (auto * server : all_servers)
{
if (!server.isStopping())
if (!server->isStopping())
{
std::string port_name = server.getPortName();
std::string port_name = server->getPortName();
bool has_host = false;
bool is_http = false;
if (port_name.starts_with("protocols."))
@ -2314,27 +2360,29 @@ void Server::updateServers(
/// NOTE: better to compare using getPortName() over using
/// dynamic_cast<> since HTTPServer is also used for prometheus and
/// internal replication communications.
is_http = server.getPortName() == "http_port" || server.getPortName() == "https_port";
is_http = server->getPortName() == "http_port" || server->getPortName() == "https_port";
}
if (!has_host)
has_host = std::find(listen_hosts.begin(), listen_hosts.end(), server.getListenHost()) != listen_hosts.end();
has_host = std::find(listen_hosts.begin(), listen_hosts.end(), server->getListenHost()) != listen_hosts.end();
bool has_port = !config.getString(port_name, "").empty();
bool force_restart = is_http && !isSameConfiguration(previous_config, config, "http_handlers");
if (force_restart)
LOG_TRACE(log, "<http_handlers> had been changed, will reload {}", server.getDescription());
LOG_TRACE(log, "<http_handlers> had been changed, will reload {}", server->getDescription());
if (!has_host || !has_port || config.getInt(server.getPortName()) != server.portNumber() || force_restart)
if (!has_host || !has_port || config.getInt(server->getPortName()) != server->portNumber() || force_restart)
{
server.stop();
LOG_INFO(log, "Stopped listening for {}", server.getDescription());
server->stop();
LOG_INFO(log, "Stopped listening for {}", server->getDescription());
}
}
}
createServers(config, listen_hosts, interserver_listen_hosts, listen_try, server_pool, async_metrics, servers, /* start_servers= */ true);
createServers(config, listen_hosts, listen_try, server_pool, async_metrics, servers, /* start_servers= */ true);
createInterserverServers(config, interserver_listen_hosts, listen_try, server_pool, async_metrics, servers_to_start_before_tables, /* start_servers= */ true);
std::erase_if(servers, std::bind_front(check_server, ""));
std::erase_if(servers_to_start_before_tables, std::bind_front(check_server, ""));
}
}

View File

@ -102,6 +102,14 @@ private:
void createServers(
Poco::Util::AbstractConfiguration & config,
const Strings & listen_hosts,
bool listen_try,
Poco::ThreadPool & server_pool,
AsynchronousMetrics & async_metrics,
std::vector<ProtocolServerAdapter> & servers,
bool start_servers = false);
void createInterserverServers(
Poco::Util::AbstractConfiguration & config,
const Strings & interserver_listen_hosts,
bool listen_try,
Poco::ThreadPool & server_pool,
@ -113,7 +121,8 @@ private:
Poco::Util::AbstractConfiguration & config,
Poco::ThreadPool & server_pool,
AsynchronousMetrics & async_metrics,
std::vector<ProtocolServerAdapter> & servers);
std::vector<ProtocolServerAdapter> & servers,
std::vector<ProtocolServerAdapter> & servers_to_start_before_tables);
};
}

View File

4
rust/.dockerignore Normal file
View File

@ -0,0 +1,4 @@
# Just in case, ignore any cargo artifacts (and in case someone runs this
# docker build locally with the build context set to the folder root):
target
vendor

4
rust/.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
# This is for tar --exclude-vcs-ignores (and in case someone runs a
# docker build locally with a build context created via tar):
target
vendor

92
rust/BLAKE3/Cargo.lock generated
View File

@ -1,92 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "_ch_rust_blake3"
version = "0.1.0"
dependencies = [
"blake3",
"libc",
]
[[package]]
name = "arrayref"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
[[package]]
name = "arrayvec"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
[[package]]
name = "blake3"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "526c210b4520e416420759af363083471656e819a75e831b8d2c9d5a584f2413"
dependencies = [
"arrayref",
"arrayvec",
"cc",
"cfg-if",
"constant_time_eq",
"digest",
]
[[package]]
name = "cc"
version = "1.0.73"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "constant_time_eq"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc"
[[package]]
name = "digest"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
dependencies = [
"generic-array",
]
[[package]]
name = "generic-array"
version = "0.14.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
dependencies = [
"typenum",
"version_check",
]
[[package]]
name = "libc"
version = "0.2.132"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5"
[[package]]
name = "typenum"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"

View File

@ -55,6 +55,8 @@ function(clickhouse_import_crate)
endif()
endif()
# Note that --offline is not used here: the vendor archive is used on CI, and
# passing --offline would be inconvenient for local development.
corrosion_import_crate(NO_STD ${ARGN} PROFILE ${profile})
endfunction()

View File

@ -2,6 +2,22 @@
# It is not intended for manual editing.
version = 3
[[package]]
name = "_ch_rust_blake3"
version = "0.1.0"
dependencies = [
"blake3",
"libc",
]
[[package]]
name = "_ch_rust_prql"
version = "0.1.0"
dependencies = [
"prql-compiler",
"serde_json",
]
[[package]]
name = "_ch_rust_skim_rust"
version = "0.1.0"
@ -12,6 +28,32 @@ dependencies = [
"term",
]
[[package]]
name = "addr2line"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3"
dependencies = [
"gimli",
]
[[package]]
name = "adler"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "ahash"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
dependencies = [
"getrandom",
"once_cell",
"version_check",
]
[[package]]
name = "aho-corasick"
version = "1.0.2"
@ -36,6 +78,31 @@ dependencies = [
"libc",
]
[[package]]
name = "anyhow"
version = "1.0.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854"
dependencies = [
"backtrace",
]
[[package]]
name = "ariadne"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "367fd0ad87307588d087544707bc5fbf4805ded96c7db922b70d368fa1cb5702"
dependencies = [
"unicode-width",
"yansi",
]
[[package]]
name = "arrayref"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545"
[[package]]
name = "arrayvec"
version = "0.7.4"
@ -48,6 +115,21 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "backtrace"
version = "0.3.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12"
dependencies = [
"addr2line",
"cc",
"cfg-if",
"libc",
"miniz_oxide",
"object",
"rustc-demangle",
]
[[package]]
name = "beef"
version = "0.5.2"
@ -60,6 +142,29 @@ version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "blake3"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "199c42ab6972d92c9f8995f086273d25c42fc0f7b2a1fcefba465c1352d25ba5"
dependencies = [
"arrayref",
"arrayvec",
"cc",
"cfg-if",
"constant_time_eq",
"digest",
]
[[package]]
name = "block-buffer"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
dependencies = [
"generic-array",
]
[[package]]
name = "bumpalo"
version = "3.13.0"
@ -93,6 +198,16 @@ dependencies = [
"winapi",
]
[[package]]
name = "chumsky"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23170228b96236b5a7299057ac284a321457700bc8c41a4476052f0f4ba5349d"
dependencies = [
"hashbrown 0.12.3",
"stacker",
]
[[package]]
name = "codespan-reporting"
version = "0.11.1"
@ -103,6 +218,12 @@ dependencies = [
"unicode-width",
]
[[package]]
name = "constant_time_eq"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2"
[[package]]
name = "core-foundation-sys"
version = "0.8.4"
@ -177,10 +298,41 @@ dependencies = [
]
[[package]]
name = "cxx"
version = "1.0.101"
name = "crypto-common"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5032837c1384de3708043de9d4e97bb91290faca6c16529a28aa340592a78166"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
"generic-array",
"typenum",
]
[[package]]
name = "csv"
version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086"
dependencies = [
"csv-core",
"itoa",
"ryu",
"serde",
]
[[package]]
name = "csv-core"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
dependencies = [
"memchr",
]
[[package]]
name = "cxx"
version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f68e12e817cb19eaab81aaec582b4052d07debd3c3c6b083b9d361db47c7dc9d"
dependencies = [
"cc",
"cxxbridge-flags",
@ -190,9 +342,9 @@ dependencies = [
[[package]]
name = "cxx-build"
version = "1.0.101"
version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51368b3d0dbf356e10fcbfd455a038503a105ee556f7ee79b6bb8c53a7247456"
checksum = "e789217e4ab7cf8cc9ce82253180a9fe331f35f5d339f0ccfe0270b39433f397"
dependencies = [
"cc",
"codespan-reporting",
@ -200,24 +352,24 @@ dependencies = [
"proc-macro2",
"quote",
"scratch",
"syn 2.0.26",
"syn 2.0.27",
]
[[package]]
name = "cxxbridge-flags"
version = "1.0.101"
version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d9062157072e4aafc8e56ceaf8325ce850c5ae37578c852a0d4de2cecdded13"
checksum = "78a19f4c80fd9ab6c882286fa865e92e07688f4387370a209508014ead8751d0"
[[package]]
name = "cxxbridge-macro"
version = "1.0.101"
version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf01e8a540f5a4e0f284595834f81cf88572f244b768f051724537afa99a2545"
checksum = "b8fcfa71f66c8563c4fa9dd2bb68368d50267856f831ac5d85367e0805f9606c"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.26",
"syn 2.0.27",
]
[[package]]
@ -296,6 +448,17 @@ dependencies = [
"syn 1.0.109",
]
[[package]]
name = "digest"
version = "0.10.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer",
"crypto-common",
"subtle",
]
[[package]]
name = "dirs-next"
version = "2.0.0"
@ -319,9 +482,27 @@ dependencies = [
[[package]]
name = "either"
version = "1.8.1"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91"
checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07"
[[package]]
name = "enum-as-inner"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "equivalent"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "fnv"
@ -338,6 +519,16 @@ dependencies = [
"thread_local",
]
[[package]]
name = "generic-array"
version = "0.14.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
dependencies = [
"typenum",
"version_check",
]
[[package]]
name = "getrandom"
version = "0.2.10"
@ -349,6 +540,33 @@ dependencies = [
"wasi 0.11.0+wasi-snapshot-preview1",
]
[[package]]
name = "gimli"
version = "0.27.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e"
[[package]]
name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
dependencies = [
"ahash",
]
[[package]]
name = "hashbrown"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"
[[package]]
name = "heck"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "hermit-abi"
version = "0.3.2"
@ -384,6 +602,31 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "indexmap"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d"
dependencies = [
"equivalent",
"hashbrown 0.14.0",
]
[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
[[package]]
name = "js-sys"
version = "0.3.64"
@ -444,6 +687,21 @@ dependencies = [
"autocfg",
]
[[package]]
name = "minimal-lexical"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
[[package]]
name = "miniz_oxide"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
dependencies = [
"adler",
]
[[package]]
name = "nix"
version = "0.24.3"
@ -470,10 +728,20 @@ dependencies = [
]
[[package]]
name = "num-traits"
version = "0.2.15"
name = "nom"
version = "7.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
dependencies = [
"memchr",
"minimal-lexical",
]
[[package]]
name = "num-traits"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2"
dependencies = [
"autocfg",
]
@ -488,6 +756,15 @@ dependencies = [
"libc",
]
[[package]]
name = "object"
version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1"
dependencies = [
"memchr",
]
[[package]]
name = "once_cell"
version = "1.18.0"
@ -509,6 +786,41 @@ dependencies = [
"unicode-ident",
]
[[package]]
name = "prql-compiler"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c99b52154002ac7f286dd2293c2f8d4e30526c1d396b14deef5ada1deef3c9ff"
dependencies = [
"anyhow",
"ariadne",
"chumsky",
"csv",
"enum-as-inner",
"itertools",
"lazy_static",
"log",
"once_cell",
"regex",
"semver",
"serde",
"serde_json",
"serde_yaml",
"sqlformat",
"sqlparser",
"strum",
"strum_macros",
]
[[package]]
name = "psm"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5787f7cda34e3033a72192c018bc5883100330f362ef279a8cbccfce8bb4e874"
dependencies = [
"cc",
]
[[package]]
name = "quote"
version = "1.0.31"
@ -589,12 +901,24 @@ version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2"
[[package]]
name = "rustc-demangle"
version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
[[package]]
name = "rustversion"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
[[package]]
name = "ryu"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
[[package]]
name = "scopeguard"
version = "1.2.0"
@ -608,10 +932,57 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3cf7c11c38cb994f3d40e8a8cde3bbd1f72a435e4c49e85d6553d8312306152"
[[package]]
name = "serde"
version = "1.0.171"
name = "semver"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9"
checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918"
dependencies = [
"serde",
]
[[package]]
name = "serde"
version = "1.0.174"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b88756493a5bd5e5395d53baa70b194b05764ab85b59e43e4b8f4e1192fa9b1"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.174"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e5c3a298c7f978e53536f95a63bdc4c4a64550582f31a0359a9afda6aede62e"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.27",
]
[[package]]
name = "serde_json"
version = "1.0.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d03b412469450d4404fe8499a268edd7f8b79fecb074b0d812ad64ca21f4031b"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "serde_yaml"
version = "0.9.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574"
dependencies = [
"indexmap",
"itoa",
"ryu",
"serde",
"unsafe-libyaml",
]
[[package]]
name = "skim"
@ -638,12 +1009,74 @@ dependencies = [
"vte",
]
[[package]]
name = "sqlformat"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c12bc9199d1db8234678b7051747c07f517cdcf019262d1847b94ec8b1aee3e"
dependencies = [
"itertools",
"nom",
"unicode_categories",
]
[[package]]
name = "sqlparser"
version = "0.33.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "355dc4d4b6207ca8a3434fc587db0a8016130a574dbcdbfb93d7f7b5bc5b211a"
dependencies = [
"log",
"serde",
]
[[package]]
name = "stacker"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c886bd4480155fd3ef527d45e9ac8dd7118a898a46530b7b94c3e21866259fce"
dependencies = [
"cc",
"cfg-if",
"libc",
"psm",
"winapi",
]
[[package]]
name = "strsim"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "strum"
version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f"
dependencies = [
"strum_macros",
]
[[package]]
name = "strum_macros"
version = "0.24.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59"
dependencies = [
"heck",
"proc-macro2",
"quote",
"rustversion",
"syn 1.0.109",
]
[[package]]
name = "subtle"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
[[package]]
name = "syn"
version = "1.0.109"
@ -657,9 +1090,9 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.26"
version = "2.0.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45c3457aacde3c65315de5031ec191ce46604304d2446e803d71ade03308d970"
checksum = "b60f673f44a8255b9c8c657daf66a596d435f2da81a555b06dc644d080ba45e0"
dependencies = [
"proc-macro2",
"quote",
@ -688,22 +1121,22 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.43"
version = "1.0.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a35fc5b8971143ca348fa6df4f024d4d55264f3468c71ad1c2f365b0a4d58c42"
checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.43"
version = "1.0.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f"
checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.26",
"syn 2.0.27",
]
[[package]]
@ -766,6 +1199,12 @@ dependencies = [
"unicode-width",
]
[[package]]
name = "typenum"
version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "unicode-ident"
version = "1.0.11"
@ -778,12 +1217,30 @@ version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
[[package]]
name = "unicode_categories"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e"
[[package]]
name = "unsafe-libyaml"
version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f28467d3e1d3c6586d8f25fa243f544f5800fec42d97032474e17222c2b75cfa"
[[package]]
name = "utf8parse"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "vte"
version = "0.11.1"
@ -838,7 +1295,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
"syn 2.0.26",
"syn 2.0.27",
"wasm-bindgen-shared",
]
@ -860,7 +1317,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.26",
"syn 2.0.27",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@ -967,3 +1424,9 @@ name = "windows_x86_64_msvc"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
[[package]]
name = "yansi"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"

12
rust/Cargo.toml Normal file
View File

@ -0,0 +1,12 @@
# workspace is required to vendor crates for all packages.
[workspace]
members = [
"BLAKE3",
"skim",
"prql",
]
resolver = "2"
# FIXME: even though the profiles should be defined in the main cargo config, we
# cannot do this yet, since we compile each package separately, so you should
# ignore the warning from cargo about this.

569
rust/prql/Cargo.lock generated
View File

@ -1,569 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "_ch_rust_prql"
version = "0.1.0"
dependencies = [
"prql-compiler",
"serde_json",
]
[[package]]
name = "addr2line"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3"
dependencies = [
"gimli",
]
[[package]]
name = "adler"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "ahash"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
dependencies = [
"getrandom",
"once_cell",
"version_check",
]
[[package]]
name = "aho-corasick"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
dependencies = [
"memchr",
]
[[package]]
name = "anyhow"
version = "1.0.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
dependencies = [
"backtrace",
]
[[package]]
name = "ariadne"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "367fd0ad87307588d087544707bc5fbf4805ded96c7db922b70d368fa1cb5702"
dependencies = [
"unicode-width",
"yansi",
]
[[package]]
name = "backtrace"
version = "0.3.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12"
dependencies = [
"addr2line",
"cc",
"cfg-if",
"libc",
"miniz_oxide",
"object",
"rustc-demangle",
]
[[package]]
name = "cc"
version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chumsky"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23170228b96236b5a7299057ac284a321457700bc8c41a4476052f0f4ba5349d"
dependencies = [
"hashbrown 0.12.3",
"stacker",
]
[[package]]
name = "csv"
version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086"
dependencies = [
"csv-core",
"itoa",
"ryu",
"serde",
]
[[package]]
name = "csv-core"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
dependencies = [
"memchr",
]
[[package]]
name = "either"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91"
[[package]]
name = "enum-as-inner"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "equivalent"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1"
[[package]]
name = "getrandom"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "gimli"
version = "0.27.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e"
[[package]]
name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
dependencies = [
"ahash",
]
[[package]]
name = "hashbrown"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"
[[package]]
name = "heck"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "indexmap"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d"
dependencies = [
"equivalent",
"hashbrown 0.14.0",
]
[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b02a5381cc465bd3041d84623d0fa3b66738b52b8e2fc3bab8ad63ab032f4a"
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
[[package]]
name = "log"
version = "0.4.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
[[package]]
name = "memchr"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
[[package]]
name = "minimal-lexical"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
[[package]]
name = "miniz_oxide"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
dependencies = [
"adler",
]
[[package]]
name = "nom"
version = "7.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
dependencies = [
"memchr",
"minimal-lexical",
]
[[package]]
name = "object"
version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1"
dependencies = [
"memchr",
]
[[package]]
name = "once_cell"
version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
[[package]]
name = "proc-macro2"
version = "1.0.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb"
dependencies = [
"unicode-ident",
]
[[package]]
name = "prql-compiler"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c99b52154002ac7f286dd2293c2f8d4e30526c1d396b14deef5ada1deef3c9ff"
dependencies = [
"anyhow",
"ariadne",
"chumsky",
"csv",
"enum-as-inner",
"itertools",
"lazy_static",
"log",
"once_cell",
"regex",
"semver",
"serde",
"serde_json",
"serde_yaml",
"sqlformat",
"sqlparser",
"strum",
"strum_macros",
]
[[package]]
name = "psm"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5787f7cda34e3033a72192c018bc5883100330f362ef279a8cbccfce8bb4e874"
dependencies = [
"cc",
]
[[package]]
name = "quote"
version = "1.0.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
dependencies = [
"proc-macro2",
]
[[package]]
name = "regex"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89089e897c013b3deb627116ae56a6955a72b8bed395c9526af31c9fe528b484"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa250384981ea14565685dea16a9ccc4d1c541a13f82b9c168572264d1df8c56"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846"
[[package]]
name = "rustc-demangle"
version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
[[package]]
name = "rustversion"
version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc31bd9b61a32c31f9650d18add92aa83a49ba979c143eefd27fe7177b05bd5f"
[[package]]
name = "ryu"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe232bdf6be8c8de797b22184ee71118d63780ea42ac85b61d1baa6d3b782ae9"
[[package]]
name = "semver"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed"
dependencies = [
"serde",
]
[[package]]
name = "serde"
version = "1.0.166"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d01b7404f9d441d3ad40e6a636a7782c377d2abdbe4fa2440e2edcc2f4f10db8"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.166"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5dd83d6dde2b6b2d466e14d9d1acce8816dedee94f735eac6395808b3483c6d6"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.23",
]
[[package]]
name = "serde_json"
version = "1.0.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f1e14e89be7aa4c4b78bdbdc9eb5bf8517829a600ae8eaa39a6e1d960b5185c"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "serde_yaml"
version = "0.9.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "452e67b9c20c37fa79df53201dc03839651086ed9bbe92b3ca585ca9fdaa7d85"
dependencies = [
"indexmap",
"itoa",
"ryu",
"serde",
"unsafe-libyaml",
]
[[package]]
name = "sqlformat"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c12bc9199d1db8234678b7051747c07f517cdcf019262d1847b94ec8b1aee3e"
dependencies = [
"itertools",
"nom",
"unicode_categories",
]
[[package]]
name = "sqlparser"
version = "0.33.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "355dc4d4b6207ca8a3434fc587db0a8016130a574dbcdbfb93d7f7b5bc5b211a"
dependencies = [
"log",
"serde",
]
[[package]]
name = "stacker"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c886bd4480155fd3ef527d45e9ac8dd7118a898a46530b7b94c3e21866259fce"
dependencies = [
"cc",
"cfg-if",
"libc",
"psm",
"winapi",
]
[[package]]
name = "strum"
version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f"
dependencies = [
"strum_macros",
]
[[package]]
name = "strum_macros"
version = "0.24.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59"
dependencies = [
"heck",
"proc-macro2",
"quote",
"rustversion",
"syn 1.0.109",
]
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59fb7d6d8281a51045d62b8eb3a7d1ce347b76f312af50cd3dc0af39c87c1737"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "unicode-ident"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22049a19f4a68748a168c0fc439f9516686aa045927ff767eca0a85101fb6e73"
[[package]]
name = "unicode-width"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
[[package]]
name = "unicode_categories"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e"
[[package]]
name = "unsafe-libyaml"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1865806a559042e51ab5414598446a5871b561d21b6764f2eabb0dd481d880a6"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "yansi"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"

View File

@ -0,0 +1,198 @@
#include "UniqToCountPass.h"
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <Analyzer/ColumnNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/QueryNode.h>
namespace DB
{
namespace
{
bool matchFnUniq(String func_name)
{
auto name = Poco::toLower(func_name);
return name == "uniq" || name == "uniqhll12" || name == "uniqexact" || name == "uniqtheta" || name == "uniqcombined"
|| name == "uniqcombined64";
}
/// Extract the corresponding projection columns for the GROUP BY node list.
/// For example:
/// SELECT a AS aa, any(b) FROM table GROUP BY a; -> aa (ColumnNode)
NamesAndTypes extractProjectionColumnsForGroupBy(const QueryNode * query_node)
{
if (!query_node->hasGroupBy())
return {};
NamesAndTypes result;
for (const auto & group_by_ele : query_node->getGroupByNode()->getChildren())
{
const auto & projection_columns = query_node->getProjectionColumns();
const auto & projection_nodes = query_node->getProjection().getNodes();
assert(projection_columns.size() == projection_nodes.size());
for (size_t i = 0; i < projection_columns.size(); i++)
{
if (projection_nodes[i]->isEqual(*group_by_ele))
result.push_back(projection_columns[i]);
}
}
return result;
}
/// Whether query_columns equals subquery_columns.
/// query_columns: query columns from query
/// subquery_columns: projection columns from subquery
bool nodeListEquals(const QueryTreeNodes & query_columns, const NamesAndTypes & subquery_columns)
{
if (query_columns.size() != subquery_columns.size())
return false;
for (const auto & query_column : query_columns)
{
auto find = std::find_if(
subquery_columns.begin(),
subquery_columns.end(),
[&](const auto & subquery_column) -> bool
{
if (auto * column_node = query_column->as<ColumnNode>())
{
return subquery_column == column_node->getColumn();
}
return false;
});
if (find == subquery_columns.end())
return false;
}
return true;
}
/// Whether subquery_columns contains all columns in query_columns.
/// query_columns: query columns from query
/// subquery_columns: projection columns from subquery
bool nodeListContainsAll(const QueryTreeNodes & query_columns, const NamesAndTypes & subquery_columns)
{
if (query_columns.size() > subquery_columns.size())
return false;
for (const auto & query_column : query_columns)
{
auto find = std::find_if(
subquery_columns.begin(),
subquery_columns.end(),
[&](const auto & subquery_column) -> bool
{
if (auto * column_node = query_column->as<ColumnNode>())
{
return subquery_column == column_node->getColumn();
}
return false;
});
if (find == subquery_columns.end())
return false;
}
return true;
}
}
class UniqToCountVisitor : public InDepthQueryTreeVisitor<UniqToCountVisitor>
{
public:
using Base = InDepthQueryTreeVisitor<UniqToCountVisitor>;
using Base::Base;
void visitImpl(QueryTreeNodePtr & node)
{
auto * query_node = node->as<QueryNode>();
if (!query_node)
return;
/// Check that the query has only a single table expression, which is a subquery
auto * subquery_node = query_node->getJoinTree()->as<QueryNode>();
if (!subquery_node)
return;
/// Check that the query has only a single node in the projection
auto & projection_nodes = query_node->getProjection().getNodes();
if (projection_nodes.size() != 1)
return;
/// Check that projection_node is a function
auto & projection_node = projection_nodes[0];
auto * function_node = projection_node->as<FunctionNode>();
if (!function_node)
return;
/// Check that the query's single projection node is `uniq` or one of its variants
if (!matchFnUniq(function_node->getFunctionName()))
return;
auto & uniq_arguments_nodes = function_node->getArguments().getNodes();
/// Whether query matches 'SELECT uniq(x ...) FROM (SELECT DISTINCT x ...)'
auto match_subquery_with_distinct = [&]() -> bool
{
if (!subquery_node->isDistinct())
return false;
/// uniq expression list == subquery projection columns
if (!nodeListEquals(uniq_arguments_nodes, subquery_node->getProjectionColumns()))
return false;
return true;
};
/// Whether query matches 'SELECT uniq(x ...) FROM (SELECT x ... GROUP BY x ...)'
auto match_subquery_with_group_by = [&]() -> bool
{
if (!subquery_node->hasGroupBy())
return false;
/// uniq argument node list == subquery group by node list
auto group_by_columns = extractProjectionColumnsForGroupBy(subquery_node);
if (!nodeListEquals(uniq_arguments_nodes, group_by_columns))
return false;
/// subquery projection columns must contain all columns in uniq argument node list
if (!nodeListContainsAll(uniq_arguments_nodes, subquery_node->getProjectionColumns()))
return false;
return true;
};
/// Replace uniq in the initial query with count
if (match_subquery_with_distinct() || match_subquery_with_group_by())
{
AggregateFunctionProperties properties;
auto aggregate_function = AggregateFunctionFactory::instance().get("count", {}, {}, properties);
function_node->resolveAsAggregateFunction(std::move(aggregate_function));
function_node->getArguments().getNodes().clear();
/// Update projection columns
query_node->resolveProjectionColumns({{"count()", function_node->getResultType()}});
}
}
};
void UniqToCountPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
if (!context->getSettings().optimize_uniq_to_count)
return;
UniqToCountVisitor visitor;
visitor.visit(query_tree_node);
}
}

View File

@ -0,0 +1,30 @@
#pragma once
#include <Analyzer/IQueryTreePass.h>
namespace DB
{
/** Optimize `uniq` and its variants (except uniqUpTo) into `count` over a subquery.
* Example: 'SELECT uniq(x ...) FROM (SELECT DISTINCT x ...)' to
* Result: 'SELECT count() FROM (SELECT DISTINCT x ...)'
*
* Example: 'SELECT uniq(x ...) FROM (SELECT x ... GROUP BY x ...)' to
* Result: 'SELECT count() FROM (SELECT x ... GROUP BY x ...)'
*
* Note that we can rewrite all uniq variants except uniqUpTo.
*/
class UniqToCountPass final : public IQueryTreePass
{
public:
String getName() override { return "UniqToCount"; }
String getDescription() override
{
return "Rewrite uniq and its variants (except uniqUpTo) to count if the subquery has a DISTINCT or GROUP BY clause.";
}
void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override;
};
}
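
To make the rewrite conditions above concrete, here is a minimal, self-contained sketch of the same decision logic over plain column-name lists (illustrative only: SimpleSubquery, sameColumns, containsAll and canRewriteUniqToCount are hypothetical names, and the real pass works on query-tree nodes rather than strings):

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

struct SimpleSubquery
{
    bool is_distinct = false;
    bool has_group_by = false;
    std::vector<std::string> projection_columns;   /// what the subquery SELECTs
    std::vector<std::string> group_by_columns;     /// projection columns corresponding to GROUP BY keys
};

/// Same set of columns, mirroring nodeListEquals().
bool sameColumns(const std::vector<std::string> & a, const std::vector<std::string> & b)
{
    if (a.size() != b.size())
        return false;
    return std::all_of(a.begin(), a.end(), [&](const std::string & x)
    {
        return std::find(b.begin(), b.end(), x) != b.end();
    });
}

/// All columns of `a` are present in `b`, mirroring nodeListContainsAll().
bool containsAll(const std::vector<std::string> & a, const std::vector<std::string> & b)
{
    return std::all_of(a.begin(), a.end(), [&](const std::string & x)
    {
        return std::find(b.begin(), b.end(), x) != b.end();
    });
}

/// Whether SELECT uniq(<args>) FROM (<subquery>) may be rewritten to SELECT count() FROM (<subquery>).
bool canRewriteUniqToCount(const std::vector<std::string> & uniq_args, const SimpleSubquery & subquery)
{
    bool distinct_case = subquery.is_distinct && sameColumns(uniq_args, subquery.projection_columns);
    bool group_by_case = subquery.has_group_by
        && sameColumns(uniq_args, subquery.group_by_columns)
        && containsAll(uniq_args, subquery.projection_columns);
    return distinct_case || group_by_case;
}

int main()
{
    SimpleSubquery distinct_subquery;
    distinct_subquery.is_distinct = true;
    distinct_subquery.projection_columns = {"x"};
    std::cout << canRewriteUniqToCount({"x"}, distinct_subquery) << '\n';  /// prints 1: the rewrite is allowed
}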

View File

@ -18,6 +18,7 @@
#include <Analyzer/Utils.h>
#include <Analyzer/Passes/QueryAnalysisPass.h>
#include <Analyzer/Passes/CountDistinctPass.h>
#include <Analyzer/Passes/UniqToCountPass.h>
#include <Analyzer/Passes/FunctionToSubcolumnsPass.h>
#include <Analyzer/Passes/RewriteAggregateFunctionWithIfPass.h>
#include <Analyzer/Passes/SumIfToCountIfPass.h>
@ -246,6 +247,7 @@ void addQueryTreePasses(QueryTreePassManager & manager)
manager.addPass(std::make_unique<ConvertLogicalExpressionToCNFPass>());
manager.addPass(std::make_unique<CountDistinctPass>());
manager.addPass(std::make_unique<UniqToCountPass>());
manager.addPass(std::make_unique<RewriteAggregateFunctionWithIfPass>());
manager.addPass(std::make_unique<SumIfToCountIfPass>());
manager.addPass(std::make_unique<RewriteArrayExistsToHasPass>());

View File

@ -210,7 +210,7 @@ if (TARGET ch_contrib::jemalloc)
target_link_libraries (clickhouse_storages_system PRIVATE ch_contrib::jemalloc)
endif()
target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::sparsehash)
target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::sparsehash ch_contrib::incbin)
add_subdirectory(Access/Common)
add_subdirectory(Common/ZooKeeper)
@ -561,7 +561,6 @@ if (ENABLE_NLP)
dbms_target_link_libraries (PUBLIC ch_contrib::stemmer)
dbms_target_link_libraries (PUBLIC ch_contrib::wnb)
dbms_target_link_libraries (PUBLIC ch_contrib::lemmagen)
dbms_target_link_libraries (PUBLIC ch_contrib::nlp_data)
endif()
if (TARGET ch_contrib::ulid)

View File

@ -353,6 +353,8 @@ bool HedgedConnections::resumePacketReceiver(const HedgedConnections::ReplicaLoc
if (replica_state.packet_receiver->isPacketReady())
{
/// Reset the socket timeout after a packet is received
replica_state.packet_receiver->setTimeout(hedged_connections_factory.getConnectionTimeouts().receive_timeout);
last_received_packet = replica_state.packet_receiver->getPacket();
return true;
}

View File

@ -9,5 +9,5 @@ if (ENABLE_EXAMPLES)
endif()
if (ENABLE_MYSQL)
add_subdirectory (mysqlxx)
add_subdirectory(mysqlxx)
endif ()

View File

@ -19,7 +19,6 @@
#include <Common/ZooKeeper/KeeperException.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/Exception.h>
#include <Common/getResource.h>
#include <Common/XMLUtils.h>
#include <Common/logger_useful.h>
#include <base/errnoToString.h>
@ -83,6 +82,13 @@ ConfigProcessor::~ConfigProcessor()
Poco::Logger::destroy("ConfigProcessor");
}
static std::unordered_map<std::string, std::string_view> embedded_configs;
void ConfigProcessor::registerEmbeddedConfig(std::string name, std::string_view content)
{
embedded_configs[name] = content;
}
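/// Illustrative usage (not part of this file; registration is presumably done by the server and
/// keeper binaries at startup, and the resource name below is hypothetical):
///
///     INCBIN(resource_embedded_xml, SOURCE_DIR "/programs/server/embedded.xml");
///     ConfigProcessor::registerEmbeddedConfig(
///         "config.xml",
///         std::string_view(reinterpret_cast<const char *>(gresource_embedded_xmlData), gresource_embedded_xmlSize));
///
/// processConfig() then falls back to the registered content when "config.xml" does not exist on disk.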
/// Vector containing the name of the element and a sorted list of attribute names and values
/// (except "remove" and "replace" attributes).
@ -281,15 +287,15 @@ void ConfigProcessor::doIncludesRecursive(
{
std::string value = node->nodeValue();
bool replace_occured = false;
bool replace_occurred = false;
size_t pos;
while ((pos = value.find(substitution.first)) != std::string::npos)
{
value.replace(pos, substitution.first.length(), substitution.second);
replace_occured = true;
replace_occurred = true;
}
if (replace_occured)
if (replace_occurred)
node->setNodeValue(value);
}
}
@ -528,26 +534,14 @@ XMLDocumentPtr ConfigProcessor::processConfig(
}
else
{
/// These embedded files added during build with some cmake magic.
/// Look at the end of programs/server/CMakeLists.txt.
std::string embedded_name;
if (path == "config.xml")
embedded_name = "embedded.xml";
if (path == "keeper_config.xml")
embedded_name = "keeper_embedded.xml";
/// When we can use config embedded in binary.
if (!embedded_name.empty())
/// When we can use a config embedded in the binary.
if (auto it = embedded_configs.find(path); it != embedded_configs.end())
{
auto resource = getResource(embedded_name);
if (resource.empty())
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Configuration file {} doesn't exist and there is no embedded config", path);
LOG_DEBUG(log, "There is no file '{}', will use embedded config.", path);
config = dom_parser.parseMemory(resource.data(), resource.size());
config = dom_parser.parseMemory(it->second.data(), it->second.size());
}
else
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Configuration file {} doesn't exist", path);
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Configuration file {} doesn't exist and there is no embedded config", path);
}
std::vector<std::string> contributing_files;

View File

@ -65,6 +65,9 @@ public:
zkutil::ZooKeeperNodeCache * zk_node_cache = nullptr,
const zkutil::EventPtr & zk_changed_event = nullptr);
/// These configurations will be used if there is no configuration file.
static void registerEmbeddedConfig(std::string name, std::string_view content);
/// loadConfig* functions apply processConfig and create Poco::Util::XMLConfiguration.
/// The resulting XML document is saved into a file with the name

View File

@ -187,6 +187,7 @@
M(CacheFileSegments, "Number of existing cache file segments") \
M(CacheDetachedFileSegments, "Number of existing detached cache file segments") \
M(FilesystemCacheSize, "Filesystem cache size in bytes") \
M(FilesystemCacheSizeLimit, "Filesystem cache size limit in bytes") \
M(FilesystemCacheElements, "Filesystem cache elements (file segments)") \
M(FilesystemCacheDownloadQueueElements, "Filesystem cache elements in download queue") \
M(AsyncInsertCacheSize, "Number of async insert hash id in cache") \

View File

@ -3,7 +3,6 @@
#include "CurrentThread.h"
#include <Common/logger_useful.h>
#include <Common/ThreadStatus.h>
#include <Common/TaskStatsInfoGetter.h>
#include <Interpreters/ProcessList.h>
#include <Interpreters/Context.h>
#include <base/getThreadId.h>

View File

@ -3,7 +3,6 @@
#include <cctz/civil_time.h>
#include <cctz/time_zone.h>
#include <cctz/zone_info_source.h>
#include <Common/getResource.h>
#include <Poco/Exception.h>
#include <algorithm>
@ -11,6 +10,11 @@
#include <chrono>
#include <cstring>
#include <memory>
#include <iostream>
/// Embedded timezones.
std::string_view getTimeZone(const char * name);
namespace
@ -249,9 +253,10 @@ namespace cctz_extension
const std::string & name,
const std::function<std::unique_ptr<cctz::ZoneInfoSource>(const std::string & name)> & fallback)
{
std::string_view resource = getResource(name);
if (!resource.empty())
return std::make_unique<Source>(resource.data(), resource.size());
std::string_view tz_file = getTimeZone(name.data());
if (!tz_file.empty())
return std::make_unique<Source>(tz_file.data(), tz_file.size());
return fallback(name);
}

View File

@ -0,0 +1,185 @@
#include <Common/FrequencyHolder.h>
#if USE_NLP
#include <incbin.h>
/// Embedded NLP dictionaries
INCBIN(resource_charset_zst, SOURCE_DIR "/contrib/nlp-data/charset.zst");
INCBIN(resource_tonality_ru_zst, SOURCE_DIR "/contrib/nlp-data/tonality_ru.zst");
INCBIN(resource_programming_zst, SOURCE_DIR "/contrib/nlp-data/programming.zst");
namespace DB
{
namespace ErrorCodes
{
extern const int FILE_DOESNT_EXIST;
}
FrequencyHolder & FrequencyHolder::getInstance()
{
static FrequencyHolder instance;
return instance;
}
FrequencyHolder::FrequencyHolder()
{
loadEmotionalDict();
loadEncodingsFrequency();
loadProgrammingFrequency();
}
void FrequencyHolder::loadEncodingsFrequency()
{
Poco::Logger * log = &Poco::Logger::get("EncodingsFrequency");
LOG_TRACE(log, "Loading embedded charset frequencies");
std::string_view resource(reinterpret_cast<const char *>(gresource_charset_zstData), gresource_charset_zstSize);
if (resource.empty())
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "There are no embedded charset frequencies");
String line;
UInt16 bigram;
Float64 frequency;
String charset_name;
auto buf = std::make_unique<ReadBufferFromMemory>(resource.data(), resource.size());
ZstdInflatingReadBuffer in(std::move(buf));
while (!in.eof())
{
readString(line, in);
in.ignore();
if (line.empty())
continue;
ReadBufferFromString buf_line(line);
// Start loading a new charset
if (line.starts_with("// "))
{
// Skip "// "
buf_line.ignore(3);
readString(charset_name, buf_line);
/* In our dictionary we have lines of the form: <Language>_<Charset>
* If we need to find the language of the data, we return <Language>.
* If we need to find the charset of the data, we return <Charset>.
*/
size_t sep = charset_name.find('_');
Encoding enc;
enc.lang = charset_name.substr(0, sep);
enc.name = charset_name.substr(sep + 1);
encodings_freq.push_back(std::move(enc));
}
else
{
readIntText(bigram, buf_line);
buf_line.ignore();
readFloatText(frequency, buf_line);
encodings_freq.back().map[bigram] = frequency;
}
}
LOG_TRACE(log, "Charset frequencies were added, charset count: {}", encodings_freq.size());
}
void FrequencyHolder::loadEmotionalDict()
{
Poco::Logger * log = &Poco::Logger::get("EmotionalDict");
LOG_TRACE(log, "Loading embedded emotional dictionary");
std::string_view resource(reinterpret_cast<const char *>(gresource_tonality_ru_zstData), gresource_tonality_ru_zstSize);
if (resource.empty())
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "There is no embedded emotional dictionary");
String line;
String word;
Float64 tonality;
size_t count = 0;
auto buf = std::make_unique<ReadBufferFromMemory>(resource.data(), resource.size());
ZstdInflatingReadBuffer in(std::move(buf));
while (!in.eof())
{
readString(line, in);
in.ignore();
if (line.empty())
continue;
ReadBufferFromString buf_line(line);
readStringUntilWhitespace(word, buf_line);
buf_line.ignore();
readFloatText(tonality, buf_line);
StringRef ref{string_pool.insert(word.data(), word.size()), word.size()};
emotional_dict[ref] = tonality;
++count;
}
LOG_TRACE(log, "Emotional dictionary was added. Word count: {}", std::to_string(count));
}
void FrequencyHolder::loadProgrammingFrequency()
{
Poco::Logger * log = &Poco::Logger::get("ProgrammingFrequency");
LOG_TRACE(log, "Loading embedded programming language frequencies");
std::string_view resource(reinterpret_cast<const char *>(gresource_programming_zstData), gresource_programming_zstSize);
if (resource.empty())
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "There are no embedded programming language frequencies");
String line;
String bigram;
Float64 frequency;
String programming_language;
auto buf = std::make_unique<ReadBufferFromMemory>(resource.data(), resource.size());
ZstdInflatingReadBuffer in(std::move(buf));
while (!in.eof())
{
readString(line, in);
in.ignore();
if (line.empty())
continue;
ReadBufferFromString buf_line(line);
// Start loading a new language
if (line.starts_with("// "))
{
// Skip "// "
buf_line.ignore(3);
readString(programming_language, buf_line);
Language lang;
lang.name = programming_language;
programming_freq.push_back(std::move(lang));
}
else
{
readStringUntilWhitespace(bigram, buf_line);
buf_line.ignore();
readFloatText(frequency, buf_line);
StringRef ref{string_pool.insert(bigram.data(), bigram.size()), bigram.size()};
programming_freq.back().map[ref] = frequency;
}
}
LOG_TRACE(log, "Programming language frequencies were added");
}
}
#endif
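
For reference, a minimal standalone sketch of the plain-text layout that loadEncodingsFrequency() parses after zstd decompression; the header name and numbers below are invented for illustration:

#include <iostream>
#include <sstream>
#include <string>

int main()
{
    /// Dictionary fragment (already decompressed): a "// <Language>_<Charset>" header line
    /// followed by "<bigram> <frequency>" lines, mirroring what FrequencyHolder expects.
    const std::string sample =
        "// English_UTF-8\n"
        "26990 0.000418\n"
        "29811 0.000316\n";

    std::istringstream in(sample);
    std::string line;
    while (std::getline(in, line))
    {
        if (line.starts_with("// "))
            std::cout << "new charset: " << line.substr(3) << '\n';
        else if (!line.empty())
            std::cout << "bigram/frequency pair: " << line << '\n';
    }
}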

View File

@ -1,5 +1,9 @@
#pragma once
#include "config.h"
#if USE_NLP
#include <base/StringRef.h>
#include <Common/logger_useful.h>
@ -7,7 +11,6 @@
#include <unordered_map>
#include <Common/Arena.h>
#include <Common/getResource.h>
#include <Common/HashTable/HashMap.h>
#include <Common/StringUtils/StringUtils.h>
#include <IO/ReadBufferFromFile.h>
@ -20,11 +23,6 @@
namespace DB
{
namespace ErrorCodes
{
extern const int FILE_DOESNT_EXIST;
}
/// FrequencyHolder class is responsible for storing and loading dictionaries
/// needed for text classification functions:
///
@ -56,11 +54,7 @@ public:
using EncodingMap = HashMap<UInt16, Float64>;
using EncodingContainer = std::vector<Encoding>;
static FrequencyHolder & getInstance()
{
static FrequencyHolder instance;
return instance;
}
static FrequencyHolder & getInstance();
const Map & getEmotionalDict() const
{
@ -78,161 +72,11 @@ public:
}
private:
FrequencyHolder();
FrequencyHolder()
{
loadEmotionalDict();
loadEncodingsFrequency();
loadProgrammingFrequency();
}
void loadEncodingsFrequency()
{
Poco::Logger * log = &Poco::Logger::get("EncodingsFrequency");
LOG_TRACE(log, "Loading embedded charset frequencies");
auto resource = getResource("charset.zst");
if (resource.empty())
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "There is no embedded charset frequencies");
String line;
UInt16 bigram;
Float64 frequency;
String charset_name;
auto buf = std::make_unique<ReadBufferFromMemory>(resource.data(), resource.size());
ZstdInflatingReadBuffer in(std::move(buf));
while (!in.eof())
{
readString(line, in);
in.ignore();
if (line.empty())
continue;
ReadBufferFromString buf_line(line);
// Start loading a new charset
if (line.starts_with("// "))
{
// Skip "// "
buf_line.ignore(3);
readString(charset_name, buf_line);
/* In our dictionary we have lines with form: <Language>_<Charset>
* If we need to find language of data, we return <Language>
* If we need to find charset of data, we return <Charset>.
*/
size_t sep = charset_name.find('_');
Encoding enc;
enc.lang = charset_name.substr(0, sep);
enc.name = charset_name.substr(sep + 1);
encodings_freq.push_back(std::move(enc));
}
else
{
readIntText(bigram, buf_line);
buf_line.ignore();
readFloatText(frequency, buf_line);
encodings_freq.back().map[bigram] = frequency;
}
}
LOG_TRACE(log, "Charset frequencies was added, charsets count: {}", encodings_freq.size());
}
void loadEmotionalDict()
{
Poco::Logger * log = &Poco::Logger::get("EmotionalDict");
LOG_TRACE(log, "Loading embedded emotional dictionary");
auto resource = getResource("tonality_ru.zst");
if (resource.empty())
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "There is no embedded emotional dictionary");
String line;
String word;
Float64 tonality;
size_t count = 0;
auto buf = std::make_unique<ReadBufferFromMemory>(resource.data(), resource.size());
ZstdInflatingReadBuffer in(std::move(buf));
while (!in.eof())
{
readString(line, in);
in.ignore();
if (line.empty())
continue;
ReadBufferFromString buf_line(line);
readStringUntilWhitespace(word, buf_line);
buf_line.ignore();
readFloatText(tonality, buf_line);
StringRef ref{string_pool.insert(word.data(), word.size()), word.size()};
emotional_dict[ref] = tonality;
++count;
}
LOG_TRACE(log, "Emotional dictionary was added. Word count: {}", std::to_string(count));
}
void loadProgrammingFrequency()
{
Poco::Logger * log = &Poco::Logger::get("ProgrammingFrequency");
LOG_TRACE(log, "Loading embedded programming languages frequencies loading");
auto resource = getResource("programming.zst");
if (resource.empty())
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "There is no embedded programming languages frequencies");
String line;
String bigram;
Float64 frequency;
String programming_language;
auto buf = std::make_unique<ReadBufferFromMemory>(resource.data(), resource.size());
ZstdInflatingReadBuffer in(std::move(buf));
while (!in.eof())
{
readString(line, in);
in.ignore();
if (line.empty())
continue;
ReadBufferFromString buf_line(line);
// Start loading a new language
if (line.starts_with("// "))
{
// Skip "// "
buf_line.ignore(3);
readString(programming_language, buf_line);
Language lang;
lang.name = programming_language;
programming_freq.push_back(std::move(lang));
}
else
{
readStringUntilWhitespace(bigram, buf_line);
buf_line.ignore();
readFloatText(frequency, buf_line);
StringRef ref{string_pool.insert(bigram.data(), bigram.size()), bigram.size()};
programming_freq.back().map[ref] = frequency;
}
}
LOG_TRACE(log, "Programming languages frequencies was added");
}
void loadEncodingsFrequency();
void loadEmotionalDict();
void loadProgrammingFrequency();
Arena string_pool;
@ -241,3 +85,5 @@ private:
EncodingContainer encodings_freq;
};
}
#endif
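getInstance() is now only declared here; its definition presumably lives in FrequencyHolder.cpp and is equivalent to the inline body that was removed, i.e. a plain Meyers singleton:
FrequencyHolder & FrequencyHolder::getInstance()
{
    static FrequencyHolder instance;
    return instance;
}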

View File

@ -1,4 +1,4 @@
#include "TaskStatsInfoGetter.h"
#include "NetlinkMetricsProvider.h"
#include <Common/Exception.h>
#include <base/defines.h>
#include <base/types.h>
@ -200,7 +200,7 @@ bool checkPermissionsImpl()
if (!res)
return false;
/// Check that we can successfully initialize TaskStatsInfoGetter.
/// Check that we can successfully initialize NetlinkMetricsProvider.
/// It will ask about family id through Netlink.
/// On some LXC containers we have capability but we still cannot use Netlink.
/// There is an evidence that Linux fedora-riscv 6.1.22 gives something strange instead of the expected result.
@ -208,7 +208,7 @@ bool checkPermissionsImpl()
try
{
::taskstats stats{};
TaskStatsInfoGetter().getStat(stats, static_cast<pid_t>(getThreadId()));
NetlinkMetricsProvider().getStat(stats, static_cast<pid_t>(getThreadId()));
}
catch (const Exception & e)
{
@ -244,14 +244,14 @@ UInt16 getFamilyId(int fd)
}
bool TaskStatsInfoGetter::checkPermissions()
bool NetlinkMetricsProvider::checkPermissions()
{
static bool res = checkPermissionsImpl();
return res;
}
TaskStatsInfoGetter::TaskStatsInfoGetter()
NetlinkMetricsProvider::NetlinkMetricsProvider()
{
netlink_socket_fd = ::socket(PF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
if (netlink_socket_fd < 0)
@ -293,7 +293,7 @@ TaskStatsInfoGetter::TaskStatsInfoGetter()
}
void TaskStatsInfoGetter::getStat(::taskstats & out_stats, pid_t tid) const
void NetlinkMetricsProvider::getStat(::taskstats & out_stats, pid_t tid) const
{
NetlinkMessage answer = query(netlink_socket_fd, taskstats_family_id, tid, TASKSTATS_CMD_GET, TASKSTATS_CMD_ATTR_PID, &tid, sizeof(tid));
@ -318,7 +318,7 @@ void TaskStatsInfoGetter::getStat(::taskstats & out_stats, pid_t tid) const
}
TaskStatsInfoGetter::~TaskStatsInfoGetter()
NetlinkMetricsProvider::~NetlinkMetricsProvider()
{
if (netlink_socket_fd >= 0)
{
@ -335,15 +335,15 @@ TaskStatsInfoGetter::~TaskStatsInfoGetter()
namespace DB
{
bool TaskStatsInfoGetter::checkPermissions()
bool NetlinkMetricsProvider::checkPermissions()
{
return false;
}
TaskStatsInfoGetter::TaskStatsInfoGetter() = default;
TaskStatsInfoGetter::~TaskStatsInfoGetter() = default;
NetlinkMetricsProvider::NetlinkMetricsProvider() = default;
NetlinkMetricsProvider::~NetlinkMetricsProvider() = default;
void TaskStatsInfoGetter::getStat(::taskstats &, pid_t) const
void NetlinkMetricsProvider::getStat(::taskstats &, pid_t) const
{
}

View File

@ -15,11 +15,11 @@ namespace DB
///
/// [1]: https://elixir.bootlin.com/linux/v5.18-rc4/source/kernel/tsacct.c#L101
///
class TaskStatsInfoGetter : private boost::noncopyable
class NetlinkMetricsProvider : private boost::noncopyable
{
public:
TaskStatsInfoGetter();
~TaskStatsInfoGetter();
NetlinkMetricsProvider();
~NetlinkMetricsProvider();
void getStat(::taskstats & out_stats, pid_t tid) const;
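The class is a pure rename of TaskStatsInfoGetter, so call sites change mechanically; a minimal usage sketch mirroring checkPermissionsImpl() above:
::taskstats stats{};
if (NetlinkMetricsProvider::checkPermissions())
{
    /// Queries per-thread delay accounting through the Netlink taskstats interface.
    NetlinkMetricsProvider().getStat(stats, static_cast<pid_t>(getThreadId()));
}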

View File

@ -1,6 +1,8 @@
#include <limits>
#include <Common/Exception.h>
#include <Common/logger_useful.h>
#include <Common/PODArray.h>
#include <Common/checkStackSize.h>
#include <Common/OptimizedRegularExpression.h>
#define MIN_LENGTH_FOR_STRSTR 3
@ -50,6 +52,8 @@ const char * analyzeImpl(
bool & is_trivial,
Literals & global_alternatives)
{
checkStackSize();
/** The expression is trivial if all the metacharacters in it are escaped.
* The non-alternative string is
* a string outside parentheses,
@ -420,6 +424,7 @@ void OptimizedRegularExpressionImpl<thread_safe>::analyze(
bool & is_trivial,
bool & required_substring_is_prefix,
std::vector<std::string> & alternatives)
try
{
Literals alternative_literals;
Literal required_literal;
@ -429,12 +434,20 @@ void OptimizedRegularExpressionImpl<thread_safe>::analyze(
for (auto & lit : alternative_literals)
alternatives.push_back(std::move(lit.literal));
}
catch (...)
{
required_substring = "";
is_trivial = false;
required_substring_is_prefix = false;
alternatives.clear();
LOG_ERROR(&Poco::Logger::get("OptimizeRegularExpression"), "Analyze RegularExpression failed, got error: {}", DB::getCurrentExceptionMessage(false));
}
template <bool thread_safe>
OptimizedRegularExpressionImpl<thread_safe>::OptimizedRegularExpressionImpl(const std::string & regexp_, int options)
{
std::vector<std::string> alternativesDummy; /// this vector extracts patterns a,b,c from pattern (a|b|c). for now it's not used.
analyze(regexp_, required_substring, is_trivial, required_substring_is_prefix, alternativesDummy);
std::vector<std::string> alternatives_dummy; /// this vector extracts patterns a,b,c from pattern (a|b|c). for now it's not used.
analyze(regexp_, required_substring, is_trivial, required_substring_is_prefix, alternatives_dummy);
/// Just three following options are supported

View File

@ -45,6 +45,7 @@
M(MMappedFileCacheMisses, "Number of times a file has not been found in the MMap cache (for the 'mmap' read_method), so we had to mmap it again.") \
M(OpenedFileCacheHits, "Number of times a file has been found in the opened file cache, so we didn't have to open it again.") \
M(OpenedFileCacheMisses, "Number of times a file has not been found in the opened file cache, so we had to open it again.") \
M(OpenedFileCacheMicroseconds, "Amount of time spent executing OpenedFileCache methods.") \
M(AIOWrite, "Number of writes with Linux or FreeBSD AIO interface") \
M(AIOWriteBytes, "Number of bytes written with Linux or FreeBSD AIO interface") \
M(AIORead, "Number of reads with Linux or FreeBSD AIO interface") \
@ -125,6 +126,7 @@
M(ZooKeeperMulti, "Number of 'multi' requests to ZooKeeper (compound transactions).") \
M(ZooKeeperCheck, "Number of 'check' requests to ZooKeeper. Usually they don't make sense in isolation, only as part of a complex transaction.") \
M(ZooKeeperSync, "Number of 'sync' requests to ZooKeeper. These requests are rarely needed or usable.") \
M(ZooKeeperReconfig, "Number of 'reconfig' requests to ZooKeeper.") \
M(ZooKeeperClose, "Number of times connection with ZooKeeper has been closed voluntary.") \
M(ZooKeeperWatchResponse, "Number of times watch notification has been received from ZooKeeper.") \
M(ZooKeeperUserExceptions, "Number of exceptions while working with ZooKeeper related to the data (no node, bad version or similar).") \
@ -503,6 +505,7 @@ The server successfully detected this situation and will download merged part fr
M(KeeperCreateRequest, "Number of create requests")\
M(KeeperRemoveRequest, "Number of remove requests")\
M(KeeperSetRequest, "Number of set requests")\
M(KeeperReconfigRequest, "Number of reconfig requests")\
M(KeeperCheckRequest, "Number of check requests")\
M(KeeperMultiRequest, "Number of multi requests")\
M(KeeperMultiReadRequest, "Number of multi read requests")\

View File

@ -87,50 +87,13 @@ namespace
/// https://stackoverflow.com/questions/32088140/multiple-string-tables-in-elf-object
void updateResources(ElfW(Addr) base_address, std::string_view object_name, std::string_view name, const void * address, SymbolIndex::Resources & resources)
{
const char * char_address = static_cast<const char *>(address);
if (name.starts_with("_binary_") || name.starts_with("binary_"))
{
if (name.ends_with("_start"))
{
name = name.substr((name[0] == '_') + strlen("binary_"));
name = name.substr(0, name.size() - strlen("_start"));
auto & resource = resources[name];
if (!resource.base_address || resource.base_address == base_address)
{
resource.base_address = base_address;
resource.start = std::string_view{char_address, 0}; // NOLINT(bugprone-string-constructor)
resource.object_name = object_name;
}
}
if (name.ends_with("_end"))
{
name = name.substr((name[0] == '_') + strlen("binary_"));
name = name.substr(0, name.size() - strlen("_end"));
auto & resource = resources[name];
if (!resource.base_address || resource.base_address == base_address)
{
resource.base_address = base_address;
resource.end = std::string_view{char_address, 0}; // NOLINT(bugprone-string-constructor)
resource.object_name = object_name;
}
}
}
}
/// Based on the code of musl-libc and the answer of Kanalpiroge on
/// https://stackoverflow.com/questions/15779185/list-all-the-functions-symbols-on-the-fly-in-c-code-on-a-linux-architecture
/// It does not extract all the symbols (but only public - exported and used for dynamic linking),
/// but will work if we cannot find or parse ELF files.
void collectSymbolsFromProgramHeaders(
dl_phdr_info * info,
std::vector<SymbolIndex::Symbol> & symbols,
SymbolIndex::Resources & resources)
std::vector<SymbolIndex::Symbol> & symbols)
{
/* Iterate over all headers of the current shared lib
* (first call is for the executable itself)
@ -248,9 +211,6 @@ void collectSymbolsFromProgramHeaders(
/// We are not interested in empty symbols.
if (elf_sym[sym_index].st_size)
symbols.push_back(symbol);
/// But resources can be represented by a pair of empty symbols (indicating their boundaries).
updateResources(base_address, info->dlpi_name, symbol.name, symbol.address_begin, resources);
}
break;
@ -281,8 +241,7 @@ void collectSymbolsFromELFSymbolTable(
const Elf & elf,
const Elf::Section & symbol_table,
const Elf::Section & string_table,
std::vector<SymbolIndex::Symbol> & symbols,
SymbolIndex::Resources & resources)
std::vector<SymbolIndex::Symbol> & symbols)
{
/// Iterate symbol table.
const ElfSym * symbol_table_entry = reinterpret_cast<const ElfSym *>(symbol_table.begin());
@ -312,8 +271,6 @@ void collectSymbolsFromELFSymbolTable(
if (symbol_table_entry->st_size)
symbols.push_back(symbol);
updateResources(info->dlpi_addr, info->dlpi_name, symbol.name, symbol.address_begin, resources);
}
}
@ -323,8 +280,7 @@ bool searchAndCollectSymbolsFromELFSymbolTable(
const Elf & elf,
unsigned section_header_type,
const char * string_table_name,
std::vector<SymbolIndex::Symbol> & symbols,
SymbolIndex::Resources & resources)
std::vector<SymbolIndex::Symbol> & symbols)
{
std::optional<Elf::Section> symbol_table;
std::optional<Elf::Section> string_table;
@ -342,7 +298,7 @@ bool searchAndCollectSymbolsFromELFSymbolTable(
return false;
}
collectSymbolsFromELFSymbolTable(info, elf, *symbol_table, *string_table, symbols, resources);
collectSymbolsFromELFSymbolTable(info, elf, *symbol_table, *string_table, symbols);
return true;
}
@ -351,7 +307,6 @@ void collectSymbolsFromELF(
dl_phdr_info * info,
std::vector<SymbolIndex::Symbol> & symbols,
std::vector<SymbolIndex::Object> & objects,
SymbolIndex::Resources & resources,
String & build_id)
{
String object_name;
@ -462,11 +417,11 @@ void collectSymbolsFromELF(
object.name = object_name;
objects.push_back(std::move(object));
searchAndCollectSymbolsFromELFSymbolTable(info, *objects.back().elf, SHT_SYMTAB, ".strtab", symbols, resources);
searchAndCollectSymbolsFromELFSymbolTable(info, *objects.back().elf, SHT_SYMTAB, ".strtab", symbols);
/// Unneeded if they were parsed from "program headers" of loaded objects.
#if defined USE_MUSL
searchAndCollectSymbolsFromELFSymbolTable(info, *objects.back().elf, SHT_DYNSYM, ".dynstr", symbols, resources);
searchAndCollectSymbolsFromELFSymbolTable(info, *objects.back().elf, SHT_DYNSYM, ".dynstr", symbols);
#endif
}
@ -479,8 +434,8 @@ int collectSymbols(dl_phdr_info * info, size_t, void * data_ptr)
{
SymbolIndex::Data & data = *reinterpret_cast<SymbolIndex::Data *>(data_ptr);
collectSymbolsFromProgramHeaders(info, data.symbols, data.resources);
collectSymbolsFromELF(info, data.symbols, data.objects, data.resources, data.build_id);
collectSymbolsFromProgramHeaders(info, data.symbols);
collectSymbolsFromELF(info, data.symbols, data.objects, data.build_id);
/* Continue iterations */
return 0;

View File

@ -8,6 +8,7 @@
#include <Common/Elf.h>
#include <boost/noncopyable.hpp>
namespace DB
{
@ -45,44 +46,15 @@ public:
const std::vector<Symbol> & symbols() const { return data.symbols; }
const std::vector<Object> & objects() const { return data.objects; }
std::string_view getResource(String name) const
{
if (auto it = data.resources.find(name); it != data.resources.end())
return it->second.data();
return {};
}
/// The BuildID that is generated by compiler.
String getBuildID() const { return data.build_id; }
String getBuildIDHex() const;
struct ResourcesBlob
{
/// Symbol can be presented in multiple shared objects,
/// base_address will be used to compare only symbols from the same SO.
ElfW(Addr) base_address = 0;
/// Just a human name of the SO.
std::string_view object_name;
/// Data blob.
std::string_view start;
std::string_view end;
std::string_view data() const
{
assert(end.data() >= start.data());
return std::string_view{start.data(), static_cast<size_t>(end.data() - start.data())};
}
};
using Resources = std::unordered_map<std::string_view /* symbol name */, ResourcesBlob>;
struct Data
{
std::vector<Symbol> symbols;
std::vector<Object> objects;
String build_id;
/// Resources (embedded binary data) are located by symbols in form of _binary_name_start and _binary_name_end.
Resources resources;
};
private:
Data data;

View File

@ -38,43 +38,30 @@ namespace
ISystemLog::~ISystemLog() = default;
void ISystemLog::stopFlushThread()
{
{
std::lock_guard lock(mutex);
if (!saving_thread || !saving_thread->joinable())
return;
if (is_shutdown)
return;
is_shutdown = true;
/// Tell thread to shutdown.
flush_event.notify_all();
}
saving_thread->join();
}
void ISystemLog::startup()
{
std::lock_guard lock(mutex);
saving_thread = std::make_unique<ThreadFromGlobalPool>([this] { savingThreadFunction(); });
}
static thread_local bool recursive_add_call = false;
template <typename LogElement>
void SystemLogBase<LogElement>::add(const LogElement & element)
SystemLogQueue<LogElement>::SystemLogQueue(
const String & table_name_,
size_t flush_interval_milliseconds_,
bool turn_off_logger_)
: log(&Poco::Logger::get("SystemLogQueue (" + table_name_ + ")"))
, flush_interval_milliseconds(flush_interval_milliseconds_)
{
if (turn_off_logger_)
log->setLevel(0);
}
static thread_local bool recursive_push_call = false;
template <typename LogElement>
void SystemLogQueue<LogElement>::push(const LogElement & element)
{
/// It is possible that the method will be called recursively.
/// Better to drop these events to avoid complications.
if (recursive_add_call)
if (recursive_push_call)
return;
recursive_add_call = true;
SCOPE_EXIT({ recursive_add_call = false; });
recursive_push_call = true;
SCOPE_EXIT({ recursive_push_call = false; });
/// Memory can be allocated while resizing on queue.push_back.
/// The size of allocation can be in order of a few megabytes.
@ -137,10 +124,16 @@ void SystemLogBase<LogElement>::add(const LogElement & element)
template <typename LogElement>
void SystemLogBase<LogElement>::flush(bool force)
{
uint64_t this_thread_requested_offset = notifyFlushImpl(force);
uint64_t this_thread_requested_offset = queue->notifyFlush(force);
if (this_thread_requested_offset == uint64_t(-1))
return;
queue->waitFlush(this_thread_requested_offset);
}
template <typename LogElement>
void SystemLogQueue<LogElement>::waitFlush(uint64_t expected_flushed_up_to)
{
// Use an arbitrary timeout to avoid endless waiting. 60s proved to be
// too fast for our parallel functional tests, probably because they
// heavily load the disk.
@ -148,7 +141,7 @@ void SystemLogBase<LogElement>::flush(bool force)
std::unique_lock lock(mutex);
bool result = flush_event.wait_for(lock, std::chrono::seconds(timeout_seconds), [&]
{
return flushed_up_to >= this_thread_requested_offset && !is_force_prepare_tables;
return flushed_up_to >= expected_flushed_up_to && !is_force_prepare_tables;
});
if (!result)
@ -159,10 +152,7 @@ void SystemLogBase<LogElement>::flush(bool force)
}
template <typename LogElement>
void SystemLogBase<LogElement>::notifyFlush(bool force) { notifyFlushImpl(force); }
template <typename LogElement>
uint64_t SystemLogBase<LogElement>::notifyFlushImpl(bool force)
uint64_t SystemLogQueue<LogElement>::notifyFlush(bool should_prepare_tables_anyway)
{
uint64_t this_thread_requested_offset;
@ -175,7 +165,7 @@ uint64_t SystemLogBase<LogElement>::notifyFlushImpl(bool force)
// Publish our flush request, taking care not to overwrite the requests
// made by other threads.
is_force_prepare_tables |= force;
is_force_prepare_tables |= should_prepare_tables_anyway;
requested_flush_up_to = std::max(requested_flush_up_to, this_thread_requested_offset);
flush_event.notify_all();
@ -185,7 +175,77 @@ uint64_t SystemLogBase<LogElement>::notifyFlushImpl(bool force)
return this_thread_requested_offset;
}
template <typename LogElement>
void SystemLogQueue<LogElement>::confirm(uint64_t to_flush_end)
{
std::lock_guard lock(mutex);
flushed_up_to = to_flush_end;
is_force_prepare_tables = false;
flush_event.notify_all();
}
template <typename LogElement>
SystemLogQueue<LogElement>::Index SystemLogQueue<LogElement>::pop(std::vector<LogElement>& output, bool& should_prepare_tables_anyway, bool& exit_this_thread)
{
std::unique_lock lock(mutex);
flush_event.wait_for(lock,
std::chrono::milliseconds(flush_interval_milliseconds),
[&] ()
{
return requested_flush_up_to > flushed_up_to || is_shutdown || is_force_prepare_tables;
}
);
queue_front_index += queue.size();
// Swap with existing array from previous flush, to save memory
// allocations.
output.resize(0);
queue.swap(output);
should_prepare_tables_anyway = is_force_prepare_tables;
exit_this_thread = is_shutdown;
return queue_front_index;
}
template <typename LogElement>
void SystemLogQueue<LogElement>::shutdown()
{
std::unique_lock lock(mutex);
is_shutdown = true;
/// Tell thread to shutdown.
flush_event.notify_all();
}
template <typename LogElement>
SystemLogBase<LogElement>::SystemLogBase(
const String& table_name_,
size_t flush_interval_milliseconds_,
std::shared_ptr<SystemLogQueue<LogElement>> queue_)
: queue(queue_ ? queue_ : std::make_shared<SystemLogQueue<LogElement>>(table_name_, flush_interval_milliseconds_))
{
}
template <typename LogElement>
void SystemLogBase<LogElement>::startup()
{
std::lock_guard lock(thread_mutex);
saving_thread = std::make_unique<ThreadFromGlobalPool>([this] { savingThreadFunction(); });
}
template <typename LogElement>
void SystemLogBase<LogElement>::add(const LogElement & element)
{
queue->push(element);
}
template <typename LogElement>
void SystemLogBase<LogElement>::notifyFlush(bool force) { queue->notifyFlush(force); }
#define INSTANTIATE_SYSTEM_LOG_BASE(ELEMENT) template class SystemLogBase<ELEMENT>;
SYSTEM_LOG_ELEMENTS(INSTANTIATE_SYSTEM_LOG_BASE)
#define INSTANTIATE_SYSTEM_LOG_QUEUE(ELEMENT) template class SystemLogQueue<ELEMENT>;
SYSTEM_LOG_ELEMENTS(INSTANTIATE_SYSTEM_LOG_QUEUE)
}
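SystemLogQueue now has a producer side (push / notifyFlush / waitFlush) and a consumer side (pop / confirm); the saving thread of a concrete log is expected to loop roughly as follows (a sketch only, table writing and error handling omitted):
std::vector<LogElement> to_flush;
while (true)
{
    bool should_prepare_tables_anyway = false;
    bool exit_this_thread = false;
    /// Blocks for up to flush_interval_milliseconds or until a flush/shutdown is requested;
    /// returns the exclusive end index of the popped batch.
    uint64_t to_flush_end = queue->pop(to_flush, should_prepare_tables_anyway, exit_this_thread);
    if (exit_this_thread)
        break;
    /// ... write `to_flush` into the target table (creating it if should_prepare_tables_anyway) ...
    /// Publish progress and wake writers blocked in waitFlush().
    queue->confirm(to_flush_end);
}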

View File

@ -55,33 +55,88 @@ public:
virtual void prepareTable() = 0;
/// Start the background thread.
virtual void startup();
virtual void startup() = 0;
/// Stop the background flush thread before destructor. No more data will be written.
virtual void shutdown() = 0;
virtual void stopFlushThread() = 0;
virtual ~ISystemLog();
virtual void savingThreadFunction() = 0;
protected:
std::mutex thread_mutex;
std::unique_ptr<ThreadFromGlobalPool> saving_thread;
bool is_shutdown = false;
};
template <typename LogElement>
class SystemLogQueue
{
using Index = uint64_t;
public:
SystemLogQueue(
const String & table_name_,
size_t flush_interval_milliseconds_,
bool turn_off_logger_ = false);
void shutdown();
// producer methods
void push(const LogElement & element);
Index notifyFlush(bool should_prepare_tables_anyway);
void waitFlush(Index expected_flushed_up_to);
// consumer methods
Index pop(std::vector<LogElement>& output, bool& should_prepare_tables_anyway, bool& exit_this_thread);
void confirm(Index to_flush_end);
private:
/// Data shared between callers of add()/flush()/shutdown(), and the saving thread
std::mutex mutex;
bool is_shutdown = false;
std::condition_variable flush_event;
Poco::Logger * log;
void stopFlushThread();
// Queue is bounded. But its size is quite large to not block in all normal cases.
std::vector<LogElement> queue;
// An always-incrementing index of the first message currently in the queue.
// We use it to give a global sequential index to every message, so that we
// can wait until a particular message is flushed. This is used to implement
// synchronous log flushing for SYSTEM FLUSH LOGS.
Index queue_front_index = 0;
// A flag that says we must create the tables even if the queue is empty.
bool is_force_prepare_tables = false;
// Requested to flush logs up to this index, exclusive
Index requested_flush_up_to = 0;
// Flushed log up to this index, exclusive
Index flushed_up_to = 0;
// Logged overflow message at this queue front index
Index logged_queue_full_at_index = -1;
bool is_shutdown = false;
std::condition_variable flush_event;
const size_t flush_interval_milliseconds;
};
template <typename LogElement>
class SystemLogBase : public ISystemLog
{
public:
using Self = SystemLogBase;
SystemLogBase(
const String& table_name_,
size_t flush_interval_milliseconds_,
std::shared_ptr<SystemLogQueue<LogElement>> queue_ = nullptr);
void startup() override;
/** Append a record into log.
* Writing to table will be done asynchronously and in case of failure, record could be lost.
*/
@ -98,27 +153,6 @@ public:
static const char * getDefaultOrderBy() { return "event_date, event_time"; }
protected:
Poco::Logger * log;
// Queue is bounded. But its size is quite large to not block in all normal cases.
std::vector<LogElement> queue;
// An always-incrementing index of the first message currently in the queue.
// We use it to give a global sequential index to every message, so that we
// can wait until a particular message is flushed. This is used to implement
// synchronous log flushing for SYSTEM FLUSH LOGS.
uint64_t queue_front_index = 0;
// A flag that says we must create the tables even if the queue is empty.
bool is_force_prepare_tables = false;
// Requested to flush logs up to this index, exclusive
uint64_t requested_flush_up_to = 0;
// Flushed log up to this index, exclusive
uint64_t flushed_up_to = 0;
// Logged overflow message at this queue front index
uint64_t logged_queue_full_at_index = -1;
private:
uint64_t notifyFlushImpl(bool force);
std::shared_ptr<SystemLogQueue<LogElement>> queue;
};
}
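Since the queue is now a shared_ptr that can be injected through the constructor, several front-end logs of the same element type could presumably feed a single SystemLogQueue (and a single flush thread). A sketch with purely hypothetical types; only the constructor shape matches the declaration above:
auto shared_queue = std::make_shared<SystemLogQueue<HypotheticalLogElement>>(
    "hypothetical_log", /*flush_interval_milliseconds_=*/ 7500);
HypotheticalLog log_a("hypothetical_log", 7500, shared_queue);
HypotheticalLog log_b("hypothetical_log", 7500, shared_queue);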

View File

@ -2,7 +2,7 @@
#if defined(OS_LINUX)
#include "TaskStatsInfoGetter.h"
#include "NetlinkMetricsProvider.h"
#include "ProcfsMetricsProvider.h"
#include "hasLinuxCapability.h"
@ -99,7 +99,7 @@ TasksStatsCounters::MetricsProvider TasksStatsCounters::findBestAvailableProvide
static std::optional<MetricsProvider> provider =
[]() -> MetricsProvider
{
if (TaskStatsInfoGetter::checkPermissions())
if (NetlinkMetricsProvider::checkPermissions())
{
return MetricsProvider::Netlink;
}
@ -119,7 +119,7 @@ TasksStatsCounters::TasksStatsCounters(const UInt64 tid, const MetricsProvider p
switch (provider)
{
case MetricsProvider::Netlink:
stats_getter = [metrics_provider = std::make_shared<TaskStatsInfoGetter>(), tid]()
stats_getter = [metrics_provider = std::make_shared<NetlinkMetricsProvider>(), tid]()
{
::taskstats result{};
metrics_provider->getStat(result, static_cast<pid_t>(tid));

View File

@ -350,6 +350,29 @@ struct SyncResponse : virtual Response
size_t bytesSize() const override { return path.size(); }
};
struct ReconfigRequest : virtual Request
{
String joining;
String leaving;
String new_members;
int32_t version;
String getPath() const final { return keeper_config_path; }
size_t bytesSize() const final
{
return joining.size() + leaving.size() + new_members.size() + sizeof(version);
}
};
struct ReconfigResponse : virtual Response
{
String value;
Stat stat;
size_t bytesSize() const override { return value.size() + sizeof(stat); }
};
struct MultiRequest : virtual Request
{
Requests requests;
@ -395,9 +418,9 @@ using SetCallback = std::function<void(const SetResponse &)>;
using ListCallback = std::function<void(const ListResponse &)>;
using CheckCallback = std::function<void(const CheckResponse &)>;
using SyncCallback = std::function<void(const SyncResponse &)>;
using ReconfigCallback = std::function<void(const ReconfigResponse &)>;
using MultiCallback = std::function<void(const MultiResponse &)>;
/// For watches.
enum State
{
@ -526,6 +549,13 @@ public:
const String & path,
SyncCallback callback) = 0;
virtual void reconfig(
std::string_view joining,
std::string_view leaving,
std::string_view new_members,
int32_t version,
ReconfigCallback callback) = 0;
virtual void multi(
const Requests & requests,
MultiCallback callback) = 0;
@ -539,3 +569,11 @@ public:
};
}
template <> struct fmt::formatter<Coordination::Error> : fmt::formatter<std::string_view>
{
constexpr auto format(Coordination::Error code, auto & ctx)
{
return formatter<string_view>::format(Coordination::errorMessage(code), ctx);
}
};
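With the formatter above, Coordination::Error values can be passed straight to fmt-style formatting, and reconfig() follows the same callback pattern as the other virtual calls; a usage sketch (`keeper` is assumed to be some IKeeper implementation, and the member specification string is illustrative):
keeper->reconfig(
    /*joining=*/ "server.4=host4:9234",
    /*leaving=*/ "",
    /*new_members=*/ "",
    /*version=*/ -1,
    [](const Coordination::ReconfigResponse & response)
    {
        /// Error formats directly now, no errorMessage()/toString() wrapper needed.
        auto summary = fmt::format("reconfig finished with {}: {}", response.error, response.value);
    });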

View File

@ -3,12 +3,8 @@
#include <Common/setThreadName.h>
#include <Common/StringUtils/StringUtils.h>
#include <base/types.h>
#include <sstream>
#include <iomanip>
#include <functional>
namespace Coordination
{
@ -147,6 +143,14 @@ struct TestKeeperSyncRequest final : SyncRequest, TestKeeperRequest
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;
};
struct TestKeeperReconfigRequest final : ReconfigRequest, TestKeeperRequest
{
TestKeeperReconfigRequest() = default;
explicit TestKeeperReconfigRequest(const ReconfigRequest & base) : ReconfigRequest(base) {}
ResponsePtr createResponse() const override;
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;
};
struct TestKeeperMultiRequest final : MultiRequest, TestKeeperRequest
{
explicit TestKeeperMultiRequest(const Requests & generic_requests)
@ -226,15 +230,7 @@ std::pair<ResponsePtr, Undo> TestKeeperCreateRequest::process(TestKeeper::Contai
std::string path_created = path;
if (is_sequential)
{
auto seq_num = it->second.seq_num;
std::stringstream seq_num_str; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
seq_num_str.exceptions(std::ios::failbit);
seq_num_str << std::setw(10) << std::setfill('0') << seq_num;
path_created += seq_num_str.str();
}
path_created += fmt::format("{:0>10}", it->second.seq_num);
/// Increment sequential number even if node is not sequential
++it->second.seq_num;
@ -446,6 +442,17 @@ std::pair<ResponsePtr, Undo> TestKeeperSyncRequest::process(TestKeeper::Containe
return { std::make_shared<SyncResponse>(std::move(response)), {} };
}
std::pair<ResponsePtr, Undo> TestKeeperReconfigRequest::process(TestKeeper::Container &, int64_t) const
{
// In TestKeeper we assume data is stored on one server, so this is a dummy implementation to
// satisfy IKeeper interface.
// We can't even check the validity of input data, neither can we create the /keeper/config znode
// as we don't know the id of current "server".
ReconfigResponse response;
response.error = Error::ZOK;
return { std::make_shared<ReconfigResponse>(std::move(response)), {} };
}
std::pair<ResponsePtr, Undo> TestKeeperMultiRequest::process(TestKeeper::Container & container, int64_t zxid) const
{
MultiResponse response;
@ -505,6 +512,7 @@ ResponsePtr TestKeeperSetRequest::createResponse() const { return std::make_shar
ResponsePtr TestKeeperListRequest::createResponse() const { return std::make_shared<ListResponse>(); }
ResponsePtr TestKeeperCheckRequest::createResponse() const { return std::make_shared<CheckResponse>(); }
ResponsePtr TestKeeperSyncRequest::createResponse() const { return std::make_shared<SyncResponse>(); }
ResponsePtr TestKeeperReconfigRequest::createResponse() const { return std::make_shared<ReconfigResponse>(); }
ResponsePtr TestKeeperMultiRequest::createResponse() const { return std::make_shared<MultiResponse>(); }
@ -828,6 +836,28 @@ void TestKeeper::sync(
pushRequest(std::move(request_info));
}
void TestKeeper::reconfig(
std::string_view joining,
std::string_view leaving,
std::string_view new_members,
int32_t version,
ReconfigCallback callback)
{
TestKeeperReconfigRequest req;
req.joining = joining;
req.leaving = leaving;
req.new_members = new_members;
req.version = version;
pushRequest({
.request = std::make_shared<TestKeeperReconfigRequest>(std::move(req)),
.callback = [callback](const Response & response)
{
callback(dynamic_cast<const ReconfigResponse &>(response));
}
});
}
void TestKeeper::multi(
const Requests & requests,
MultiCallback callback)

View File

@ -87,6 +87,13 @@ public:
const String & path,
SyncCallback callback) override;
void reconfig(
std::string_view joining,
std::string_view leaving,
std::string_view new_members,
int32_t version,
ReconfigCallback callback) final;
void multi(
const Requests & requests,
MultiCallback callback) override;

View File

@ -15,6 +15,7 @@
#include <base/sort.h>
#include <base/getFQDNOrHostName.h>
#include "Common/ZooKeeper/IKeeper.h"
#include <Common/DNSResolver.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/Exception.h>
#include <Common/logger_useful.h>
@ -77,13 +78,17 @@ void ZooKeeper::init(ZooKeeperArgs args_)
auto & host_string = host.host;
try
{
bool secure = startsWith(host_string, "secure://");
const bool secure = startsWith(host_string, "secure://");
if (secure)
host_string.erase(0, strlen("secure://"));
LOG_TEST(log, "Adding ZooKeeper host {} ({})", host_string, Poco::Net::SocketAddress{host_string}.toString());
nodes.emplace_back(Coordination::ZooKeeper::Node{Poco::Net::SocketAddress{host_string}, secure});
/// We want to resolve all hosts without DNS cache for keeper connection.
Coordination::DNSResolver::instance().removeHostFromCache(host_string);
const Poco::Net::SocketAddress host_socket_addr{host_string};
LOG_TEST(log, "Adding ZooKeeper host {} ({})", host_string, host_socket_addr.toString());
nodes.emplace_back(Coordination::ZooKeeper::Node{host_socket_addr, secure});
}
catch (const Poco::Net::HostNotFoundException & e)
{
@ -193,12 +198,7 @@ std::vector<ShuffleHost> ZooKeeper::shuffleHosts() const
shuffle_hosts.emplace_back(shuffle_host);
}
::sort(
shuffle_hosts.begin(), shuffle_hosts.end(),
[](const ShuffleHost & lhs, const ShuffleHost & rhs)
{
return ShuffleHost::compare(lhs, rhs);
});
::sort(shuffle_hosts.begin(), shuffle_hosts.end(), ShuffleHost::compare);
return shuffle_hosts;
}
@ -233,7 +233,7 @@ Coordination::Error ZooKeeper::getChildrenImpl(const std::string & path, Strings
if (future_result.wait_for(std::chrono::milliseconds(args.operation_timeout_ms)) != std::future_status::ready)
{
impl->finalize(fmt::format("Operation timeout on {} {}", toString(Coordination::OpNum::List), path));
impl->finalize(fmt::format("Operation timeout on {} {}", Coordination::OpNum::List, path));
return Coordination::Error::ZOPERATIONTIMEOUT;
}
else
@ -300,7 +300,7 @@ Coordination::Error ZooKeeper::createImpl(const std::string & path, const std::s
if (future_result.wait_for(std::chrono::milliseconds(args.operation_timeout_ms)) != std::future_status::ready)
{
impl->finalize(fmt::format("Operation timeout on {} {}", toString(Coordination::OpNum::Create), path));
impl->finalize(fmt::format("Operation timeout on {} {}", Coordination::OpNum::Create, path));
return Coordination::Error::ZOPERATIONTIMEOUT;
}
else
@ -415,7 +415,7 @@ Coordination::Error ZooKeeper::removeImpl(const std::string & path, int32_t vers
if (future_result.wait_for(std::chrono::milliseconds(args.operation_timeout_ms)) != std::future_status::ready)
{
impl->finalize(fmt::format("Operation timeout on {} {}", toString(Coordination::OpNum::Remove), path));
impl->finalize(fmt::format("Operation timeout on {} {}", Coordination::OpNum::Remove, path));
return Coordination::Error::ZOPERATIONTIMEOUT;
}
else
@ -447,7 +447,7 @@ Coordination::Error ZooKeeper::existsImpl(const std::string & path, Coordination
if (future_result.wait_for(std::chrono::milliseconds(args.operation_timeout_ms)) != std::future_status::ready)
{
impl->finalize(fmt::format("Operation timeout on {} {}", toString(Coordination::OpNum::Exists), path));
impl->finalize(fmt::format("Operation timeout on {} {}", Coordination::OpNum::Exists, path));
return Coordination::Error::ZOPERATIONTIMEOUT;
}
else
@ -481,7 +481,7 @@ Coordination::Error ZooKeeper::getImpl(const std::string & path, std::string & r
if (future_result.wait_for(std::chrono::milliseconds(args.operation_timeout_ms)) != std::future_status::ready)
{
impl->finalize(fmt::format("Operation timeout on {} {}", toString(Coordination::OpNum::Get), path));
impl->finalize(fmt::format("Operation timeout on {} {}", Coordination::OpNum::Get, path));
return Coordination::Error::ZOPERATIONTIMEOUT;
}
else
@ -553,7 +553,7 @@ Coordination::Error ZooKeeper::setImpl(const std::string & path, const std::stri
if (future_result.wait_for(std::chrono::milliseconds(args.operation_timeout_ms)) != std::future_status::ready)
{
impl->finalize(fmt::format("Operation timeout on {} {}", toString(Coordination::OpNum::Set), path));
impl->finalize(fmt::format("Operation timeout on {} {}", Coordination::OpNum::Set, path));
return Coordination::Error::ZOPERATIONTIMEOUT;
}
else
@ -605,7 +605,7 @@ Coordination::Error ZooKeeper::multiImpl(const Coordination::Requests & requests
if (future_result.wait_for(std::chrono::milliseconds(args.operation_timeout_ms)) != std::future_status::ready)
{
impl->finalize(fmt::format("Operation timeout on {} {}", toString(Coordination::OpNum::Multi), requests[0]->getPath()));
impl->finalize(fmt::format("Operation timeout on {} {}", Coordination::OpNum::Multi, requests[0]->getPath()));
return Coordination::Error::ZOPERATIONTIMEOUT;
}
else
@ -639,7 +639,7 @@ Coordination::Error ZooKeeper::syncImpl(const std::string & path, std::string &
if (future_result.wait_for(std::chrono::milliseconds(args.operation_timeout_ms)) != std::future_status::ready)
{
impl->finalize(fmt::format("Operation timeout on {} {}", toString(Coordination::OpNum::Sync), path));
impl->finalize(fmt::format("Operation timeout on {} {}", Coordination::OpNum::Sync, path));
return Coordination::Error::ZOPERATIONTIMEOUT;
}
else
@ -1251,7 +1251,7 @@ size_t getFailedOpIndex(Coordination::Error exception_code, const Coordination::
if (!Coordination::isUserError(exception_code))
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR,
"There are no failed OPs because '{}' is not valid response code for that",
std::string(Coordination::errorMessage(exception_code)));
exception_code);
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "There is no failed OpResult");
}

View File

@ -36,7 +36,7 @@ std::string ZooKeeperRequest::toString() const
"OpNum = {}\n"
"Additional info:\n{}",
xid,
Coordination::toString(getOpNum()),
getOpNum(),
toStringImpl());
}
@ -76,6 +76,41 @@ void ZooKeeperSyncResponse::writeImpl(WriteBuffer & out) const
Coordination::write(path, out);
}
void ZooKeeperReconfigRequest::writeImpl(WriteBuffer & out) const
{
Coordination::write(joining, out);
Coordination::write(leaving, out);
Coordination::write(new_members, out);
Coordination::write(version, out);
}
void ZooKeeperReconfigRequest::readImpl(ReadBuffer & in)
{
Coordination::read(joining, in);
Coordination::read(leaving, in);
Coordination::read(new_members, in);
Coordination::read(version, in);
}
std::string ZooKeeperReconfigRequest::toStringImpl() const
{
return fmt::format(
"joining = {}\nleaving = {}\nnew_members = {}\nversion = {}",
joining, leaving, new_members, version);
}
void ZooKeeperReconfigResponse::readImpl(ReadBuffer & in)
{
Coordination::read(value, in);
Coordination::read(stat, in);
}
void ZooKeeperReconfigResponse::writeImpl(WriteBuffer & out) const
{
Coordination::write(value, out);
Coordination::write(stat, out);
}
void ZooKeeperWatchResponse::readImpl(ReadBuffer & in)
{
Coordination::read(type, in);
@ -664,6 +699,7 @@ ZooKeeperResponsePtr ZooKeeperRemoveRequest::makeResponse() const { return setTi
ZooKeeperResponsePtr ZooKeeperExistsRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperExistsResponse>()); }
ZooKeeperResponsePtr ZooKeeperGetRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperGetResponse>()); }
ZooKeeperResponsePtr ZooKeeperSetRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperSetResponse>()); }
ZooKeeperResponsePtr ZooKeeperReconfigRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperReconfigResponse>()); }
ZooKeeperResponsePtr ZooKeeperListRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperListResponse>()); }
ZooKeeperResponsePtr ZooKeeperSimpleListRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperSimpleListResponse>()); }
@ -861,7 +897,8 @@ void ZooKeeperMultiResponse::fillLogElements(LogElements & elems, size_t idx) co
void ZooKeeperRequestFactory::registerRequest(OpNum op_num, Creator creator)
{
if (!op_num_to_request.try_emplace(op_num, creator).second)
throw Coordination::Exception("Request type " + toString(op_num) + " already registered", Coordination::Error::ZRUNTIMEINCONSISTENCY);
throw Coordination::Exception(Coordination::Error::ZRUNTIMEINCONSISTENCY,
"Request type {} already registered", op_num);
}
std::shared_ptr<ZooKeeperRequest> ZooKeeperRequest::read(ReadBuffer & in)
@ -916,7 +953,7 @@ ZooKeeperRequestPtr ZooKeeperRequestFactory::get(OpNum op_num) const
{
auto it = op_num_to_request.find(op_num);
if (it == op_num_to_request.end())
throw Exception("Unknown operation type " + toString(op_num), Error::ZBADARGUMENTS);
throw Exception(Error::ZBADARGUMENTS, "Unknown operation type {}", op_num);
return it->second();
}
@ -960,6 +997,7 @@ ZooKeeperRequestFactory::ZooKeeperRequestFactory()
registerZooKeeperRequest<OpNum::SimpleList, ZooKeeperSimpleListRequest>(*this);
registerZooKeeperRequest<OpNum::List, ZooKeeperListRequest>(*this);
registerZooKeeperRequest<OpNum::Check, ZooKeeperCheckRequest>(*this);
registerZooKeeperRequest<OpNum::Reconfig, ZooKeeperReconfigRequest>(*this);
registerZooKeeperRequest<OpNum::Multi, ZooKeeperMultiRequest>(*this);
registerZooKeeperRequest<OpNum::MultiRead, ZooKeeperMultiRequest>(*this);
registerZooKeeperRequest<OpNum::SessionID, ZooKeeperSessionIDRequest>(*this);

View File

@ -117,6 +117,35 @@ struct ZooKeeperSyncResponse final : SyncResponse, ZooKeeperResponse
OpNum getOpNum() const override { return OpNum::Sync; }
};
struct ZooKeeperReconfigRequest final : ZooKeeperRequest
{
String joining;
String leaving;
String new_members;
int64_t version; // kazoo sends a 64bit integer in this request
String getPath() const override { return keeper_config_path; }
OpNum getOpNum() const override { return OpNum::Reconfig; }
void writeImpl(WriteBuffer & out) const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl() const override;
ZooKeeperResponsePtr makeResponse() const override;
bool isReadRequest() const override { return false; }
size_t bytesSize() const override
{
return ZooKeeperRequest::bytesSize() + joining.size() + leaving.size() + new_members.size()
+ sizeof(version);
}
};
struct ZooKeeperReconfigResponse final : ReconfigResponse, ZooKeeperResponse
{
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
OpNum getOpNum() const override { return OpNum::Reconfig; }
};
struct ZooKeeperHeartbeatResponse final : ZooKeeperResponse
{
void readImpl(ReadBuffer &) override {}

View File

@ -19,6 +19,7 @@ static const std::unordered_set<int32_t> VALID_OPERATIONS =
static_cast<int32_t>(OpNum::Heartbeat),
static_cast<int32_t>(OpNum::List),
static_cast<int32_t>(OpNum::Check),
static_cast<int32_t>(OpNum::Reconfig),
static_cast<int32_t>(OpNum::Multi),
static_cast<int32_t>(OpNum::MultiRead),
static_cast<int32_t>(OpNum::Auth),
@ -29,55 +30,6 @@ static const std::unordered_set<int32_t> VALID_OPERATIONS =
static_cast<int32_t>(OpNum::CheckNotExists),
};
std::string toString(OpNum op_num)
{
switch (op_num)
{
case OpNum::Close:
return "Close";
case OpNum::Error:
return "Error";
case OpNum::Create:
return "Create";
case OpNum::Remove:
return "Remove";
case OpNum::Exists:
return "Exists";
case OpNum::Get:
return "Get";
case OpNum::Set:
return "Set";
case OpNum::SimpleList:
return "SimpleList";
case OpNum::List:
return "List";
case OpNum::Check:
return "Check";
case OpNum::Multi:
return "Multi";
case OpNum::MultiRead:
return "MultiRead";
case OpNum::Sync:
return "Sync";
case OpNum::Heartbeat:
return "Heartbeat";
case OpNum::Auth:
return "Auth";
case OpNum::SessionID:
return "SessionID";
case OpNum::SetACL:
return "SetACL";
case OpNum::GetACL:
return "GetACL";
case OpNum::FilteredList:
return "FilteredList";
case OpNum::CheckNotExists:
return "CheckNotExists";
}
int32_t raw_op = static_cast<int32_t>(op_num);
throw Exception("Operation " + std::to_string(raw_op) + " is unknown", Error::ZUNIMPLEMENTED);
}
OpNum getOpNum(int32_t raw_op_num)
{
if (!VALID_OPERATIONS.contains(raw_op_num))

View File

@ -31,6 +31,7 @@ enum class OpNum : int32_t
List = 12,
Check = 13,
Multi = 14,
Reconfig = 16,
MultiRead = 22,
Auth = 100,
@ -41,7 +42,6 @@ enum class OpNum : int32_t
SessionID = 997, /// Special internal request
};
std::string toString(OpNum op_num);
OpNum getOpNum(int32_t raw_op_num);
static constexpr int32_t ZOOKEEPER_PROTOCOL_VERSION = 0;
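toString(OpNum) is removed, yet the call sites above now pass OpNum values straight into fmt::format and LOG_* macros, so a formatter for the enum has to exist elsewhere. A minimal sketch of such a formatter, assuming a magic_enum-based approach (this exact definition is an assumption, not necessarily what the commit uses):
#include <magic_enum.hpp>
#include <fmt/format.h>
template <>
struct fmt::formatter<Coordination::OpNum> : fmt::formatter<std::string_view>
{
    constexpr auto format(Coordination::OpNum op_num, auto & ctx)
    {
        /// Note: magic_enum only reflects values inside its configured range,
        /// so large values such as SessionID (997) may need special handling.
        return formatter<string_view>::format(magic_enum::enum_name(op_num), ctx);
    }
};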

View File

@ -35,6 +35,7 @@ namespace ProfileEvents
extern const Event ZooKeeperRemove;
extern const Event ZooKeeperExists;
extern const Event ZooKeeperMulti;
extern const Event ZooKeeperReconfig;
extern const Event ZooKeeperGet;
extern const Event ZooKeeperSet;
extern const Event ZooKeeperList;
@ -571,7 +572,7 @@ void ZooKeeper::sendAuth(const String & scheme, const String & data)
if (err != Error::ZOK)
throw Exception(Error::ZMARSHALLINGERROR, "Error received in reply to auth request. Code: {}. Message: {}",
static_cast<int32_t>(err), errorMessage(err));
static_cast<int32_t>(err), err);
}
void ZooKeeper::sendThread()
@ -697,7 +698,7 @@ void ZooKeeper::receiveThread()
if (earliest_operation)
{
throw Exception(Error::ZOPERATIONTIMEOUT, "Operation timeout (no response in {} ms) for request {} for path: {}",
args.operation_timeout_ms, toString(earliest_operation->request->getOpNum()), earliest_operation->request->getPath());
args.operation_timeout_ms, earliest_operation->request->getOpNum(), earliest_operation->request->getPath());
}
waited_us += max_wait_us;
if (waited_us >= args.session_timeout_ms * 1000)
@ -738,7 +739,7 @@ void ZooKeeper::receiveEvent()
if (xid == PING_XID)
{
if (err != Error::ZOK)
throw Exception(Error::ZRUNTIMEINCONSISTENCY, "Received error in heartbeat response: {}", errorMessage(err));
throw Exception(Error::ZRUNTIMEINCONSISTENCY, "Received error in heartbeat response: {}", err);
response = std::make_shared<ZooKeeperHeartbeatResponse>();
}
@ -1195,7 +1196,6 @@ void ZooKeeper::create(
ProfileEvents::increment(ProfileEvents::ZooKeeperCreate);
}
void ZooKeeper::remove(
const String & path,
int32_t version,
@ -1335,6 +1335,26 @@ void ZooKeeper::sync(
ProfileEvents::increment(ProfileEvents::ZooKeeperSync);
}
void ZooKeeper::reconfig(
std::string_view joining,
std::string_view leaving,
std::string_view new_members,
int32_t version,
ReconfigCallback callback)
{
ZooKeeperReconfigRequest request;
request.joining = joining;
request.leaving = leaving;
request.new_members = new_members;
request.version = version;
RequestInfo request_info;
request_info.request = std::make_shared<ZooKeeperReconfigRequest>(std::move(request));
request_info.callback = [callback](const Response & response) { callback(dynamic_cast<const ReconfigResponse &>(response)); };
pushRequest(std::move(request_info));
ProfileEvents::increment(ProfileEvents::ZooKeeperReconfig);
}
void ZooKeeper::multi(
const Requests & requests,

View File

@ -178,6 +178,13 @@ public:
const String & path,
SyncCallback callback) override;
void reconfig(
std::string_view joining,
std::string_view leaving,
std::string_view new_members,
int32_t version,
ReconfigCallback callback) final;
void multi(
const Requests & requests,
MultiCallback callback) override;

View File

@ -59,3 +59,7 @@
#cmakedefine01 USE_ULID
#cmakedefine01 FIU_ENABLE
#cmakedefine01 USE_BCRYPT
/// This is needed for .incbin in assembly. For some reason, include paths don't work there in presence of LTO.
/// That's why we use absolute paths.
#cmakedefine SOURCE_DIR "@SOURCE_DIR@"

View File

@ -1,52 +0,0 @@
#include "getResource.h"
#include <dlfcn.h>
#include <string>
#include <boost/algorithm/string/replace.hpp>
#include <Common/SymbolIndex.h>
std::string_view getResource(std::string_view name)
{
// Convert the resource file name into the form generated by `ld -r -b binary`.
std::string name_replaced(name);
std::replace(name_replaced.begin(), name_replaced.end(), '/', '_');
std::replace(name_replaced.begin(), name_replaced.end(), '-', '_');
std::replace(name_replaced.begin(), name_replaced.end(), '.', '_');
boost::replace_all(name_replaced, "+", "_PLUS_");
#if defined USE_MUSL
/// If static linking is used, we cannot use dlsym and have to parse ELF symbol table by ourself.
return DB::SymbolIndex::instance().getResource(name_replaced);
#else
// In most `dlsym(3)` APIs, one passes the symbol name as it appears via
// something like `nm` or `objdump -t`. For example, a symbol `_foo` would be
// looked up with the string `"_foo"`.
//
// Apple's linker is confusingly different. The NOTES on the man page for
// `dlsym(3)` claim that one looks up the symbol with "the name used in C
// source code". In this example, that would mean using the string `"foo"`.
// This apparently applies even in the case where the symbol did not originate
// from C source, such as the embedded binary resource files used here. So
// the symbol name must not have a leading `_` on Apple platforms. It's not
// clear how this applies to other symbols, such as those which _have_ a leading
// underscore in them by design, many leading underscores, etc.
#if defined OS_DARWIN
std::string prefix = "binary_";
#else
std::string prefix = "_binary_";
#endif
std::string symbol_name_start = prefix + name_replaced + "_start";
std::string symbol_name_end = prefix + name_replaced + "_end";
const char * sym_start = reinterpret_cast<const char *>(dlsym(RTLD_DEFAULT, symbol_name_start.c_str()));
const char * sym_end = reinterpret_cast<const char *>(dlsym(RTLD_DEFAULT, symbol_name_end.c_str()));
if (sym_start && sym_end)
{
auto resource_size = static_cast<size_t>(std::distance(sym_start, sym_end));
return { sym_start, resource_size };
}
return {};
#endif
}

View File

@ -1,7 +0,0 @@
#pragma once
#include <string_view>
/// Get resource from binary if exists. Otherwise return empty string view.
/// Resources are data that is embedded into executable at link time.
std::string_view getResource(std::string_view name);
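getResource() and the _binary_* symbol machinery are gone; the embedded blobs referenced earlier (e.g. gresource_programming_zstData/Size in FrequencyHolder.cpp) follow incbin's naming convention, so the data is presumably now embedded roughly like this (the exact resource path is an assumption):
#include <incbin.h>
#include <string_view>
#include "config.h"   /// for SOURCE_DIR, see the #cmakedefine added above
/// INCBIN(Name, path) emits gNameData, gNameEnd and gNameSize symbols,
/// which is exactly the shape consumed by FrequencyHolder.
INCBIN(resource_programming_zst, SOURCE_DIR "/contrib/nlp-data/programming.zst");
static std::string_view getProgrammingResource()
{
    return {reinterpret_cast<const char *>(gresource_programming_zstData), gresource_programming_zstSize};
}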

View File

@ -44,4 +44,15 @@ String backQuoteIfNeed(StringRef x)
return res;
}
String backQuoteMySQL(StringRef x)
{
String res(x.size, '\0');
{
WriteBufferFromString wb(res);
writeBackQuotedStringMySQL(x, wb);
}
return res;
}
}

View File

@ -24,4 +24,7 @@ String backQuote(StringRef x);
/// Quote the identifier with backquotes, if required.
String backQuoteIfNeed(StringRef x);
/// Quote the identifier with backquotes, for use in MySQL queries.
String backQuoteMySQL(StringRef x);
}
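A quick usage note: like backQuote and backQuoteIfNeed, the new helper wraps the identifier in backticks, but escapes embedded backticks by doubling them as MySQL expects:
String quoted = backQuoteMySQL("weird`name");   /// expected to yield `weird``name`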

View File

@ -548,4 +548,3 @@ INSTANTIATE_TEST_SUITE_P(AllTimezones_Year1970,
// {0, 0 + 11 * 3600 * 24 + 12, 11},
}))
);

View File

@ -1,5 +1,4 @@
#pragma once
#include <IO/WriteHelpers.h>
namespace DB
@ -14,8 +13,8 @@ enum class KeeperApiVersion : uint8_t
WITH_CHECK_NOT_EXISTS,
};
const std::string keeper_system_path = "/keeper";
const std::string keeper_api_version_path = keeper_system_path + "/api_version";
const std::string keeper_api_feature_flags_path = keeper_system_path + "/feature_flags";
const String keeper_system_path = "/keeper";
const String keeper_api_version_path = keeper_system_path + "/api_version";
const String keeper_api_feature_flags_path = keeper_system_path + "/feature_flags";
const String keeper_config_path = keeper_system_path + "/config";
}

View File

@ -32,8 +32,9 @@ KeeperContext::KeeperContext(bool standalone_keeper_)
system_nodes_with_data[keeper_api_version_path] = toString(static_cast<uint8_t>(KeeperApiVersion::WITH_MULTI_READ));
}
void KeeperContext::initialize(const Poco::Util::AbstractConfiguration & config)
void KeeperContext::initialize(const Poco::Util::AbstractConfiguration & config, KeeperDispatcher * dispatcher_)
{
dispatcher = dispatcher_;
digest_enabled = config.getBool("keeper_server.digest_enabled", false);
ignore_system_path_on_startup = config.getBool("keeper_server.ignore_system_path_on_startup", false);

View File

@ -1,10 +1,8 @@
#pragma once
#include <Poco/Util/AbstractConfiguration.h>
#include <Coordination/KeeperFeatureFlags.h>
#include <IO/WriteBufferFromString.h>
#include <Disks/DiskSelector.h>
#include <IO/WriteBufferFromString.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <cstdint>
#include <memory>
@ -12,6 +10,8 @@
namespace DB
{
class KeeperDispatcher;
class KeeperContext
{
public:
@ -24,7 +24,7 @@ public:
SHUTDOWN
};
void initialize(const Poco::Util::AbstractConfiguration & config);
void initialize(const Poco::Util::AbstractConfiguration & config, KeeperDispatcher * dispatcher_);
Phase getServerState() const;
void setServerState(Phase server_state_);
@ -51,6 +51,9 @@ public:
const KeeperFeatureFlags & getFeatureFlags() const;
void dumpConfiguration(WriteBufferFromOwnString & buf) const;
constexpr KeeperDispatcher * getDispatcher() const { return dispatcher; }
private:
/// local disk defined using path or disk name
using Storage = std::variant<DiskPtr, std::string>;
@ -85,8 +88,8 @@ private:
std::unordered_map<std::string, std::string> system_nodes_with_data;
KeeperFeatureFlags feature_flags;
KeeperDispatcher * dispatcher{nullptr};
};
using KeeperContextPtr = std::shared_ptr<KeeperContext>;
}

View File

@ -38,6 +38,8 @@ namespace ProfileEvents
extern const Event MemoryAllocatorPurgeTimeMicroseconds;
}
using namespace std::chrono_literals;
namespace DB
{
@ -80,6 +82,7 @@ void KeeperDispatcher::requestThread()
/// requests into a batch we must check that the new request is not read request. Otherwise we have to
/// process all already accumulated write requests, wait them synchronously and only after that process
/// read request. So reads are some kind of "separator" for writes.
/// A reconfig request is likewise treated as a separator.
try
{
if (requests_queue->tryPop(request, max_wait))
@ -92,10 +95,13 @@ void KeeperDispatcher::requestThread()
size_t current_batch_bytes_size = 0;
bool has_read_request = false;
bool has_reconfig_request = false;
/// If new request is not read request or we must to process it through quorum.
/// If new request is not read request or reconfig request we must process it through quorum.
/// Otherwise we will process it locally.
if (coordination_settings->quorum_reads || !request.request->isReadRequest())
if (request.request->getOpNum() == Coordination::OpNum::Reconfig)
has_reconfig_request = true;
else if (coordination_settings->quorum_reads || !request.request->isReadRequest())
{
current_batch_bytes_size += request.request->bytesSize();
current_batch.emplace_back(request);
@ -113,6 +119,11 @@ void KeeperDispatcher::requestThread()
std::lock_guard lock(read_request_queue_mutex);
read_request_queue[last_request.session_id][last_request.request->xid].push_back(request);
}
else if (request.request->getOpNum() == Coordination::OpNum::Reconfig)
{
has_reconfig_request = true;
return false;
}
else
{
current_batch_bytes_size += request.request->bytesSize();
@ -128,6 +139,7 @@ void KeeperDispatcher::requestThread()
/// TODO: Deprecate max_requests_quick_batch_size and use only max_requests_batch_size and max_requests_batch_bytes_size
size_t max_quick_batch_size = coordination_settings->max_requests_quick_batch_size;
while (!shutdown_called && !has_read_request &&
!has_reconfig_request &&
current_batch.size() < max_quick_batch_size && current_batch_bytes_size < max_batch_bytes_size &&
try_get_request())
;
@ -140,8 +152,10 @@ void KeeperDispatcher::requestThread()
};
/// Waiting until previous append will be successful, or batch is big enough
while (!shutdown_called && !has_read_request && !prev_result_done() &&
current_batch.size() <= max_batch_size && current_batch_bytes_size < max_batch_bytes_size)
while (!shutdown_called && !has_read_request &&
!has_reconfig_request && !prev_result_done() &&
current_batch.size() <= max_batch_size
&& current_batch_bytes_size < max_batch_bytes_size)
{
try_get_request();
}
@ -165,7 +179,8 @@ void KeeperDispatcher::requestThread()
if (result)
{
if (has_read_request) /// If we will execute read request next, than we have to process result now
/// If we will execute read or reconfig next, we have to process result now
if (has_read_request || has_reconfig_request)
forceWaitAndProcessResult(result, current_batch);
}
else
@ -179,6 +194,9 @@ void KeeperDispatcher::requestThread()
prev_result = result;
}
if (has_reconfig_request)
server->getKeeperStateMachine()->reconfigure(request);
/// Read request always goes after write batch (last request)
if (has_read_request)
{
@ -335,7 +353,7 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf
snapshot_s3.startup(config, macros);
keeper_context = std::make_shared<KeeperContext>(standalone_keeper);
keeper_context->initialize(config);
keeper_context->initialize(config, this);
server = std::make_unique<KeeperServer>(
configuration_and_settings,
@ -392,7 +410,10 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf
/// Start it after keeper server start
session_cleaner_thread = ThreadFromGlobalPool([this] { sessionCleanerTask(); });
update_configuration_thread = ThreadFromGlobalPool([this] { updateConfigurationThread(); });
update_configuration_thread = reconfigEnabled()
? ThreadFromGlobalPool([this] { clusterUpdateThread(); })
: ThreadFromGlobalPool([this] { clusterUpdateWithReconfigDisabledThread(); });
LOG_DEBUG(log, "Dispatcher initialized");
}
@ -429,7 +450,7 @@ void KeeperDispatcher::shutdown()
if (snapshot_thread.joinable())
snapshot_thread.join();
update_configuration_queue.finish();
cluster_update_queue.finish();
if (update_configuration_thread.joinable())
update_configuration_thread.join();
}
@ -473,23 +494,30 @@ void KeeperDispatcher::shutdown()
session_to_response_callback.clear();
}
// if there is no leader, there is no reason to do CLOSE because it's a write request
if (server && hasLeader() && !close_requests.empty())
if (server && !close_requests.empty())
{
LOG_INFO(log, "Trying to close {} session(s)", close_requests.size());
const auto raft_result = server->putRequestBatch(close_requests);
auto sessions_closing_done_promise = std::make_shared<std::promise<void>>();
auto sessions_closing_done = sessions_closing_done_promise->get_future();
raft_result->when_ready([my_sessions_closing_done_promise = std::move(sessions_closing_done_promise)](
nuraft::cmd_result<nuraft::ptr<nuraft::buffer>> & /*result*/,
nuraft::ptr<std::exception> & /*exception*/) { my_sessions_closing_done_promise->set_value(); });
// if there is no leader, there is no reason to do CLOSE because it's a write request
if (hasLeader())
{
LOG_INFO(log, "Trying to close {} session(s)", close_requests.size());
const auto raft_result = server->putRequestBatch(close_requests);
auto sessions_closing_done_promise = std::make_shared<std::promise<void>>();
auto sessions_closing_done = sessions_closing_done_promise->get_future();
raft_result->when_ready([my_sessions_closing_done_promise = std::move(sessions_closing_done_promise)](
nuraft::cmd_result<nuraft::ptr<nuraft::buffer>> & /*result*/,
nuraft::ptr<std::exception> & /*exception*/) { my_sessions_closing_done_promise->set_value(); });
auto session_shutdown_timeout = configuration_and_settings->coordination_settings->session_shutdown_timeout.totalMilliseconds();
if (sessions_closing_done.wait_for(std::chrono::milliseconds(session_shutdown_timeout)) != std::future_status::ready)
LOG_WARNING(
log,
"Failed to close sessions in {}ms. If they are not closed, they will be closed after session timeout.",
session_shutdown_timeout);
auto session_shutdown_timeout = configuration_and_settings->coordination_settings->session_shutdown_timeout.totalMilliseconds();
if (sessions_closing_done.wait_for(std::chrono::milliseconds(session_shutdown_timeout)) != std::future_status::ready)
LOG_WARNING(
log,
"Failed to close sessions in {}ms. If they are not closed, they will be closed after session timeout.",
session_shutdown_timeout);
}
else
{
LOG_INFO(log, "Sessions cannot be closed during shutdown because there is no active leader");
}
}
if (server)
@ -608,7 +636,7 @@ void KeeperDispatcher::addErrorResponses(const KeeperStorage::RequestsForSession
"Could not push error response xid {} zxid {} error message {} to responses queue",
response->xid,
response->zxid,
errorMessage(error));
error);
}
}
@ -653,7 +681,7 @@ int64_t KeeperDispatcher::getSessionID(int64_t session_timeout_ms)
{
if (response->getOpNum() != Coordination::OpNum::SessionID)
promise->set_exception(std::make_exception_ptr(Exception(ErrorCodes::LOGICAL_ERROR,
"Incorrect response of type {} instead of SessionID response", Coordination::toString(response->getOpNum()))));
"Incorrect response of type {} instead of SessionID response", response->getOpNum())));
auto session_id_response = dynamic_cast<const Coordination::ZooKeeperSessionIDResponse &>(*response);
if (session_id_response.internal_id != internal_id)
@ -685,17 +713,12 @@ int64_t KeeperDispatcher::getSessionID(int64_t session_timeout_ms)
return future.get();
}
void KeeperDispatcher::updateConfigurationThread()
void KeeperDispatcher::clusterUpdateWithReconfigDisabledThread()
{
while (true)
while (!shutdown_called)
{
if (shutdown_called)
return;
try
{
using namespace std::chrono_literals;
if (!server->checkInit())
{
LOG_INFO(log, "Server still not initialized, will not apply configuration until initialization finished");
@ -710,11 +733,10 @@ void KeeperDispatcher::updateConfigurationThread()
continue;
}
ConfigUpdateAction action;
if (!update_configuration_queue.pop(action))
ClusterUpdateAction action;
if (!cluster_update_queue.pop(action))
break;
/// We must wait for this update from the leader or apply it ourselves (if we are the leader)
bool done = false;
while (!done)
@ -727,15 +749,13 @@ void KeeperDispatcher::updateConfigurationThread()
if (isLeader())
{
server->applyConfigurationUpdate(action);
server->applyConfigUpdateWithReconfigDisabled(action);
done = true;
}
else
{
done = server->waitConfigurationUpdate(action);
if (!done)
LOG_INFO(log, "Cannot wait for configuration update, maybe we become leader, or maybe update is invalid, will try to wait one more time");
}
else if (done = server->waitForConfigUpdateWithReconfigDisabled(action); !done)
LOG_INFO(log,
"Cannot wait for configuration update, maybe we became leader "
"or maybe update is invalid, will try to wait one more time");
}
}
catch (...)
@ -745,6 +765,41 @@ void KeeperDispatcher::updateConfigurationThread()
}
}
void KeeperDispatcher::clusterUpdateThread()
{
while (!shutdown_called)
{
ClusterUpdateAction action;
if (!cluster_update_queue.pop(action))
return;
if (server->applyConfigUpdate(action))
LOG_DEBUG(log, "Processing config update {}: accepted", action);
else // TODO (myrrc) sleep a random amount? sleep less?
{
(void)cluster_update_queue.pushFront(action);
LOG_DEBUG(log, "Processing config update {}: declined, backoff", action);
std::this_thread::sleep_for(50ms);
}
}
}
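The loop above retries a declined update by pushing it back to the front of the queue and backing off before the next attempt. Below is a minimal standalone sketch of that retry-at-front pattern, assuming only the standard library; Action, UpdateQueue and updateLoop are simplified stand-ins, not the Keeper classes (the real ConcurrentBoundedQueue blocks on pop and enforces a capacity).

#include <atomic>
#include <chrono>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>

struct Action { int id = 0; };   // stand-in for ClusterUpdateAction

class UpdateQueue                // stand-in for the bounded update queue
{
    std::mutex mutex;
    std::deque<Action> queue;
public:
    void push(Action a) { std::lock_guard lock(mutex); queue.push_back(std::move(a)); }
    void pushFront(Action a) { std::lock_guard lock(mutex); queue.push_front(std::move(a)); }
    bool pop(Action & out)
    {
        std::lock_guard lock(mutex);
        if (queue.empty())
            return false;
        out = std::move(queue.front());
        queue.pop_front();
        return true;
    }
};

/// apply() returns false when the update is declined; the action is then retried first on the next pass.
void updateLoop(UpdateQueue & updates, const std::function<bool(const Action &)> & apply, const std::atomic<bool> & shutdown)
{
    using namespace std::chrono_literals;
    Action action;
    while (!shutdown && updates.pop(action))
    {
        if (!apply(action))
        {
            updates.pushFront(action);           // retry the same action before anything else
            std::this_thread::sleep_for(50ms);   // fixed backoff, as in the thread above
        }
    }
}

int main()
{
    UpdateQueue queue;
    queue.push({1});
    std::atomic<bool> shutdown{false};
    int attempts = 0;
    updateLoop(queue, [&](const Action &) { return ++attempts > 1; }, shutdown);  // declined once, then accepted
}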
void KeeperDispatcher::pushClusterUpdates(ClusterUpdateActions && actions)
{
if (shutdown_called) return;
for (auto && action : actions)
{
if (!cluster_update_queue.push(std::move(action)))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot push configuration update");
LOG_DEBUG(log, "Processing config update {}: pushed", action);
}
}
bool KeeperDispatcher::reconfigEnabled() const
{
return server->reconfigEnabled();
}
bool KeeperDispatcher::isServerActive() const
{
return checkInit() && hasLeader() && !server->isRecovering();
@ -752,20 +807,25 @@ bool KeeperDispatcher::isServerActive() const
void KeeperDispatcher::updateConfiguration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros)
{
auto diff = server->getConfigurationDiff(config);
auto diff = server->getRaftConfigurationDiff(config);
if (diff.empty())
LOG_TRACE(log, "Configuration update triggered, but nothing changed for RAFT");
LOG_TRACE(log, "Configuration update triggered, but nothing changed for Raft");
else if (reconfigEnabled())
LOG_WARNING(log,
"Raft configuration changed, but keeper_server.enable_reconfiguration is on. "
"This update will be ignored. Use \"reconfig\" instead");
else if (diff.size() > 1)
LOG_WARNING(log, "Configuration changed for more than one server ({}) from cluster, it's strictly not recommended", diff.size());
LOG_WARNING(log,
"Configuration changed for more than one server ({}) from cluster, "
"it's strictly not recommended", diff.size());
else
LOG_DEBUG(log, "Configuration change size ({})", diff.size());
for (auto & change : diff)
{
bool push_result = update_configuration_queue.push(change);
if (!push_result)
throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push configuration update to queue");
}
if (!reconfigEnabled())
for (auto & change : diff)
if (!cluster_update_queue.push(change))
throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push configuration update to queue");
snapshot_s3.updateS3Configuration(config, macros);
}

View File

@ -31,7 +31,7 @@ private:
using RequestsQueue = ConcurrentBoundedQueue<KeeperStorage::RequestForSession>;
using SessionToResponseCallback = std::unordered_map<int64_t, ZooKeeperResponseCallback>;
using UpdateConfigurationQueue = ConcurrentBoundedQueue<ConfigUpdateAction>;
using ClusterUpdateQueue = ConcurrentBoundedQueue<ClusterUpdateAction>;
/// Size depends on coordination settings
std::unique_ptr<RequestsQueue> requests_queue;
@ -39,7 +39,7 @@ private:
SnapshotsQueue snapshots_queue{1};
/// More than 1k updates is definitely misconfiguration.
UpdateConfigurationQueue update_configuration_queue{1000};
ClusterUpdateQueue cluster_update_queue{1000};
std::atomic<bool> shutdown_called{false};
@ -91,8 +91,10 @@ private:
void sessionCleanerTask();
/// Thread that creates snapshots in the background
void snapshotThread();
/// Thread to apply or wait for configuration changes from the leader
void updateConfigurationThread();
// TODO (myrrc) this should be removed once "reconfig" is stabilized
void clusterUpdateWithReconfigDisabledThread();
void clusterUpdateThread();
void setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response);
@ -132,10 +134,9 @@ public:
/// and achieved quorum
bool isServerActive() const;
/// Registered in ConfigReloader callback. Add new configuration changes to
/// update_configuration_queue. Keeper Dispatcher applies them asynchronously.
/// 'macros' are used to substitute macros in endpoint of disks
void updateConfiguration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros);
void pushClusterUpdates(ClusterUpdateActions && actions);
bool reconfigEnabled() const;
/// Shutdown internal keeper parts (server, state machine, log storage, etc)
void shutdown();

View File

@ -0,0 +1,91 @@
#include "KeeperReconfiguration.h"
#include <unordered_set>
#include <base/find_symbols.h>
#include <fmt/format.h>
namespace DB
{
ClusterUpdateActions joiningToClusterUpdates(const ClusterConfigPtr & cfg, std::string_view joining)
{
ClusterUpdateActions out;
std::unordered_set<String> endpoints;
for (const auto & server : cfg->get_servers())
endpoints.emplace(server->get_endpoint());
// We can either add new servers or change the weight of existing ones.
// It makes no sense to have a server in _joining_ that is identical to an existing one, including
// its weight, so such requests are declined.
for (const RaftServerConfig & update : parseRaftServers(joining))
if (auto server_ptr = cfg->get_server(update.id))
{
if (update.endpoint != server_ptr->get_endpoint() || update.learner != server_ptr->is_learner()
|| update.priority == server_ptr->get_priority())
return {}; // can't change server endpoint/type due to NuRaft API limitations
out.emplace_back(UpdateRaftServerPriority{.id = update.id, .priority = update.priority});
}
else if (endpoints.contains(update.endpoint))
return {};
else
out.emplace_back(AddRaftServer{update});
return out;
}
ClusterUpdateActions leavingToClusterUpdates(const ClusterConfigPtr & cfg, std::string_view leaving)
{
std::vector<std::string_view> leaving_arr;
splitInto<','>(leaving_arr, leaving);
if (leaving_arr.size() >= cfg->get_servers().size())
return {};
std::unordered_set<int32_t> remove_ids;
ClusterUpdateActions out;
for (std::string_view leaving_server : leaving_arr)
{
int32_t id;
if (!tryParse(id, leaving_server))
return {};
if (remove_ids.contains(id))
continue;
if (auto ptr = cfg->get_server(id))
out.emplace_back(RemoveRaftServer{.id = id});
else
return {};
remove_ids.emplace(id);
}
return out;
}
String serializeClusterConfig(const ClusterConfigPtr & cfg, const ClusterUpdateActions & updates)
{
RaftServers new_config;
std::unordered_set<int32_t> remove_update_ids;
for (const auto & update : updates)
{
if (const auto * add = std::get_if<AddRaftServer>(&update))
new_config.emplace_back(*add);
else if (const auto * remove = std::get_if<RemoveRaftServer>(&update))
remove_update_ids.insert(remove->id);
else if (const auto * priority = std::get_if<UpdateRaftServerPriority>(&update))
{
remove_update_ids.insert(priority->id);
new_config.emplace_back(RaftServerConfig{*cfg->get_server(priority->id)});
}
else
UNREACHABLE();
}
for (const auto & item : cfg->get_servers())
if (!remove_update_ids.contains(item->get_id()))
new_config.emplace_back(RaftServerConfig{*item});
return fmt::format("{}", fmt::join(new_config.begin(), new_config.end(), "\n"));
}
}
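For reference, the 'leaving' handling above reduces to: split a comma-separated id list, reject anything that does not parse as an integer, and ignore duplicate ids. Here is a self-contained sketch of just that parsing step using only the standard library; the checks against the current cluster config (unknown servers, removing the whole cluster) are deliberately omitted.

#include <charconv>
#include <cstdint>
#include <optional>
#include <string_view>
#include <unordered_set>
#include <vector>

/// Parse a comma-separated list of server ids such as "1,4,4,9".
/// Returns std::nullopt on any malformed id; duplicates are collapsed.
std::optional<std::vector<int32_t>> parseLeavingIds(std::string_view leaving)
{
    std::vector<int32_t> ids;
    std::unordered_set<int32_t> seen;
    while (!leaving.empty())
    {
        const size_t comma = leaving.find(',');
        const std::string_view token = leaving.substr(0, comma);

        int32_t id = 0;
        const auto [ptr, ec] = std::from_chars(token.data(), token.data() + token.size(), id);
        if (ec != std::errc{} || ptr != token.data() + token.size())
            return std::nullopt;

        if (seen.insert(id).second)
            ids.push_back(id);

        leaving = (comma == std::string_view::npos) ? std::string_view{} : leaving.substr(comma + 1);
    }
    return ids;
}

int main()
{
    return parseLeavingIds("1,4,4,9").value().size() == 3 ? 0 : 1;  // duplicate "4" is collapsed
}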

View File

@ -0,0 +1,10 @@
#pragma once
#include <Coordination/KeeperSnapshotManager.h>
#include <Coordination/RaftServerConfig.h>
namespace DB
{
ClusterUpdateActions joiningToClusterUpdates(const ClusterConfigPtr & cfg, std::string_view joining);
ClusterUpdateActions leavingToClusterUpdates(const ClusterConfigPtr & cfg, std::string_view leaving);
String serializeClusterConfig(const ClusterConfigPtr & cfg, const ClusterUpdateActions & updates = {});
}

View File

@ -27,6 +27,7 @@
#include <Common/Stopwatch.h>
#include <Common/getMultipleKeysFromConfig.h>
#include <Disks/DiskLocal.h>
#include <fmt/chrono.h>
namespace DB
{
@ -40,6 +41,8 @@ namespace ErrorCodes
extern const int INVALID_CONFIG_PARAMETER;
}
using namespace std::chrono_literals;
namespace
{
@ -118,6 +121,7 @@ KeeperServer::KeeperServer(
, is_recovering(config.getBool("keeper_server.force_recovery", false))
, keeper_context{std::move(keeper_context_)}
, create_snapshot_on_exit(config.getBool("keeper_server.create_snapshot_on_exit", true))
, enable_reconfiguration(config.getBool("keeper_server.enable_reconfiguration", false))
{
if (coordination_settings->quorum_reads)
LOG_WARNING(log, "Quorum reads enabled, Keeper will work slower.");
@ -450,7 +454,7 @@ void KeeperServer::shutdownRaftServer()
size_t count = 0;
while (asio_service->get_active_workers() != 0 && count < timeout * 100)
{
std::this_thread::sleep_for(std::chrono::milliseconds(10));
std::this_thread::sleep_for(10ms);
count++;
}
}
@ -715,10 +719,12 @@ nuraft::cb_func::ReturnCode KeeperServer::callbackFunc(nuraft::cb_func::Type typ
if (next_index < last_commited || next_index - last_commited <= 1)
commited_store = true;
auto set_initialized = [this]()
auto set_initialized = [this]
{
std::lock_guard lock(initialized_mutex);
initialized_flag = true;
{
std::lock_guard lock(initialized_mutex);
initialized_flag = true;
}
initialized_cv.notify_all();
};
@ -783,9 +789,45 @@ std::vector<int64_t> KeeperServer::getDeadSessions()
return state_machine->getDeadSessions();
}
ConfigUpdateActions KeeperServer::getConfigurationDiff(const Poco::Util::AbstractConfiguration & config)
bool KeeperServer::applyConfigUpdate(const ClusterUpdateAction & action)
{
auto diff = state_manager->getConfigurationDiff(config);
std::lock_guard _{server_write_mutex};
if (const auto * add = std::get_if<AddRaftServer>(&action))
return raft_instance->get_srv_config(add->id) != nullptr
|| raft_instance->add_srv(static_cast<nuraft::srv_config>(*add))->get_accepted();
else if (const auto * remove = std::get_if<RemoveRaftServer>(&action))
{
if (remove->id == raft_instance->get_leader())
{
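/// The leader cannot be removed directly: yield leadership (or request it if we are a follower)
/// and report the update as not applied so the dispatcher retries it later.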
if (isLeader())
raft_instance->yield_leadership();
else
raft_instance->request_leadership();
return false;
}
return raft_instance->get_srv_config(remove->id) == nullptr
|| raft_instance->remove_srv(remove->id)->get_accepted();
}
else if (const auto * update = std::get_if<UpdateRaftServerPriority>(&action))
{
if (auto ptr = raft_instance->get_srv_config(update->id); ptr == nullptr)
throw Exception(ErrorCodes::RAFT_ERROR,
"Attempt to apply {} but server is not present in Raft",
action);
else if (ptr->get_priority() == update->priority)
return true;
raft_instance->set_priority(update->id, update->priority, /*broadcast on live leader*/true);
return true;
}
UNREACHABLE();
}
ClusterUpdateActions KeeperServer::getRaftConfigurationDiff(const Poco::Util::AbstractConfiguration & config)
{
auto diff = state_manager->getRaftConfigurationDiff(config);
if (!diff.empty())
{
@ -796,160 +838,103 @@ ConfigUpdateActions KeeperServer::getConfigurationDiff(const Poco::Util::Abstrac
return diff;
}
void KeeperServer::applyConfigurationUpdate(const ConfigUpdateAction & task)
void KeeperServer::applyConfigUpdateWithReconfigDisabled(const ClusterUpdateAction& action)
{
std::lock_guard lock{server_write_mutex};
if (is_recovering)
return;
std::lock_guard _{server_write_mutex};
if (is_recovering) return;
constexpr auto sleep_time = 500ms;
size_t sleep_ms = 500;
if (task.action_type == ConfigUpdateActionType::AddServer)
LOG_INFO(log, "Will try to apply {}", action);
auto applied = [&] { LOG_INFO(log, "Applied {}", action); };
auto not_leader = [&] { LOG_INFO(log, "Not leader anymore, aborting"); };
auto backoff_on_refusal = [&](size_t i)
{
LOG_INFO(log, "Update was not accepted (try {}), backing off for {}", i + 1, sleep_time * (i + 1));
std::this_thread::sleep_for(sleep_time * (i + 1));
};
if (const auto * add = std::get_if<AddRaftServer>(&action))
{
LOG_INFO(log, "Will try to add server with id {}", task.server->get_id());
bool added = false;
for (size_t i = 0; i < coordination_settings->configuration_change_tries_count && !is_recovering; ++i)
{
if (raft_instance->get_srv_config(task.server->get_id()) != nullptr)
{
LOG_INFO(log, "Server with id {} was successfully added", task.server->get_id());
added = true;
break;
}
if (raft_instance->get_srv_config(add->id) != nullptr)
return applied();
if (!isLeader())
{
LOG_INFO(log, "We are not leader anymore, will not try to add server {}", task.server->get_id());
break;
}
auto result = raft_instance->add_srv(*task.server);
if (!result->get_accepted())
LOG_INFO(
log,
"Command to add server {} was not accepted for the {} time, will sleep for {} ms and retry",
task.server->get_id(),
i + 1,
sleep_ms * (i + 1));
std::this_thread::sleep_for(std::chrono::milliseconds(sleep_ms * (i + 1)));
return not_leader();
if (!raft_instance->add_srv(static_cast<nuraft::srv_config>(*add))->get_accepted())
backoff_on_refusal(i);
}
if (!added)
throw Exception(
ErrorCodes::RAFT_ERROR,
"Configuration change to add server (id {}) was not accepted by RAFT after all {} retries",
task.server->get_id(),
coordination_settings->configuration_change_tries_count);
}
else if (task.action_type == ConfigUpdateActionType::RemoveServer)
else if (const auto * remove = std::get_if<RemoveRaftServer>(&action))
{
LOG_INFO(log, "Will try to remove server with id {}", task.server->get_id());
bool removed = false;
if (task.server->get_id() == state_manager->server_id())
if (remove->id == state_manager->server_id())
{
LOG_INFO(
log,
"Trying to remove leader node (ourself), so will yield leadership and some other node (new leader) will try remove us. "
LOG_INFO(log,
"Trying to remove leader node (ourself), so will yield leadership and some other node "
"(new leader) will try to remove us. "
"Probably you will have to run SYSTEM RELOAD CONFIG on the new leader node");
raft_instance->yield_leadership();
return;
return raft_instance->yield_leadership();
}
for (size_t i = 0; i < coordination_settings->configuration_change_tries_count && !is_recovering; ++i)
{
if (raft_instance->get_srv_config(task.server->get_id()) == nullptr)
{
LOG_INFO(log, "Server with id {} was successfully removed", task.server->get_id());
removed = true;
break;
}
if (raft_instance->get_srv_config(remove->id) == nullptr)
return applied();
if (!isLeader())
{
LOG_INFO(log, "We are not leader anymore, will not try to remove server {}", task.server->get_id());
break;
}
auto result = raft_instance->remove_srv(task.server->get_id());
if (!result->get_accepted())
LOG_INFO(
log,
"Command to remove server {} was not accepted for the {} time, will sleep for {} ms and retry",
task.server->get_id(),
i + 1,
sleep_ms * (i + 1));
std::this_thread::sleep_for(std::chrono::milliseconds(sleep_ms * (i + 1)));
return not_leader();
if (!raft_instance->remove_srv(remove->id)->get_accepted())
backoff_on_refusal(i);
}
if (!removed)
throw Exception(
ErrorCodes::RAFT_ERROR,
"Configuration change to remove server (id {}) was not accepted by RAFT after all {} retries",
task.server->get_id(),
coordination_settings->configuration_change_tries_count);
}
else if (task.action_type == ConfigUpdateActionType::UpdatePriority)
raft_instance->set_priority(task.server->get_id(), task.server->get_priority());
else
LOG_WARNING(log, "Unknown configuration update type {}", static_cast<uint64_t>(task.action_type));
else if (const auto * update = std::get_if<UpdateRaftServerPriority>(&action))
{
raft_instance->set_priority(update->id, update->priority, /*broadcast on live leader*/true);
return;
}
throw Exception(ErrorCodes::RAFT_ERROR,
"Configuration change {} was not accepted by Raft after {} retries",
action, coordination_settings->configuration_change_tries_count);
}
bool KeeperServer::waitConfigurationUpdate(const ConfigUpdateAction & task)
bool KeeperServer::waitForConfigUpdateWithReconfigDisabled(const ClusterUpdateAction& action)
{
if (is_recovering)
return false;
if (is_recovering) return false;
constexpr auto sleep_time = 500ms;
size_t sleep_ms = 500;
if (task.action_type == ConfigUpdateActionType::AddServer)
LOG_INFO(log, "Will try to wait for {}", action);
auto applied = [&] { LOG_INFO(log, "Applied {}", action); return true; };
auto became_leader = [&] { LOG_INFO(log, "Became leader, aborting"); return false; };
auto backoff = [&](size_t i) { std::this_thread::sleep_for(sleep_time * (i + 1)); };
if (const auto* add = std::get_if<AddRaftServer>(&action))
{
LOG_INFO(log, "Will try to wait server with id {} to be added", task.server->get_id());
for (size_t i = 0; i < coordination_settings->configuration_change_tries_count && !is_recovering; ++i)
{
if (raft_instance->get_srv_config(task.server->get_id()) != nullptr)
{
LOG_INFO(log, "Server with id {} was successfully added by leader", task.server->get_id());
return true;
}
if (raft_instance->get_srv_config(add->id) != nullptr)
return applied();
if (isLeader())
{
LOG_INFO(log, "We are leader now, probably we will have to add server {}", task.server->get_id());
return false;
}
std::this_thread::sleep_for(std::chrono::milliseconds(sleep_ms * (i + 1)));
return became_leader();
backoff(i);
}
return false;
}
else if (task.action_type == ConfigUpdateActionType::RemoveServer)
else if (const auto* remove = std::get_if<RemoveRaftServer>(&action))
{
LOG_INFO(log, "Will try to wait remove of server with id {}", task.server->get_id());
for (size_t i = 0; i < coordination_settings->configuration_change_tries_count && !is_recovering; ++i)
{
if (raft_instance->get_srv_config(task.server->get_id()) == nullptr)
{
LOG_INFO(log, "Server with id {} was successfully removed by leader", task.server->get_id());
return true;
}
if (raft_instance->get_srv_config(remove->id) == nullptr)
return applied();
if (isLeader())
{
LOG_INFO(log, "We are leader now, probably we will have to remove server {}", task.server->get_id());
return false;
}
std::this_thread::sleep_for(std::chrono::milliseconds(sleep_ms * (i + 1)));
return became_leader();
backoff(i);
}
return false;
}
else if (task.action_type == ConfigUpdateActionType::UpdatePriority)
else if (std::holds_alternative<UpdateRaftServerPriority>(action))
return true;
else
LOG_WARNING(log, "Unknown configuration update type {}", static_cast<uint64_t>(task.action_type));
return true;
return false;
}
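Both helpers above share the same retry shape: up to configuration_change_tries_count attempts with a linearly growing pause of sleep_time * (i + 1) between them. A compact standalone expression of that pattern (the name and signature are illustrative, not part of Keeper):

#include <chrono>
#include <cstddef>
#include <functional>
#include <thread>

/// Calls `attempt` up to `tries` times and returns true as soon as it succeeds,
/// sleeping sleep_time * (i + 1) after the i-th failed attempt.
bool retryWithLinearBackoff(size_t tries, std::chrono::milliseconds sleep_time, const std::function<bool()> & attempt)
{
    for (size_t i = 0; i < tries; ++i)
    {
        if (attempt())
            return true;
        std::this_thread::sleep_for(sleep_time * (i + 1));
    }
    return false;
}

int main()
{
    using namespace std::chrono_literals;
    int calls = 0;
    return retryWithLinearBackoff(5, 10ms, [&] { return ++calls == 3; }) ? 0 : 1;  // succeeds on the third try
}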
Keeper4LWInfo KeeperServer::getPartiallyFilled4LWInfo() const

View File

@ -10,6 +10,7 @@
#include <Poco/Util/AbstractConfiguration.h>
#include <Coordination/Keeper4LWInfo.h>
#include <Coordination/KeeperContext.h>
#include <Coordination/RaftServerConfig.h>
namespace DB
{
@ -28,9 +29,10 @@ private:
nuraft::ptr<KeeperStateManager> state_manager;
struct KeeperRaftServer;
nuraft::ptr<KeeperRaftServer> raft_instance;
nuraft::ptr<KeeperRaftServer> raft_instance; // TSA_GUARDED_BY(server_write_mutex);
nuraft::ptr<nuraft::asio_service> asio_service;
std::vector<nuraft::ptr<nuraft::rpc_listener>> asio_listeners;
// because some actions can be applied
// when we are sure that there are no requests currently being
// processed (e.g. recovery) we do all write actions
@ -65,6 +67,7 @@ private:
std::shared_ptr<KeeperContext> keeper_context;
const bool create_snapshot_on_exit;
const bool enable_reconfiguration;
public:
KeeperServer(
@ -84,6 +87,7 @@ public:
void putLocalReadRequest(const KeeperStorage::RequestForSession & request);
bool isRecovering() const { return is_recovering; }
bool reconfigEnabled() const { return enable_reconfiguration; }
/// Put batch of requests into Raft and get result of put. Responses will be set separately into
/// responses_queue.
@ -122,17 +126,12 @@ public:
int getServerID() const { return server_id; }
/// Get configuration diff between current configuration in RAFT and in XML file
ConfigUpdateActions getConfigurationDiff(const Poco::Util::AbstractConfiguration & config);
bool applyConfigUpdate(const ClusterUpdateAction& action);
/// Apply action for configuration update. Actually call raft_instance->remove_srv or raft_instance->add_srv.
/// Synchronously check for update results with retries.
void applyConfigurationUpdate(const ConfigUpdateAction & task);
/// Wait configuration update for action. Used by followers.
/// Return true if update was successfully received.
bool waitConfigurationUpdate(const ConfigUpdateAction & task);
// TODO (myrrc) these functions should be removed once "reconfig" is stabilized
void applyConfigUpdateWithReconfigDisabled(const ClusterUpdateAction& action);
bool waitForConfigUpdateWithReconfigDisabled(const ClusterUpdateAction& action);
ClusterUpdateActions getRaftConfigurationDiff(const Poco::Util::AbstractConfiguration & config);
uint64_t createSnapshot();

View File

@ -2,24 +2,27 @@
#include <future>
#include <Coordination/KeeperSnapshotManager.h>
#include <Coordination/KeeperStateMachine.h>
#include <Coordination/KeeperDispatcher.h>
#include <Coordination/KeeperStorage.h>
#include <Coordination/KeeperReconfiguration.h>
#include <Coordination/ReadBufferFromNuraftBuffer.h>
#include <Coordination/WriteBufferFromNuraftBuffer.h>
#include <IO/ReadHelpers.h>
#include <base/defines.h>
#include <base/errnoToString.h>
#include <base/move_extend.h>
#include <sys/mman.h>
#include <Common/ProfileEvents.h>
#include <Common/ZooKeeper/ZooKeeperCommon.h>
#include <Common/ZooKeeper/ZooKeeperIO.h>
#include <Common/logger_useful.h>
#include "Coordination/KeeperStorage.h"
#include <Disks/DiskLocal.h>
namespace ProfileEvents
{
extern const Event KeeperCommits;
extern const Event KeeperReconfigRequest;
extern const Event KeeperCommitsFailed;
extern const Event KeeperSnapshotCreations;
extern const Event KeeperSnapshotCreationsFailed;
@ -146,7 +149,7 @@ void assertDigest(
"Digest for nodes is not matching after {} request of type '{}'.\nExpected digest - {}, actual digest - {} (digest "
"{}). Keeper will terminate to avoid inconsistencies.\nExtra information about the request:\n{}",
committing ? "committing" : "preprocessing",
Coordination::toString(request.getOpNum()),
request.getOpNum(),
first.value,
second.value,
first.version,
@ -261,7 +264,8 @@ std::shared_ptr<KeeperStorage::RequestForSession> KeeperStateMachine::parseReque
bool KeeperStateMachine::preprocess(const KeeperStorage::RequestForSession & request_for_session)
{
if (request_for_session.request->getOpNum() == Coordination::OpNum::SessionID)
const auto op_num = request_for_session.request->getOpNum();
if (op_num == Coordination::OpNum::SessionID || op_num == Coordination::OpNum::Reconfig)
return true;
std::lock_guard lock(storage_and_responses_lock);
@ -291,14 +295,105 @@ bool KeeperStateMachine::preprocess(const KeeperStorage::RequestForSession & req
return true;
}
void KeeperStateMachine::reconfigure(const KeeperStorage::RequestForSession& request_for_session)
{
std::lock_guard _(storage_and_responses_lock);
KeeperStorage::ResponseForSession response = processReconfiguration(request_for_session);
if (!responses_queue.push(response))
{
ProfileEvents::increment(ProfileEvents::KeeperCommitsFailed);
LOG_WARNING(log,
"Failed to push response with session id {} to the queue, probably because of shutdown",
response.session_id);
}
}
KeeperStorage::ResponseForSession KeeperStateMachine::processReconfiguration(
const KeeperStorage::RequestForSession & request_for_session)
{
ProfileEvents::increment(ProfileEvents::KeeperReconfigRequest);
const auto & request = static_cast<const Coordination::ZooKeeperReconfigRequest&>(*request_for_session.request);
const int64_t session_id = request_for_session.session_id;
const int64_t zxid = request_for_session.zxid;
using enum Coordination::Error;
auto bad_request = [&](Coordination::Error code = ZBADARGUMENTS) -> KeeperStorage::ResponseForSession
{
auto res = std::make_shared<Coordination::ZooKeeperReconfigResponse>();
res->xid = request.xid;
res->zxid = zxid;
res->error = code;
return { session_id, std::move(res) };
};
if (!storage->checkACL(keeper_config_path, Coordination::ACL::Write, session_id, true))
return bad_request(ZNOAUTH);
KeeperDispatcher& dispatcher = *keeper_context->getDispatcher();
if (!dispatcher.reconfigEnabled())
return bad_request(ZUNIMPLEMENTED);
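/// Conditional reconfiguration against a specific config version is not supported;
/// only version == -1 (no version check) is accepted.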
if (request.version != -1)
return bad_request(ZBADVERSION);
const bool has_new_members = !request.new_members.empty();
const bool has_joining = !request.joining.empty();
const bool has_leaving = !request.leaving.empty();
const bool incremental_reconfig = (has_joining || has_leaving) && !has_new_members;
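/// Only incremental reconfiguration is supported: the request must list joining and/or leaving
/// servers and must not carry a full new_members membership list.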
if (!incremental_reconfig)
return bad_request();
const ClusterConfigPtr config = getClusterConfig();
if (!config) // The server may not be initialized yet
return bad_request();
ClusterUpdateActions updates;
if (has_joining)
{
if (auto join_updates = joiningToClusterUpdates(config, request.joining); !join_updates.empty())
moveExtend(updates, std::move(join_updates));
else
return bad_request();
}
if (has_leaving)
{
if (auto leave_updates = leavingToClusterUpdates(config, request.leaving); !leave_updates.empty())
moveExtend(updates, std::move(leave_updates));
else
return bad_request();
}
auto response = std::make_shared<Coordination::ZooKeeperReconfigResponse>();
response->xid = request.xid;
response->zxid = zxid;
response->error = Coordination::Error::ZOK;
response->value = serializeClusterConfig(config, updates);
dispatcher.pushClusterUpdates(std::move(updates));
return { session_id, std::move(response) };
}
nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, nuraft::buffer & data)
{
auto request_for_session = parseRequest(data, true);
if (!request_for_session->zxid)
request_for_session->zxid = log_idx;
/// Special processing of session_id request
if (request_for_session->request->getOpNum() == Coordination::OpNum::SessionID)
auto try_push = [this](const KeeperStorage::ResponseForSession& response)
{
if (!responses_queue.push(response))
{
ProfileEvents::increment(ProfileEvents::KeeperCommitsFailed);
LOG_WARNING(log,
"Failed to push response with session id {} to the queue, probably because of shutdown",
response.session_id);
}
};
const auto op_num = request_for_session->request->getOpNum();
if (op_num == Coordination::OpNum::SessionID)
{
const Coordination::ZooKeeperSessionIDRequest & session_id_request
= dynamic_cast<const Coordination::ZooKeeperSessionIDRequest &>(*request_for_session->request);
@ -309,21 +404,16 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
KeeperStorage::ResponseForSession response_for_session;
response_for_session.session_id = -1;
response_for_session.response = response;
{
std::lock_guard lock(storage_and_responses_lock);
session_id = storage->getSessionID(session_id_request.session_timeout_ms);
LOG_DEBUG(log, "Session ID response {} with timeout {}", session_id, session_id_request.session_timeout_ms);
response->session_id = session_id;
if (!responses_queue.push(response_for_session))
{
ProfileEvents::increment(ProfileEvents::KeeperCommitsFailed);
LOG_WARNING(log, "Failed to push response with session id {} to the queue, probably because of shutdown", session_id);
}
}
std::lock_guard lock(storage_and_responses_lock);
session_id = storage->getSessionID(session_id_request.session_timeout_ms);
LOG_DEBUG(log, "Session ID response {} with timeout {}", session_id, session_id_request.session_timeout_ms);
response->session_id = session_id;
try_push(response_for_session);
}
else
{
if (request_for_session->request->getOpNum() == Coordination::OpNum::Close)
if (op_num == Coordination::OpNum::Close)
{
std::lock_guard lock(request_cache_mutex);
parsed_request_cache.erase(request_for_session->session_id);
@ -333,14 +423,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
KeeperStorage::ResponsesForSessions responses_for_sessions
= storage->processRequest(request_for_session->request, request_for_session->session_id, request_for_session->zxid);
for (auto & response_for_session : responses_for_sessions)
if (!responses_queue.push(response_for_session))
{
ProfileEvents::increment(ProfileEvents::KeeperCommitsFailed);
LOG_WARNING(
log,
"Failed to push response with session id {} to the queue, probably because of shutdown",
response_for_session.session_id);
}
try_push(response_for_session);
if (keeper_context->digestEnabled() && request_for_session->digest)
assertDigest(*request_for_session->digest, storage->getNodesDigest(true), *request_for_session->request, true);
@ -390,7 +473,7 @@ bool KeeperStateMachine::apply_snapshot(nuraft::snapshot & s)
/// maybe some logs were preprocessed with log idx larger than the snapshot idx
/// we have to apply them to the new storage
storage->applyUncommittedState(*snapshot_deserialization_result.storage, s.get_last_log_idx());
storage->applyUncommittedState(*snapshot_deserialization_result.storage, snapshot_deserialization_result.storage->getZXID());
storage = std::move(snapshot_deserialization_result.storage);
latest_snapshot_meta = snapshot_deserialization_result.snapshot_meta;
cluster_config = snapshot_deserialization_result.cluster_config;
@ -782,5 +865,4 @@ void KeeperStateMachine::recalculateStorageStats()
storage->recalculateStats();
LOG_INFO(log, "Done recalculating storage stats");
}
}

View File

@ -12,7 +12,6 @@
namespace DB
{
using ResponsesQueue = ConcurrentBoundedQueue<KeeperStorage::ResponseForSession>;
using SnapshotsQueue = ConcurrentBoundedQueue<CreateSnapshotTask>;
@ -67,7 +66,9 @@ public:
// (can happen in case of exception during preprocessing)
void rollbackRequest(const KeeperStorage::RequestForSession & request_for_session, bool allow_missing);
void rollbackRequestNoLock(const KeeperStorage::RequestForSession & request_for_session, bool allow_missing);
void rollbackRequestNoLock(
const KeeperStorage::RequestForSession & request_for_session,
bool allow_missing) TSA_NO_THREAD_SAFETY_ANALYSIS;
uint64_t last_commit_index() override { return last_committed_idx; }
@ -87,8 +88,13 @@ public:
int read_logical_snp_obj(
nuraft::snapshot & s, void *& user_snp_ctx, uint64_t obj_id, nuraft::ptr<nuraft::buffer> & data_out, bool & is_last_obj) override;
/// just for test
KeeperStorage & getStorage() { return *storage; }
// This should be used only for tests or keeper-data-dumper because it violates
// TSA -- we can't acquire the lock outside of this class or return a storage under lock
// in a reasonable way.
KeeperStorage & getStorageUnsafe() TSA_NO_THREAD_SAFETY_ANALYSIS
{
return *storage;
}
void shutdownStorage();
@ -122,6 +128,9 @@ public:
uint64_t getLatestSnapshotBufSize() const;
void recalculateStorageStats();
void reconfigure(const KeeperStorage::RequestForSession& request_for_session);
private:
CommitCallback commit_callback;
/// In our state machine we always have a single snapshot which is stored
@ -133,7 +142,7 @@ private:
CoordinationSettingsPtr coordination_settings;
/// Main state machine logic
KeeperStoragePtr storage;
KeeperStoragePtr storage TSA_PT_GUARDED_BY(storage_and_responses_lock);
/// Save/Load and Serialize/Deserialize logic for snapshots.
KeeperSnapshotManager snapshot_manager;
@ -178,6 +187,9 @@ private:
KeeperContextPtr keeper_context;
KeeperSnapshotManagerS3 * snapshot_manager_s3;
};
KeeperStorage::ResponseForSession processReconfiguration(
const KeeperStorage::RequestForSession& request_for_session)
TSA_REQUIRES(storage_and_responses_lock);
};
}

View File

@ -451,7 +451,7 @@ nuraft::ptr<nuraft::srv_state> KeeperStateManager::read_state()
return nullptr;
}
ConfigUpdateActions KeeperStateManager::getConfigurationDiff(const Poco::Util::AbstractConfiguration & config) const
ClusterUpdateActions KeeperStateManager::getRaftConfigurationDiff(const Poco::Util::AbstractConfiguration & config) const
{
auto new_configuration_wrapper = parseServersConfiguration(config, true);
@ -465,14 +465,14 @@ ConfigUpdateActions KeeperStateManager::getConfigurationDiff(const Poco::Util::A
old_ids[old_server->get_id()] = old_server;
}
ConfigUpdateActions result;
ClusterUpdateActions result;
/// First of all add new servers
for (const auto & [new_id, server_config] : new_ids)
{
auto old_server_it = old_ids.find(new_id);
if (old_server_it == old_ids.end())
result.emplace_back(ConfigUpdateAction{ConfigUpdateActionType::AddServer, server_config});
result.emplace_back(AddRaftServer{RaftServerConfig{*server_config}});
else
{
const auto & old_endpoint = old_server_it->second->get_endpoint();
@ -491,10 +491,8 @@ ConfigUpdateActions KeeperStateManager::getConfigurationDiff(const Poco::Util::A
/// After that remove old ones
for (auto [old_id, server_config] : old_ids)
{
if (!new_ids.contains(old_id))
result.emplace_back(ConfigUpdateAction{ConfigUpdateActionType::RemoveServer, server_config});
}
result.emplace_back(RemoveRaftServer{old_id});
{
std::lock_guard lock(configuration_wrapper_mutex);
@ -507,7 +505,10 @@ ConfigUpdateActions KeeperStateManager::getConfigurationDiff(const Poco::Util::A
{
if (old_server->get_priority() != new_server->get_priority())
{
result.emplace_back(ConfigUpdateAction{ConfigUpdateActionType::UpdatePriority, new_server});
result.emplace_back(UpdateRaftServerPriority{
.id = new_server->get_id(),
.priority = new_server->get_priority()
});
}
break;
}

View File

@ -7,31 +7,13 @@
#include <libnuraft/nuraft.hxx>
#include <Poco/Util/AbstractConfiguration.h>
#include "Coordination/KeeperStateMachine.h"
#include "Coordination/RaftServerConfig.h"
#include <Coordination/KeeperSnapshotManager.h>
namespace DB
{
using KeeperServerConfigPtr = nuraft::ptr<nuraft::srv_config>;
/// When our configuration changes the following action types
/// can happen
enum class ConfigUpdateActionType
{
RemoveServer,
AddServer,
UpdatePriority,
};
/// Action to update configuration
struct ConfigUpdateAction
{
ConfigUpdateActionType action_type;
KeeperServerConfigPtr server;
};
using ConfigUpdateActions = std::vector<ConfigUpdateAction>;
/// Responsible for managing our and cluster configuration
class KeeperStateManager : public nuraft::state_mgr
{
@ -74,7 +56,11 @@ public:
int32_t server_id() override { return my_server_id; }
nuraft::ptr<nuraft::srv_config> get_srv_config() const { return configuration_wrapper.config; } /// NOLINT
nuraft::ptr<nuraft::srv_config> get_srv_config() const
{
std::lock_guard lk(configuration_wrapper_mutex);
return configuration_wrapper.config;
}
void system_exit(const int exit_code) override; /// NOLINT
@ -106,8 +92,8 @@ public:
/// Read all log entries in the log store from the beginning and return the latest config (with the largest log_index)
ClusterConfigPtr getLatestConfigFromLogStore() const;
/// Get configuration diff between proposed XML and current state in RAFT
ConfigUpdateActions getConfigurationDiff(const Poco::Util::AbstractConfiguration & config) const;
// TODO (myrrc) This should be removed once "reconfig" is stabilized
ClusterUpdateActions getRaftConfigurationDiff(const Poco::Util::AbstractConfiguration & config) const;
private:
const String & getOldServerStatePath();
@ -133,7 +119,7 @@ private:
std::string config_prefix;
mutable std::mutex configuration_wrapper_mutex;
KeeperConfigurationWrapper configuration_wrapper;
KeeperConfigurationWrapper configuration_wrapper TSA_GUARDED_BY(configuration_wrapper_mutex);
nuraft::ptr<KeeperLogStore> log_store;

View File

@ -20,10 +20,10 @@
#include <Coordination/pathUtils.h>
#include <Coordination/KeeperConstants.h>
#include <Coordination/KeeperReconfiguration.h>
#include <Coordination/KeeperStorage.h>
#include <Coordination/KeeperDispatcher.h>
#include <sstream>
#include <iomanip>
#include <mutex>
#include <functional>
#include <base/defines.h>
@ -53,7 +53,6 @@ namespace ErrorCodes
namespace
{
String getSHA1(const String & userdata)
{
Poco::SHA1Engine engine;
@ -1060,7 +1059,8 @@ struct KeeperStorageGetRequestProcessor final : public KeeperStorageRequestProce
ProfileEvents::increment(ProfileEvents::KeeperGetRequest);
Coordination::ZooKeeperGetRequest & request = dynamic_cast<Coordination::ZooKeeperGetRequest &>(*zk_request);
if (request.path == Coordination::keeper_api_feature_flags_path)
if (request.path == Coordination::keeper_api_feature_flags_path
|| request.path == Coordination::keeper_config_path)
return {};
if (!storage.uncommitted_state.getNode(request.path))
@ -1085,6 +1085,14 @@ struct KeeperStorageGetRequestProcessor final : public KeeperStorageRequestProce
}
}
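/// The cluster configuration node is virtual: it is served from the current Raft configuration
/// via the dispatcher instead of being looked up in the node container.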
if (request.path == Coordination::keeper_config_path)
{
response.data = serializeClusterConfig(
storage.keeper_context->getDispatcher()->getStateMachine().getClusterConfig());
response.error = Coordination::Error::ZOK;
return response_ptr;
}
auto & container = storage.container;
auto node_it = container.find(request.path);
if (node_it == container.end())
@ -1784,7 +1792,7 @@ struct KeeperStorageMultiRequestProcessor final : public KeeperStorageRequestPro
throw DB::Exception(
ErrorCodes::BAD_ARGUMENTS,
"Illegal command as part of multi ZooKeeper request {}",
Coordination::toString(sub_zk_request->getOpNum()));
sub_zk_request->getOpNum());
}
}
@ -1975,7 +1983,7 @@ public:
{
auto request_it = op_num_to_request.find(zk_request->getOpNum());
if (request_it == op_num_to_request.end())
throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Unknown operation type {}", toString(zk_request->getOpNum()));
throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Unknown operation type {}", zk_request->getOpNum());
return request_it->second(zk_request);
}

Some files were not shown because too many files have changed in this diff