Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-21 23:21:59 +00:00

Commit 69fd05cb55: Merge branch 'master' into fix-gwp-asan

.github/workflows/reusable_test.yml (vendored): 2 changes
@@ -58,7 +58,7 @@ jobs:
    env:
      GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
    strategy:
-      fail-fast: false # we always wait for entire matrix
+      fail-fast: false # we always wait for the entire matrix
      matrix:
        batch: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].batches }}
    steps:
.gitignore (vendored): 3 changes
@@ -21,6 +21,9 @@
*.stderr
*.stdout
+
+# llvm-xray logs
+xray-log.*

/docs/build
/docs/publish
/docs/edit
.gitmessage (deleted): 29 changes

@@ -1,29 +0,0 @@

### CI modificators (add a leading space to apply) ###

## To avoid a merge commit in CI:
#no_merge_commit

## To discard CI cache:
#no_ci_cache

## To not test (only style check):
#do_not_test

## To run specified set of tests in CI:
#ci_set_<SET_NAME>
#ci_set_reduced
#ci_set_arm
#ci_set_integration
#ci_set_old_analyzer

## To run specified job in CI:
#job_<JOB NAME>
#job_stateless_tests_release
#job_package_debug
#job_integration_tests_asan

## To run only specified batches for multi-batch job(s)
#batch_2
#batch_1_2_3
@@ -11,7 +11,7 @@

### <a id="245"></a> ClickHouse release 24.5, 2024-05-30

#### Backward Incompatible Change
-* Renamed "inverted indexes" to "full-text indexes" which is a less technical / more user-friendly name. This also changes internal table metadata and breaks tables with existing (experimental) inverted indexes. Please make to drop such indexes before upgrade and re-create them after upgrade. [#62884](https://github.com/ClickHouse/ClickHouse/pull/62884) ([Robert Schulze](https://github.com/rschu1ze)).
+* Renamed "inverted indexes" to "full-text indexes" which is a less technical / more user-friendly name. This also changes internal table metadata and breaks tables with existing (experimental) inverted indexes. Please make sure to drop such indexes before upgrade and re-create them after upgrade. [#62884](https://github.com/ClickHouse/ClickHouse/pull/62884) ([Robert Schulze](https://github.com/rschu1ze)).
* Usage of functions `neighbor`, `runningAccumulate`, `runningDifferenceStartingWithFirstValue`, `runningDifference` deprecated (because it is error-prone). Proper window functions should be used instead. To enable them back, set `allow_deprecated_error_prone_window_functions = 1` or set `compatibility = '24.4'` or lower. [#63132](https://github.com/ClickHouse/ClickHouse/pull/63132) ([Nikita Taranov](https://github.com/nickitat)).
* Queries from `system.columns` will work faster if there is a large number of columns, but many databases or tables are not granted for `SHOW TABLES`. Note that in previous versions, if you grant `SHOW COLUMNS` to individual columns without granting `SHOW TABLES` to the corresponding tables, the `system.columns` table will show these columns, but in a new version, it will skip the table entirely. Remove trace log messages "Access granted" and "Access denied" that slowed down queries. [#63439](https://github.com/ClickHouse/ClickHouse/pull/63439) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
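To keep using the deprecated functions listed in the entry above, the compatibility setting can be enabled per session; a minimal illustrative sketch:

```sql
SET allow_deprecated_error_prone_window_functions = 1;
-- runningDifference is one of the deprecated, error-prone functions.
SELECT runningDifference(number) FROM numbers(5);
```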
@@ -122,6 +122,8 @@ add_library(global-libs INTERFACE)

include (cmake/sanitize.cmake)

+include (cmake/instrument.cmake)
+
option(ENABLE_COLORED_BUILD "Enable colors in compiler output" ON)

set (CMAKE_COLOR_MAKEFILE ${ENABLE_COLORED_BUILD}) # works only for the makefile generator

@@ -208,8 +210,6 @@ option(OMIT_HEAVY_DEBUG_SYMBOLS
    "Do not generate debugger info for heavy modules (ClickHouse functions and dictionaries, some contrib)"
    ${OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT})

-option(USE_DEBUG_HELPERS "Enable debug helpers" ${USE_DEBUG_HELPERS})
-
option(BUILD_STANDALONE_KEEPER "Build keeper as small standalone binary" OFF)
if (NOT BUILD_STANDALONE_KEEPER)
    option(CREATE_KEEPER_SYMLINK "Create symlink for clickhouse-keeper to main server binary" ON)
@ -34,15 +34,6 @@ set (SRCS
|
||||
throwError.cpp
|
||||
)
|
||||
|
||||
if (USE_DEBUG_HELPERS)
|
||||
get_target_property(MAGIC_ENUM_INCLUDE_DIR ch_contrib::magic_enum INTERFACE_INCLUDE_DIRECTORIES)
|
||||
# CMake generator expression will do insane quoting when it encounters special character like quotes, spaces, etc.
|
||||
# Prefixing "SHELL:" will force it to use the original text.
|
||||
set (INCLUDE_DEBUG_HELPERS "SHELL:-I\"${MAGIC_ENUM_INCLUDE_DIR}\" -include \"${ClickHouse_SOURCE_DIR}/base/base/iostream_debug_helpers.h\"")
|
||||
# Use generator expression as we don't want to pollute CMAKE_CXX_FLAGS, which will interfere with CMake check system.
|
||||
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:${INCLUDE_DEBUG_HELPERS}>)
|
||||
endif ()
|
||||
|
||||
add_library (common ${SRCS})
|
||||
|
||||
if (WITH_COVERAGE)
|
||||
|
base/base/iostream_debug_helpers.h (deleted)

@@ -1,187 +0,0 @@
#pragma once

#include "demangle.h"
#include "getThreadId.h"
#include <type_traits>
#include <tuple>
#include <iomanip>
#include <iostream>
#include <magic_enum.hpp>

/** Usage:
  *
  * DUMP(variable...)
  */

template <typename Out, typename T>
Out & dumpValue(Out &, T &&);

/// Catch-all case.
template <int priority, typename Out, typename T>
requires(priority == -1)
Out & dumpImpl(Out & out, T &&) // NOLINT(cppcoreguidelines-missing-std-forward)
{
    return out << "{...}";
}

/// An object, that could be output with operator <<.
template <int priority, typename Out, typename T>
requires(priority == 0)
Out & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::declval<Out &>() << std::declval<T>())> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
    return out << x;
}

/// A pointer-like object.
template <int priority, typename Out, typename T>
requires(priority == 1
    /// Protect from the case when operator * do effectively nothing (function pointer).
    && !std::is_same_v<std::decay_t<T>, std::decay_t<decltype(*std::declval<T>())>>)
Out & dumpImpl(Out & out, T && x, std::decay_t<decltype(*std::declval<T>())> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
    if (!x)
        return out << "nullptr";
    return dumpValue(out, *x);
}

/// Container.
template <int priority, typename Out, typename T>
requires(priority == 2)
Out & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::begin(std::declval<T>()))> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
    bool first = true;
    out << "{";
    for (const auto & elem : x)
    {
        if (first)
            first = false;
        else
            out << ", ";
        dumpValue(out, elem);
    }
    return out << "}";
}


template <int priority, typename Out, typename T>
requires(priority == 3 && std::is_enum_v<std::decay_t<T>>)
Out & dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
    return out << magic_enum::enum_name(x);
}

/// string and const char * - output not as container or pointer.
template <int priority, typename Out, typename T>
requires(priority == 3 && (std::is_same_v<std::decay_t<T>, std::string> || std::is_same_v<std::decay_t<T>, const char *>))
Out & dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
    return out << std::quoted(x);
}

/// UInt8 - output as number, not char.
template <int priority, typename Out, typename T>
requires(priority == 3 && std::is_same_v<std::decay_t<T>, unsigned char>)
Out & dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
    return out << int(x);
}


/// Tuple, pair
template <size_t N, typename Out, typename T>
Out & dumpTupleImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
    if constexpr (N == 0)
        out << "{";
    else
        out << ", ";

    dumpValue(out, std::get<N>(x));

    if constexpr (N + 1 == std::tuple_size_v<std::decay_t<T>>)
        out << "}";
    else
        dumpTupleImpl<N + 1>(out, x);

    return out;
}

template <int priority, typename Out, typename T>
requires(priority == 4)
Out & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::get<0>(std::declval<T>()))> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
    return dumpTupleImpl<0>(out, x);
}


template <int priority, typename Out, typename T>
Out & dumpDispatchPriorities(Out & out, T && x, std::decay_t<decltype(dumpImpl<priority>(std::declval<Out &>(), std::declval<T>()))> *) // NOLINT(cppcoreguidelines-missing-std-forward)
{
    return dumpImpl<priority>(out, x);
}

// NOLINTNEXTLINE(google-explicit-constructor)
struct LowPriority { LowPriority(void *) {} };

template <int priority, typename Out, typename T>
Out & dumpDispatchPriorities(Out & out, T && x, LowPriority) // NOLINT(cppcoreguidelines-missing-std-forward)
{
    return dumpDispatchPriorities<priority - 1>(out, x, nullptr);
}


template <typename Out, typename T>
Out & dumpValue(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
    return dumpDispatchPriorities<5>(out, x, nullptr);
}


template <typename Out, typename T>
Out & dump(Out & out, const char * name, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
    // Dumping string literal, printing name and demangled type is irrelevant.
    if constexpr (std::is_same_v<const char *, std::decay_t<std::remove_reference_t<T>>>)
    {
        const auto name_len = strlen(name);
        const auto value_len = strlen(x);
        // `name` is the same as quoted `x`
        if (name_len > 2 && value_len > 0 && name[0] == '"' && name[name_len - 1] == '"'
            && strncmp(name + 1, x, std::min(value_len, name_len) - 1) == 0)
            return out << x;
    }

    out << demangle(typeid(x).name()) << " " << name << " = ";
    return dumpValue(out, x) << "; ";
}

#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"

#define DUMPVAR(VAR) ::dump(std::cerr, #VAR, (VAR));
#define DUMPHEAD std::cerr << __FILE__ << ':' << __LINE__ << " [ " << getThreadId() << " ] ";
#define DUMPTAIL std::cerr << '\n';

#define DUMP1(V1) do { DUMPHEAD DUMPVAR(V1) DUMPTAIL } while(0)
#define DUMP2(V1, V2) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPTAIL } while(0)
#define DUMP3(V1, V2, V3) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPVAR(V3) DUMPTAIL } while(0)
#define DUMP4(V1, V2, V3, V4) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPVAR(V3) DUMPVAR(V4) DUMPTAIL } while(0)
#define DUMP5(V1, V2, V3, V4, V5) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPVAR(V3) DUMPVAR(V4) DUMPVAR(V5) DUMPTAIL } while(0)
#define DUMP6(V1, V2, V3, V4, V5, V6) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPVAR(V3) DUMPVAR(V4) DUMPVAR(V5) DUMPVAR(V6) DUMPTAIL } while(0)
#define DUMP7(V1, V2, V3, V4, V5, V6, V7) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPVAR(V3) DUMPVAR(V4) DUMPVAR(V5) DUMPVAR(V6) DUMPVAR(V7) DUMPTAIL } while(0)
#define DUMP8(V1, V2, V3, V4, V5, V6, V7, V8) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPVAR(V3) DUMPVAR(V4) DUMPVAR(V5) DUMPVAR(V6) DUMPVAR(V7) DUMPVAR(V8) DUMPTAIL } while(0)
#define DUMP9(V1, V2, V3, V4, V5, V6, V7, V8, V9) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPVAR(V3) DUMPVAR(V4) DUMPVAR(V5) DUMPVAR(V6) DUMPVAR(V7) DUMPVAR(V8) DUMPVAR(V9) DUMPTAIL } while(0)

/// https://groups.google.com/forum/#!searchin/kona-dev/variadic$20macro%7Csort:date/kona-dev/XMA-lDOqtlI/GCzdfZsD41sJ

#define VA_NUM_ARGS_IMPL(x1, x2, x3, x4, x5, x6, x7, x8, x9, N, ...) N
#define VA_NUM_ARGS(...) VA_NUM_ARGS_IMPL(__VA_ARGS__, 9, 8, 7, 6, 5, 4, 3, 2, 1)

#define MAKE_VAR_MACRO_IMPL_CONCAT(PREFIX, NUM_ARGS) PREFIX ## NUM_ARGS
#define MAKE_VAR_MACRO_IMPL(PREFIX, NUM_ARGS) MAKE_VAR_MACRO_IMPL_CONCAT(PREFIX, NUM_ARGS)
#define MAKE_VAR_MACRO(PREFIX, ...) MAKE_VAR_MACRO_IMPL(PREFIX, VA_NUM_ARGS(__VA_ARGS__))

#define DUMP(...) MAKE_VAR_MACRO(DUMP, __VA_ARGS__)(__VA_ARGS__)
@@ -1,2 +0,0 @@
clickhouse_add_executable (dump_variable dump_variable.cpp)
target_link_libraries (dump_variable PRIVATE clickhouse_common_io)
@@ -1,70 +0,0 @@
#include <base/iostream_debug_helpers.h>

#include <iostream>
#include <memory>
#include <vector>
#include <map>
#include <set>
#include <tuple>
#include <array>
#include <utility>


struct S1;
struct S2 {};

struct S3
{
    std::set<const char *> m1;
};

std::ostream & operator<<(std::ostream & stream, const S3 & what)
{
    stream << "S3 {m1=";
    dumpValue(stream, what.m1) << "}";
    return stream;
}

int main(int, char **)
{
    int x = 1;

    DUMP(x);
    DUMP(x, 1, &x);

    DUMP(std::make_unique<int>(1));
    DUMP(std::make_shared<int>(1));

    std::vector<int> vec{1, 2, 3};
    DUMP(vec);

    auto pair = std::make_pair(1, 2);
    DUMP(pair);

    auto tuple = std::make_tuple(1, 2, 3);
    DUMP(tuple);

    std::map<int, std::string> map{{1, "hello"}, {2, "world"}};
    DUMP(map);

    std::initializer_list<const char *> list{"hello", "world"};
    DUMP(list);

    std::array<const char *, 2> arr{{"hello", "world"}};
    DUMP(arr);

    //DUMP([]{});

    S1 * s = nullptr;
    DUMP(s);

    DUMP(S2());

    std::set<const char *> variants = {"hello", "world"};
    DUMP(variants);

    S3 s3 {{"hello", "world"}};
    DUMP(s3);

    return 0;
}
cmake/instrument.cmake (new file): 20 changes

@@ -0,0 +1,20 @@
# https://llvm.org/docs/XRay.html

option (ENABLE_XRAY "Enable LLVM XRay" OFF)

if (NOT ENABLE_XRAY)
    message (STATUS "Not using LLVM XRay")
    return()
endif()

if (NOT (ARCH_AMD64 AND (OS_LINUX OR OS_FREEBSD)))
    message (STATUS "Not using LLVM XRay, only amd64 Linux or FreeBSD are supported")
    return()
endif()

# The target clang must support xray, otherwise it should error on invalid option
set (XRAY_FLAGS "-fxray-instrument -DUSE_XRAY")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${XRAY_FLAGS}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${XRAY_FLAGS}")

message (STATUS "Using LLVM XRay")
@@ -15,7 +15,6 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
    file \
    libxml2-utils \
    moreutils \
-    python3-fuzzywuzzy \
    python3-pip \
    yamllint \
    locales \

@@ -23,8 +22,18 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

# python-magic is the same version as in Ubuntu 22.04
-RUN pip3 install black==23.12.0 boto3 codespell==2.2.1 mypy==1.8.0 PyGithub unidiff pylint==3.1.0 \
-    python-magic==0.4.24 requests types-requests \
+RUN pip3 install \
+    PyGithub \
+    black==23.12.0 \
+    boto3 \
+    codespell==2.2.1 \
+    mypy==1.8.0 \
+    pylint==3.1.0 \
+    python-magic==0.4.24 \
+    requests \
+    thefuzz \
+    types-requests \
+    unidiff \
    && rm -rf /root/.cache/pip

RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
@@ -71,7 +71,7 @@ If it fails, fix the style errors following the [code style guide](style.md).

```sh
mkdir -p /tmp/test_output
# running all checks
-python3 tests/ci/style_check.py --no-push
+docker run --rm --volume=.:/ClickHouse --volume=/tmp/test_output:/test_output -u $(id -u ${USER}):$(id -g ${USER}) --cap-add=SYS_PTRACE clickhouse/style-test

# run specified check script (e.g.: ./check-mypy)
docker run --rm --volume=.:/ClickHouse --volume=/tmp/test_output:/test_output -u $(id -u ${USER}):$(id -g ${USER}) --cap-add=SYS_PTRACE --entrypoint= -w/ClickHouse/utils/check-style clickhouse/style-test ./check-mypy
@@ -37,7 +37,7 @@ ways, for example with respect to their DDL/DQL syntax or performance/compression

To use full-text indexes, first enable them in the configuration:

```sql
-SET allow_experimental_inverted_index = true;
+SET allow_experimental_full_text_index = true;
```

A full-text index can be defined on a string column using the following syntax
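A minimal sketch of such a definition (the table, column, and index names here are illustrative; the `full_text` index type follows the rename described in the changelog above):

```sql
SET allow_experimental_full_text_index = true;

-- Illustrative: defines a full-text index on string column `s`.
CREATE TABLE tab
(
    id UInt64,
    s String,
    INDEX inv_idx(s) TYPE full_text(0) GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY id;
```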
@@ -178,6 +178,10 @@ Additional parameters that control the behavior of the `MergeTree` (optional):

`max_partitions_to_read` — Limits the maximum number of partitions that can be accessed in one query. You can also specify setting [max_partitions_to_read](/docs/en/operations/settings/merge-tree-settings.md/#max-partitions-to-read) in the global setting.

+#### allow_experimental_optimized_row_order
+
+`allow_experimental_optimized_row_order` - Experimental. Enables the optimization of the row order during inserts to improve the compressibility of the data for compression codecs (e.g. LZ4). Analyzes and reorders the data, and thus increases the CPU overhead of inserts.
+
**Example of Sections Setting**

``` sql
@@ -885,3 +885,47 @@ Default value: false

**See Also**

- [exclude_deleted_rows_for_part_size_in_merge](#exclude_deleted_rows_for_part_size_in_merge) setting

### allow_experimental_optimized_row_order

Controls if the row order should be optimized during inserts to improve the compressibility of the newly inserted table part.

MergeTree tables are (optionally) compressed using [compression codecs](../../sql-reference/statements/create/table.md#column_compression_codec).
Generic compression codecs such as LZ4 and ZSTD achieve maximum compression rates if the data exposes patterns.
Long runs of the same value typically compress very well.

If this setting is enabled, ClickHouse attempts to store the data in newly inserted parts in a row order that minimizes the number of equal-value runs across the columns of the new table part.
In other words, a small number of equal-value runs means that individual runs are long and compress well.

Finding the optimal row order is computationally infeasible (NP hard).
Therefore, ClickHouse uses a heuristic to quickly find a row order which still improves compression rates over the original row order.

<details markdown="1">

<summary>Heuristics for finding a row order</summary>

It is generally possible to shuffle the rows of a table (or table part) freely as SQL considers the same table (table part) in different row order equivalent.

This freedom of shuffling rows is restricted when a primary key is defined for the table.
In ClickHouse, a primary key `C1, C2, ..., CN` enforces that the table rows are sorted by columns `C1`, `C2`, ... `Cn` ([clustered index](https://en.wikipedia.org/wiki/Database_index#Clustered)).
As a result, rows can only be shuffled within "equivalence classes" of rows, i.e. rows which have the same values in their primary key columns.
The intuition is that primary keys with high cardinality, e.g. primary keys involving a `DateTime64` timestamp column, lead to many small equivalence classes.
Likewise, tables with a low-cardinality primary key create few but large equivalence classes.
A table with no primary key represents the extreme case of a single equivalence class which spans all rows.

The fewer and the larger the equivalence classes are, the higher the degree of freedom when re-shuffling rows.

The heuristic applied to find the best row order within each equivalence class was suggested by D. Lemire and O. Kaser in [Reordering columns for smaller indexes](https://doi.org/10.1016/j.ins.2011.02.002) and is based on sorting the rows within each equivalence class by ascending cardinality of the non-primary-key columns.
It performs three steps:
1. Find all equivalence classes based on the row values in primary key columns.
2. For each equivalence class, calculate (usually estimate) the cardinalities of the non-primary-key columns.
3. For each equivalence class, sort the rows in order of ascending non-primary-key column cardinality.

</details>

If enabled, insert operations incur additional CPU costs to analyze and optimize the row order of the new data.
INSERTs are expected to take 30-50% longer depending on the data characteristics.
Compression rates of LZ4 or ZSTD improve on average by 20-40%.

This setting works best for tables with no primary key or a low-cardinality primary key, i.e. tables with only a few distinct primary key values.
High-cardinality primary keys, e.g. involving timestamp columns of type `DateTime64`, are not expected to benefit from this setting.
@@ -5,10 +5,57 @@ sidebar_position: 107

# corr

-Syntax: `corr(x, y)`
+Calculates the [Pearson correlation coefficient](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient):

-Calculates the Pearson correlation coefficient: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`.
+$$
+\frac{\Sigma{(x - \bar{x})(y - \bar{y})}}{\sqrt{\Sigma{(x - \bar{x})^2} * \Sigma{(y - \bar{y})^2}}}
+$$

:::note
-This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `corrStable` function. It works slower but provides a lower computational error.
+This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the [`corrStable`](../reference/corrstable.md) function. It is slower but provides a more accurate result.
:::

+**Syntax**
+
+```sql
+corr(x, y)
+```
+
+**Arguments**
+
+- `x` — first variable. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).
+- `y` — second variable. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).
+
+**Returned Value**
+
+- The Pearson correlation coefficient. [Float64](../../data-types/float.md).
+
+**Example**
+
+Query:
+
+```sql
+DROP TABLE IF EXISTS series;
+CREATE TABLE series
+(
+    i UInt32,
+    x_value Float64,
+    y_value Float64
+)
+ENGINE = Memory;
+INSERT INTO series(i, x_value, y_value) VALUES (1, 5.6, -4.4),(2, -9.6, 3),(3, -1.3, -4),(4, 5.3, 9.7),(5, 4.4, 0.037),(6, -8.6, -7.8),(7, 5.1, 9.3),(8, 7.9, -3.6),(9, -8.2, 0.62),(10, -3, 7.3);
+```
+
+```sql
+SELECT corr(x_value, y_value)
+FROM series;
+```
+
+Result:
+
+```response
+┌─corr(x_value, y_value)─┐
+│     0.1730265755453256 │
+└────────────────────────┘
+```
@@ -0,0 +1,55 @@
---
slug: /en/sql-reference/aggregate-functions/reference/corrmatrix
sidebar_position: 108
---

# corrMatrix

Computes the correlation matrix over N variables.

**Syntax**

```sql
corrMatrix(x[, ...])
```

**Arguments**

- `x` — a variable number of parameters. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).

**Returned value**

- Correlation matrix. [Array](../../data-types/array.md)([Array](../../data-types/array.md)([Float64](../../data-types/float.md))).

**Example**

Query:

```sql
DROP TABLE IF EXISTS test;
CREATE TABLE test
(
    a UInt32,
    b Float64,
    c Float64,
    d Float64
)
ENGINE = Memory;
INSERT INTO test(a, b, c, d) VALUES (1, 5.6, -4.4, 2.6), (2, -9.6, 3, 3.3), (3, -1.3, -4, 1.2), (4, 5.3, 9.7, 2.3), (5, 4.4, 0.037, 1.222), (6, -8.6, -7.8, 2.1233), (7, 5.1, 9.3, 8.1222), (8, 7.9, -3.6, 9.837), (9, -8.2, 0.62, 8.43555), (10, -3, 7.3, 6.762);
```

```sql
SELECT arrayMap(x -> round(x, 3), arrayJoin(corrMatrix(a, b, c, d))) AS corrMatrix
FROM test;
```

Result:

```response
   ┌─corrMatrix─────────────┐
1. │ [1,-0.096,0.243,0.746] │
2. │ [-0.096,1,0.173,0.106] │
3. │ [0.243,0.173,1,0.258]  │
4. │ [0.746,0.106,0.258,1]  │
   └────────────────────────┘
```
@@ -0,0 +1,58 @@
---
slug: /en/sql-reference/aggregate-functions/reference/corrstable
sidebar_position: 107
---

# corrStable

Calculates the [Pearson correlation coefficient](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient):

$$
\frac{\Sigma{(x - \bar{x})(y - \bar{y})}}{\sqrt{\Sigma{(x - \bar{x})^2} * \Sigma{(y - \bar{y})^2}}}
$$

Similar to the [`corr`](../reference/corr.md) function, but uses a numerically stable algorithm. As a result, `corrStable` is slower than `corr` but produces a more accurate result.

**Syntax**

```sql
corrStable(x, y)
```

**Arguments**

- `x` — first variable. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).
- `y` — second variable. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).

**Returned Value**

- The Pearson correlation coefficient. [Float64](../../data-types/float.md).

**Example**

Query:

```sql
DROP TABLE IF EXISTS series;
CREATE TABLE series
(
    i UInt32,
    x_value Float64,
    y_value Float64
)
ENGINE = Memory;
INSERT INTO series(i, x_value, y_value) VALUES (1, 5.6, -4.4),(2, -9.6, 3),(3, -1.3, -4),(4, 5.3, 9.7),(5, 4.4, 0.037),(6, -8.6, -7.8),(7, 5.1, 9.3),(8, 7.9, -3.6),(9, -8.2, 0.62),(10, -3, 7.3);
```

```sql
SELECT corrStable(x_value, y_value)
FROM series;
```

Result:

```response
┌─corrStable(x_value, y_value)─┐
│          0.17302657554532558 │
└──────────────────────────────┘
```
@@ -1,14 +1,54 @@
---
slug: /en/sql-reference/aggregate-functions/reference/covarpop
-sidebar_position: 36
+sidebar_position: 37
---

# covarPop

-Syntax: `covarPop(x, y)`
+Calculates the population covariance:

-Calculates the value of `Σ((x - x̅)(y - y̅)) / n`.
+$$
+\frac{\Sigma{(x - \bar{x})(y - \bar{y})}}{n}
+$$

:::note
-This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `covarPopStable` function. It works slower but provides a lower computational error.
+This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the [`covarPopStable`](../reference/covarpopstable.md) function. It works slower but provides a lower computational error.
:::

+**Syntax**
+
+```sql
+covarPop(x, y)
+```
+
+**Arguments**
+
+- `x` — first variable. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).
+- `y` — second variable. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).
+
+**Returned Value**
+
+- The population covariance between `x` and `y`. [Float64](../../data-types/float.md).
+
+**Example**
+
+Query:
+
+```sql
+DROP TABLE IF EXISTS series;
+CREATE TABLE series(i UInt32, x_value Float64, y_value Float64) ENGINE = Memory;
+INSERT INTO series(i, x_value, y_value) VALUES (1, 5.6, -4.4),(2, -9.6, 3),(3, -1.3, -4),(4, 5.3, 9.7),(5, 4.4, 0.037),(6, -8.6, -7.8),(7, 5.1, 9.3),(8, 7.9, -3.6),(9, -8.2, 0.62),(10, -3, 7.3);
+```
+
+```sql
+SELECT covarPop(x_value, y_value)
+FROM series;
+```
+
+Result:
+
+```reference
+┌─covarPop(x_value, y_value)─┐
+│                   6.485648 │
+└────────────────────────────┘
+```
@@ -0,0 +1,55 @@
---
slug: /en/sql-reference/aggregate-functions/reference/covarpopmatrix
sidebar_position: 36
---

# covarPopMatrix

Returns the population covariance matrix over N variables.

**Syntax**

```sql
covarPopMatrix(x[, ...])
```

**Arguments**

- `x` — a variable number of parameters. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).

**Returned Value**

- Population covariance matrix. [Array](../../data-types/array.md)([Array](../../data-types/array.md)([Float64](../../data-types/float.md))).

**Example**

Query:

```sql
DROP TABLE IF EXISTS test;
CREATE TABLE test
(
    a UInt32,
    b Float64,
    c Float64,
    d Float64
)
ENGINE = Memory;
INSERT INTO test(a, b, c, d) VALUES (1, 5.6, -4.4, 2.6), (2, -9.6, 3, 3.3), (3, -1.3, -4, 1.2), (4, 5.3, 9.7, 2.3), (5, 4.4, 0.037, 1.222), (6, -8.6, -7.8, 2.1233), (7, 5.1, 9.3, 8.1222), (8, 7.9, -3.6, 9.837), (9, -8.2, 0.62, 8.43555), (10, -3, 7.3, 6.762);
```

```sql
SELECT arrayMap(x -> round(x, 3), arrayJoin(covarPopMatrix(a, b, c, d))) AS covarPopMatrix
FROM test;
```

Result:

```reference
   ┌─covarPopMatrix────────────┐
1. │ [8.25,-1.76,4.08,6.748]   │
2. │ [-1.76,41.07,6.486,2.132] │
3. │ [4.08,6.486,34.21,4.755]  │
4. │ [6.748,2.132,4.755,9.93]  │
   └───────────────────────────┘
```
@@ -0,0 +1,60 @@
---
slug: /en/sql-reference/aggregate-functions/reference/covarpopstable
sidebar_position: 36
---

# covarPopStable

Calculates the value of the population covariance:

$$
\frac{\Sigma{(x - \bar{x})(y - \bar{y})}}{n}
$$

It is similar to the [covarPop](../reference/covarpop.md) function, but uses a numerically stable algorithm. As a result, `covarPopStable` is slower than `covarPop` but produces a more accurate result.

**Syntax**

```sql
covarPopStable(x, y)
```

**Arguments**

- `x` — first variable. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).
- `y` — second variable. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).

**Returned Value**

- The population covariance between `x` and `y`. [Float64](../../data-types/float.md).

**Example**

Query:

```sql
DROP TABLE IF EXISTS series;
CREATE TABLE series(i UInt32, x_value Float64, y_value Float64) ENGINE = Memory;
INSERT INTO series(i, x_value, y_value) VALUES (1, 5.6,-4.4),(2, -9.6,3),(3, -1.3,-4),(4, 5.3,9.7),(5, 4.4,0.037),(6, -8.6,-7.8),(7, 5.1,9.3),(8, 7.9,-3.6),(9, -8.2,0.62),(10, -3,7.3);
```

```sql
SELECT covarPopStable(x_value, y_value)
FROM
(
    SELECT
        x_value,
        y_value
    FROM series
);
```

Result:

```reference
┌─covarPopStable(x_value, y_value)─┐
│                         6.485648 │
└──────────────────────────────────┘
```
@@ -7,8 +7,74 @@ sidebar_position: 37

Calculates the value of `Σ((x - x̅)(y - y̅)) / (n - 1)`.

-Returns Float64. When `n <= 1`, returns `nan`.
-
:::note
-This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the `covarSampStable` function. It works slower but provides a lower computational error.
+This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in calculations, use the [`covarSampStable`](../reference/covarsampstable.md) function. It works slower but provides a lower computational error.
:::

+**Syntax**
+
+```sql
+covarSamp(x, y)
+```
+
+**Arguments**
+
+- `x` — first variable. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).
+- `y` — second variable. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).
+
+**Returned Value**
+
+- The sample covariance between `x` and `y`. For `n <= 1`, `nan` is returned. [Float64](../../data-types/float.md).
+
+**Example**
+
+Query:
+
+```sql
+DROP TABLE IF EXISTS series;
+CREATE TABLE series(i UInt32, x_value Float64, y_value Float64) ENGINE = Memory;
+INSERT INTO series(i, x_value, y_value) VALUES (1, 5.6,-4.4),(2, -9.6,3),(3, -1.3,-4),(4, 5.3,9.7),(5, 4.4,0.037),(6, -8.6,-7.8),(7, 5.1,9.3),(8, 7.9,-3.6),(9, -8.2,0.62),(10, -3,7.3);
+```
+
+```sql
+SELECT covarSamp(x_value, y_value)
+FROM
+(
+    SELECT
+        x_value,
+        y_value
+    FROM series
+);
+```
+
+Result:
+
+```reference
+┌─covarSamp(x_value, y_value)─┐
+│           7.206275555555556 │
+└─────────────────────────────┘
+```
+
+Query:
+
+```sql
+SELECT covarSamp(x_value, y_value)
+FROM
+(
+    SELECT
+        x_value,
+        y_value
+    FROM series LIMIT 1
+);
+```
+
+Result:
+
+```reference
+┌─covarSamp(x_value, y_value)─┐
+│                         nan │
+└─────────────────────────────┘
+```
@@ -0,0 +1,57 @@
---
slug: /en/sql-reference/aggregate-functions/reference/covarsampmatrix
sidebar_position: 38
---

# covarSampMatrix

Returns the sample covariance matrix over N variables.

**Syntax**

```sql
covarSampMatrix(x[, ...])
```

**Arguments**

- `x` — a variable number of parameters. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).

**Returned Value**

- Sample covariance matrix. [Array](../../data-types/array.md)([Array](../../data-types/array.md)([Float64](../../data-types/float.md))).

**Example**

Query:

```sql
DROP TABLE IF EXISTS test;
CREATE TABLE test
(
    a UInt32,
    b Float64,
    c Float64,
    d Float64
)
ENGINE = Memory;
INSERT INTO test(a, b, c, d) VALUES (1, 5.6, -4.4, 2.6), (2, -9.6, 3, 3.3), (3, -1.3, -4, 1.2), (4, 5.3, 9.7, 2.3), (5, 4.4, 0.037, 1.222), (6, -8.6, -7.8, 2.1233), (7, 5.1, 9.3, 8.1222), (8, 7.9, -3.6, 9.837), (9, -8.2, 0.62, 8.43555), (10, -3, 7.3, 6.762);
```

```sql
SELECT arrayMap(x -> round(x, 3), arrayJoin(covarSampMatrix(a, b, c, d))) AS covarSampMatrix
FROM test;
```

Result:

```reference
   ┌─covarSampMatrix─────────────┐
1. │ [9.167,-1.956,4.534,7.498]  │
2. │ [-1.956,45.634,7.206,2.369] │
3. │ [4.534,7.206,38.011,5.283]  │
4. │ [7.498,2.369,5.283,11.034]  │
   └─────────────────────────────┘
```
@@ -0,0 +1,73 @@
---
slug: /en/sql-reference/aggregate-functions/reference/covarsampstable
sidebar_position: 37
---

# covarSampStable

Calculates the value of `Σ((x - x̅)(y - y̅)) / (n - 1)`. Similar to [covarSamp](../reference/covarsamp.md) but works slower while providing a lower computational error.

**Syntax**

```sql
covarSampStable(x, y)
```

**Arguments**

- `x` — first variable. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).
- `y` — second variable. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal](../../data-types/decimal.md).

**Returned Value**

- The sample covariance between `x` and `y`. For `n <= 1`, `inf` is returned. [Float64](../../data-types/float.md).

**Example**

Query:

```sql
DROP TABLE IF EXISTS series;
CREATE TABLE series(i UInt32, x_value Float64, y_value Float64) ENGINE = Memory;
INSERT INTO series(i, x_value, y_value) VALUES (1, 5.6,-4.4),(2, -9.6,3),(3, -1.3,-4),(4, 5.3,9.7),(5, 4.4,0.037),(6, -8.6,-7.8),(7, 5.1,9.3),(8, 7.9,-3.6),(9, -8.2,0.62),(10, -3,7.3);
```

```sql
SELECT covarSampStable(x_value, y_value)
FROM
(
    SELECT
        x_value,
        y_value
    FROM series
);
```

Result:

```reference
┌─covarSampStable(x_value, y_value)─┐
│                 7.206275555555556 │
└───────────────────────────────────┘
```

Query:

```sql
SELECT covarSampStable(x_value, y_value)
FROM
(
    SELECT
        x_value,
        y_value
    FROM series LIMIT 1
);
```

Result:

```reference
┌─covarSampStable(x_value, y_value)─┐
│                               inf │
└───────────────────────────────────┘
```
@@ -9,110 +9,116 @@ toc_hidden: true

Standard aggregate functions:

-- [count](/docs/en/sql-reference/aggregate-functions/reference/count.md)
-- [min](/docs/en/sql-reference/aggregate-functions/reference/min.md)
-- [max](/docs/en/sql-reference/aggregate-functions/reference/max.md)
-- [sum](/docs/en/sql-reference/aggregate-functions/reference/sum.md)
-- [avg](/docs/en/sql-reference/aggregate-functions/reference/avg.md)
-- [any](/docs/en/sql-reference/aggregate-functions/reference/any.md)
-- [stddevPop](/docs/en/sql-reference/aggregate-functions/reference/stddevpop.md)
-- [stddevPopStable](/docs/en/sql-reference/aggregate-functions/reference/stddevpopstable.md)
-- [stddevSamp](/docs/en/sql-reference/aggregate-functions/reference/stddevsamp.md)
-- [stddevSampStable](/docs/en/sql-reference/aggregate-functions/reference/stddevsampstable.md)
-- [varPop](/docs/en/sql-reference/aggregate-functions/reference/varpop.md)
-- [varSamp](/docs/en/sql-reference/aggregate-functions/reference/varsamp.md)
-- [corr](./corr.md)
-- [covarPop](/docs/en/sql-reference/aggregate-functions/reference/covarpop.md)
-- [covarSamp](/docs/en/sql-reference/aggregate-functions/reference/covarsamp.md)
-- [entropy](./entropy.md)
-- [exponentialMovingAverage](./exponentialmovingaverage.md)
-- [intervalLengthSum](./intervalLengthSum.md)
-- [kolmogorovSmirnovTest](./kolmogorovsmirnovtest.md)
-- [mannwhitneyutest](./mannwhitneyutest.md)
-- [median](./median.md)
-- [rankCorr](./rankCorr.md)
-- [sumKahan](./sumkahan.md)
-- [studentTTest](./studentttest.md)
-- [welchTTest](./welchttest.md)
+- [count](../reference/count.md)
+- [min](../reference/min.md)
+- [max](../reference/max.md)
+- [sum](../reference/sum.md)
+- [avg](../reference/avg.md)
+- [any](../reference/any.md)
+- [stddevPop](../reference/stddevpop.md)
+- [stddevPopStable](../reference/stddevpopstable.md)
+- [stddevSamp](../reference/stddevsamp.md)
+- [stddevSampStable](../reference/stddevsampstable.md)
+- [varPop](../reference/varpop.md)
+- [varSamp](../reference/varsamp.md)
+- [corr](../reference/corr.md)
+- [corrStable](../reference/corrstable.md)
+- [corrMatrix](../reference/corrmatrix.md)
+- [covarPop](../reference/covarpop.md)
+- [covarPopStable](../reference/covarpopstable.md)
+- [covarPopMatrix](../reference/covarpopmatrix.md)
+- [covarSamp](../reference/covarsamp.md)
+- [covarSampStable](../reference/covarsampstable.md)
+- [covarSampMatrix](../reference/covarsampmatrix.md)
+- [entropy](../reference/entropy.md)
+- [exponentialMovingAverage](../reference/exponentialmovingaverage.md)
+- [intervalLengthSum](../reference/intervalLengthSum.md)
+- [kolmogorovSmirnovTest](../reference/kolmogorovsmirnovtest.md)
+- [mannwhitneyutest](../reference/mannwhitneyutest.md)
+- [median](../reference/median.md)
+- [rankCorr](../reference/rankCorr.md)
+- [sumKahan](../reference/sumkahan.md)
+- [studentTTest](../reference/studentttest.md)
+- [welchTTest](../reference/welchttest.md)

ClickHouse-specific aggregate functions:

-- [analysisOfVariance](/docs/en/sql-reference/aggregate-functions/reference/analysis_of_variance.md)
-- [any](/docs/en/sql-reference/aggregate-functions/reference/any_respect_nulls.md)
-- [anyHeavy](/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md)
-- [anyLast](/docs/en/sql-reference/aggregate-functions/reference/anylast.md)
-- [anyLast](/docs/en/sql-reference/aggregate-functions/reference/anylast_respect_nulls.md)
-- [boundingRatio](/docs/en/sql-reference/aggregate-functions/reference/boundrat.md)
-- [first_value](/docs/en/sql-reference/aggregate-functions/reference/first_value.md)
-- [last_value](/docs/en/sql-reference/aggregate-functions/reference/last_value.md)
-- [argMin](/docs/en/sql-reference/aggregate-functions/reference/argmin.md)
-- [argMax](/docs/en/sql-reference/aggregate-functions/reference/argmax.md)
-- [avgWeighted](/docs/en/sql-reference/aggregate-functions/reference/avgweighted.md)
-- [topK](/docs/en/sql-reference/aggregate-functions/reference/topk.md)
-- [topKWeighted](/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md)
-- [deltaSum](./deltasum.md)
-- [deltaSumTimestamp](./deltasumtimestamp.md)
-- [groupArray](/docs/en/sql-reference/aggregate-functions/reference/grouparray.md)
-- [groupArrayLast](/docs/en/sql-reference/aggregate-functions/reference/grouparraylast.md)
-- [groupUniqArray](/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray.md)
-- [groupArrayInsertAt](/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md)
-- [groupArrayMovingAvg](/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md)
-- [groupArrayMovingSum](/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md)
-- [groupArraySample](./grouparraysample.md)
-- [groupArraySorted](/docs/en/sql-reference/aggregate-functions/reference/grouparraysorted.md)
-- [groupArrayIntersect](./grouparrayintersect.md)
-- [groupBitAnd](/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md)
-- [groupBitOr](/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md)
-- [groupBitXor](/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md)
-- [groupBitmap](/docs/en/sql-reference/aggregate-functions/reference/groupbitmap.md)
-- [groupBitmapAnd](/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand.md)
-- [groupBitmapOr](/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md)
-- [groupBitmapXor](/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md)
-- [sumWithOverflow](/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow.md)
-- [sumMap](/docs/en/sql-reference/aggregate-functions/reference/summap.md)
-- [sumMapWithOverflow](/docs/en/sql-reference/aggregate-functions/reference/summapwithoverflow.md)
-- [sumMapFiltered](/docs/en/sql-reference/aggregate-functions/parametric-functions.md/#summapfiltered)
-- [sumMapFilteredWithOverflow](/docs/en/sql-reference/aggregate-functions/parametric-functions.md/#summapfilteredwithoverflow)
-- [minMap](/docs/en/sql-reference/aggregate-functions/reference/minmap.md)
-- [maxMap](/docs/en/sql-reference/aggregate-functions/reference/maxmap.md)
-- [skewSamp](/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md)
-- [skewPop](/docs/en/sql-reference/aggregate-functions/reference/skewpop.md)
-- [kurtSamp](/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md)
-- [kurtPop](/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md)
-- [uniq](/docs/en/sql-reference/aggregate-functions/reference/uniq.md)
-- [uniqExact](/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md)
-- [uniqCombined](/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md)
-- [uniqCombined64](/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64.md)
-- [uniqHLL12](/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md)
-- [uniqTheta](/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md)
-- [quantile](/docs/en/sql-reference/aggregate-functions/reference/quantile.md)
-- [quantiles](/docs/en/sql-reference/aggregate-functions/reference/quantiles.md)
-- [quantileExact](/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md)
-- [quantileExactLow](/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactlow)
-- [quantileExactHigh](/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md#quantileexacthigh)
-- [quantileExactWeighted](/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted.md)
-- [quantileTiming](/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md)
-- [quantileTimingWeighted](/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md)
-- [quantileDeterministic](/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic.md)
-- [quantileTDigest](/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md)
-- [quantileTDigestWeighted](/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md)
-- [quantileBFloat16](/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md#quantilebfloat16)
-- [quantileBFloat16Weighted](/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md#quantilebfloat16weighted)
-- [quantileDD](/docs/en/sql-reference/aggregate-functions/reference/quantileddsketch.md#quantileddsketch)
-- [simpleLinearRegression](/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md)
-- [singleValueOrNull](/docs/en/sql-reference/aggregate-functions/reference/singlevalueornull.md)
-- [stochasticLinearRegression](/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md)
-- [stochasticLogisticRegression](/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md)
-- [categoricalInformationValue](/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md)
-- [contingency](./contingency.md)
-- [cramersV](./cramersv.md)
-- [cramersVBiasCorrected](./cramersvbiascorrected.md)
-- [theilsU](./theilsu.md)
-- [maxIntersections](./maxintersections.md)
-- [maxIntersectionsPosition](./maxintersectionsposition.md)
-- [meanZTest](./meanztest.md)
-- [quantileGK](./quantileGK.md)
-- [quantileInterpolatedWeighted](./quantileinterpolatedweighted.md)
-- [sparkBar](./sparkbar.md)
-- [sumCount](./sumcount.md)
-- [largestTriangleThreeBuckets](./largestTriangleThreeBuckets.md)
+- [analysisOfVariance](../reference/analysis_of_variance.md)
+- [any](../reference/any_respect_nulls.md)
+- [anyHeavy](../reference/anyheavy.md)
+- [anyLast](../reference/anylast.md)
+- [anyLast](../reference/anylast_respect_nulls.md)
+- [boundingRatio](../reference/boundrat.md)
+- [first_value](../reference/first_value.md)
+- [last_value](../reference/last_value.md)
+- [argMin](../reference/argmin.md)
+- [argMax](../reference/argmax.md)
+- [avgWeighted](../reference/avgweighted.md)
+- [topK](../reference/topk.md)
+- [topKWeighted](../reference/topkweighted.md)
+- [deltaSum](../reference/deltasum.md)
+- [deltaSumTimestamp](../reference/deltasumtimestamp.md)
+- [groupArray](../reference/grouparray.md)
+- [groupArrayLast](../reference/grouparraylast.md)
+- [groupUniqArray](../reference/groupuniqarray.md)
+- [groupArrayInsertAt](../reference/grouparrayinsertat.md)
+- [groupArrayMovingAvg](../reference/grouparraymovingavg.md)
+- [groupArrayMovingSum](../reference/grouparraymovingsum.md)
+- [groupArraySample](../reference/grouparraysample.md)
+- [groupArraySorted](../reference/grouparraysorted.md)
+- [groupArrayIntersect](../reference/grouparrayintersect.md)
+- [groupBitAnd](../reference/groupbitand.md)
+- [groupBitOr](../reference/groupbitor.md)
+- [groupBitXor](../reference/groupbitxor.md)
+- [groupBitmap](../reference/groupbitmap.md)
+- [groupBitmapAnd](../reference/groupbitmapand.md)
+- [groupBitmapOr](../reference/groupbitmapor.md)
+- [groupBitmapXor](../reference/groupbitmapxor.md)
+- [sumWithOverflow](../reference/sumwithoverflow.md)
+- [sumMap](../reference/summap.md)
+- [sumMapWithOverflow](../reference/summapwithoverflow.md)
+- [sumMapFiltered](../parametric-functions.md/#summapfiltered)
+- [sumMapFilteredWithOverflow](../parametric-functions.md/#summapfilteredwithoverflow)
+- [minMap](../reference/minmap.md)
+- [maxMap](../reference/maxmap.md)
+- [skewSamp](../reference/skewsamp.md)
+- [skewPop](../reference/skewpop.md)
+- [kurtSamp](../reference/kurtsamp.md)
+- [kurtPop](../reference/kurtpop.md)
+- [uniq](../reference/uniq.md)
+- [uniqExact](../reference/uniqexact.md)
+- [uniqCombined](../reference/uniqcombined.md)
+- [uniqCombined64](../reference/uniqcombined64.md)
+- [uniqHLL12](../reference/uniqhll12.md)
+- [uniqTheta](../reference/uniqthetasketch.md)
+- [quantile](../reference/quantile.md)
+- [quantiles](../reference/quantiles.md)
+- [quantileExact](../reference/quantileexact.md)
+- [quantileExactLow](../reference/quantileexact.md#quantileexactlow)
+- [quantileExactHigh](../reference/quantileexact.md#quantileexacthigh)
+- [quantileExactWeighted](../reference/quantileexactweighted.md)
+- [quantileTiming](../reference/quantiletiming.md)
+- [quantileTimingWeighted](../reference/quantiletimingweighted.md)
+- [quantileDeterministic](../reference/quantiledeterministic.md)
+- [quantileTDigest](../reference/quantiletdigest.md)
+- [quantileTDigestWeighted](../reference/quantiletdigestweighted.md)
+- [quantileBFloat16](../reference/quantilebfloat16.md#quantilebfloat16)
+- [quantileBFloat16Weighted](../reference/quantilebfloat16.md#quantilebfloat16weighted)
+- [quantileDD](../reference/quantileddsketch.md#quantileddsketch)
+- [simpleLinearRegression](../reference/simplelinearregression.md)
+- [singleValueOrNull](../reference/singlevalueornull.md)
+- [stochasticLinearRegression](../reference/stochasticlinearregression.md)
+- [stochasticLogisticRegression](../reference/stochasticlogisticregression.md)
+- [categoricalInformationValue](../reference/categoricalinformationvalue.md)
+- [contingency](../reference/contingency.md)
+- [cramersV](../reference/cramersv.md)
+- [cramersVBiasCorrected](../reference/cramersvbiascorrected.md)
+- [theilsU](../reference/theilsu.md)
+- [maxIntersections](../reference/maxintersections.md)
+- [maxIntersectionsPosition](../reference/maxintersectionsposition.md)
+- [meanZTest](../reference/meanztest.md)
+- [quantileGK](../reference/quantileGK.md)
+- [quantileInterpolatedWeighted](../reference/quantileinterpolatedweighted.md)
+- [sparkBar](../reference/sparkbar.md)
+- [sumCount](../reference/sumcount.md)
+- [largestTriangleThreeBuckets](../reference/largestTriangleThreeBuckets.md)
@@ -415,8 +415,8 @@ Alias: `power(x, y)`

**Arguments**

-- `x` - [(U)Int8/16/32/64](../data-types/int-uint.md) or [Float*](../data-types/float.md)
-- `y` - [(U)Int8/16/32/64](../data-types/int-uint.md) or [Float*](../data-types/float.md)
+- `x` - [(U)Int8/16/32/64](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md)
+- `y` - [(U)Int8/16/32/64](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md)

**Returned value**
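Assuming Decimal arguments are accepted as the change above states, a quick illustrative query:

```sql
-- Illustrative: pow over a Decimal base, now listed as a valid argument type.
SELECT pow(toDecimal32(2.5, 2), 2);
```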
@ -635,8 +635,8 @@ atan2(y, x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `y` — y-coordinate of the point through which the ray passes. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
|
||||
- `x` — x-coordinate of the point through which the ray passes. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
|
||||
- `y` — y-coordinate of the point through which the ray passes. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
|
||||
- `x` — x-coordinate of the point through which the ray passes. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -670,8 +670,8 @@ hypot(x, y)

**Arguments**

- `x` — The first cathetus of a right-angle triangle. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
- `y` — The second cathetus of a right-angle triangle. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
- `x` — The first cathetus of a right-angle triangle. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
- `y` — The second cathetus of a right-angle triangle. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).

**Returned value**

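A classic 3-4-5 right triangle makes an easy check (sketch):

```sql
-- hypot(3, 4) = sqrt(9 + 16) = 5, now also with Decimal catheti.
SELECT hypot(toDecimal32(3, 0), toDecimal32(4, 0)) AS h;
```
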
@ -838,6 +838,7 @@ degrees(x)

**Arguments**

- `x` — Input in radians. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
- `x` — Input in radians. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).

**Returned value**

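And for `degrees` (sketch; degrees(π) = 180):

```sql
-- Decimal input in radians, Float64 output in degrees.
SELECT degrees(toDecimal64(3.141592653589793, 15)) AS deg;
```
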
@ -735,6 +735,8 @@ LIMIT 10

Given a size (number of bytes), this function returns a readable, rounded size with a suffix (KB, MB, etc.) as a string.

The opposite operations of this function are [parseReadableSize](#parseReadableSize), [parseReadableSizeOrZero](#parseReadableSizeOrZero), and [parseReadableSizeOrNull](#parseReadableSizeOrNull).

**Syntax**

```sql
@ -766,6 +768,8 @@ Result:

Given a size (number of bytes), this function returns a readable, rounded size with a suffix (KiB, MiB, etc.) as a string.

The opposite operations of this function are [parseReadableSize](#parseReadableSize), [parseReadableSizeOrZero](#parseReadableSizeOrZero), and [parseReadableSizeOrNull](#parseReadableSizeOrNull).

**Syntax**

```sql
@ -890,6 +894,122 @@ SELECT
└────────────────────┴────────────────────────────────────────────────┘
```

## parseReadableSize

Given a string containing a byte size and `B`, `KiB`, `KB`, `MiB`, `MB`, etc. as a unit (i.e. [ISO/IEC 80000-13](https://en.wikipedia.org/wiki/ISO/IEC_80000) or decimal byte unit), this function returns the corresponding number of bytes.
If the function is unable to parse the input value, it throws an exception.

The inverse operations of this function are [formatReadableSize](#formatReadableSize) and [formatReadableDecimalSize](#formatReadableDecimalSize).

**Syntax**

```sql
parseReadableSize(x)
```

**Arguments**

- `x` : Readable size with ISO/IEC 80000-13 or decimal byte unit ([String](../../sql-reference/data-types/string.md)).

**Returned value**

- Number of bytes, rounded up to the nearest integer ([UInt64](../../sql-reference/data-types/int-uint.md)).

**Example**

```sql
SELECT
    arrayJoin(['1 B', '1 KiB', '3 MB', '5.314 KiB']) AS readable_sizes,
    parseReadableSize(readable_sizes) AS sizes;
```

```text
┌─readable_sizes─┬───sizes─┐
│ 1 B            │       1 │
│ 1 KiB          │    1024 │
│ 3 MB           │ 3000000 │
│ 5.314 KiB      │    5442 │
└────────────────┴─────────┘
```

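Since the formatting functions are inverses of `parseReadableSize`, a round trip returns the original byte count (sketch):

```sql
-- 1048576 bytes -> '1.00 MiB' -> 1048576 bytes again.
SELECT parseReadableSize(formatReadableSize(1048576)) AS bytes;
```
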
## parseReadableSizeOrNull

Given a string containing a byte size and `B`, `KiB`, `KB`, `MiB`, `MB`, etc. as a unit (i.e. [ISO/IEC 80000-13](https://en.wikipedia.org/wiki/ISO/IEC_80000) or decimal byte unit), this function returns the corresponding number of bytes.
If the function is unable to parse the input value, it returns `NULL`.

The inverse operations of this function are [formatReadableSize](#formatReadableSize) and [formatReadableDecimalSize](#formatReadableDecimalSize).

**Syntax**

```sql
parseReadableSizeOrNull(x)
```

**Arguments**

- `x` : Readable size with ISO/IEC 80000-13 or decimal byte unit ([String](../../sql-reference/data-types/string.md)).

**Returned value**

- Number of bytes, rounded up to the nearest integer, or `NULL` if unable to parse the input (Nullable([UInt64](../../sql-reference/data-types/int-uint.md))).

**Example**

```sql
SELECT
    arrayJoin(['1 B', '1 KiB', '3 MB', '5.314 KiB', 'invalid']) AS readable_sizes,
    parseReadableSizeOrNull(readable_sizes) AS sizes;
```

```text
┌─readable_sizes─┬───sizes─┐
│ 1 B            │       1 │
│ 1 KiB          │    1024 │
│ 3 MB           │ 3000000 │
│ 5.314 KiB      │    5442 │
│ invalid        │    ᴺᵁᴸᴸ │
└────────────────┴─────────┘
```

## parseReadableSizeOrZero

Given a string containing a byte size and `B`, `KiB`, `KB`, `MiB`, `MB`, etc. as a unit (i.e. [ISO/IEC 80000-13](https://en.wikipedia.org/wiki/ISO/IEC_80000) or decimal byte unit), this function returns the corresponding number of bytes. If the function is unable to parse the input value, it returns `0`.

The inverse operations of this function are [formatReadableSize](#formatReadableSize) and [formatReadableDecimalSize](#formatReadableDecimalSize).

**Syntax**

```sql
parseReadableSizeOrZero(x)
```

**Arguments**

- `x` : Readable size with ISO/IEC 80000-13 or decimal byte unit ([String](../../sql-reference/data-types/string.md)).

**Returned value**

- Number of bytes, rounded up to the nearest integer, or 0 if unable to parse the input ([UInt64](../../sql-reference/data-types/int-uint.md)).

**Example**

```sql
SELECT
    arrayJoin(['1 B', '1 KiB', '3 MB', '5.314 KiB', 'invalid']) AS readable_sizes,
    parseReadableSizeOrZero(readable_sizes) AS sizes;
```

```text
┌─readable_sizes─┬───sizes─┐
│ 1 B            │       1 │
│ 1 KiB          │    1024 │
│ 3 MB           │ 3000000 │
│ 5.314 KiB      │    5442 │
│ invalid        │       0 │
└────────────────┴─────────┘
```

## parseTimeDelta

Parse a sequence of numbers followed by something resembling a time unit.

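For example (a sketch of typical input; numbers and units may be chained):

```sql
-- 11 seconds + 22 minutes = 11 + 22 * 60 = 1331 seconds.
SELECT parseTimeDelta('11s+22min') AS seconds;
```
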
@ -337,7 +337,7 @@ Then, when executing the query `SELECT name FROM users_a WHERE length(name) < 5;

Defines storage time for values. Can be specified only for MergeTree-family tables. For the detailed description, see [TTL for columns and tables](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).

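As an illustration, a column TTL on a hypothetical MergeTree table looks like this (table and column names are made up):

```sql
-- Sketch: values of `comment` are cleared 24 hours after `ts`.
CREATE TABLE events
(
    ts DateTime,
    comment String TTL ts + INTERVAL 24 HOUR
)
ENGINE = MergeTree
ORDER BY ts;
```
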
## Column Compression Codecs
## Column Compression Codecs {#column_compression_codec}

By default, ClickHouse applies `lz4` compression in the self-managed version, and `zstd` in ClickHouse Cloud.

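The default can be overridden per column with an explicit `CODEC` clause (sketch; hypothetical table):

```sql
-- Sketch: explicit per-column codecs instead of the lz4/zstd default.
CREATE TABLE logs
(
    ts DateTime CODEC(DoubleDelta, LZ4),
    message String CODEC(ZSTD(3))
)
ENGINE = MergeTree
ORDER BY ts;
```
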
@ -304,8 +304,8 @@ atan2(y, x)

**Arguments**

- `y` — y-coordinate of the point the line is drawn to. [Float64](../../sql-reference/data-types/float.md#float32-float64).
- `x` — x-coordinate of the point the line is drawn to. [Float64](../../sql-reference/data-types/float.md#float32-float64).
- `y` — y-coordinate of the point the line is drawn to. [Float64](../../sql-reference/data-types/float.md#float32-float64) or [Decimal](../../sql-reference/data-types/decimal.md).
- `x` — x-coordinate of the point the line is drawn to. [Float64](../../sql-reference/data-types/float.md#float32-float64) or [Decimal](../../sql-reference/data-types/decimal.md).

**Returned value**

@ -341,8 +341,8 @@ hypot(x, y)

**Arguments**

- `x` — the first cathetus of a right-angle triangle. [Float64](../../sql-reference/data-types/float.md#float32-float64).
- `y` — the second cathetus of a right-angle triangle. [Float64](../../sql-reference/data-types/float.md#float32-float64).
- `x` — the first cathetus of a right-angle triangle. [Float64](../../sql-reference/data-types/float.md#float32-float64) or [Decimal](../../sql-reference/data-types/decimal.md).
- `y` — the second cathetus of a right-angle triangle. [Float64](../../sql-reference/data-types/float.md#float32-float64) or [Decimal](../../sql-reference/data-types/decimal.md).

**Returned value**

@ -22,15 +22,6 @@ include (configure_config.cmake)
configure_file (Common/config.h.in ${CONFIG_INCLUDE_PATH}/config.h)
configure_file (Common/config_version.cpp.in ${CONFIG_INCLUDE_PATH}/config_version.cpp)

if (USE_DEBUG_HELPERS)
    get_target_property(MAGIC_ENUM_INCLUDE_DIR ch_contrib::magic_enum INTERFACE_INCLUDE_DIRECTORIES)
    # CMake generator expression will do insane quoting when it encounters special characters like quotes, spaces, etc.
    # Prefixing "SHELL:" will force it to use the original text.
    set (INCLUDE_DEBUG_HELPERS "SHELL:-I\"${ClickHouse_SOURCE_DIR}/base\" -I\"${MAGIC_ENUM_INCLUDE_DIR}\" -include \"${ClickHouse_SOURCE_DIR}/src/Core/iostream_debug_helpers.h\"")
    # Use generator expression as we don't want to pollute CMAKE_CXX_FLAGS, which will interfere with CMake check system.
    add_compile_options($<$<COMPILE_LANGUAGE:CXX>:${INCLUDE_DEBUG_HELPERS}>)
endif ()

# ClickHouse developers may use platform-dependent code under some macro (e.g. `#ifdef ENABLE_MULTITARGET`).
# If turned ON, this option defines such macro.
# See `src/Common/TargetSpecific.h`

@ -828,7 +828,7 @@ ColumnPtr ColumnArray::filterTuple(const Filter & filt, ssize_t result_size_hint
    size_t tuple_size = tuple.tupleSize();

    if (tuple_size == 0)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty tuple");
        return filterGeneric(filt, result_size_hint);

    Columns temporary_arrays(tuple_size);
    for (size_t i = 0; i < tuple_size; ++i)
@ -1265,7 +1265,7 @@ ColumnPtr ColumnArray::replicateTuple(const Offsets & replicate_offsets) const
    size_t tuple_size = tuple.tupleSize();

    if (tuple_size == 0)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty tuple");
        return replicateGeneric(replicate_offsets);

    Columns temporary_arrays(tuple_size);
    for (size_t i = 0; i < tuple_size; ++i)

@ -1,5 +1,6 @@
#include <Common/Arena.h>
#include <Common/Exception.h>
#include <Common/HashTable/HashSet.h>
#include <Common/HashTable/Hash.h>
#include <Common/RadixSort.h>
#include <Common/SipHash.h>
@ -264,6 +265,23 @@ void ColumnDecimal<T>::updatePermutation(IColumn::PermutationSortDirection direc
    }
}

template <is_decimal T>
size_t ColumnDecimal<T>::estimateCardinalityInPermutedRange(const IColumn::Permutation & permutation, const EqualRange & equal_range) const
{
    const size_t range_size = equal_range.size();
    if (range_size <= 1)
        return range_size;

    /// TODO use sampling if the range is too large (e.g. 16k elements, but configurable)
    HashSet<T> elements;
    for (size_t i = equal_range.from; i < equal_range.to; ++i)
    {
        size_t permuted_i = permutation[i];
        elements.insert(data[permuted_i]);
    }
    return elements.size();
}

template <is_decimal T>
ColumnPtr ColumnDecimal<T>::permute(const IColumn::Permutation & perm, size_t limit) const
{

@ -97,6 +97,8 @@ public:
        size_t limit, int nan_direction_hint, IColumn::Permutation & res) const override;
    void updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
        size_t limit, int, IColumn::Permutation & res, EqualRanges& equal_ranges) const override;
    size_t estimateCardinalityInPermutedRange(const IColumn::Permutation & permutation, const EqualRange & equal_range) const override;

    MutableColumnPtr cloneResized(size_t size) const override;

@ -5,6 +5,7 @@
#include <IO/WriteHelpers.h>
#include <Common/Arena.h>
#include <Common/HashTable/Hash.h>
#include <Common/HashTable/StringHashSet.h>
#include <Common/SipHash.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
@ -200,6 +201,24 @@ void ColumnFixedString::updatePermutation(IColumn::PermutationSortDirection dire
    updatePermutationImpl(limit, res, equal_ranges, ComparatorDescendingStable(*this), comparator_equal, DefaultSort(), DefaultPartialSort());
}

size_t ColumnFixedString::estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const
{
    const size_t range_size = equal_range.size();
    if (range_size <= 1)
        return range_size;

    /// TODO use sampling if the range is too large (e.g. 16k elements, but configurable)
    StringHashSet elements;
    bool inserted = false;
    for (size_t i = equal_range.from; i < equal_range.to; ++i)
    {
        size_t permuted_i = permutation[i];
        StringRef value = getDataAt(permuted_i);
        elements.emplace(value, inserted);
    }
    return elements.size();
}

void ColumnFixedString::insertRangeFrom(const IColumn & src, size_t start, size_t length)
{
    const ColumnFixedString & src_concrete = assert_cast<const ColumnFixedString &>(src);

@ -142,6 +142,8 @@ public:
    void updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
        size_t limit, int nan_direction_hint, Permutation & res, EqualRanges & equal_ranges) const override;

    size_t estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const override;

    void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;

    ColumnPtr filter(const IColumn::Filter & filt, ssize_t result_size_hint) const override;

@ -3,9 +3,12 @@
#include <Columns/ColumnString.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/NumberTraits.h>
#include <Common/HashTable/HashSet.h>
#include <Common/HashTable/HashMap.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
#include "Storages/IndicesDescription.h"
#include "base/types.h"
#include <base/sort.h>
#include <base/scope_guard.h>

@ -486,6 +489,21 @@ void ColumnLowCardinality::updatePermutationWithCollation(const Collator & colla
    updatePermutationImpl(limit, res, equal_ranges, comparator, equal_comparator, DefaultSort(), DefaultPartialSort());
}

size_t ColumnLowCardinality::estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const
{
    const size_t range_size = equal_range.size();
    if (range_size <= 1)
        return range_size;

    HashSet<UInt64> elements;
    for (size_t i = equal_range.from; i < equal_range.to; ++i)
    {
        UInt64 index = getIndexes().getUInt(permutation[i]);
        elements.insert(index);
    }
    return elements.size();
}

std::vector<MutableColumnPtr> ColumnLowCardinality::scatter(ColumnIndex num_columns, const Selector & selector) const
{
    auto columns = getIndexes().scatter(num_columns, selector);

@ -145,6 +145,8 @@ public:
    void updatePermutationWithCollation(const Collator & collator, IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
        size_t limit, int nan_direction_hint, Permutation & res, EqualRanges& equal_ranges) const override;

    size_t estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const override;

    ColumnPtr replicate(const Offsets & offsets) const override
    {
        return ColumnLowCardinality::create(dictionary.getColumnUniquePtr(), getIndexes().replicate(offsets));

@ -1,4 +1,5 @@
#include <Common/Arena.h>
#include <Common/HashTable/StringHashSet.h>
#include <Common/SipHash.h>
#include <Common/assert_cast.h>
#include <Common/WeakHash.h>
@ -621,7 +622,7 @@ void ColumnNullable::updatePermutationImpl(IColumn::PermutationSortDirection dir
    if (unlikely(stability == PermutationSortStability::Stable))
    {
        for (auto & null_range : null_ranges)
            ::sort(res.begin() + null_range.first, res.begin() + null_range.second);
            ::sort(std::ranges::next(res.begin(), null_range.from), std::ranges::next(res.begin(), null_range.to));
    }

    if (is_nulls_last || null_ranges.empty())
@ -660,6 +661,33 @@ void ColumnNullable::updatePermutationWithCollation(const Collator & collator, I
    updatePermutationImpl(direction, stability, limit, null_direction_hint, res, equal_ranges, &collator);
}

size_t ColumnNullable::estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const
{
    const size_t range_size = equal_range.size();
    if (range_size <= 1)
        return range_size;

    /// TODO use sampling if the range is too large (e.g. 16k elements, but configurable)
    StringHashSet elements;
    bool has_null = false;
    bool inserted = false;
    for (size_t i = equal_range.from; i < equal_range.to; ++i)
    {
        size_t permuted_i = permutation[i];
        if (isNullAt(permuted_i))
        {
            has_null = true;
        }
        else
        {
            StringRef value = getDataAt(permuted_i);
            elements.emplace(value, inserted);
        }
    }
    return elements.size() + (has_null ? 1 : 0);
}

void ColumnNullable::reserve(size_t n)
{
    getNestedColumn().reserve(n);

@ -109,6 +109,7 @@ public:
        size_t limit, int null_direction_hint, Permutation & res) const override;
    void updatePermutationWithCollation(const Collator & collator, IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
        size_t limit, int null_direction_hint, Permutation & res, EqualRanges& equal_ranges) const override;
    size_t estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const override;
    void reserve(size_t n) override;
    void shrinkToFit() override;
    void ensureOwnership() override;

@ -820,6 +820,9 @@ ColumnPtr recursiveRemoveSparse(const ColumnPtr & column)
    if (const auto * column_tuple = typeid_cast<const ColumnTuple *>(column.get()))
    {
        auto columns = column_tuple->getColumns();
        if (columns.empty())
            return column;

        for (auto & element : columns)
            element = recursiveRemoveSparse(element);

@ -5,6 +5,7 @@
#include <Columns/ColumnCompressed.h>
#include <Columns/MaskOperations.h>
#include <Common/Arena.h>
#include <Common/HashTable/StringHashSet.h>
#include <Common/HashTable/Hash.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
@ -481,6 +482,23 @@ void ColumnString::updatePermutationWithCollation(const Collator & collator, Per
        DefaultPartialSort());
}

size_t ColumnString::estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const
{
    const size_t range_size = equal_range.size();
    if (range_size <= 1)
        return range_size;

    /// TODO use sampling if the range is too large (e.g. 16k elements, but configurable)
    StringHashSet elements;
    bool inserted = false;
    for (size_t i = equal_range.from; i < equal_range.to; ++i)
    {
        size_t permuted_i = permutation[i];
        StringRef value = getDataAt(permuted_i);
        elements.emplace(value, inserted);
    }
    return elements.size();
}

ColumnPtr ColumnString::replicate(const Offsets & replicate_offsets) const
{

@ -260,6 +260,8 @@ public:
    void updatePermutationWithCollation(const Collator & collator, IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
        size_t limit, int, Permutation & res, EqualRanges & equal_ranges) const override;

    size_t estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const override;

    ColumnPtr replicate(const Offsets & replicate_offsets) const override;

    ColumnPtr compress() const override;

@ -3,14 +3,16 @@
#include <Columns/ColumnCompressed.h>
#include <Columns/IColumnImpl.h>
#include <Core/Field.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <Common/Arena.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
#include <Common/iota.h>
#include <Common/typeid_cast.h>
#include <Columns/ColumnsCommon.h>
#include <DataTypes/Serializations/SerializationInfoTuple.h>
#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <base/sort.h>

@ -23,6 +25,7 @@ namespace ErrorCodes
    extern const int NOT_IMPLEMENTED;
    extern const int CANNOT_INSERT_VALUE_OF_DIFFERENT_SIZE_INTO_TUPLE;
    extern const int LOGICAL_ERROR;
    extern const int SIZES_OF_COLUMNS_DOESNT_MATCH;
}

@ -44,6 +47,9 @@ std::string ColumnTuple::getName() const

ColumnTuple::ColumnTuple(MutableColumns && mutable_columns)
{
    if (mutable_columns.empty())
        throw Exception(ErrorCodes::LOGICAL_ERROR, "This function cannot be used to construct empty tuple. It is a bug");

    columns.reserve(mutable_columns.size());
    for (auto & column : mutable_columns)
    {
@ -52,15 +58,21 @@ ColumnTuple::ColumnTuple(MutableColumns && mutable_columns)

        columns.push_back(std::move(column));
    }
    column_length = columns[0]->size();
}

ColumnTuple::ColumnTuple(size_t len) : column_length(len) {}

ColumnTuple::Ptr ColumnTuple::create(const Columns & columns)
{
    if (columns.empty())
        throw Exception(ErrorCodes::LOGICAL_ERROR, "This function cannot be used to construct empty tuple. It is a bug");

    for (const auto & column : columns)
        if (isColumnConst(*column))
            throw Exception(ErrorCodes::ILLEGAL_COLUMN, "ColumnTuple cannot have ColumnConst as its element");

    auto column_tuple = ColumnTuple::create(MutableColumns());
    auto column_tuple = ColumnTuple::create(columns[0]->size());
    column_tuple->columns.assign(columns.begin(), columns.end());

    return column_tuple;
@ -68,11 +80,14 @@ ColumnTuple::Ptr ColumnTuple::create(const Columns & columns)

ColumnTuple::Ptr ColumnTuple::create(const TupleColumns & columns)
{
    if (columns.empty())
        throw Exception(ErrorCodes::LOGICAL_ERROR, "This function cannot be used to construct empty tuple. It is a bug");

    for (const auto & column : columns)
        if (isColumnConst(*column))
            throw Exception(ErrorCodes::ILLEGAL_COLUMN, "ColumnTuple cannot have ColumnConst as its element");

    auto column_tuple = ColumnTuple::create(MutableColumns());
    auto column_tuple = ColumnTuple::create(columns[0]->size());
    column_tuple->columns = columns;

    return column_tuple;
@ -80,6 +95,9 @@ ColumnTuple::Ptr ColumnTuple::create(const TupleColumns & columns)

MutableColumnPtr ColumnTuple::cloneEmpty() const
{
    if (columns.empty())
        return ColumnTuple::create(0);

    const size_t tuple_size = columns.size();
    MutableColumns new_columns(tuple_size);
    for (size_t i = 0; i < tuple_size; ++i)
@ -90,6 +108,9 @@ MutableColumnPtr ColumnTuple::cloneEmpty() const

MutableColumnPtr ColumnTuple::cloneResized(size_t new_size) const
{
    if (columns.empty())
        return ColumnTuple::create(new_size);

    const size_t tuple_size = columns.size();
    MutableColumns new_columns(tuple_size);
    for (size_t i = 0; i < tuple_size; ++i)
@ -98,6 +119,16 @@ MutableColumnPtr ColumnTuple::cloneResized(size_t new_size) const
    return ColumnTuple::create(std::move(new_columns));
}

size_t ColumnTuple::size() const
{
    if (columns.empty())
        return column_length;

    /// It's difficult to maintain a consistent `column_length` because there
    /// are many places that manipulate sub-columns directly.
    return columns.at(0)->size();
}

Field ColumnTuple::operator[](size_t n) const
{
    Field res;
@ -144,6 +175,7 @@ void ColumnTuple::insert(const Field & x)
    if (tuple.size() != tuple_size)
        throw Exception(ErrorCodes::CANNOT_INSERT_VALUE_OF_DIFFERENT_SIZE_INTO_TUPLE, "Cannot insert value of different size into tuple");

    ++column_length;
    for (size_t i = 0; i < tuple_size; ++i)
        columns[i]->insert(tuple[i]);
}
@ -181,6 +213,7 @@ void ColumnTuple::insertFrom(const IColumn & src_, size_t n)
    if (src.columns.size() != tuple_size)
        throw Exception(ErrorCodes::CANNOT_INSERT_VALUE_OF_DIFFERENT_SIZE_INTO_TUPLE, "Cannot insert value of different size into tuple");

    ++column_length;
    for (size_t i = 0; i < tuple_size; ++i)
        columns[i]->insertFrom(*src.columns[i], n);
}
@ -199,18 +232,28 @@ void ColumnTuple::insertManyFrom(const IColumn & src, size_t position, size_t le

void ColumnTuple::insertDefault()
{
    ++column_length;
    for (auto & column : columns)
        column->insertDefault();
}

void ColumnTuple::popBack(size_t n)
{
    column_length -= n;
    for (auto & column : columns)
        column->popBack(n);
}

StringRef ColumnTuple::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
{
    if (columns.empty())
    {
        /// Has to put one useless byte into Arena, because serialization into zero number of bytes is ambiguous.
        char * res = arena.allocContinue(1, begin);
        *res = 0;
        return { res, 1 };
    }

    StringRef res(begin, 0);
    for (const auto & column : columns)
    {
@ -232,6 +275,11 @@ char * ColumnTuple::serializeValueIntoMemory(size_t n, char * memory) const

const char * ColumnTuple::deserializeAndInsertFromArena(const char * pos)
{
    ++column_length;

    if (columns.empty())
        return pos + 1;

    for (auto & column : columns)
        pos = column->deserializeAndInsertFromArena(pos);

@ -272,6 +320,7 @@ void ColumnTuple::updateHashFast(SipHash & hash) const

void ColumnTuple::insertRangeFrom(const IColumn & src, size_t start, size_t length)
{
    column_length += length;
    const size_t tuple_size = columns.size();
    for (size_t i = 0; i < tuple_size; ++i)
        columns[i]->insertRangeFrom(
@ -281,6 +330,12 @@ void ColumnTuple::insertRangeFrom(const IColumn & src, size_t start, size_t leng

ColumnPtr ColumnTuple::filter(const Filter & filt, ssize_t result_size_hint) const
{
    if (columns.empty())
    {
        size_t bytes = countBytesInFilter(filt);
        return cloneResized(bytes);
    }

    const size_t tuple_size = columns.size();
    Columns new_columns(tuple_size);

@ -292,12 +347,29 @@ ColumnPtr ColumnTuple::filter(const Filter & filt, ssize_t result_size_hint) con

void ColumnTuple::expand(const Filter & mask, bool inverted)
{
    if (columns.empty())
    {
        size_t bytes = countBytesInFilter(mask);
        if (inverted)
            bytes = mask.size() - bytes;
        column_length = bytes;
        return;
    }

    for (auto & column : columns)
        column->expand(mask, inverted);
}

ColumnPtr ColumnTuple::permute(const Permutation & perm, size_t limit) const
{
    if (columns.empty())
    {
        if (column_length != perm.size())
            throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, "Size of permutation doesn't match size of column");

        return cloneResized(limit ? std::min(column_length, limit) : column_length);
    }

    const size_t tuple_size = columns.size();
    Columns new_columns(tuple_size);

@ -309,6 +381,14 @@ ColumnPtr ColumnTuple::permute(const Permutation & perm, size_t limit) const

ColumnPtr ColumnTuple::index(const IColumn & indexes, size_t limit) const
{
    if (columns.empty())
    {
        if (indexes.size() < limit)
            throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, "Size of indexes is less than required");

        return cloneResized(limit ? limit : column_length);
    }

    const size_t tuple_size = columns.size();
    Columns new_columns(tuple_size);

@ -320,6 +400,14 @@ ColumnPtr ColumnTuple::index(const IColumn & indexes, size_t limit) const

ColumnPtr ColumnTuple::replicate(const Offsets & offsets) const
{
    if (columns.empty())
    {
        if (column_length != offsets.size())
            throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, "Size of offsets doesn't match size of column");

        return cloneResized(offsets.back());
    }

    const size_t tuple_size = columns.size();
    Columns new_columns(tuple_size);

@ -331,6 +419,22 @@ ColumnPtr ColumnTuple::replicate(const Offsets & offsets) const

MutableColumns ColumnTuple::scatter(ColumnIndex num_columns, const Selector & selector) const
{
    if (columns.empty())
    {
        if (column_length != selector.size())
            throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, "Size of selector doesn't match size of column");

        std::vector<size_t> counts(num_columns);
        for (auto idx : selector)
            ++counts[idx];

        MutableColumns res(num_columns);
        for (size_t i = 0; i < num_columns; ++i)
            res[i] = cloneResized(counts[i]);

        return res;
    }

    const size_t tuple_size = columns.size();
    std::vector<MutableColumns> scattered_tuple_elements(tuple_size);

@ -413,6 +517,9 @@ void ColumnTuple::getPermutationImpl(IColumn::PermutationSortDirection direction
    res.resize(rows);
    iota(res.data(), rows, IColumn::Permutation::value_type(0));

    if (columns.empty())
        return;

    if (limit >= rows)
        limit = 0;

@ -429,7 +536,7 @@ void ColumnTuple::updatePermutationImpl(IColumn::PermutationSortDirection direct

    for (const auto & column : columns)
    {
        while (!equal_ranges.empty() && limit && limit <= equal_ranges.back().first)
        while (!equal_ranges.empty() && limit && limit <= equal_ranges.back().from)
            equal_ranges.pop_back();

        if (collator && column->isCollationSupported())
@ -603,6 +710,9 @@ void ColumnTuple::takeDynamicStructureFromSourceColumns(const Columns & source_c

ColumnPtr ColumnTuple::compress() const
{
    if (columns.empty())
        return Ptr();

    size_t byte_size = 0;
    Columns compressed;
    compressed.reserve(columns.size());

@ -26,6 +26,13 @@ private:
    explicit ColumnTuple(MutableColumns && columns);
    ColumnTuple(const ColumnTuple &) = default;

    /// Empty tuple needs a dedicated field to store its size.
    /// This field is used *only* for zero-sized tuples.
    /// Otherwise `columns[0].size()` should be used to get the size of the tuple column.
    size_t column_length;

    /// Dedicated constructor for empty tuples.
    explicit ColumnTuple(size_t len);
public:
    /** Create immutable column using immutable arguments. These arguments may be shared with other columns.
      * Use IColumn::mutate in order to make mutable column and mutate shared nested columns.
@ -39,6 +46,8 @@ public:
    requires std::is_rvalue_reference_v<Arg &&>
    static MutablePtr create(Arg && arg) { return Base::create(std::forward<Arg>(arg)); }

    static MutablePtr create(size_t len_) { return Base::create(len_); }

    std::string getName() const override;
    const char * getFamilyName() const override { return "Tuple"; }
    TypeIndex getDataType() const override { return TypeIndex::Tuple; }
@ -46,10 +55,7 @@ public:
    MutableColumnPtr cloneEmpty() const override;
    MutableColumnPtr cloneResized(size_t size) const override;

    size_t size() const override
    {
        return columns.at(0)->size();
    }
    size_t size() const override;

    Field operator[](size_t n) const override;
    void get(size_t n, Field & res) const override;
@ -117,6 +123,9 @@ public:
    bool hasDynamicStructure() const override;
    void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override;

    /// Empty tuple needs a public method to manage its size.
    void addSize(size_t delta) { column_length += delta; }

private:
    int compareAtImpl(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint, const Collator * collator=nullptr) const;

@ -14,6 +14,7 @@
#include <Common/Arena.h>
#include <Common/Exception.h>
#include <Common/HashTable/Hash.h>
#include <Common/HashTable/StringHashSet.h>
#include <Common/NaNUtils.h>
#include <Common/RadixSort.h>
#include <Common/SipHash.h>
@ -413,6 +414,25 @@ void ColumnVector<T>::updatePermutation(IColumn::PermutationSortDirection direct
    }
}

template <typename T>
size_t ColumnVector<T>::estimateCardinalityInPermutedRange(const IColumn::Permutation & permutation, const EqualRange & equal_range) const
{
    const size_t range_size = equal_range.size();
    if (range_size <= 1)
        return range_size;

    /// TODO use sampling if the range is too large (e.g. 16k elements, but configurable)
    StringHashSet elements;
    bool inserted = false;
    for (size_t i = equal_range.from; i < equal_range.to; ++i)
    {
        size_t permuted_i = permutation[i];
        StringRef value = getDataAt(permuted_i);
        elements.emplace(value, inserted);
    }
    return elements.size();
}

template <typename T>
MutableColumnPtr ColumnVector<T>::cloneResized(size_t size) const
{

@ -161,6 +161,8 @@ public:
    void updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
        size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges& equal_ranges) const override;

    size_t estimateCardinalityInPermutedRange(const IColumn::Permutation & permutation, const EqualRange & equal_range) const override;

    void reserve(size_t n) override
    {
        data.reserve_exact(n);

@ -83,6 +83,11 @@ ColumnPtr IColumn::createWithOffsets(const Offsets & offsets, const ColumnConst
    return res;
}

size_t IColumn::estimateCardinalityInPermutedRange(const IColumn::Permutation & /*permutation*/, const EqualRange & equal_range) const
{
    return equal_range.size();
}

void IColumn::forEachSubcolumn(ColumnCallback callback) const
{
    const_cast<IColumn*>(this)->forEachSubcolumn([&callback](WrappedPtr & subcolumn)

@ -36,11 +36,19 @@ class Field;
class WeakHash32;
class ColumnConst;

/*
 * Represents a set of equal ranges in previous column to perform sorting in current column.
 * Used in sorting by tuples.
 * */
using EqualRanges = std::vector<std::pair<size_t, size_t> >;
/// A range of column values between row indexes `from` and `to`. The name "equal range" is due to table sorting as its main use case: With
/// a PRIMARY KEY (c_pk1, c_pk2, ...), the first PK column is fully sorted. The second PK column is sorted within equal-value runs of the
/// first PK column, and so on. The number of runs (ranges) per column increases from one primary key column to the next. An "equal range"
/// is a run in a previous column, within which the values of the current column can be sorted.
struct EqualRange
{
    size_t from; /// inclusive
    size_t to; /// exclusive
    EqualRange(size_t from_, size_t to_) : from(from_), to(to_) { chassert(from <= to); }
    size_t size() const { return to - from; }
};

using EqualRanges = std::vector<EqualRange>;

/// Declares interface to store columns in memory.
class IColumn : public COW<IColumn>
@ -399,6 +407,9 @@ public:
            "or for Array or Tuple, containing them.");
    }

    /// Estimate the cardinality (number of unique values) of the values in 'equal_range' after permutation, formally: |{ column[permutation[r]] : r in equal_range }|.
    virtual size_t estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const;

    /** Copies each element according to the offsets parameter.
      * (i-th element should be copied offsets[i] - offsets[i - 1] times.)
      * It is necessary in the ARRAY JOIN operation.

@ -139,7 +139,7 @@ void IColumn::updatePermutationImpl(
    if (equal_ranges.empty())
        return;

    if (limit >= size() || limit > equal_ranges.back().second)
    if (limit >= size() || limit > equal_ranges.back().to)
        limit = 0;

    EqualRanges new_ranges;

@ -43,12 +43,13 @@ namespace
            endpoint,
            proxy_scheme,
            proxy_port,
            cache_ttl
            std::chrono::seconds {cache_ttl}
        };

        return std::make_shared<RemoteProxyConfigurationResolver>(
            server_configuration,
            request_protocol,
            std::make_shared<RemoteProxyHostFetcherImpl>(),
            isTunnelingDisabledForHTTPSRequestsOverHTTPProxy(configuration));
    }

@ -6,22 +6,47 @@
#include <Poco/Net/HTTPRequest.h>
#include <Poco/Net/HTTPResponse.h>
#include <Common/logger_useful.h>
#include <Common/DNSResolver.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
    extern const int RECEIVED_ERROR_FROM_REMOTE_IO_SERVER;
}

std::string RemoteProxyHostFetcherImpl::fetch(const Poco::URI & endpoint, const ConnectionTimeouts & timeouts)
{
    auto request = Poco::Net::HTTPRequest(Poco::Net::HTTPRequest::HTTP_GET, endpoint.getPath(), Poco::Net::HTTPRequest::HTTP_1_1);
    auto session = makeHTTPSession(HTTPConnectionGroupType::HTTP, endpoint, timeouts);

    session->sendRequest(request);

    Poco::Net::HTTPResponse response;
    auto & response_body_stream = session->receiveResponse(response);

    if (response.getStatus() != Poco::Net::HTTPResponse::HTTP_OK)
        throw HTTPException(
            ErrorCodes::RECEIVED_ERROR_FROM_REMOTE_IO_SERVER,
            endpoint.toString(),
            response.getStatus(),
            response.getReason(),
            "");

    std::string proxy_host;
    Poco::StreamCopier::copyToString(response_body_stream, proxy_host);

    return proxy_host;
}

RemoteProxyConfigurationResolver::RemoteProxyConfigurationResolver(
    const RemoteServerConfiguration & remote_server_configuration_,
    Protocol request_protocol_,
    std::shared_ptr<RemoteProxyHostFetcher> fetcher_,
    bool disable_tunneling_for_https_requests_over_http_proxy_
)
    : ProxyConfigurationResolver(request_protocol_, disable_tunneling_for_https_requests_over_http_proxy_), remote_server_configuration(remote_server_configuration_)
    : ProxyConfigurationResolver(request_protocol_, disable_tunneling_for_https_requests_over_http_proxy_),
    remote_server_configuration(remote_server_configuration_), fetcher(fetcher_)
{
}

@ -29,9 +54,7 @@ ProxyConfiguration RemoteProxyConfigurationResolver::resolve()
{
    auto logger = getLogger("RemoteProxyConfigurationResolver");

    auto & [endpoint, proxy_protocol, proxy_port, cache_ttl_] = remote_server_configuration;

    LOG_DEBUG(logger, "Obtain proxy using resolver: {}", endpoint.toString());
    auto & [endpoint, proxy_protocol_string, proxy_port, cache_ttl] = remote_server_configuration;

    std::lock_guard lock(cache_mutex);

@ -55,66 +78,26 @@ ProxyConfiguration RemoteProxyConfigurationResolver::resolve()
        .withSendTimeout(1)
        .withReceiveTimeout(1);

    try
    {
        /// It should be just empty GET request.
        Poco::Net::HTTPRequest request(Poco::Net::HTTPRequest::HTTP_GET, endpoint.getPath(), Poco::Net::HTTPRequest::HTTP_1_1);
    const auto proxy_host = fetcher->fetch(endpoint, timeouts);

        const auto & host = endpoint.getHost();
        auto resolved_hosts = DNSResolver::instance().resolveHostAll(host);
    LOG_DEBUG(logger, "Use proxy: {}://{}:{}", proxy_protocol_string, proxy_host, proxy_port);

        HTTPSessionPtr session;
    auto proxy_protocol = ProxyConfiguration::protocolFromString(proxy_protocol_string);

        for (size_t i = 0; i < resolved_hosts.size(); ++i)
        {
            auto resolved_endpoint = endpoint;
            resolved_endpoint.setHost(resolved_hosts[i].toString());
            session = makeHTTPSession(HTTPConnectionGroupType::HTTP, resolved_endpoint, timeouts);
    bool use_tunneling_for_https_requests_over_http_proxy = useTunneling(
        request_protocol,
        proxy_protocol,
        disable_tunneling_for_https_requests_over_http_proxy);

            try
            {
                session->sendRequest(request);
                break;
            }
            catch (...)
            {
                if (i + 1 == resolved_hosts.size())
                    throw;
            }
        }
    cached_config.protocol = proxy_protocol;
    cached_config.host = proxy_host;
    cached_config.port = proxy_port;
    cached_config.tunneling = use_tunneling_for_https_requests_over_http_proxy;
    cached_config.original_request_protocol = request_protocol;
    cache_timestamp = std::chrono::system_clock::now();
    cache_valid = true;

        Poco::Net::HTTPResponse response;
        auto & response_body_stream = session->receiveResponse(response);

        if (response.getStatus() != Poco::Net::HTTPResponse::HTTP_OK)
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Proxy resolver returned not OK status: {}", response.getReason());

        String proxy_host;
        /// Read proxy host as string from response body.
        Poco::StreamCopier::copyToString(response_body_stream, proxy_host);

        LOG_DEBUG(logger, "Use proxy: {}://{}:{}", proxy_protocol, proxy_host, proxy_port);

        bool use_tunneling_for_https_requests_over_http_proxy = useTunneling(
            request_protocol,
            cached_config.protocol,
            disable_tunneling_for_https_requests_over_http_proxy);

        cached_config.protocol = ProxyConfiguration::protocolFromString(proxy_protocol);
        cached_config.host = proxy_host;
        cached_config.port = proxy_port;
        cached_config.tunneling = use_tunneling_for_https_requests_over_http_proxy;
        cached_config.original_request_protocol = request_protocol;
        cache_timestamp = std::chrono::system_clock::now();
        cache_valid = true;

        return cached_config;
    }
    catch (...)
    {
        tryLogCurrentException("RemoteProxyConfigurationResolver", "Failed to obtain proxy");
        return {};
    }
    return cached_config;
}

void RemoteProxyConfigurationResolver::errorReport(const ProxyConfiguration & config)
@ -124,7 +107,7 @@ void RemoteProxyConfigurationResolver::errorReport(const ProxyConfiguration & co

    std::lock_guard lock(cache_mutex);

    if (!cache_ttl.count() || !cache_valid)
    if (!remote_server_configuration.cache_ttl_.count() || !cache_valid)
        return;

    if (std::tie(cached_config.protocol, cached_config.host, cached_config.port)

@ -10,6 +10,19 @@
namespace DB
{

struct ConnectionTimeouts;

struct RemoteProxyHostFetcher
{
    virtual ~RemoteProxyHostFetcher() = default;
    virtual std::string fetch(const Poco::URI & endpoint, const ConnectionTimeouts & timeouts) = 0;
};

struct RemoteProxyHostFetcherImpl : public RemoteProxyHostFetcher
{
    std::string fetch(const Poco::URI & endpoint, const ConnectionTimeouts & timeouts) override;
};

/*
 * Makes an HTTP GET request to the specified endpoint to obtain a proxy host.
 * */
@ -22,13 +35,14 @@ public:
        Poco::URI endpoint;
        String proxy_protocol;
        unsigned proxy_port;
        unsigned cache_ttl_;
        const std::chrono::seconds cache_ttl_;
    };

    RemoteProxyConfigurationResolver(
        const RemoteServerConfiguration & remote_server_configuration_,
        Protocol request_protocol_,
        bool disable_tunneling_for_https_requests_over_http_proxy_ = true);
        std::shared_ptr<RemoteProxyHostFetcher> fetcher_,
        bool disable_tunneling_for_https_requests_over_http_proxy_ = false);

    ProxyConfiguration resolve() override;

@ -36,11 +50,11 @@ public:

private:
    RemoteServerConfiguration remote_server_configuration;
    std::shared_ptr<RemoteProxyHostFetcher> fetcher;

    std::mutex cache_mutex;
    bool cache_valid = false;
    std::chrono::time_point<std::chrono::system_clock> cache_timestamp;
    const std::chrono::seconds cache_ttl{0};
    ProxyConfiguration cached_config;
};

@ -280,6 +280,10 @@ public:
        if (!initialized())
            abort();

        /// Thread cannot join itself.
        if (state->thread_id == std::this_thread::get_id())
            abort();

        state->event.wait();
        state.reset();
    }
@ -293,12 +297,7 @@ public:

    bool joinable() const
    {
        if (!state)
            return false;
        /// Thread cannot join itself.
        if (state->thread_id == std::this_thread::get_id())
            return false;
        return true;
        return initialized();
    }

    std::thread::id get_id() const

@ -637,6 +637,9 @@ void TestKeeper::finalize(const String &)
        expired = true;
    }

    /// Signal request_queue to wake up processing thread without waiting for timeout
    requests_queue.finish();

    processing_thread.join();

    try

@ -1,5 +1,4 @@
#include "ZooKeeper.h"
#include "Coordination/KeeperConstants.h"
#include "Coordination/KeeperFeatureFlags.h"
#include "ZooKeeperImpl.h"
#include "KeeperException.h"
@ -376,11 +375,14 @@ void ZooKeeper::createAncestors(const std::string & path)
    }

    Coordination::Responses responses;
    Coordination::Error code = multiImpl(create_ops, responses, /*check_session_valid*/ false);
    const auto & [code, failure_reason] = multiImpl(create_ops, responses, /*check_session_valid*/ false);

    if (code == Coordination::Error::ZOK)
        return;

    if (!failure_reason.empty())
        throw KeeperException::fromMessage(code, failure_reason);

    throw KeeperException::fromPath(code, path);
}

@ -676,17 +678,19 @@ Coordination::Error ZooKeeper::trySet(const std::string & path, const std::strin
}


Coordination::Error ZooKeeper::multiImpl(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid)
std::pair<Coordination::Error, std::string>
ZooKeeper::multiImpl(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid)
{
    if (requests.empty())
        return Coordination::Error::ZOK;
        return {Coordination::Error::ZOK, ""};

    std::future<Coordination::MultiResponse> future_result;
    Coordination::Requests requests_with_check_session;
    if (check_session_valid)
    {
        Coordination::Requests new_requests = requests;
        addCheckSessionOp(new_requests);
        future_result = asyncTryMultiNoThrow(new_requests);
        requests_with_check_session = requests;
        addCheckSessionOp(requests_with_check_session);
        future_result = asyncTryMultiNoThrow(requests_with_check_session);
    }
    else
    {
@ -696,7 +700,7 @@ Coordination::Error ZooKeeper::multiImpl(const Coordination::Requests & requests
    if (future_result.wait_for(std::chrono::milliseconds(args.operation_timeout_ms)) != std::future_status::ready)
    {
        impl->finalize(fmt::format("Operation timeout on {} {}", Coordination::OpNum::Multi, requests[0]->getPath()));
        return Coordination::Error::ZOPERATIONTIMEOUT;
        return {Coordination::Error::ZOPERATIONTIMEOUT, ""};
    }
    else
    {
@ -704,11 +708,14 @@ Coordination::Error ZooKeeper::multiImpl(const Coordination::Requests & requests
        Coordination::Error code = response.error;
        responses = response.responses;

        std::string reason;

        if (check_session_valid)
        {
            if (code != Coordination::Error::ZOK && !Coordination::isHardwareError(code) && getFailedOpIndex(code, responses) == requests.size())
            {
                impl->finalize(fmt::format("Session was killed: {}", requests.back()->getPath()));
                reason = fmt::format("Session was killed: {}", requests_with_check_session.back()->getPath());
                impl->finalize(reason);
                code = Coordination::Error::ZSESSIONMOVED;
            }
            responses.pop_back();
@ -717,23 +724,33 @@ Coordination::Error ZooKeeper::multiImpl(const Coordination::Requests & requests
            chassert(code == Coordination::Error::ZOK || Coordination::isHardwareError(code) || responses.back()->error != Coordination::Error::ZOK);
        }

        return code;
        return {code, std::move(reason)};
    }
}

Coordination::Responses ZooKeeper::multi(const Coordination::Requests & requests, bool check_session_valid)
{
    Coordination::Responses responses;
    Coordination::Error code = multiImpl(requests, responses, check_session_valid);
    const auto & [code, failure_reason] = multiImpl(requests, responses, check_session_valid);
    if (!failure_reason.empty())
        throw KeeperException::fromMessage(code, failure_reason);

    KeeperMultiException::check(code, requests, responses);
    return responses;
}

Coordination::Error ZooKeeper::tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid)
{
    Coordination::Error code = multiImpl(requests, responses, check_session_valid);
    const auto & [code, failure_reason] = multiImpl(requests, responses, check_session_valid);

    if (code != Coordination::Error::ZOK && !Coordination::isUserError(code))
    {
        if (!failure_reason.empty())
            throw KeeperException::fromMessage(code, failure_reason);

        throw KeeperException(code);
    }

    return code;
}

@ -1346,7 +1363,7 @@ Coordination::Error ZooKeeper::tryMultiNoThrow(const Coordination::Requests & re
{
    try
    {
        return multiImpl(requests, responses, check_session_valid);
        return multiImpl(requests, responses, check_session_valid).first;
    }
    catch (const Coordination::Exception & e)
    {

@ -2,10 +2,8 @@

#include "Types.h"
#include <Poco/Util/LayeredConfiguration.h>
#include <unordered_set>
#include <future>
#include <memory>
#include <mutex>
#include <string>
#include <Common/logger_useful.h>
#include <Common/ProfileEvents.h>
@ -18,7 +16,6 @@
#include <Common/thread_local_rng.h>
#include <Coordination/KeeperFeatureFlags.h>
#include <unistd.h>
#include <random>


namespace ProfileEvents
@ -644,7 +641,11 @@ private:
        Coordination::Stat * stat,
        Coordination::WatchCallbackPtr watch_callback,
        Coordination::ListRequestType list_request_type);
    Coordination::Error multiImpl(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid);

    /// Returns an error code with an optional failure reason.
    std::pair<Coordination::Error, std::string>
    multiImpl(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid);

    Coordination::Error existsImpl(const std::string & path, Coordination::Stat * stat_, Coordination::WatchCallback watch_callback);
    Coordination::Error syncImpl(const std::string & path, std::string & returned_path);

172
src/Common/tests/gtest_proxy_remote_configuration_resolver.cpp
Normal file

@ -0,0 +1,172 @@
|
||||
#include <gtest/gtest.h>

#include <Common/RemoteProxyConfigurationResolver.h>
#include <Poco/URI.h>
#include <IO/ConnectionTimeouts.h>
#include <base/sleep.h>

namespace
{

struct RemoteProxyHostFetcherMock : public DB::RemoteProxyHostFetcher
{
explicit RemoteProxyHostFetcherMock(const std::string & return_mock_) : return_mock(return_mock_) {}

std::string fetch(const Poco::URI &, const DB::ConnectionTimeouts &) override
{
fetch_count++;
return return_mock;
}

std::string return_mock;
std::size_t fetch_count {0};
};

}

namespace DB
{

TEST(RemoteProxyConfigurationResolver, HTTPOverHTTP)
{
const char * proxy_server_mock = "proxy1";
auto remote_server_configuration = RemoteProxyConfigurationResolver::RemoteServerConfiguration
{
Poco::URI("not_important"),
"http",
80,
std::chrono::seconds {10}
};

RemoteProxyConfigurationResolver resolver(
remote_server_configuration,
ProxyConfiguration::Protocol::HTTP,
std::make_shared<RemoteProxyHostFetcherMock>(proxy_server_mock)
);

auto configuration = resolver.resolve();

ASSERT_EQ(configuration.host, proxy_server_mock);
ASSERT_EQ(configuration.port, 80);
ASSERT_EQ(configuration.protocol, ProxyConfiguration::Protocol::HTTP);
ASSERT_EQ(configuration.original_request_protocol, ProxyConfiguration::Protocol::HTTP);
ASSERT_EQ(configuration.tunneling, false);
}

TEST(RemoteProxyConfigurationResolver, HTTPSOverHTTPS)
{
const char * proxy_server_mock = "proxy1";
auto remote_server_configuration = RemoteProxyConfigurationResolver::RemoteServerConfiguration
{
Poco::URI("not_important"),
"https",
443,
std::chrono::seconds {10}
};

RemoteProxyConfigurationResolver resolver(
remote_server_configuration,
ProxyConfiguration::Protocol::HTTPS,
std::make_shared<RemoteProxyHostFetcherMock>(proxy_server_mock)
);

auto configuration = resolver.resolve();

ASSERT_EQ(configuration.host, proxy_server_mock);
ASSERT_EQ(configuration.port, 443);
ASSERT_EQ(configuration.protocol, ProxyConfiguration::Protocol::HTTPS);
ASSERT_EQ(configuration.original_request_protocol, ProxyConfiguration::Protocol::HTTPS);
// tunneling should not be used, https over https.
ASSERT_EQ(configuration.tunneling, false);
}

TEST(RemoteProxyConfigurationResolver, HTTPSOverHTTP)
{
const char * proxy_server_mock = "proxy1";
auto remote_server_configuration = RemoteProxyConfigurationResolver::RemoteServerConfiguration
{
Poco::URI("not_important"),
"http",
80,
std::chrono::seconds {10}
};

RemoteProxyConfigurationResolver resolver(
remote_server_configuration,
ProxyConfiguration::Protocol::HTTPS,
std::make_shared<RemoteProxyHostFetcherMock>(proxy_server_mock)
);

auto configuration = resolver.resolve();

ASSERT_EQ(configuration.host, proxy_server_mock);
ASSERT_EQ(configuration.port, 80);
ASSERT_EQ(configuration.protocol, ProxyConfiguration::Protocol::HTTP);
ASSERT_EQ(configuration.original_request_protocol, ProxyConfiguration::Protocol::HTTPS);
// tunneling should be used, https over http.
ASSERT_EQ(configuration.tunneling, true);
}

TEST(RemoteProxyConfigurationResolver, HTTPSOverHTTPNoTunneling)
{
const char * proxy_server_mock = "proxy1";
auto remote_server_configuration = RemoteProxyConfigurationResolver::RemoteServerConfiguration
{
Poco::URI("not_important"),
"http",
80,
std::chrono::seconds {10}
};

RemoteProxyConfigurationResolver resolver(
remote_server_configuration,
ProxyConfiguration::Protocol::HTTPS,
std::make_shared<RemoteProxyHostFetcherMock>(proxy_server_mock),
true /* disable_tunneling_for_https_requests_over_http_proxy_ */
);

auto configuration = resolver.resolve();

ASSERT_EQ(configuration.host, proxy_server_mock);
ASSERT_EQ(configuration.port, 80);
ASSERT_EQ(configuration.protocol, ProxyConfiguration::Protocol::HTTP);
ASSERT_EQ(configuration.original_request_protocol, ProxyConfiguration::Protocol::HTTPS);
// tunneling should not be used: it is explicitly disabled for https over http proxy.
ASSERT_EQ(configuration.tunneling, false);
}

TEST(RemoteProxyConfigurationResolver, SimpleCacheTest)
{
const char * proxy_server_mock = "proxy1";
auto cache_ttl = 5u;
auto remote_server_configuration = RemoteProxyConfigurationResolver::RemoteServerConfiguration
{
Poco::URI("not_important"),
"http",
80,
std::chrono::seconds {cache_ttl}
};

auto fetcher_mock = std::make_shared<RemoteProxyHostFetcherMock>(proxy_server_mock);

RemoteProxyConfigurationResolver resolver(
remote_server_configuration,
ProxyConfiguration::Protocol::HTTP,
fetcher_mock
);

resolver.resolve();
resolver.resolve();
resolver.resolve();

ASSERT_EQ(fetcher_mock->fetch_count, 1u);

sleepForSeconds(cache_ttl * 2);

resolver.resolve();

ASSERT_EQ(fetcher_mock->fetch_count, 2);
}

}
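Illustrative only (not in the commit): the mock pattern above also extends to failure injection; FailingFetcherMock is a hypothetical name following RemoteProxyHostFetcherMock, and <stdexcept> is assumed to be included:

struct FailingFetcherMock : public DB::RemoteProxyHostFetcher
{
    std::string fetch(const Poco::URI &, const DB::ConnectionTimeouts &) override
    {
        /// forces the resolver's error-handling path in a test
        throw std::runtime_error("proxy resolver endpoint unreachable");
    }
};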
@ -129,7 +129,6 @@ class IColumn;
M(Bool, enable_s3_requests_logging, false, "Enable very explicit logging of S3 requests. Makes sense for debug only.", 0) \
M(String, s3queue_default_zookeeper_path, "/clickhouse/s3queue/", "Default zookeeper path prefix for S3Queue engine", 0) \
M(Bool, s3queue_enable_logging_to_s3queue_log, false, "Enable writing to system.s3queue_log. The value can be overwritten per table with table settings", 0) \
M(Bool, s3queue_allow_experimental_sharded_mode, false, "Enable experimental sharded mode of S3Queue table engine. It is experimental because it will be rewritten", 0) \
M(UInt64, hdfs_replication, 0, "The actual number of replications can be specified when the hdfs file is created.", 0) \
M(Bool, hdfs_truncate_on_insert, false, "Enables or disables truncate before insert in s3 engine tables", 0) \
M(Bool, hdfs_create_new_file_on_insert, false, "Enables or disables creating a new file on each insert in hdfs engine tables", 0) \

@ -243,7 +242,8 @@ class IColumn;
M(Bool, do_not_merge_across_partitions_select_final, false, "Merge parts only in one partition in select final", 0) \
M(Bool, split_parts_ranges_into_intersecting_and_non_intersecting_final, true, "Split parts ranges into intersecting and non intersecting during FINAL optimization", 0) \
M(Bool, split_intersecting_parts_ranges_into_layers_final, true, "Split intersecting parts ranges into layers during FINAL optimization", 0) \
M(Bool, allow_experimental_inverted_index, false, "If it is set to true, allow to use experimental full-text index.", 0) \
M(Bool, allow_experimental_inverted_index, false, "If it is set to true, allow to use experimental inverted index.", 0) \
M(Bool, allow_experimental_full_text_index, false, "If it is set to true, allow to use experimental full-text index.", 0) \
\
M(UInt64, mysql_max_rows_to_insert, 65536, "The maximum number of rows in MySQL batch insertion of the MySQL storage engine", 0) \
M(Bool, mysql_map_string_to_text_in_show_columns, true, "If enabled, String type will be mapped to TEXT in SHOW [FULL] COLUMNS, BLOB otherwise. Has an effect only when the connection is made through the MySQL wire protocol.", 0) \

@ -963,6 +963,7 @@ class IColumn;
MAKE_OBSOLETE(M, UInt64, partial_merge_join_optimizations, 0) \
MAKE_OBSOLETE(M, MaxThreads, max_alter_threads, 0) \
MAKE_OBSOLETE(M, Bool, use_mysql_types_in_show_columns, false) \
MAKE_OBSOLETE(M, Bool, s3queue_allow_experimental_sharded_mode, false) \
/* moved to config.xml: see also src/Core/ServerSettings.h */ \
MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, background_buffer_flush_schedule_pool_size, 16) \
MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, background_pool_size, 16) \

@ -91,6 +91,7 @@ static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> sett
{"hdfs_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in HDFS engine instead of empty query result"},
{"azure_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in AzureBlobStorage engine instead of empty query result"},
{"s3_validate_request_settings", true, true, "Allow to disable S3 request settings validation"},
{"allow_experimental_full_text_index", false, false, "Enable experimental full-text index"},
{"azure_skip_empty_files", false, false, "Allow to skip empty files in azure table engine"},
{"hdfs_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in HDFS table engine"},
{"azure_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in AzureBlobStorage table engine"},
@ -1,149 +0,0 @@
#include "iostream_debug_helpers.h"

#include <iostream>
#include <Client/Connection.h>
#include <Core/Block.h>
#include <Core/ColumnWithTypeAndName.h>
#include <Core/Field.h>
#include <Core/NamesAndTypes.h>
#include <DataTypes/IDataType.h>
#include <Functions/IFunction.h>
#include <IO/WriteBufferFromOStream.h>
#include <Interpreters/ExpressionAnalyzer.h>
#include <Interpreters/ExpressionActions.h>
#include <Parsers/IAST.h>
#include <Storages/IStorage.h>
#include <Common/COW.h>
#include <Common/FieldVisitorDump.h>

namespace DB
{

template <>
std::ostream & operator<< <Field>(std::ostream & stream, const Field & what)
{
stream << applyVisitor(FieldVisitorDump(), what);
return stream;
}

std::ostream & operator<<(std::ostream & stream, const NameAndTypePair & what)
{
stream << "NameAndTypePair(name = " << what.name << ", type = " << what.type << ")";
return stream;
}

std::ostream & operator<<(std::ostream & stream, const IDataType & what)
{
stream << "IDataType(name = " << what.getName() << ", default = " << what.getDefault() << ")";
return stream;
}

std::ostream & operator<<(std::ostream & stream, const IStorage & what)
{
auto table_id = what.getStorageID();
stream << "IStorage(name = " << what.getName() << ", tableName = " << table_id.table_name << ") {"
<< what.getInMemoryMetadataPtr()->getColumns().getAllPhysical().toString() << "}";
return stream;
}

std::ostream & operator<<(std::ostream & stream, const TableLockHolder &)
{
stream << "TableStructureReadLock()";
return stream;
}

std::ostream & operator<<(std::ostream & stream, const IFunctionOverloadResolver & what)
{
stream << "IFunction(name = " << what.getName() << ", variadic = " << what.isVariadic() << ", args = " << what.getNumberOfArguments()
<< ")";
return stream;
}

std::ostream & operator<<(std::ostream & stream, const Block & what)
{
stream << "Block("
<< "num_columns = " << what.columns() << "){" << what.dumpStructure() << "}";
return stream;
}

std::ostream & operator<<(std::ostream & stream, const ColumnWithTypeAndName & what)
{
stream << "ColumnWithTypeAndName(name = " << what.name << ", type = " << *what.type << ", column = ";
return dumpValue(stream, what.column) << ")";
}

std::ostream & operator<<(std::ostream & stream, const IColumn & what)
{
stream << "IColumn(" << what.dumpStructure() << ")";
stream << "{";
for (size_t i = 0; i < what.size(); ++i)
{
if (i)
stream << ", ";
stream << applyVisitor(FieldVisitorDump(), what[i]);
}
stream << "}";

return stream;
}

std::ostream & operator<<(std::ostream & stream, const Packet & what)
{
stream << "Packet("
<< "type = " << what.type;
// types description: Core/Protocol.h
if (what.exception)
stream << "exception = " << what.exception.get();
// TODO: profile_info
stream << ") {" << what.block << "}";
return stream;
}

std::ostream & operator<<(std::ostream & stream, const ExpressionActions & what)
{
stream << "ExpressionActions(" << what.dumpActions() << ")";
return stream;
}

std::ostream & operator<<(std::ostream & stream, const TreeRewriterResult & what)
{
stream << "SyntaxAnalyzerResult{";
stream << "storage=" << what.storage << "; ";
if (!what.source_columns.empty())
{
stream << "source_columns=";
dumpValue(stream, what.source_columns);
stream << "; ";
}
if (!what.aliases.empty())
{
stream << "aliases=";
dumpValue(stream, what.aliases);
stream << "; ";
}
if (!what.array_join_result_to_source.empty())
{
stream << "array_join_result_to_source=";
dumpValue(stream, what.array_join_result_to_source);
stream << "; ";
}
if (!what.array_join_alias_to_name.empty())
{
stream << "array_join_alias_to_name=";
dumpValue(stream, what.array_join_alias_to_name);
stream << "; ";
}
if (!what.array_join_name_to_alias.empty())
{
stream << "array_join_name_to_alias=";
dumpValue(stream, what.array_join_name_to_alias);
stream << "; ";
}
stream << "rewrite_subqueries=" << what.rewrite_subqueries << "; ";
stream << "}";

return stream;
}

}

@ -1,49 +0,0 @@
#pragma once
#include <iostream>

namespace DB
{

// Use template to disable implicit casting for certain overloaded types such as Field, which leads
// to overload resolution ambiguity.
class Field;
template <typename T>
requires std::is_same_v<T, Field>
std::ostream & operator<<(std::ostream & stream, const T & what);

struct NameAndTypePair;
std::ostream & operator<<(std::ostream & stream, const NameAndTypePair & what);

class IDataType;
std::ostream & operator<<(std::ostream & stream, const IDataType & what);

class IStorage;
std::ostream & operator<<(std::ostream & stream, const IStorage & what);

class IFunctionOverloadResolver;
std::ostream & operator<<(std::ostream & stream, const IFunctionOverloadResolver & what);

class IFunctionBase;
std::ostream & operator<<(std::ostream & stream, const IFunctionBase & what);

class Block;
std::ostream & operator<<(std::ostream & stream, const Block & what);

struct ColumnWithTypeAndName;
std::ostream & operator<<(std::ostream & stream, const ColumnWithTypeAndName & what);

class IColumn;
std::ostream & operator<<(std::ostream & stream, const IColumn & what);

struct Packet;
std::ostream & operator<<(std::ostream & stream, const Packet & what);

class ExpressionActions;
std::ostream & operator<<(std::ostream & stream, const ExpressionActions & what);

struct TreeRewriterResult;
std::ostream & operator<<(std::ostream & stream, const TreeRewriterResult & what);
}

/// some operator<< should be declared before operator<<(... std::shared_ptr<>)
#include <base/iostream_debug_helpers.h>
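The deleted header's comment explains its one trick: constraining the templated operator<< to exactly Field keeps types that are implicitly convertible to Field from making overload resolution ambiguous. A minimal standalone sketch of the same idiom, with illustrative names that are not ClickHouse's:

#include <ostream>
#include <type_traits>

struct Field {};
struct ConvertibleToField { operator Field() const { return {}; } };

template <typename T>
requires std::is_same_v<T, Field>
std::ostream & operator<<(std::ostream & os, const T &)
{
    return os << "Field"; /// binds only to Field itself, never to ConvertibleToField
}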
@ -749,6 +749,7 @@ std::string BaseDaemon::getDefaultConfigFileName() const

void BaseDaemon::closeFDs()
{
#if !defined(USE_XRAY)
/// NOTE: may benefit from close_range() (linux 5.9+)
#if defined(OS_FREEBSD) || defined(OS_DARWIN)
fs::path proc_path{"/dev/fd"};

@ -796,13 +797,13 @@ void BaseDaemon::closeFDs()
}
}
}
#endif
}

void BaseDaemon::initialize(Application & self)
{
closeFDs();

ServerApplication::initialize(self);

/// now highest priority (lowest value) is PRIO_APPLICATION = -100, we want higher!
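The NOTE in closeFDs() mentions close_range() (Linux 5.9+). A minimal sketch of that alternative, assuming a new enough kernel and glibc >= 2.34 — this is not what the commit does:

#include <unistd.h> /// declares close_range() on glibc >= 2.34 (with _GNU_SOURCE, which g++ predefines)

static void closeAllFDsFrom(unsigned int first)
{
    /// one syscall closes every descriptor in [first, UINT_MAX];
    /// it fails with ENOSYS/EINVAL on older kernels, so a fallback loop like the one above is still needed
    close_range(first, ~0U, 0);
}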
@ -75,6 +75,9 @@ ColumnPtr recursiveRemoveLowCardinality(const ColumnPtr & column)
else if (const auto * column_tuple = typeid_cast<const ColumnTuple *>(column.get()))
{
auto columns = column_tuple->getColumns();
if (columns.empty())
return column;

for (auto & element : columns)
element = recursiveRemoveLowCardinality(element);
res = ColumnTuple::create(columns);

@ -29,7 +29,6 @@ namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int DUPLICATE_COLUMN;
extern const int EMPTY_DATA_PASSED;
extern const int NOT_FOUND_COLUMN_IN_BLOCK;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int SIZES_OF_COLUMNS_IN_TUPLE_DOESNT_MATCH;
@ -181,6 +180,9 @@ static void addElementSafe(const DataTypes & elems, IColumn & column, F && impl)

MutableColumnPtr DataTypeTuple::createColumn() const
{
if (elems.empty())
return ColumnTuple::create(0);

size_t size = elems.size();
MutableColumns tuple_columns(size);
for (size_t i = 0; i < size; ++i)
@ -206,6 +208,9 @@ MutableColumnPtr DataTypeTuple::createColumn(const ISerialization & serializatio
if (!serialization_tuple)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected serialization to create column of type Tuple");

if (elems.empty())
return IDataType::createColumn(serialization);

const auto & element_serializations = serialization_tuple->getElementsSerializations();

size_t size = elems.size();
@ -224,6 +229,12 @@ Field DataTypeTuple::getDefault() const

void DataTypeTuple::insertDefaultInto(IColumn & column) const
{
if (elems.empty())
{
column.insertDefault();
return;
}

addElementSafe(elems, column, [&]
{
for (const auto & i : collections::range(0, elems.size()))
@ -388,7 +399,7 @@ void DataTypeTuple::forEachChild(const ChildCallback & callback) const
static DataTypePtr create(const ASTPtr & arguments)
{
if (!arguments || arguments->children.empty())
throw Exception(ErrorCodes::EMPTY_DATA_PASSED, "Tuple cannot be empty");
return std::make_shared<DataTypeTuple>(DataTypes{});

DataTypes nested_types;
nested_types.reserve(arguments->children.size());
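A small sketch of what these hunks enable, assembled only from calls visible above (illustrative, not code from the commit): an empty Tuple type now yields a usable column instead of throwing.

auto empty_tuple_type = std::make_shared<DB::DataTypeTuple>(DB::DataTypes{});
auto column = empty_tuple_type->createColumn(); /// now returns ColumnTuple::create(0) instead of throwing
column->insertDefault();                        /// for an empty tuple this just bumps the row count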
@ -20,7 +20,6 @@ namespace DB

namespace ErrorCodes
{
extern const int EMPTY_DATA_PASSED;
extern const int NOT_IMPLEMENTED;
}

@ -146,9 +145,6 @@ DataTypePtr FieldToDataType<on_error>::operator() (const Array & x) const
template <LeastSupertypeOnError on_error>
DataTypePtr FieldToDataType<on_error>::operator() (const Tuple & tuple) const
{
if (tuple.empty())
throw Exception(ErrorCodes::EMPTY_DATA_PASSED, "Cannot infer type of an empty tuple");

DataTypes element_types;
element_types.reserve(tuple.size());

@ -229,9 +229,10 @@ static std::pair<ColumnPtr, DataTypePtr> recursivlyConvertDynamicColumnToTuple(
= recursivlyConvertDynamicColumnToTuple(tuple_columns[i], tuple_types[i]);
}

auto new_column = tuple_size == 0 ? column : ColumnPtr(ColumnTuple::create(new_tuple_columns));
return
{
ColumnTuple::create(new_tuple_columns),
new_column,
recreateTupleWithElements(*type_tuple, new_tuple_types)
};
}

@ -70,13 +70,15 @@ void SerializationInfoTuple::add(const SerializationInfo & other)

void SerializationInfoTuple::addDefaults(size_t length)
{
SerializationInfo::addDefaults(length);

for (const auto & elem : elems)
elem->addDefaults(length);
}

void SerializationInfoTuple::replaceData(const SerializationInfo & other)
{
SerializationInfo::add(other);
SerializationInfo::replaceData(other);

const auto & other_info = assert_cast<const SerializationInfoTuple &>(other);
for (const auto & [name, elem] : name_to_elem)
@ -94,7 +96,9 @@ MutableSerializationInfoPtr SerializationInfoTuple::clone() const
for (const auto & elem : elems)
elems_cloned.push_back(elem->clone());

return std::make_shared<SerializationInfoTuple>(std::move(elems_cloned), names, settings);
auto ret = std::make_shared<SerializationInfoTuple>(std::move(elems_cloned), names, settings);
ret->data = data;
return ret;
}

MutableSerializationInfoPtr SerializationInfoTuple::createWithType(
@ -91,6 +91,10 @@ static ReturnType addElementSafe(size_t num_elems, IColumn & column, F && impl)
restore_elements();
return ReturnType(false);
}
else
{
assert_cast<ColumnTuple &>(column).addSize(1);
}

// Check that all columns now have the same size.
size_t new_size = column.size();
@ -564,6 +568,12 @@ void SerializationTuple::enumerateStreams(
const StreamCallback & callback,
const SubstreamData & data) const
{
if (elems.empty())
{
ISerialization::enumerateStreams(settings, callback, data);
return;
}

const auto * type_tuple = data.type ? &assert_cast<const DataTypeTuple &>(*data.type) : nullptr;
const auto * column_tuple = data.column ? &assert_cast<const ColumnTuple &>(*data.column) : nullptr;
const auto * info_tuple = data.serialization_info ? &assert_cast<const SerializationInfoTuple &>(*data.serialization_info) : nullptr;
@ -626,6 +636,22 @@ void SerializationTuple::serializeBinaryBulkWithMultipleStreams(
SerializeBinaryBulkSettings & settings,
SerializeBinaryBulkStatePtr & state) const
{
if (elems.empty())
{
if (WriteBuffer * stream = settings.getter(settings.path))
{
size_t size = column.size();

if (limit == 0 || offset + limit > size)
limit = size - offset;

for (size_t i = 0; i < limit; ++i)
stream->write('0');
}

return;
}

auto * tuple_state = checkAndGetState<SerializeBinaryBulkStateTuple>(state);

for (size_t i = 0; i < elems.size(); ++i)
@ -642,6 +668,24 @@ void SerializationTuple::deserializeBinaryBulkWithMultipleStreams(
DeserializeBinaryBulkStatePtr & state,
SubstreamsCache * cache) const
{
if (elems.empty())
{
auto cached_column = getFromSubstreamsCache(cache, settings.path);
if (cached_column)
{
column = cached_column;
}
else if (ReadBuffer * stream = settings.getter(settings.path))
{
auto mutable_column = column->assumeMutable();
typeid_cast<ColumnTuple &>(*mutable_column).addSize(stream->tryIgnore(limit));
column = std::move(mutable_column);
addToSubstreamsCache(cache, settings.path, column);
}

return;
}

auto * tuple_state = checkAndGetState<DeserializeBinaryBulkStateTuple>(state);

auto mutable_column = column->assumeMutable();
@ -650,6 +694,8 @@ void SerializationTuple::deserializeBinaryBulkWithMultipleStreams(
settings.avg_value_size_hint = 0;
for (size_t i = 0; i < elems.size(); ++i)
elems[i]->deserializeBinaryBulkWithMultipleStreams(column_tuple.getColumnPtr(i), limit, settings, tuple_state->states[i], cache);

typeid_cast<ColumnTuple &>(*mutable_column).addSize(column_tuple.getColumn(0).size());
}

size_t SerializationTuple::getPositionByName(const String & name) const
@ -1,4 +1,3 @@
#include <Columns/IColumn.h>
#include <Core/Field.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/IDataType.h>
@ -10,8 +9,6 @@
#include <string>
#include <vector>

#include <Core/iostream_debug_helpers.h>

template <typename T>
inline std::ostream& operator<<(std::ostream & ostr, const std::vector<T> & v)
@ -63,7 +60,7 @@ TEST_P(ParseDataTypeTest, parseStringValue)
data_type->getDefaultSerialization()->deserializeWholeText(*col, buffer, FormatSettings{});
}

ASSERT_EQ(p.expected_values.size(), col->size()) << "Actual items: " << *col;
ASSERT_EQ(p.expected_values.size(), col->size());
for (size_t i = 0; i < col->size(); ++i)
{
ASSERT_EQ(p.expected_values[i], (*col)[i]);
@ -921,6 +921,7 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
/// We will execute some CREATE queries for recovery (not ATTACH queries),
/// so we need to allow experimental features that can be used in a CREATE query
query_context->setSetting("allow_experimental_inverted_index", 1);
query_context->setSetting("allow_experimental_full_text_index", 1);
query_context->setSetting("allow_experimental_codecs", 1);
query_context->setSetting("allow_experimental_live_view", 1);
query_context->setSetting("allow_experimental_window_view", 1);
@ -39,6 +39,11 @@ ObjectStorageKey CachedObjectStorage::generateObjectKeyForPath(const std::string
return object_storage->generateObjectKeyForPath(path);
}

ObjectStorageKey CachedObjectStorage::generateObjectKeyPrefixForDirectoryPath(const std::string & path) const
{
return object_storage->generateObjectKeyPrefixForDirectoryPath(path);
}

ReadSettings CachedObjectStorage::patchSettings(const ReadSettings & read_settings) const
{
ReadSettings modified_settings{read_settings};

@ -100,6 +100,12 @@ public:

ObjectStorageKey generateObjectKeyForPath(const std::string & path) const override;

ObjectStorageKey generateObjectKeyPrefixForDirectoryPath(const std::string & path) const override;

void setKeysGenerator(ObjectStorageKeysGeneratorPtr gen) override { object_storage->setKeysGenerator(gen); }

bool isPlain() const override { return object_storage->isPlain(); }

bool isRemote() const override { return object_storage->isRemote(); }

void removeCacheIfExists(const std::string & path_key_for_cache) override;
@ -294,7 +294,7 @@ Block NativeReader::read()
}

if (res.rows() != rows)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Row count mismatch after desirialization, got: {}, expected: {}", res.rows(), rows);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Row count mismatch after deserialization, got: {}, expected: {}", res.rows(), rows);

return res;
}
@ -8,6 +8,7 @@
#include <Columns/ColumnConst.h>
#include <Functions/IFunction.h>
#include <Functions/FunctionHelpers.h>
#include <Interpreters/castColumn.h>

#include "config.h"

@ -41,7 +42,7 @@ private:
{
const auto check_argument_type = [this] (const IDataType * arg)
{
if (!isNativeNumber(arg))
if (!isNativeNumber(arg) && !isDecimal(arg))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}",
arg->getName(), getName());
};
@ -53,7 +54,7 @@ private:
}

template <typename LeftType, typename RightType>
ColumnPtr executeTyped(const ColumnConst * left_arg, const IColumn * right_arg) const
static ColumnPtr executeTyped(const ColumnConst * left_arg, const IColumn * right_arg)
{
if (const auto right_arg_typed = checkAndGetColumn<ColumnVector<RightType>>(right_arg))
{
@ -91,7 +92,7 @@ private:
}

template <typename LeftType, typename RightType>
ColumnPtr executeTyped(const ColumnVector<LeftType> * left_arg, const IColumn * right_arg) const
static ColumnPtr executeTyped(const ColumnVector<LeftType> * left_arg, const IColumn * right_arg)
{
if (const auto right_arg_typed = checkAndGetColumn<ColumnVector<RightType>>(right_arg))
{
@ -168,6 +169,25 @@ private:
{
const ColumnWithTypeAndName & col_left = arguments[0];
const ColumnWithTypeAndName & col_right = arguments[1];

ColumnPtr col_ptr_left = col_left.column;
ColumnPtr col_ptr_right = col_right.column;

TypeIndex left_index = col_left.type->getTypeId();
TypeIndex right_index = col_right.type->getTypeId();

if (WhichDataType(col_left.type).isDecimal())
{
col_ptr_left = castColumn(col_left, std::make_shared<DataTypeFloat64>());
left_index = TypeIndex::Float64;
}

if (WhichDataType(col_right.type).isDecimal())
{
col_ptr_right = castColumn(col_right, std::make_shared<DataTypeFloat64>());
right_index = TypeIndex::Float64;
}

ColumnPtr res;

auto call = [&](const auto & types) -> bool
@ -177,8 +197,8 @@ private:
using RightType = typename Types::RightType;
using ColVecLeft = ColumnVector<LeftType>;

const IColumn * left_arg = col_left.column.get();
const IColumn * right_arg = col_right.column.get();
const IColumn * left_arg = col_ptr_left.get();
const IColumn * right_arg = col_ptr_right.get();

if (const auto left_arg_typed = checkAndGetColumn<ColVecLeft>(left_arg))
{
@ -200,9 +220,6 @@ private:
return false;
};

TypeIndex left_index = col_left.type->getTypeId();
TypeIndex right_index = col_right.type->getTypeId();

if (!callOnBasicTypes<true, true, false, false>(left_index, right_index, call))
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of argument of function {}",
col_left.column->getName(), getName());
@ -11,7 +11,6 @@

#include <gtest/gtest.h>
#include <initializer_list>
#include <Core/iostream_debug_helpers.h>

namespace
@ -41,23 +40,6 @@ std::string PrintMap(const auto & keys, const auto & values)
return std::move(buff.str());
}

template <typename T>
struct Dump
{
const T & value;

friend std::ostream & operator<<(std::ostream & ostr, const Dump & d)
{
return dumpValue(ostr, d.value);
}
};

template <typename T>
auto print_with_dump(const T & value)
{
return Dump<T>{value};
}

}

struct KeyValuePairExtractorTestParam
@ -82,9 +64,7 @@ TEST_P(extractKVPairKeyValuePairExtractorTest, Match)
auto values = ColumnString::create();

auto pairs_found = kv_parser->extract(input, keys, values);
ASSERT_EQ(expected.size(), pairs_found)
<< "\texpected: " << print_with_dump(expected) << "\n"
<< "\tactual : " << print_with_dump(*ToColumnMap(keys, values));
ASSERT_EQ(expected.size(), pairs_found);

size_t i = 0;
for (const auto & expected_kv : expected)
325
src/Functions/parseReadableSize.cpp
Normal file
@ -0,0 +1,325 @@
#include <base/types.h>
#include <boost/algorithm/string/case_conv.hpp>

#include <Columns/ColumnNullable.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnString.h>
#include <Common/Exception.h>
#include <Common/FunctionDocumentation.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/IFunction.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <cmath>
#include <string_view>

namespace DB
{

namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int CANNOT_PARSE_INPUT_ASSERTION_FAILED;
extern const int CANNOT_PARSE_NUMBER;
extern const int CANNOT_PARSE_TEXT;
extern const int ILLEGAL_COLUMN;
extern const int UNEXPECTED_DATA_AFTER_PARSED_VALUE;
}

enum class ErrorHandling : uint8_t
{
Exception,
Zero,
Null
};

using ScaleFactors = std::unordered_map<std::string_view, size_t>;

/** parseReadableSize* - Returns the number of bytes corresponding to a given readable binary or decimal size.
 * Examples:
 * - `parseReadableSize('123 MiB')`
 * - `parseReadableSize('123 MB')`
 * Meant to be the inverse of `formatReadable*Size` with the following exceptions:
 * - Number of bytes is returned as an unsigned integer amount instead of a float. Decimal points are rounded up to the nearest integer.
 * - Negative numbers are not allowed as negative sizes don't make sense.
 * Flavours:
 * - parseReadableSize
 * - parseReadableSizeOrNull
 * - parseReadableSizeOrZero
 */
template <typename Name, ErrorHandling error_handling>
class FunctionParseReadable : public IFunction
{
public:
static constexpr auto name = Name::name;
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionParseReadable<Name, error_handling>>(); }

String getName() const override { return name; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
bool useDefaultImplementationForConstants() const override { return true; }
size_t getNumberOfArguments() const override { return 1; }

DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
FunctionArgumentDescriptors args
{
{"readable_size", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"},
};
validateFunctionArgumentTypes(*this, arguments, args);
DataTypePtr return_type = std::make_shared<DataTypeUInt64>();
if constexpr (error_handling == ErrorHandling::Null)
return std::make_shared<DataTypeNullable>(return_type);
else
return return_type;
}

ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * col_str = checkAndGetColumn<ColumnString>(arguments[0].column.get());
if (!col_str)
{
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal column {} of first ('str') argument of function {}. Must be string.",
arguments[0].column->getName(),
getName()
);
}

auto col_res = ColumnUInt64::create(input_rows_count);

ColumnUInt8::MutablePtr col_null_map;
if constexpr (error_handling == ErrorHandling::Null)
col_null_map = ColumnUInt8::create(input_rows_count, 0);

auto & res_data = col_res->getData();

for (size_t i = 0; i < input_rows_count; ++i)
{
std::string_view value = col_str->getDataAt(i).toView();
try
{
UInt64 num_bytes = parseReadableFormat(value);
res_data[i] = num_bytes;
}
catch (const Exception &)
{
if constexpr (error_handling == ErrorHandling::Exception)
{
throw;
}
else
{
res_data[i] = 0;
if constexpr (error_handling == ErrorHandling::Null)
col_null_map->getData()[i] = 1;
}
}
}
if constexpr (error_handling == ErrorHandling::Null)
return ColumnNullable::create(std::move(col_res), std::move(col_null_map));
else
return col_res;
}

private:

UInt64 parseReadableFormat(const std::string_view & value) const
{
static const ScaleFactors scale_factors =
{
{"b", 1ull},
// ISO/IEC 80000-13 binary units
{"kib", 1024ull},
{"mib", 1024ull * 1024ull},
{"gib", 1024ull * 1024ull * 1024ull},
{"tib", 1024ull * 1024ull * 1024ull * 1024ull},
{"pib", 1024ull * 1024ull * 1024ull * 1024ull * 1024ull},
{"eib", 1024ull * 1024ull * 1024ull * 1024ull * 1024ull * 1024ull},
// Decimal units
{"kb", 1000ull},
{"mb", 1000ull * 1000ull},
{"gb", 1000ull * 1000ull * 1000ull},
{"tb", 1000ull * 1000ull * 1000ull * 1000ull},
{"pb", 1000ull * 1000ull * 1000ull * 1000ull * 1000ull},
{"eb", 1000ull * 1000ull * 1000ull * 1000ull * 1000ull * 1000ull},
};
ReadBufferFromString buf(value);

// tryReadFloatText does not seem to raise any error when there is leading whitespace, so we check for it explicitly
skipWhitespaceIfAny(buf);
if (buf.getPosition() > 0)
{
throw Exception(
ErrorCodes::CANNOT_PARSE_INPUT_ASSERTION_FAILED,
"Invalid expression for function {} - Leading whitespace is not allowed (\"{}\")",
getName(),
value
);
}

Float64 base = 0;
if (!tryReadFloatTextPrecise(base, buf)) // If we use the default (fast) tryReadFloatText, it returns true on garbage input, so we use the precise version
{
throw Exception(
ErrorCodes::CANNOT_PARSE_NUMBER,
"Invalid expression for function {} - Unable to parse readable size numeric component (\"{}\")",
getName(),
value
);
}
else if (std::isnan(base) || !std::isfinite(base))
{
throw Exception(
ErrorCodes::BAD_ARGUMENTS,
"Invalid expression for function {} - Invalid numeric component: {}",
getName(),
base
);
}
else if (base < 0)
{
throw Exception(
ErrorCodes::BAD_ARGUMENTS,
"Invalid expression for function {} - Negative sizes are not allowed ({})",
getName(),
base
);
}

skipWhitespaceIfAny(buf);

String unit;
readStringUntilWhitespace(unit, buf);
boost::algorithm::to_lower(unit);
auto iter = scale_factors.find(unit);
if (iter == scale_factors.end())
{
throw Exception(
ErrorCodes::CANNOT_PARSE_TEXT,
"Invalid expression for function {} - Unknown readable size unit (\"{}\")",
getName(),
unit
);
}
else if (!buf.eof())
{
throw Exception(
ErrorCodes::UNEXPECTED_DATA_AFTER_PARSED_VALUE,
"Invalid expression for function {} - Found trailing characters after readable size string (\"{}\")",
getName(),
value
);
}

Float64 num_bytes_with_decimals = base * iter->second;
if (num_bytes_with_decimals > std::numeric_limits<UInt64>::max())
{
throw Exception(
ErrorCodes::BAD_ARGUMENTS,
"Invalid expression for function {} - Result is too big for output type (\"{}\")",
getName(),
num_bytes_with_decimals
);
}
// As the input might be an arbitrary decimal number we might end up with a non-integer amount of bytes when parsing binary (eg MiB) units.
// This doesn't make sense so we round up to indicate the byte size that can fit the passed size.
return static_cast<UInt64>(std::ceil(num_bytes_with_decimals));
}
};

struct NameParseReadableSize
{
static constexpr auto name = "parseReadableSize";
};

struct NameParseReadableSizeOrNull
{
static constexpr auto name = "parseReadableSizeOrNull";
};

struct NameParseReadableSizeOrZero
{
static constexpr auto name = "parseReadableSizeOrZero";
};

using FunctionParseReadableSize = FunctionParseReadable<NameParseReadableSize, ErrorHandling::Exception>;
using FunctionParseReadableSizeOrNull = FunctionParseReadable<NameParseReadableSizeOrNull, ErrorHandling::Null>;
using FunctionParseReadableSizeOrZero = FunctionParseReadable<NameParseReadableSizeOrZero, ErrorHandling::Zero>;

FunctionDocumentation parseReadableSize_documentation {
.description = "Given a string containing a byte size and `B`, `KiB`, `KB`, `MiB`, `MB`, etc. as a unit (i.e. [ISO/IEC 80000-13](https://en.wikipedia.org/wiki/ISO/IEC_80000) or decimal byte unit), this function returns the corresponding number of bytes. If the function is unable to parse the input value, it throws an exception.",
.syntax = "parseReadableSize(x)",
.arguments = {{"x", "Readable size with ISO/IEC 80000-13 or decimal byte unit ([String](../../sql-reference/data-types/string.md))"}},
.returned_value = "Number of bytes, rounded up to the nearest integer ([UInt64](../../sql-reference/data-types/int-uint.md))",
.examples = {
{
"basic",
"SELECT arrayJoin(['1 B', '1 KiB', '3 MB', '5.314 KiB']) AS readable_sizes, parseReadableSize(readable_sizes) AS sizes;",
R"(
┌─readable_sizes─┬───sizes─┐
│ 1 B            │       1 │
│ 1 KiB          │    1024 │
│ 3 MB           │ 3000000 │
│ 5.314 KiB      │    5442 │
└────────────────┴─────────┘)"
},
},
.categories = {"OtherFunctions"},
};

FunctionDocumentation parseReadableSizeOrNull_documentation {
.description = "Given a string containing a byte size and `B`, `KiB`, `KB`, `MiB`, `MB`, etc. as a unit (i.e. [ISO/IEC 80000-13](https://en.wikipedia.org/wiki/ISO/IEC_80000) or decimal byte unit), this function returns the corresponding number of bytes. If the function is unable to parse the input value, it returns `NULL`",
.syntax = "parseReadableSizeOrNull(x)",
.arguments = {{"x", "Readable size with ISO/IEC 80000-13 or decimal byte unit ([String](../../sql-reference/data-types/string.md))"}},
.returned_value = "Number of bytes, rounded up to the nearest integer, or NULL if unable to parse the input (Nullable([UInt64](../../sql-reference/data-types/int-uint.md)))",
.examples = {
{
"basic",
"SELECT arrayJoin(['1 B', '1 KiB', '3 MB', '5.314 KiB', 'invalid']) AS readable_sizes, parseReadableSizeOrNull(readable_sizes) AS sizes;",
R"(
┌─readable_sizes─┬───sizes─┐
│ 1 B            │       1 │
│ 1 KiB          │    1024 │
│ 3 MB           │ 3000000 │
│ 5.314 KiB      │    5442 │
│ invalid        │    ᴺᵁᴸᴸ │
└────────────────┴─────────┘)"
},
},
.categories = {"OtherFunctions"},
};

FunctionDocumentation parseReadableSizeOrZero_documentation {
.description = "Given a string containing a byte size and `B`, `KiB`, `KB`, `MiB`, `MB`, etc. as a unit (i.e. [ISO/IEC 80000-13](https://en.wikipedia.org/wiki/ISO/IEC_80000) or decimal byte unit), this function returns the corresponding number of bytes. If the function is unable to parse the input value, it returns `0`",
.syntax = "parseReadableSizeOrZero(x)",
.arguments = {{"x", "Readable size with ISO/IEC 80000-13 or decimal byte unit ([String](../../sql-reference/data-types/string.md))"}},
.returned_value = "Number of bytes, rounded up to the nearest integer, or 0 if unable to parse the input ([UInt64](../../sql-reference/data-types/int-uint.md))",
.examples = {
{
"basic",
"SELECT arrayJoin(['1 B', '1 KiB', '3 MB', '5.314 KiB', 'invalid']) AS readable_sizes, parseReadableSizeOrZero(readable_sizes) AS sizes;",
R"(
┌─readable_sizes─┬───sizes─┐
│ 1 B            │       1 │
│ 1 KiB          │    1024 │
│ 3 MB           │ 3000000 │
│ 5.314 KiB      │    5442 │
│ invalid        │       0 │
└────────────────┴─────────┘)",
},
},
.categories = {"OtherFunctions"},
};

REGISTER_FUNCTION(ParseReadableSize)
{
factory.registerFunction<FunctionParseReadableSize>(parseReadableSize_documentation);
factory.registerFunction<FunctionParseReadableSizeOrNull>(parseReadableSizeOrNull_documentation);
factory.registerFunction<FunctionParseReadableSizeOrZero>(parseReadableSizeOrZero_documentation);
}
}
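A worked check of the rounding rule documented above (a self-contained snippet, not part of the source file): "5.314 KiB" is 5.314 * 1024 = 5441.536 bytes, and rounding up yields 5442, matching the documentation tables.

#include <cmath>
#include <cstdio>

int main()
{
    double bytes = 5.314 * 1024;             /// binary KiB scale factor
    std::printf("%.0f\n", std::ceil(bytes)); /// prints 5442
    return 0;
}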
@ -10,11 +10,6 @@
namespace DB
{

namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}

/** tuple(x, y, ...) is a function that allows you to group several columns
 * tupleElement(tuple, n) is a function that allows you to retrieve a column from tuple.
 */
@ -45,14 +40,14 @@ public:

DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
{
if (arguments.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires at least one argument.", getName());

return std::make_shared<DataTypeTuple>(arguments);
}

ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
if (arguments.empty())
return ColumnTuple::create(input_rows_count);

size_t tuple_size = arguments.size();
Columns tuple_columns(tuple_size);
for (size_t i = 0; i < tuple_size; ++i)
@ -12,6 +12,7 @@

namespace DB
{

namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
@ -19,6 +20,36 @@ namespace ErrorCodes
extern const int ARGUMENT_OUT_OF_BOUND;
}

namespace
{

/// Checks that passed data types are tuples and have the same size.
/// Returns size of tuples.
size_t checkAndGetTuplesSize(const DataTypePtr & lhs_type, const DataTypePtr & rhs_type, const String & function_name = {})
{
const auto * left_tuple = checkAndGetDataType<DataTypeTuple>(lhs_type.get());
const auto * right_tuple = checkAndGetDataType<DataTypeTuple>(rhs_type.get());

if (!left_tuple)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Argument 0{} should be tuple, got {}",
function_name.empty() ? "" : fmt::format(" of function {}", function_name), lhs_type->getName());

if (!right_tuple)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Argument 1{} should be tuple, got {}",
function_name.empty() ? "" : fmt::format(" of function {}", function_name), rhs_type->getName());

const auto & left_types = left_tuple->getElements();
const auto & right_types = right_tuple->getElements();

if (left_types.size() != right_types.size())
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Expected tuples of the same size as arguments{}, got {} and {}",
function_name.empty() ? "" : fmt::format(" of function {}", function_name), lhs_type->getName(), rhs_type->getName());
return left_types.size();
}

}

struct PlusName { static constexpr auto name = "plus"; };
struct MinusName { static constexpr auto name = "minus"; };
struct MultiplyName { static constexpr auto name = "multiply"; };
@ -33,8 +64,7 @@ struct L2SquaredLabel { static constexpr auto name = "2Squared"; };
struct LinfLabel { static constexpr auto name = "inf"; };
struct LpLabel { static constexpr auto name = "p"; };

/// str starts from the lowercase letter; not constexpr due to the compiler version
/*constexpr*/ std::string makeFirstLetterUppercase(const std::string& str)
constexpr std::string makeFirstLetterUppercase(const std::string & str)
{
std::string res(str);
res[0] += 'A' - 'a';
@ -57,35 +87,13 @@ public:

DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
const auto * left_tuple = checkAndGetDataType<DataTypeTuple>(arguments[0].type.get());
const auto * right_tuple = checkAndGetDataType<DataTypeTuple>(arguments[1].type.get());
size_t tuple_size = checkAndGetTuplesSize(arguments[0].type, arguments[1].type, getName());

if (!left_tuple)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Argument 0 of function {} should be tuple, got {}",
getName(), arguments[0].type->getName());
const auto & left_types = checkAndGetDataType<DataTypeTuple>(arguments[0].type.get())->getElements();
const auto & right_types = checkAndGetDataType<DataTypeTuple>(arguments[1].type.get())->getElements();

if (!right_tuple)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Argument 1 of function {} should be tuple, got {}",
getName(), arguments[1].type->getName());

const auto & left_types = left_tuple->getElements();
const auto & right_types = right_tuple->getElements();

Columns left_elements;
Columns right_elements;
if (arguments[0].column)
left_elements = getTupleElements(*arguments[0].column);
if (arguments[1].column)
right_elements = getTupleElements(*arguments[1].column);

if (left_types.size() != right_types.size())
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Expected tuples of the same size as arguments of function {}. Got {} and {}",
getName(), arguments[0].type->getName(), arguments[1].type->getName());

size_t tuple_size = left_types.size();
if (tuple_size == 0)
return std::make_shared<DataTypeUInt8>();
Columns left_elements = arguments[0].column ? getTupleElements(*arguments[0].column) : Columns();
Columns right_elements = arguments[1].column ? getTupleElements(*arguments[1].column) : Columns();

auto func = FunctionFactory::instance().get(FuncName::name, context);
DataTypes types(tuple_size);
@ -119,7 +127,7 @@ public:

size_t tuple_size = left_elements.size();
if (tuple_size == 0)
return DataTypeUInt8().createColumnConstWithDefaultValue(input_rows_count);
return ColumnTuple::create(input_rows_count);

auto func = FunctionFactory::instance().get(FuncName::name, context);
Columns columns(tuple_size);
@ -177,9 +185,6 @@ public:
cur_elements = getTupleElements(*arguments[0].column);

size_t tuple_size = cur_types.size();
if (tuple_size == 0)
return std::make_shared<DataTypeUInt8>();

auto negate = FunctionFactory::instance().get("negate", context);
DataTypes types(tuple_size);
for (size_t i = 0; i < tuple_size; ++i)
@ -197,7 +202,7 @@ public:
}
}

return std::make_shared<DataTypeTuple>(types);
return std::make_shared<DataTypeTuple>(std::move(types));
}

ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
@ -208,7 +213,7 @@ public:

size_t tuple_size = cur_elements.size();
if (tuple_size == 0)
return DataTypeUInt8().createColumnConstWithDefaultValue(input_rows_count);
return ColumnTuple::create(input_rows_count);

auto negate = FunctionFactory::instance().get("negate", context);
Columns columns(tuple_size);
@ -248,13 +253,9 @@ public:

const auto & cur_types = cur_tuple->getElements();

Columns cur_elements;
if (arguments[0].column)
cur_elements = getTupleElements(*arguments[0].column);
Columns cur_elements = arguments[0].column ? getTupleElements(*arguments[0].column) : Columns();

size_t tuple_size = cur_types.size();
if (tuple_size == 0)
return std::make_shared<DataTypeUInt8>();

const auto & p_column = arguments[1];
auto func = FunctionFactory::instance().get(FuncName::name, context);
@ -285,7 +286,7 @@ public:

size_t tuple_size = cur_elements.size();
if (tuple_size == 0)
return DataTypeUInt8().createColumnConstWithDefaultValue(input_rows_count);
return ColumnTuple::create(input_rows_count);

const auto & p_column = arguments[1];
auto func = FunctionFactory::instance().get(FuncName::name, context);
@ -583,11 +584,14 @@ public:
types = {arguments[0]};
}

const auto * interval_last = checkAndGetDataType<DataTypeInterval>(types.back().get());
const auto * interval_new = checkAndGetDataType<DataTypeInterval>(arguments[1].get());
if (!types.empty())
{
const auto * interval_last = checkAndGetDataType<DataTypeInterval>(types.back().get());
const auto * interval_new = checkAndGetDataType<DataTypeInterval>(arguments[1].get());

if (!interval_last->equals(*interval_new))
types.push_back(arguments[1]);
if (!interval_last->equals(*interval_new))
types.push_back(arguments[1]);
}

return std::make_shared<DataTypeTuple>(types);
}
@ -632,14 +636,10 @@ public:
size_t tuple_size = cur_elements.size();

if (tuple_size == 0)
{
can_be_merged = false;
}
else
{
const auto * tuple_last_interval = checkAndGetDataType<DataTypeInterval>(cur_types.back().get());
can_be_merged = tuple_last_interval->equals(*second_interval);
}
return ColumnTuple::create(input_rows_count);

const auto * tuple_last_interval = checkAndGetDataType<DataTypeInterval>(cur_types.back().get());
can_be_merged = tuple_last_interval->equals(*second_interval);

if (can_be_merged)
tuple_columns.resize(tuple_size);
@ -726,9 +726,7 @@ public:

const auto & cur_types = cur_tuple->getElements();

Columns cur_elements;
if (arguments[0].column)
cur_elements = getTupleElements(*arguments[0].column);
Columns cur_elements = arguments[0].column ? getTupleElements(*arguments[0].column) : Columns();

size_t tuple_size = cur_types.size();
if (tuple_size == 0)
@ -1344,6 +1342,11 @@ public:

DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
size_t tuple_size = checkAndGetTuplesSize(arguments[0].type, arguments[1].type, getName());
if (tuple_size == 0)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Result of function {} is undefined for empty tuples", getName());

FunctionDotProduct dot(context);
ColumnWithTypeAndName dot_result{dot.getReturnTypeImpl(arguments), {}};
@ -48,7 +48,7 @@ HTTPSessionPtr makeHTTPSession(
HTTPConnectionGroupType group,
const Poco::URI & uri,
const ConnectionTimeouts & timeouts,
ProxyConfiguration proxy_configuration)
const ProxyConfiguration & proxy_configuration)
{
auto connection_pool = HTTPConnectionPools::instance().getPool(group, uri, proxy_configuration);
return connection_pool->getConnection(timeouts);

@ -61,7 +61,7 @@ HTTPSessionPtr makeHTTPSession(
HTTPConnectionGroupType group,
const Poco::URI & uri,
const ConnectionTimeouts & timeouts,
ProxyConfiguration proxy_config = {}
const ProxyConfiguration & proxy_config = {}
);

bool isRedirect(Poco::Net::HTTPResponse::HTTPStatus status);
@ -305,8 +305,7 @@ void PocoHTTPClient::makeRequestInternal(
Aws::Utils::RateLimits::RateLimiterInterface * readLimiter,
Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const
{
const auto request_configuration = per_request_configuration();
makeRequestInternalImpl(request, request_configuration, response, readLimiter, writeLimiter);
makeRequestInternalImpl(request, response, readLimiter, writeLimiter);
}

String getMethod(const Aws::Http::HttpRequest & request)
@ -330,7 +329,6 @@ String getMethod(const Aws::Http::HttpRequest & request)

void PocoHTTPClient::makeRequestInternalImpl(
Aws::Http::HttpRequest & request,
const DB::ProxyConfiguration & proxy_configuration,
std::shared_ptr<PocoHTTPResponse> & response,
Aws::Utils::RateLimits::RateLimiterInterface *,
Aws::Utils::RateLimits::RateLimiterInterface *) const
@ -383,6 +381,7 @@ void PocoHTTPClient::makeRequestInternalImpl(

try
{
const auto proxy_configuration = per_request_configuration();
for (unsigned int attempt = 0; attempt <= s3_max_redirects; ++attempt)
{
Poco::URI target_uri(uri);

@ -156,7 +156,6 @@ private:

void makeRequestInternalImpl(
Aws::Http::HttpRequest & request,
const DB::ProxyConfiguration & proxy_configuration,
std::shared_ptr<PocoHTTPResponse> & response,
Aws::Utils::RateLimits::RateLimiterInterface * readLimiter,
Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const;
@ -7,6 +7,7 @@ namespace DB
{

static inline constexpr auto FULL_TEXT_INDEX_NAME = "full_text";
static inline constexpr auto INVERTED_INDEX_NAME = "inverted";
static inline constexpr UInt64 UNLIMITED_ROWS_PER_POSTINGS_LIST = 0;
static inline constexpr UInt64 MIN_ROWS_PER_POSTINGS_LIST = 8 * 1024;
static inline constexpr UInt64 DEFAULT_MAX_ROWS_PER_POSTINGS_LIST = 64 * 1024;
@@ -750,8 +750,13 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti
             throw Exception(ErrorCodes::ILLEGAL_INDEX, "Duplicated index name {} is not allowed. Please use different index names.", backQuoteIfNeed(index_desc.name));

         const auto & settings = getContext()->getSettingsRef();
-        if (index_desc.type == FULL_TEXT_INDEX_NAME && !settings.allow_experimental_inverted_index)
-            throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental full-text index feature is not enabled (the setting 'allow_experimental_inverted_index')");
+        if (index_desc.type == FULL_TEXT_INDEX_NAME && !settings.allow_experimental_full_text_index)
+            throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental full-text index feature is not enabled (the setting 'allow_experimental_full_text_index')");
+        /// ----
+        /// Temporary check during a transition period. Please remove at the end of 2024.
+        if (index_desc.type == INVERTED_INDEX_NAME && settings.allow_experimental_inverted_index) /// The funny condition is not a mistake, see 02346_fulltext_index_old_name.sql
+            throw Exception(ErrorCodes::ILLEGAL_INDEX, "Please use index type 'full_text' instead of 'inverted'");
+        /// ----
         if (index_desc.type == "annoy" && !settings.allow_experimental_annoy_index)
             throw Exception(ErrorCodes::INCORRECT_QUERY, "Annoy index is disabled. Turn on allow_experimental_annoy_index");
         if (index_desc.type == "usearch" && !settings.allow_experimental_usearch_index)
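For illustration only, not part of the commit: a minimal standalone C++ sketch of the transition guard above, with hypothetical stand-in names (Settings, checkIndexType) rather than the real ClickHouse classes. It shows why the "funny condition" rejects the legacy 'inverted' spelling precisely when the legacy setting is on, pointing the user at the new name.

#include <iostream>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for the two experimental settings involved.
struct Settings
{
    bool allow_experimental_full_text_index = false;
    bool allow_experimental_inverted_index = false;
};

void checkIndexType(const std::string & type, const Settings & settings)
{
    // New name: requires the new setting.
    if (type == "full_text" && !settings.allow_experimental_full_text_index)
        throw std::runtime_error("Experimental full-text index feature is not enabled");
    // Old name plus old setting: the user is actively using the legacy
    // spelling, so redirect them to the new one (the "funny condition").
    if (type == "inverted" && settings.allow_experimental_inverted_index)
        throw std::runtime_error("Please use index type 'full_text' instead of 'inverted'");
}

int main()
{
    Settings settings;
    settings.allow_experimental_inverted_index = true; // user migrating from the old name
    try
    {
        checkIndexType("inverted", settings);
    }
    catch (const std::exception & e)
    {
        std::cout << e.what() << '\n'; // prints the hint about 'full_text'
    }
    return 0;
}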
@@ -657,7 +657,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
         MergeTreeWhereOptimizer where_optimizer{
             std::move(column_compressed_sizes),
             metadata_snapshot,
-            storage->getConditionEstimatorByPredicate(query_info, storage_snapshot, context),
+            storage->getConditionEstimatorByPredicate(storage_snapshot, nullptr, context),
             queried_columns,
             supported_prewhere_columns,
             log};
@@ -653,7 +653,7 @@ BoolMask MergeTreeSetIndex::checkInRange(const std::vector<Range> & key_ranges,
     /// Given left_lower >= left_point, right_lower >= right_point, find if there may be a match in between left_lower and right_lower.
     if (left_lower + 1 < right_lower)
     {
-        /// There is an point in between: left_lower + 1
+        /// There is a point in between: left_lower + 1
         return {true, true};
     }
     else if (left_lower + 1 == right_lower)
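For intuition, a toy sketch of the reasoning in the comment above, using a plain sorted std::vector in place of the ordered set (this is an illustration, not the MergeTreeSetIndex code): when the lower-bound positions of the two range endpoints differ by more than one, some set element lies strictly between them, so this branch alone proves a possible match.

#include <algorithm>
#include <cassert>
#include <vector>

// Returns true when this branch alone proves a point strictly in between;
// the equality case (left_lower + 1 == right_lower) is resolved by the
// boundary checks that follow in the real implementation.
bool definitelyHasPointInBetween(const std::vector<int> & ordered_set, int left_point, int right_point)
{
    auto left_lower = std::lower_bound(ordered_set.begin(), ordered_set.end(), left_point);
    auto right_lower = std::lower_bound(ordered_set.begin(), ordered_set.end(), right_point);
    /// There is a point in between: left_lower + 1
    return left_lower + 1 < right_lower;
}

int main()
{
    std::vector<int> ordered_set{1, 5, 9};
    assert(definitelyHasPointInBetween(ordered_set, 0, 10));  // 5 is strictly inside
    assert(!definitelyHasPointInBetween(ordered_set, 4, 6));  // deferred to boundary checks
    return 0;
}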
@@ -166,7 +166,7 @@ void getBlockSortPermutationImpl(const Block & block, const SortDescription & de
     for (const auto & column_with_sort_description : columns_with_sort_descriptions)
     {
-        while (!ranges.empty() && limit && limit <= ranges.back().first)
+        while (!ranges.empty() && limit && limit <= ranges.back().from)
             ranges.pop_back();

         if (ranges.empty())
@@ -2,7 +2,6 @@
 #include <limits>
 #include <ostream>
 #include <Core/Field.h>
-#include <Core/iostream_debug_helpers.h>
 #include <Interpreters/convertFieldToType.h>
 #include <DataTypes/DataTypeFactory.h>

@@ -24,9 +23,7 @@ std::ostream & operator << (std::ostream & ostr, const ConvertFieldToTypeTestPar
 {
     return ostr << "{"
                 << "\n\tfrom_type : " << params.from_type
                 << "\n\tfrom_value : " << params.from_value
                 << "\n\tto_type : " << params.to_type
                 << "\n\texpected : " << (params.expected_value ? *params.expected_value : Field())
                 << "\n}";
 }
@@ -12,14 +12,6 @@ if (TARGET ch_rust::prql)
     target_link_libraries(clickhouse_parsers PRIVATE ch_rust::prql)
 endif ()

-if (USE_DEBUG_HELPERS)
-    # CMake generator expression will do insane quoting when it encounters special character like quotes, spaces, etc.
-    # Prefixing "SHELL:" will force it to use the original text.
-    set (INCLUDE_DEBUG_HELPERS "SHELL:-I\"${ClickHouse_SOURCE_DIR}/base\" -include \"${ClickHouse_SOURCE_DIR}/src/Parsers/iostream_debug_helpers.h\"")
-    # Use generator expression as we don't want to pollute CMAKE_CXX_FLAGS, which will interfere with CMake check system.
-    add_compile_options($<$<COMPILE_LANGUAGE:CXX>:${INCLUDE_DEBUG_HELPERS}>)
-endif ()
-
 if(ENABLE_EXAMPLES)
     add_subdirectory(examples)
 endif()
@@ -1,35 +0,0 @@
-#include "iostream_debug_helpers.h"
-
-#include <Parsers/IAST.h>
-#include <Parsers/IParser.h>
-#include <Parsers/Lexer.h>
-#include <Parsers/TokenIterator.h>
-#include <IO/WriteBufferFromOStream.h>
-#include <IO/Operators.h>
-
-namespace DB
-{
-
-std::ostream & operator<<(std::ostream & stream, const Token & what)
-{
-    stream << "Token (type="<< static_cast<int>(what.type) <<"){"<< std::string{what.begin, what.end} << "}";
-    return stream;
-}
-
-std::ostream & operator<<(std::ostream & stream, const Expected & what)
-{
-    stream << "Expected {variants=";
-    dumpValue(stream, what.variants)
-        << "; max_parsed_pos=" << what.max_parsed_pos << "}";
-    return stream;
-}
-
-std::ostream & operator<<(std::ostream & stream, const IAST & what)
-{
-    WriteBufferFromOStream buf(stream, 4096);
-    buf << "IAST{";
-    what.dumpTree(buf);
-    buf << "}";
-    return stream;
-}
-
-}
@@ -1,17 +0,0 @@
-#pragma once
-#include <iostream>
-
-namespace DB
-{
-struct Token;
-std::ostream & operator<<(std::ostream & stream, const Token & what);
-
-struct Expected;
-std::ostream & operator<<(std::ostream & stream, const Expected & what);
-
-class IAST;
-std::ostream & operator<<(std::ostream & stream, const IAST & what);
-
-}
-
-#include <Core/iostream_debug_helpers.h>
@@ -398,8 +398,6 @@ void ParquetBlockInputFormat::initializeIfNeeded()
 {
     if (std::exchange(is_initialized, true))
         return;
-    if (format_settings.parquet.use_native_reader)
-        LOG_INFO(&Poco::Logger::get("ParquetBlockInputFormat"), "using native parquet reader");

     // Create arrow file adapter.
     // TODO: Make the adapter do prefetching on IO threads, based on the full set of ranges that
@@ -83,7 +83,7 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &)
     MergeTreeWhereOptimizer where_optimizer{
         std::move(column_compressed_sizes),
         storage_metadata,
-        storage.getConditionEstimatorByPredicate(source_step_with_filter->getQueryInfo(), storage_snapshot, context),
+        storage.getConditionEstimatorByPredicate(storage_snapshot, source_step_with_filter->getFilterActionsDAG(), context),
         queried_columns,
         storage.supportedPrewhereColumns(),
         getLogger("QueryPlanOptimizePrewhere")};
@@ -22,6 +22,7 @@
 #include <Processors/QueryPlan/TotalsHavingStep.h>
 #include <Processors/QueryPlan/UnionStep.h>
 #include <Processors/QueryPlan/WindowStep.h>
+#include "Storages/KeyDescription.h"
 #include <Storages/StorageMerge.h>
 #include <Common/typeid_cast.h>

@@ -332,8 +333,7 @@ InputOrderInfoPtr buildInputOrderInfo(
     const FixedColumns & fixed_columns,
     const ActionsDAGPtr & dag,
     const SortDescription & description,
-    const ActionsDAG & sorting_key_dag,
-    const Names & sorting_key_columns,
+    const KeyDescription & sorting_key,
     size_t limit)
 {
     //std::cerr << "------- buildInputOrderInfo " << std::endl;

@@ -343,6 +343,8 @@ InputOrderInfoPtr buildInputOrderInfo(
     MatchedTrees::Matches matches;
     FixedColumns fixed_key_columns;

+    const auto & sorting_key_dag = sorting_key.expression->getActionsDAG();
+
     if (dag)
     {
         matches = matchTrees(sorting_key_dag.getOutputs(), *dag);

@@ -371,9 +373,9 @@ InputOrderInfoPtr buildInputOrderInfo(
     size_t next_description_column = 0;
     size_t next_sort_key = 0;

-    while (next_description_column < description.size() && next_sort_key < sorting_key_columns.size())
+    while (next_description_column < description.size() && next_sort_key < sorting_key.column_names.size())
     {
-        const auto & sorting_key_column = sorting_key_columns[next_sort_key];
+        const auto & sorting_key_column = sorting_key.column_names[next_sort_key];
         const auto & sort_column_description = description[next_description_column];

         /// If required order depend on collation, it cannot be matched with primary key order.

@@ -381,6 +383,12 @@ InputOrderInfoPtr buildInputOrderInfo(
         if (sort_column_description.collator)
             break;

+        /// Since sorting key columns are always sorted with NULLS LAST, reading in order
+        /// supported only for ASC NULLS LAST ("in order"), and DESC NULLS FIRST ("reverse")
+        const auto column_is_nullable = sorting_key.data_types[next_sort_key]->isNullable();
+        if (column_is_nullable && sort_column_description.nulls_direction != 1)
+            break;
+
         /// Direction for current sort key.
         int current_direction = 0;
         bool strict_monotonic = true;
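A small self-contained illustration of the constraint stated in the comment above, with std::optional standing in for a Nullable column value (not ClickHouse code): the on-disk order puts NULLs last, so ASC NULLS LAST matches storage order directly and DESC NULLS FIRST is its exact reversal, while DESC NULLS LAST is neither, hence the `break`.

#include <cassert>
#include <optional>
#include <vector>

int main()
{
    using Value = std::optional<int>; // nullopt plays the role of NULL

    // Sorting key order on disk: ascending, NULLs last.
    std::vector<Value> storage_order{1, 2, 3, std::nullopt};

    // DESC NULLS FIRST is exactly the storage order read backwards.
    std::vector<Value> desc_nulls_first{std::nullopt, 3, 2, 1};
    std::vector<Value> reversed(storage_order.rbegin(), storage_order.rend());
    assert(reversed == desc_nulls_first);

    // DESC NULLS LAST is neither a forward nor a backward scan of the
    // storage order, so it cannot be served by reading "in order".
    std::vector<Value> desc_nulls_last{3, 2, 1, std::nullopt};
    assert(reversed != desc_nulls_last);
    return 0;
}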
@@ -691,12 +699,11 @@ InputOrderInfoPtr buildInputOrderInfo(
     size_t limit)
 {
     const auto & sorting_key = reading->getStorageMetadata()->getSortingKey();
-    const auto & sorting_key_columns = sorting_key.column_names;

     return buildInputOrderInfo(
         fixed_columns,
         dag, description,
-        sorting_key.expression->getActionsDAG(), sorting_key_columns,
+        sorting_key,
         limit);
 }

@@ -714,15 +721,14 @@ InputOrderInfoPtr buildInputOrderInfo(
 {
     auto storage = std::get<StoragePtr>(table);
     const auto & sorting_key = storage->getInMemoryMetadataPtr()->getSortingKey();
-    const auto & sorting_key_columns = sorting_key.column_names;

-    if (sorting_key_columns.empty())
+    if (sorting_key.column_names.empty())
         return nullptr;

     auto table_order_info = buildInputOrderInfo(
         fixed_columns,
         dag, description,
-        sorting_key.expression->getActionsDAG(), sorting_key_columns,
+        sorting_key,
         limit);

     if (!table_order_info)
@@ -173,8 +173,12 @@ namespace

     if (typeid_cast<const WindowStep *>(current_step))
     {
-        actions_chain.push_back(std::move(dag_stack));
-        dag_stack.clear();
+        /// it can be empty in case of 2 WindowSteps following one another
+        if (!dag_stack.empty())
+        {
+            actions_chain.push_back(std::move(dag_stack));
+            dag_stack.clear();
+        }
     }

     if (const auto * const expr = typeid_cast<const ExpressionStep *>(current_step); expr)
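A compact sketch of the situation the guard above handles, using plain std::vector stand-ins rather than the real query-plan types: when two WindowSteps are adjacent, the second one sees an empty dag_stack, and pushing it unguarded would record a spurious empty actions-chain entry.

#include <cassert>
#include <string>
#include <vector>

int main()
{
    std::vector<std::vector<std::string>> actions_chain;
    std::vector<std::string> dag_stack; // empty: the previous step was also a WindowStep

    /// it can be empty in case of 2 WindowSteps following one another
    if (!dag_stack.empty())
    {
        actions_chain.push_back(std::move(dag_stack));
        dag_stack.clear();
    }

    assert(actions_chain.empty()); // no spurious empty entry recorded
    return 0;
}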
@@ -1360,25 +1360,13 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal(

 ReadFromMergeTree::AnalysisResultPtr ReadFromMergeTree::selectRangesToRead() const
 {
-    return selectRangesToReadImpl(
-        prepared_parts,
-        alter_conversions_for_parts,
-        metadata_for_reading,
-        query_info,
-        context,
-        requested_num_streams,
-        max_block_numbers_to_read,
-        data,
-        all_column_names,
-        log,
-        indexes);
+    return selectRangesToRead(prepared_parts, alter_conversions_for_parts, false /* find_exact_ranges */);
 }

 ReadFromMergeTree::AnalysisResultPtr ReadFromMergeTree::selectRangesToRead(
-    MergeTreeData::DataPartsVector parts,
-    std::vector<AlterConversionsPtr> alter_conversions) const
+    MergeTreeData::DataPartsVector parts, std::vector<AlterConversionsPtr> alter_conversions, bool find_exact_ranges) const
 {
-    return selectRangesToReadImpl(
+    return selectRangesToRead(
         std::move(parts),
         std::move(alter_conversions),
         metadata_for_reading,

@@ -1389,7 +1377,8 @@ ReadFromMergeTree::AnalysisResultPtr ReadFromMergeTree::selectRangesToRead(
         data,
         all_column_names,
         log,
-        indexes);
+        indexes,
+        find_exact_ranges);
 }

 static void buildIndexes(

@@ -1558,34 +1547,8 @@ ReadFromMergeTree::AnalysisResultPtr ReadFromMergeTree::selectRangesToRead(
     const MergeTreeData & data,
     const Names & all_column_names,
     LoggerPtr log,
-    std::optional<Indexes> & indexes)
-{
-    return selectRangesToReadImpl(
-        std::move(parts),
-        std::move(alter_conversions),
-        metadata_snapshot,
-        query_info_,
-        context_,
-        num_streams,
-        max_block_numbers_to_read,
-        data,
-        all_column_names,
-        log,
-        indexes);
-}
-
-ReadFromMergeTree::AnalysisResultPtr ReadFromMergeTree::selectRangesToReadImpl(
-    MergeTreeData::DataPartsVector parts,
-    std::vector<AlterConversionsPtr> alter_conversions,
-    const StorageMetadataPtr & metadata_snapshot,
-    const SelectQueryInfo & query_info_,
-    ContextPtr context_,
-    size_t num_streams,
-    std::shared_ptr<PartitionIdToMaxBlock> max_block_numbers_to_read,
-    const MergeTreeData & data,
-    const Names & all_column_names,
-    LoggerPtr log,
-    std::optional<Indexes> & indexes)
+    std::optional<Indexes> & indexes,
+    bool find_exact_ranges)
 {
     AnalysisResult result;
     const auto & settings = context_->getSettingsRef();

@@ -1673,7 +1636,8 @@ ReadFromMergeTree::AnalysisResultPtr ReadFromMergeTree::selectRangesToReadImpl(
             log,
             num_streams,
             result.index_stats,
-            indexes->use_skip_indexes);
+            indexes->use_skip_indexes,
+            find_exact_ranges);
     }

     size_t sum_marks_pk = total_marks_pk;
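As a reading aid, a sketch of the API shape after this refactoring, with hypothetical simplified types (RangeSelector, int parts) rather than the real ClickHouse signatures: the former selectRangesToReadImpl body is folded into the argument-taking overload, which gains a find_exact_ranges flag defaulted to false, while the parameterless overload delegates with the default spelled out.

#include <iostream>
#include <vector>

// Hypothetical stand-in to show the overload layout only.
struct RangeSelector
{
    std::vector<int> prepared_parts{1, 2, 3};

    // Full overload: previously the private *Impl function, now carrying
    // the new flag (defaulted so existing callers keep compiling).
    size_t selectRangesToRead(std::vector<int> parts, bool find_exact_ranges = false) const
    {
        std::cout << "find_exact_ranges=" << find_exact_ranges << '\n';
        return parts.size();
    }

    // Convenience overload: delegates, making the default explicit.
    size_t selectRangesToRead() const
    {
        return selectRangesToRead(prepared_parts, /* find_exact_ranges */ false);
    }
};

int main()
{
    RangeSelector selector;
    selector.selectRangesToRead();             // prints find_exact_ranges=0
    selector.selectRangesToRead({7, 8}, true); // prints find_exact_ranges=1
    return 0;
}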
@@ -161,11 +161,11 @@ public:
         const MergeTreeData & data,
         const Names & all_column_names,
         LoggerPtr log,
-        std::optional<Indexes> & indexes);
+        std::optional<Indexes> & indexes,
+        bool find_exact_ranges);

     AnalysisResultPtr selectRangesToRead(
-        MergeTreeData::DataPartsVector parts,
-        std::vector<AlterConversionsPtr> alter_conversions) const;
+        MergeTreeData::DataPartsVector parts, std::vector<AlterConversionsPtr> alter_conversions, bool find_exact_ranges = false) const;

     AnalysisResultPtr selectRangesToRead() const;

@@ -196,19 +196,6 @@ public:
     void applyFilters(ActionDAGNodes added_filter_nodes) override;

 private:
-    static AnalysisResultPtr selectRangesToReadImpl(
-        MergeTreeData::DataPartsVector parts,
-        std::vector<AlterConversionsPtr> alter_conversions,
-        const StorageMetadataPtr & metadata_snapshot,
-        const SelectQueryInfo & query_info,
-        ContextPtr context,
-        size_t num_streams,
-        std::shared_ptr<PartitionIdToMaxBlock> max_block_numbers_to_read,
-        const MergeTreeData & data,
-        const Names & all_column_names,
-        LoggerPtr log,
-        std::optional<Indexes> & indexes);
-
     int getSortDirection() const
     {
         if (query_info.input_order_info)
@@ -236,7 +236,7 @@ StorageID IStorage::getStorageID() const
     return storage_id;
 }

-ConditionEstimator IStorage::getConditionEstimatorByPredicate(const SelectQueryInfo &, const StorageSnapshotPtr &, ContextPtr) const
+ConditionEstimator IStorage::getConditionEstimatorByPredicate(const StorageSnapshotPtr &, const ActionsDAGPtr &, ContextPtr) const
 {
     return {};
 }
@@ -135,7 +135,7 @@ public:
     /// Returns true if the storage supports queries with the PREWHERE section.
     virtual bool supportsPrewhere() const { return false; }

-    virtual ConditionEstimator getConditionEstimatorByPredicate(const SelectQueryInfo &, const StorageSnapshotPtr &, ContextPtr) const;
+    virtual ConditionEstimator getConditionEstimatorByPredicate(const StorageSnapshotPtr &, const ActionsDAGPtr &, ContextPtr) const;

     /// Returns which columns supports PREWHERE, or empty std::nullopt if all columns is supported.
     /// This is needed for engines whose aggregates data from multiple tables, like Merge.
@@ -1,4 +1,4 @@
-#include "localBackup.h"
+#include "Backup.h"

 #include <Common/Exception.h>
 #include <Disks/IDiskTransaction.h>

@@ -18,8 +18,9 @@ namespace ErrorCodes
 namespace
 {

-void localBackupImpl(
-    const DiskPtr & disk,
+void BackupImpl(
+    const DiskPtr & src_disk,
+    const DiskPtr & dst_disk,
     IDiskTransaction * transaction,
     const String & source_path,
     const String & destination_path,

@@ -40,41 +41,42 @@ void localBackupImpl(
     if (transaction)
         transaction->createDirectories(destination_path);
     else
-        disk->createDirectories(destination_path);
+        dst_disk->createDirectories(destination_path);

-    for (auto it = disk->iterateDirectory(source_path); it->isValid(); it->next())
+    for (auto it = src_disk->iterateDirectory(source_path); it->isValid(); it->next())
     {
         auto source = it->path();
         auto destination = fs::path(destination_path) / it->name();

-        if (!disk->isDirectory(source))
+        if (!src_disk->isDirectory(source))
         {
             if (make_source_readonly)
             {
                 if (transaction)
                     transaction->setReadOnly(source);
                 else
-                    disk->setReadOnly(source);
+                    src_disk->setReadOnly(source);
             }
             if (copy_instead_of_hardlinks || files_to_copy_instead_of_hardlinks.contains(it->name()))
             {
                 if (transaction)
                     transaction->copyFile(source, destination, read_settings, write_settings);
                 else
-                    disk->copyFile(source, *disk, destination, read_settings, write_settings);
+                    src_disk->copyFile(source, *dst_disk, destination, read_settings, write_settings);
             }
             else
             {
                 if (transaction)
                     transaction->createHardLink(source, destination);
                 else
-                    disk->createHardLink(source, destination);
+                    src_disk->createHardLink(source, destination);
             }
         }
         else
         {
-            localBackupImpl(
-                disk,
+            BackupImpl(
+                src_disk,
+                dst_disk,
                 transaction,
                 source,
                 destination,

@@ -125,8 +127,11 @@ private:
 };
 }

-void localBackup(
-    const DiskPtr & disk,
+/// src_disk and dst_disk can be the same disk when local backup.
+/// copy_instead_of_hardlinks must be true when remote backup.
+void Backup(
+    const DiskPtr & src_disk,
+    const DiskPtr & dst_disk,
     const String & source_path,
     const String & destination_path,
     const ReadSettings & read_settings,

@@ -137,10 +142,10 @@ void localBackup(
     const NameSet & files_to_copy_intead_of_hardlinks,
     DiskTransactionPtr disk_transaction)
 {
-    if (disk->exists(destination_path) && !disk->isDirectoryEmpty(destination_path))
+    if (dst_disk->exists(destination_path) && !dst_disk->isDirectoryEmpty(destination_path))
     {
         throw DB::Exception(ErrorCodes::DIRECTORY_ALREADY_EXISTS, "Directory {} already exists and is not empty.",
-            DB::fullPath(disk, destination_path));
+            DB::fullPath(dst_disk, destination_path));
     }

     size_t try_no = 0;

@@ -156,8 +161,9 @@ void localBackup(
     {
         if (disk_transaction)
         {
-            localBackupImpl(
-                disk,
+            BackupImpl(
+                src_disk,
+                dst_disk,
                 disk_transaction.get(),
                 source_path,
                 destination_path,

@@ -167,27 +173,29 @@ void localBackup(
                 /* level= */ 0,
                 max_level,
                 copy_instead_of_hardlinks,
-                files_to_copy_intead_of_hardlinks);
+                files_to_copy_intead_of_hardlinks
+                );
         }
         else if (copy_instead_of_hardlinks)
         {
-            CleanupOnFail cleanup([disk, destination_path]() { disk->removeRecursive(destination_path); });
-            disk->copyDirectoryContent(source_path, disk, destination_path, read_settings, write_settings, /*cancellation_hook=*/{});
+            CleanupOnFail cleanup([dst_disk, destination_path]() { dst_disk->removeRecursive(destination_path); });
+            src_disk->copyDirectoryContent(source_path, dst_disk, destination_path, read_settings, write_settings, /*cancellation_hook=*/{});
             cleanup.success();
         }
         else
        {
             std::function<void()> cleaner;
-            if (disk->supportZeroCopyReplication())
+            if (dst_disk->supportZeroCopyReplication())
                 /// Note: this code will create garbage on s3. We should always remove `copy_instead_of_hardlinks` files.
                 /// The third argument should be a list of exceptions, but (looks like) it is ignored for keep_all_shared_data = true.
-                cleaner = [disk, destination_path]() { disk->removeSharedRecursive(destination_path, /*keep_all_shared_data*/ true, {}); };
+                cleaner = [dst_disk, destination_path]() { dst_disk->removeSharedRecursive(destination_path, /*keep_all_shared_data*/ true, {}); };
             else
-                cleaner = [disk, destination_path]() { disk->removeRecursive(destination_path); };
+                cleaner = [dst_disk, destination_path]() { dst_disk->removeRecursive(destination_path); };

             CleanupOnFail cleanup(std::move(cleaner));
-            localBackupImpl(
-                disk,
+            BackupImpl(
+                src_disk,
+                dst_disk,
                 disk_transaction.get(),
                 source_path,
                 destination_path,
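The new comments above state the contract for the generalized Backup: same disk for a local backup (hardlinks allowed), distinct disks for a remote backup (must copy). As an illustration of that contract only, a toy C++ sketch with hypothetical stand-in types (Disk, backupSketch); note the real code documents this invariant rather than enforcing it with an exception.

#include <cassert>
#include <memory>
#include <stdexcept>
#include <string>

// Toy stand-ins, not ClickHouse types: just enough to express the contract.
struct Disk { std::string name; };
using DiskPtr = std::shared_ptr<Disk>;

void backupSketch(const DiskPtr & src_disk, const DiskPtr & dst_disk, bool copy_instead_of_hardlinks)
{
    // Hardlinks only make sense within one disk; a cross-disk ("remote")
    // backup must copy, per "copy_instead_of_hardlinks must be true when
    // remote backup".
    if (src_disk != dst_disk && !copy_instead_of_hardlinks)
        throw std::logic_error("remote backup requires copy_instead_of_hardlinks");
}

int main()
{
    auto local = std::make_shared<Disk>(Disk{"local"});
    auto remote = std::make_shared<Disk>(Disk{"s3"});

    backupSketch(local, local, /*copy_instead_of_hardlinks=*/false);  // local: hardlinks OK
    backupSketch(local, remote, /*copy_instead_of_hardlinks=*/true);  // remote: must copy

    bool threw = false;
    try { backupSketch(local, remote, false); }
    catch (const std::logic_error &) { threw = true; }
    assert(threw); // the invalid combination is rejected in this sketch
    return 0;
}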