Merge branch 'master' of github.com:ClickHouse/ClickHouse into union-distinct

commit fec4080d42 by feng lv, 2020-11-11 03:10:10 +00:00
98 changed files with 1917 additions and 694 deletions

.gitmodules
@@ -190,3 +190,6 @@
 	path = contrib/croaring
 	url = https://github.com/RoaringBitmap/CRoaring
 	branch = v0.2.66
+[submodule "contrib/miniselect"]
+	path = contrib/miniselect
+	url = https://github.com/danlark1/miniselect

CMakeLists.txt
@@ -445,6 +445,7 @@ include (cmake/find/brotli.cmake)
 include (cmake/find/protobuf.cmake)
 include (cmake/find/grpc.cmake)
 include (cmake/find/pdqsort.cmake)
+include (cmake/find/miniselect.cmake)
 include (cmake/find/hdfs3.cmake) # uses protobuf
 include (cmake/find/poco.cmake)
 include (cmake/find/curl.cmake)

cmake/find/miniselect.cmake
@@ -0,0 +1,2 @@
+set(MINISELECT_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/miniselect/include)
+message(STATUS "Using miniselect: ${MINISELECT_INCLUDE_DIR}")

contrib/miniselect (vendored submodule)
@@ -0,0 +1 @@
+Subproject commit be0af6bd0b6eb044d1acc4f754b229972d99903a

docker/test/fasttest/run.sh
@@ -127,7 +127,7 @@ function clone_submodules
 (
 cd "$FASTTEST_SOURCE"
-SUBMODULES_TO_UPDATE=(contrib/boost contrib/zlib-ng contrib/libxml2 contrib/poco contrib/libunwind contrib/ryu contrib/fmtlib contrib/base64 contrib/cctz contrib/libcpuid contrib/double-conversion contrib/libcxx contrib/libcxxabi contrib/libc-headers contrib/lz4 contrib/zstd contrib/fastops contrib/rapidjson contrib/re2 contrib/sparsehash-c11 contrib/croaring)
+SUBMODULES_TO_UPDATE=(contrib/boost contrib/zlib-ng contrib/libxml2 contrib/poco contrib/libunwind contrib/ryu contrib/fmtlib contrib/base64 contrib/cctz contrib/libcpuid contrib/double-conversion contrib/libcxx contrib/libcxxabi contrib/libc-headers contrib/lz4 contrib/zstd contrib/fastops contrib/rapidjson contrib/re2 contrib/sparsehash-c11 contrib/croaring contrib/miniselect)
 git submodule sync
 git submodule update --init --recursive "${SUBMODULES_TO_UPDATE[@]}"

docker/test/stress/stress
@@ -17,13 +17,6 @@ def get_skip_list_cmd(path):
     return ''
 
 
-def run_perf_test(cmd, xmls_path, output_folder):
-    output_path = os.path.join(output_folder, "perf_stress_run.txt")
-    f = open(output_path, 'w')
-    p = Popen("{} --skip-tags=long --recursive --input-files {}".format(cmd, xmls_path), shell=True, stdout=f, stderr=f)
-    return p
-
-
 def get_options(i):
     options = ""
     if 0 < i:
@@ -75,8 +68,6 @@ if __name__ == "__main__":
     args = parser.parse_args()
 
     func_pipes = []
-    perf_process = None
-    perf_process = run_perf_test(args.perf_test_cmd, args.perf_test_xml_path, args.output_folder)
    func_pipes = run_func_test(args.test_cmd, args.output_folder, args.num_parallel, args.skip_func_tests, args.global_time_limit)
 
     logging.info("Will wait functests to finish")

docs/en/sql-reference/table-functions/null.md
@@ -5,7 +5,7 @@ toc_title: null function
 
 # null {#null-function}
 
-Accepts an inserted data of the specified structure and immediately drops it away. The function is used for convenience writing tests and demonstrations.
+Creates a temporary table of the specified structure with the [Null](../../engines/table-engines/special/null.md) table engine. According to the `Null`-engine properties, the table data is ignored and the table itself is immediately dropped right after the query execution. The function is used for the convenience of test writing and demonstrations.
 
 **Syntax**
@@ -19,7 +19,7 @@ null('structure')
 
 **Returned value**
 
-A table with the specified structure, which is dropped right after the query execution.
+A temporary `Null`-engine table with the specified structure.
 
 **Example**
@@ -36,6 +36,8 @@ INSERT INTO t SELECT * FROM numbers_mt(1000000000);
 DROP TABLE IF EXISTS t;
 ```
 
-See also: format **Null**.
+See also:
+
+- [Null table engine](../../engines/table-engines/special/null.md)
 
 [Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/null/) <!--hide-->

docs/ru/sql-reference/table-functions/null.md (translated from Russian)
@@ -0,0 +1,43 @@
+---
+toc_priority: 53
+toc_title: null function
+---
+
+# null {#null-function}
+
+Creates a temporary table of the specified structure with the [Null](../../engines/table-engines/special/null.md) table engine. In accordance with the engine's properties, the table data is ignored, and the table itself is dropped immediately after the query is executed. The function is used for the convenience of writing tests and demonstration examples.
+
+**Syntax**
+
+``` sql
+null('structure')
+```
+
+**Parameter**
+
+- `structure` — the list of columns and their types. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+A temporary table of the specified structure with the `Null` engine.
+
+**Example**
+
+One query with the `null` function:
+
+``` sql
+INSERT INTO function null('x UInt64') SELECT * FROM numbers_mt(1000000000);
+```
+
+replaces three queries:
+
+```sql
+CREATE TABLE t (x UInt64) ENGINE = Null;
+INSERT INTO t SELECT * FROM numbers_mt(1000000000);
+DROP TABLE IF EXISTS t;
+```
+
+See also:
+
+- [Null table engine](../../engines/table-engines/special/null.md)
+
+[Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/null/) <!--hide-->

src/AggregateFunctions/QuantileExact.h
@@ -8,6 +8,7 @@
 #include <Common/NaNUtils.h>
 #include <Common/PODArray.h>
+#include <miniselect/floyd_rivest_select.h>
 
 namespace DB
 {
@@ -87,7 +88,7 @@ struct QuantileExact : QuantileExactBase<Value, QuantileExact<Value>>
     {
         size_t n = level < 1 ? level * array.size() : (array.size() - 1);
-        std::nth_element(array.begin(), array.begin() + n, array.end());    /// NOTE You can think of the radix-select algorithm.
+        miniselect::floyd_rivest_select(array.begin(), array.begin() + n, array.end());    /// NOTE You can think of the radix-select algorithm.
         return array[n];
     }
@@ -107,7 +108,7 @@ struct QuantileExact : QuantileExactBase<Value, QuantileExact<Value>>
         size_t n = level < 1 ? level * array.size() : (array.size() - 1);
-        std::nth_element(array.begin() + prev_n, array.begin() + n, array.end());
+        miniselect::floyd_rivest_select(array.begin() + prev_n, array.begin() + n, array.end());
         result[indices[i]] = array[n];
         prev_n = n;
@@ -144,7 +145,7 @@ struct QuantileExactExclusive : public QuantileExact<Value>
         else if (n < 1)
             return static_cast<Float64>(array[0]);
-        std::nth_element(array.begin(), array.begin() + n - 1, array.end());
+        miniselect::floyd_rivest_select(array.begin(), array.begin() + n - 1, array.end());
         auto nth_element = std::min_element(array.begin() + n, array.end());
 
         return static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_element - array[n - 1]);
@@ -173,7 +174,7 @@ struct QuantileExactExclusive : public QuantileExact<Value>
                 result[indices[i]] = static_cast<Float64>(array[0]);
             else
             {
-                std::nth_element(array.begin() + prev_n, array.begin() + n - 1, array.end());
+                miniselect::floyd_rivest_select(array.begin() + prev_n, array.begin() + n - 1, array.end());
                 auto nth_element = std::min_element(array.begin() + n, array.end());
 
                 result[indices[i]] = static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_element - array[n - 1]);
@@ -209,7 +210,7 @@ struct QuantileExactInclusive : public QuantileExact<Value>
         else if (n < 1)
             return static_cast<Float64>(array[0]);
-        std::nth_element(array.begin(), array.begin() + n - 1, array.end());
+        miniselect::floyd_rivest_select(array.begin(), array.begin() + n - 1, array.end());
         auto nth_element = std::min_element(array.begin() + n, array.end());
 
         return static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_element - array[n - 1]);
@@ -236,7 +237,7 @@ struct QuantileExactInclusive : public QuantileExact<Value>
                 result[indices[i]] = static_cast<Float64>(array[0]);
             else
             {
-                std::nth_element(array.begin() + prev_n, array.begin() + n - 1, array.end());
+                miniselect::floyd_rivest_select(array.begin() + prev_n, array.begin() + n - 1, array.end());
                 auto nth_element = std::min_element(array.begin() + n, array.end());
 
                 result[indices[i]] = static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_element - array[n - 1]);
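The calls above keep `std::nth_element` semantics: `miniselect::floyd_rivest_select` is a drop-in replacement with the same iterator-based contract, and Floyd-Rivest selection tends to beat the introselect used by common `std::nth_element` implementations on random numeric data. A minimal sketch, assuming the vendored `contrib/miniselect/include` directory is on the include path:

```cpp
#include <iostream>
#include <vector>
#include <miniselect/floyd_rivest_select.h>

int main()
{
    std::vector<int> data{9, 1, 8, 2, 7, 3, 6, 4, 5};
    const auto n = data.size() / 2; // index of the median element

    /// Same contract as std::nth_element: afterwards data[n] holds the value
    /// that would be at position n if the whole range were sorted.
    miniselect::floyd_rivest_select(data.begin(), data.begin() + n, data.end());

    std::cout << "median: " << data[n] << '\n'; // prints "median: 5"
}
```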

src/AggregateFunctions/QuantileTiming.h
@@ -7,6 +7,7 @@
 #include <IO/ReadHelpers.h>
 #include <IO/WriteHelpers.h>
+#include <miniselect/floyd_rivest_select.h>
 
 namespace DB
 {
@@ -179,7 +180,7 @@ namespace detail
             /// Sorting an array will not be considered a violation of constancy.
             auto & array = elems;
-            std::nth_element(array.begin(), array.begin() + n, array.end());
+            miniselect::floyd_rivest_select(array.begin(), array.begin() + n, array.end());
             quantile = array[n];
         }
@@ -200,7 +201,7 @@ namespace detail
                     ? level * elems.size()
                     : (elems.size() - 1);
-                std::nth_element(array.begin() + prev_n, array.begin() + n, array.end());
+                miniselect::floyd_rivest_select(array.begin() + prev_n, array.begin() + n, array.end());
                 result[level_index] = array[n];
                 prev_n = n;

src/CMakeLists.txt
@@ -321,6 +321,7 @@ target_include_directories(clickhouse_common_io PUBLIC ${CMAKE_CURRENT_BINARY_DI
 dbms_target_include_directories(PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/Core/include)
 dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${PDQSORT_INCLUDE_DIR})
+dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${MINISELECT_INCLUDE_DIR})
 
 if (ZSTD_LIBRARY)
     dbms_target_link_libraries(PRIVATE ${ZSTD_LIBRARY})

src/Columns/ColumnArray.cpp
@@ -20,6 +20,7 @@
 #include <Common/WeakHash.h>
 #include <Common/HashTable/Hash.h>
+#include <miniselect/floyd_rivest_select.h>
 
 namespace DB
 {
@@ -782,7 +783,7 @@ void ColumnArray::getPermutationImpl(size_t limit, Permutation & res, Comparator
     auto less = [&cmp](size_t lhs, size_t rhs){ return cmp(lhs, rhs) < 0; };
 
     if (limit)
-        std::partial_sort(res.begin(), res.begin() + limit, res.end(), less);
+        miniselect::floyd_rivest_partial_sort(res.begin(), res.begin() + limit, res.end(), less);
     else
         std::sort(res.begin(), res.end(), less);
 }
@@ -835,7 +836,7 @@ void ColumnArray::updatePermutationImpl(size_t limit, Permutation & res, EqualRa
         /// Since then we are working inside the interval.
 
-        std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
+        miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
 
         auto new_first = first;
         for (auto j = first + 1; j < limit; ++j)
         {
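For `LIMIT`-style permutations only the first `limit` positions have to end up ordered, so `std::partial_sort` is swapped for `miniselect::floyd_rivest_partial_sort`, which has the same contract. A minimal sketch (the `data` and `perm` names are illustrative, not from the commit):

```cpp
#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>
#include <miniselect/floyd_rivest_select.h>

int main()
{
    std::vector<int> data{42, 7, 19, 3, 88, 1, 56};

    /// Permutation of row indices, as in IColumn::getPermutation.
    std::vector<std::size_t> perm(data.size());
    std::iota(perm.begin(), perm.end(), std::size_t(0));

    const std::size_t limit = 3; // only the first `limit` entries must be ordered
    auto less = [&](std::size_t a, std::size_t b) { return data[a] < data[b]; };

    /// Same contract as std::partial_sort: perm[0..limit) ends up holding the
    /// indices of the three smallest values, in ascending order.
    miniselect::floyd_rivest_partial_sort(perm.begin(), perm.begin() + limit, perm.end(), less);

    for (std::size_t i = 0; i < limit; ++i)
        std::cout << data[perm[i]] << ' '; // prints "1 3 7"
    std::cout << '\n';
}
```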

src/Columns/ColumnDecimal.cpp
@@ -8,6 +8,7 @@
 #include <common/unaligned.h>
 #include <ext/scope_guard.h>
+#include <miniselect/floyd_rivest_select.h>
 
 #include <IO/WriteHelpers.h>
@@ -162,10 +163,10 @@ void ColumnDecimal<T>::updatePermutation(bool reverse, size_t limit, int, IColum
     {
         const auto & [first, last] = equal_ranges[i];
         if (reverse)
-            std::partial_sort(res.begin() + first, res.begin() + last, res.begin() + last,
-                [this](size_t a, size_t b) { return data[a] > data[b]; });
+            std::sort(res.begin() + first, res.begin() + last,
+                [this](size_t a, size_t b) { return data[a] > data[b]; });
         else
-            std::partial_sort(res.begin() + first, res.begin() + last, res.begin() + last,
-                [this](size_t a, size_t b) { return data[a] < data[b]; });
+            std::sort(res.begin() + first, res.begin() + last,
+                [this](size_t a, size_t b) { return data[a] < data[b]; });
 
         auto new_first = first;
@@ -193,10 +194,10 @@ void ColumnDecimal<T>::updatePermutation(bool reverse, size_t limit, int, IColum
         /// Since then we are working inside the interval.
 
         if (reverse)
-            std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last,
-                [this](size_t a, size_t b) { return data[a] > data[b]; });
+            miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last,
+                [this](size_t a, size_t b) { return data[a] > data[b]; });
         else
-            std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last,
-                [this](size_t a, size_t b) { return data[a] < data[b]; });
+            miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last,
+                [this](size_t a, size_t b) { return data[a] < data[b]; });
 
         auto new_first = first;

src/Columns/ColumnDecimal.h
@@ -7,6 +7,7 @@
 #include <Columns/IColumnImpl.h>
 #include <Columns/ColumnVectorHelper.h>
 #include <Core/Field.h>
+#include <miniselect/floyd_rivest_select.h>
 
 namespace DB
@@ -253,9 +254,9 @@ protected:
             sort_end = res.begin() + limit;
 
         if (reverse)
-            std::partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] > data[b]; });
+            miniselect::floyd_rivest_partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] > data[b]; });
         else
-            std::partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] < data[b]; });
+            miniselect::floyd_rivest_partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] < data[b]; });
     }
 };

src/Columns/ColumnFixedString.cpp
@@ -10,6 +10,7 @@
 #include <Common/HashTable/Hash.h>
 #include <ext/scope_guard.h>
+#include <miniselect/floyd_rivest_select.h>
 
 #include <DataStreams/ColumnGathererStream.h>
@@ -157,9 +158,9 @@ void ColumnFixedString::getPermutation(bool reverse, size_t limit, int /*nan_dir
     if (limit)
     {
         if (reverse)
-            std::partial_sort(res.begin(), res.begin() + limit, res.end(), less<false>(*this));
+            miniselect::floyd_rivest_partial_sort(res.begin(), res.begin() + limit, res.end(), less<false>(*this));
         else
-            std::partial_sort(res.begin(), res.begin() + limit, res.end(), less<true>(*this));
+            miniselect::floyd_rivest_partial_sort(res.begin(), res.begin() + limit, res.end(), less<true>(*this));
     }
     else
     {
@@ -217,9 +218,9 @@ void ColumnFixedString::updatePermutation(bool reverse, size_t limit, int, Permu
         /// Since then we are working inside the interval.
 
         if (reverse)
-            std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<false>(*this));
+            miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<false>(*this));
        else
-            std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<true>(*this));
+            miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<true>(*this));
 
         auto new_first = first;
         for (auto j = first + 1; j < limit; ++j)

src/Columns/ColumnLowCardinality.cpp
@@ -8,6 +8,7 @@
 #include <Common/WeakHash.h>
 #include <ext/scope_guard.h>
+#include <miniselect/floyd_rivest_select.h>
 
 namespace DB
 {
@@ -393,7 +394,7 @@ void ColumnLowCardinality::updatePermutationImpl(size_t limit, Permutation & res
         /// Since then we are working inside the interval.
 
-        std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
+        miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
 
         auto new_first = first;
         for (auto j = first + 1; j < limit; ++j)

src/Columns/ColumnString.cpp
@@ -10,6 +10,7 @@
 #include <common/unaligned.h>
 #include <ext/scope_guard.h>
+#include <miniselect/floyd_rivest_select.h>
 
 namespace DB
 {
@@ -313,7 +314,7 @@ void ColumnString::getPermutationImpl(size_t limit, Permutation & res, Comparato
     auto less = [&cmp](size_t lhs, size_t rhs){ return cmp(lhs, rhs) < 0; };
 
     if (limit)
-        std::partial_sort(res.begin(), res.begin() + limit, res.end(), less);
+        miniselect::floyd_rivest_partial_sort(res.begin(), res.begin() + limit, res.end(), less);
     else
         std::sort(res.begin(), res.end(), less);
 }
@@ -365,7 +366,7 @@ void ColumnString::updatePermutationImpl(size_t limit, Permutation & res, EqualR
         /// Since then we are working inside the interval.
 
-        std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
+        miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
 
         size_t new_first = first;
         for (size_t j = first + 1; j < limit; ++j)

src/Columns/ColumnTuple.cpp
@@ -9,7 +9,7 @@
 #include <Common/assert_cast.h>
 #include <Common/WeakHash.h>
 #include <Core/Field.h>
-
+#include <miniselect/floyd_rivest_select.h>
 
 namespace DB
 {
@@ -352,7 +352,7 @@ void ColumnTuple::getPermutationImpl(size_t limit, Permutation & res, LessOperat
     if (limit)
     {
-        std::partial_sort(res.begin(), res.begin() + limit, res.end(), less);
+        miniselect::floyd_rivest_partial_sort(res.begin(), res.begin() + limit, res.end(), less);
     }
     else
     {

src/Columns/ColumnVector.cpp
@@ -17,7 +17,7 @@
 #include <ext/bit_cast.h>
 #include <ext/scope_guard.h>
 #include <pdqsort.h>
-
+#include <miniselect/floyd_rivest_select.h>
 
 #ifdef __SSE2__
 #    include <emmintrin.h>
@@ -156,9 +156,9 @@ void ColumnVector<T>::getPermutation(bool reverse, size_t limit, int nan_directi
             res[i] = i;
 
         if (reverse)
-            std::partial_sort(res.begin(), res.begin() + limit, res.end(), greater(*this, nan_direction_hint));
+            miniselect::floyd_rivest_partial_sort(res.begin(), res.begin() + limit, res.end(), greater(*this, nan_direction_hint));
         else
-            std::partial_sort(res.begin(), res.begin() + limit, res.end(), less(*this, nan_direction_hint));
+            miniselect::floyd_rivest_partial_sort(res.begin(), res.begin() + limit, res.end(), less(*this, nan_direction_hint));
     }
     else
     {
@@ -254,9 +254,9 @@ void ColumnVector<T>::updatePermutation(bool reverse, size_t limit, int nan_dire
         /// Since then, we are working inside the interval.
 
         if (reverse)
-            std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, greater(*this, nan_direction_hint));
+            miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, greater(*this, nan_direction_hint));
        else
-            std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less(*this, nan_direction_hint));
+            miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less(*this, nan_direction_hint));
 
         size_t new_first = first;
         for (size_t j = first + 1; j < limit; ++j)

src/Core/Settings.h
@@ -399,6 +399,7 @@ class IColumn;
     \
     /** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \
     \
+    M(UInt64, max_memory_usage_for_all_queries, 0, "Obsolete. Will be removed after 2020-10-20", 0) \
     M(UInt64, multiple_joins_rewriter_version, 0, "Obsolete setting, does nothing. Will be removed after 2021-03-31", 0) \
     M(Bool, experimental_use_processors, true, "Obsolete setting, does nothing. Will be removed after 2020-11-29.", 0) \
     M(Bool, force_optimize_skip_unused_shards_no_nested, false, "Obsolete setting, does nothing. Will be removed after 2020-12-01. Use force_optimize_skip_unused_shards_nesting instead.", 0) \

src/Interpreters/ClusterProxy/IStreamFactory.h
@@ -16,6 +16,9 @@ struct SelectQueryInfo;
 class Pipe;
 using Pipes = std::vector<Pipe>;
 
+class QueryPlan;
+using QueryPlanPtr = std::unique_ptr<QueryPlan>;
+
 namespace ClusterProxy
 {
@@ -31,7 +34,9 @@ public:
         const String & query, const ASTPtr & query_ast,
         const Context & context, const ThrottlerPtr & throttler,
         const SelectQueryInfo & query_info,
-        Pipes & res) = 0;
+        std::vector<QueryPlanPtr> & res,
+        Pipes & remote_pipes,
+        Pipes & delayed_pipes) = 0;
 };
 
 }

src/Interpreters/ClusterProxy/SelectStreamFactory.cpp
@@ -14,6 +14,8 @@
 #include <Processors/Sources/RemoteSource.h>
 #include <Processors/Sources/DelayedSource.h>
 #include <Processors/QueryPlan/QueryPlan.h>
+#include <Processors/QueryPlan/ConvertingStep.h>
+
 
 namespace ProfileEvents
 {
@@ -69,37 +71,27 @@ SelectStreamFactory::SelectStreamFactory(
 namespace
 {
 
-auto createLocalPipe(
-    const ASTPtr & query_ast, const Block & header, const Context & context, QueryProcessingStage::Enum processed_stage)
+std::unique_ptr<QueryPlan> createLocalPlan(
+    const ASTPtr & query_ast,
+    const Block & header,
+    const Context & context,
+    QueryProcessingStage::Enum processed_stage)
 {
     checkStackSize();
 
-    InterpreterSelectQuery interpreter(query_ast, context, SelectQueryOptions(processed_stage));
     auto query_plan = std::make_unique<QueryPlan>();
+
+    InterpreterSelectQuery interpreter(query_ast, context, SelectQueryOptions(processed_stage));
     interpreter.buildQueryPlan(*query_plan);
-    auto pipeline = std::move(*query_plan->buildQueryPipeline());
 
-    /// Avoid going it out-of-scope for EXPLAIN
-    pipeline.addQueryPlan(std::move(query_plan));
+    /// Convert header structure to expected.
+    /// Also we ignore constants from result and replace it with constants from header.
+    /// It is needed for functions like `now64()` or `randConstant()` because their values may be different.
+    auto converting = std::make_unique<ConvertingStep>(query_plan->getCurrentDataStream(), header, true);
+    converting->setStepDescription("Convert block structure for query from local replica");
+    query_plan->addStep(std::move(converting));
 
-    pipeline.addSimpleTransform([&](const Block & source_header)
-    {
-        return std::make_shared<ConvertingTransform>(
-            source_header, header, ConvertingTransform::MatchColumnsMode::Name, true);
-    });
-
-    /** Materialization is needed, since from remote servers the constants come materialized.
-      * If you do not do this, different types (Const and non-Const) columns will be produced in different threads,
-      * And this is not allowed, since all code is based on the assumption that in the block stream all types are the same.
-      */
-
-    /* Now we don't need to materialize constants, because RemoteBlockInputStream will ignore constant and take it from header.
-     * So, streams from different threads will always have the same header.
-     */
-
-    pipeline.setMaxThreads(1);
-    return QueryPipeline::getPipe(std::move(pipeline));
+    return query_plan;
 }
 
 String formattedAST(const ASTPtr & ast)
@@ -119,7 +111,9 @@ void SelectStreamFactory::createForShard(
     const String &, const ASTPtr & query_ast,
     const Context & context, const ThrottlerPtr & throttler,
     const SelectQueryInfo &,
-    Pipes & pipes)
+    std::vector<QueryPlanPtr> & plans,
+    Pipes & remote_pipes,
+    Pipes & delayed_pipes)
 {
     bool add_agg_info = processed_stage == QueryProcessingStage::WithMergeableState;
     bool add_totals = false;
@@ -136,7 +130,7 @@ void SelectStreamFactory::createForShard(
     auto emplace_local_stream = [&]()
     {
-        pipes.emplace_back(createLocalPipe(modified_query_ast, header, context, processed_stage));
+        plans.emplace_back(createLocalPlan(modified_query_ast, header, context, processed_stage));
     };
 
     String modified_query = formattedAST(modified_query_ast);
@@ -149,7 +143,7 @@ void SelectStreamFactory::createForShard(
         if (!table_func_ptr)
             remote_query_executor->setMainTable(main_table);
 
-        pipes.emplace_back(createRemoteSourcePipe(remote_query_executor, add_agg_info, add_totals, add_extremes));
+        remote_pipes.emplace_back(createRemoteSourcePipe(remote_query_executor, add_agg_info, add_totals, add_extremes));
     };
 
     const auto & settings = context.getSettingsRef();
@@ -275,7 +269,10 @@ void SelectStreamFactory::createForShard(
             }
 
             if (try_results.empty() || local_delay < max_remote_delay)
-                return createLocalPipe(modified_query_ast, header, context, stage);
+            {
+                auto plan = createLocalPlan(modified_query_ast, header, context, stage);
+                return QueryPipeline::getPipe(std::move(*plan->buildQueryPipeline()));
+            }
             else
             {
                 std::vector<IConnectionPool::Entry> connections;
@@ -290,7 +287,7 @@ void SelectStreamFactory::createForShard(
             }
         };
 
-        pipes.emplace_back(createDelayedPipe(header, lazily_create_stream, add_totals, add_extremes));
+        delayed_pipes.emplace_back(createDelayedPipe(header, lazily_create_stream, add_totals, add_extremes));
     }
     else
         emplace_remote_stream();

src/Interpreters/ClusterProxy/SelectStreamFactory.h
@@ -39,7 +39,9 @@ public:
         const String & query, const ASTPtr & query_ast,
         const Context & context, const ThrottlerPtr & throttler,
         const SelectQueryInfo & query_info,
-        Pipes & pipes) override;
+        std::vector<QueryPlanPtr> & plans,
+        Pipes & remote_pipes,
+        Pipes & delayed_pipes) override;
 
 private:
     const Block header;

src/Interpreters/ClusterProxy/executeQuery.cpp
@@ -7,6 +7,9 @@
 #include <Interpreters/ProcessList.h>
 #include <Parsers/queryToString.h>
 #include <Processors/Pipe.h>
+#include <Processors/QueryPlan/QueryPlan.h>
+#include <Processors/QueryPlan/ReadFromPreparedSource.h>
+#include <Processors/QueryPlan/UnionStep.h>
 #include <Storages/SelectQueryInfo.h>
@@ -81,15 +84,19 @@ Context updateSettingsForCluster(const Cluster & cluster, const Context & contex
     return new_context;
 }
 
-Pipe executeQuery(
+void executeQuery(
+    QueryPlan & query_plan,
     IStreamFactory & stream_factory, Poco::Logger * log,
     const ASTPtr & query_ast, const Context & context, const SelectQueryInfo & query_info)
 {
     assert(log);
 
-    Pipes res;
-
     const Settings & settings = context.getSettingsRef();
 
+    std::vector<QueryPlanPtr> plans;
+    Pipes remote_pipes;
+    Pipes delayed_pipes;
+
     const std::string query = queryToString(query_ast);
 
     Context new_context = updateSettingsForCluster(*query_info.cluster, context, settings, log);
@@ -112,9 +119,43 @@ Pipe executeQuery(
         throttler = user_level_throttler;
 
     for (const auto & shard_info : query_info.cluster->getShardsInfo())
-        stream_factory.createForShard(shard_info, query, query_ast, new_context, throttler, query_info, res);
+        stream_factory.createForShard(shard_info, query, query_ast, new_context, throttler, query_info, plans, remote_pipes, delayed_pipes);
 
-    return Pipe::unitePipes(std::move(res));
+    if (!remote_pipes.empty())
+    {
+        auto plan = std::make_unique<QueryPlan>();
+        auto read_from_remote = std::make_unique<ReadFromPreparedSource>(Pipe::unitePipes(std::move(remote_pipes)));
+        read_from_remote->setStepDescription("Read from remote replica");
+        plan->addStep(std::move(read_from_remote));
+        plans.emplace_back(std::move(plan));
+    }
+
+    if (!delayed_pipes.empty())
+    {
+        auto plan = std::make_unique<QueryPlan>();
+        auto read_from_remote = std::make_unique<ReadFromPreparedSource>(Pipe::unitePipes(std::move(delayed_pipes)));
+        read_from_remote->setStepDescription("Read from delayed local replica");
+        plan->addStep(std::move(read_from_remote));
+        plans.emplace_back(std::move(plan));
+    }
+
+    if (plans.empty())
+        return;
+
+    if (plans.size() == 1)
+    {
+        query_plan = std::move(*plans.front());
+        return;
+    }
+
+    DataStreams input_streams;
+    input_streams.reserve(plans.size());
+    for (auto & plan : plans)
+        input_streams.emplace_back(plan->getCurrentDataStream());
+
+    auto header = input_streams.front().header;
+    auto union_step = std::make_unique<UnionStep>(std::move(input_streams), header);
+    query_plan.unitePlans(std::move(union_step), std::move(plans));
 }
 
 }
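The control flow above reduces to a simple policy: collect one sub-plan per source (local replicas, remote replicas, delayed reads), return a single plan as-is, and wrap several plans in a union step that takes ownership of all of them. A toy sketch of that policy with hypothetical `Plan` and `unitePlans` types (not the ClickHouse `QueryPlan` API):

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Plan
{
    std::string description;
    std::vector<std::unique_ptr<Plan>> children; // non-empty only for union plans
};

/// Mirrors the tail of executeQuery(): one plan is returned unchanged,
/// several are wrapped in a union node that owns its sub-plans.
Plan unitePlans(std::vector<std::unique_ptr<Plan>> plans)
{
    if (plans.size() == 1)
        return std::move(*plans.front());

    Plan united{"Union", {}};
    united.children = std::move(plans);
    return united;
}

int main()
{
    std::vector<std::unique_ptr<Plan>> plans;
    plans.push_back(std::make_unique<Plan>(Plan{"Read from remote replica", {}}));
    plans.push_back(std::make_unique<Plan>(Plan{"Read from delayed local replica", {}}));

    Plan result = unitePlans(std::move(plans));
    std::cout << result.description << " over " << result.children.size() << " sub-plans\n";
}
```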

src/Interpreters/ClusterProxy/executeQuery.h
@@ -11,6 +11,7 @@ class Cluster;
 struct SelectQueryInfo;
 
 class Pipe;
+class QueryPlan;
 
 namespace ClusterProxy
 {
@@ -31,8 +32,10 @@ Context updateSettingsForCluster(const Cluster & cluster, const Context & contex
 /// Execute a distributed query, creating a vector of BlockInputStreams, from which the result can be read.
 /// `stream_factory` object encapsulates the logic of creating streams for a different type of query
 /// (currently SELECT, DESCRIBE).
-Pipe executeQuery(
-    IStreamFactory & stream_factory, Poco::Logger * log, const ASTPtr & query_ast, const Context & context, const SelectQueryInfo & query_info);
+void executeQuery(
+    QueryPlan & query_plan,
+    IStreamFactory & stream_factory, Poco::Logger * log,
+    const ASTPtr & query_ast, const Context & context, const SelectQueryInfo & query_info);
 
 }

src/Interpreters/InterpreterSelectQuery.cpp
@@ -33,11 +33,13 @@
 #include <Processors/Pipe.h>
 #include <Processors/Sources/SourceFromInputStream.h>
+#include <Processors/Sources/NullSource.h>
 #include <Processors/Transforms/ExpressionTransform.h>
 #include <Processors/Transforms/JoiningTransform.h>
 #include <Processors/Transforms/AggregatingTransform.h>
+#include <Processors/Transforms/FilterTransform.h>
 #include <Processors/QueryPlan/ArrayJoinStep.h>
-#include <Processors/QueryPlan/ReadFromStorageStep.h>
+#include <Processors/QueryPlan/SettingQuotaAndLimitsStep.h>
 #include <Processors/QueryPlan/ExpressionStep.h>
 #include <Processors/QueryPlan/FilterStep.h>
 #include <Processors/QueryPlan/ReadNothingStep.h>
@@ -1100,6 +1102,48 @@ static StreamLocalLimits getLimitsForStorage(const Settings & settings, const Se
     return limits;
 }
 
+void InterpreterSelectQuery::addEmptySourceToQueryPlan(QueryPlan & query_plan, const Block & source_header, const SelectQueryInfo & query_info)
+{
+    Pipe pipe(std::make_shared<NullSource>(source_header));
+
+    if (query_info.prewhere_info)
+    {
+        if (query_info.prewhere_info->alias_actions)
+        {
+            pipe.addSimpleTransform([&](const Block & header)
+            {
+                return std::make_shared<ExpressionTransform>(header, query_info.prewhere_info->alias_actions);
+            });
+        }
+
+        pipe.addSimpleTransform([&](const Block & header)
+        {
+            return std::make_shared<FilterTransform>(
+                header,
+                query_info.prewhere_info->prewhere_actions,
+                query_info.prewhere_info->prewhere_column_name,
+                query_info.prewhere_info->remove_prewhere_column);
+        });
+
+        // To remove additional columns
+        // In some cases, we did not read any marks so that the pipeline.streams is empty
+        // Thus, some columns in prewhere are not removed as expected
+        // This leads to mismatched header in distributed table
+        if (query_info.prewhere_info->remove_columns_actions)
+        {
+            pipe.addSimpleTransform([&](const Block & header)
+            {
+                return std::make_shared<ExpressionTransform>(
+                    header, query_info.prewhere_info->remove_columns_actions);
+            });
+        }
+    }
+
+    auto read_from_pipe = std::make_unique<ReadFromPreparedSource>(std::move(pipe));
+    read_from_pipe->setStepDescription("Read from NullSource");
+    query_plan.addStep(std::move(read_from_pipe));
+}
+
 void InterpreterSelectQuery::executeFetchColumns(
     QueryProcessingStage::Enum processing_stage, QueryPlan & query_plan,
     const PrewhereInfoPtr & prewhere_info, const Names & columns_to_remove_after_prewhere)
@@ -1345,7 +1389,7 @@ void InterpreterSelectQuery::executeFetchColumns(
             ErrorCodes::TOO_MANY_COLUMNS);
 
     /// General limit for the number of threads.
-    query_plan.setMaxThreads(settings.max_threads);
+    size_t max_threads_execute_query = settings.max_threads;
 
     /** With distributed query processing, almost no computations are done in the threads,
      * but wait and receive data from remote servers.
@@ -1358,8 +1402,7 @@ void InterpreterSelectQuery::executeFetchColumns(
     if (storage && storage->isRemote())
     {
         is_remote = true;
-        max_streams = settings.max_distributed_connections;
-        query_plan.setMaxThreads(max_streams);
+        max_threads_execute_query = max_streams = settings.max_distributed_connections;
     }
 
     UInt64 max_block_size = settings.max_block_size;
@@ -1384,8 +1427,7 @@ void InterpreterSelectQuery::executeFetchColumns(
         && limit_length + limit_offset < max_block_size)
     {
         max_block_size = std::max(UInt64(1), limit_length + limit_offset);
-        max_streams = 1;
-        query_plan.setMaxThreads(max_streams);
+        max_threads_execute_query = max_streams = 1;
     }
 
     if (!max_block_size)
@@ -1465,12 +1507,36 @@ void InterpreterSelectQuery::executeFetchColumns(
         if (!options.ignore_quota && (options.to_stage == QueryProcessingStage::Complete))
             quota = context->getQuota();
 
-        storage->read(query_plan, table_lock, metadata_snapshot, limits, leaf_limits, std::move(quota),
-                      required_columns, query_info, context, processing_stage, max_block_size, max_streams);
+        storage->read(query_plan, required_columns, metadata_snapshot,
+                      query_info, *context, processing_stage, max_block_size, max_streams);
+
+        /// Create step which reads from empty source if storage has no data.
+        if (!query_plan.isInitialized())
+        {
+            auto header = metadata_snapshot->getSampleBlockForColumns(
+                required_columns, storage->getVirtuals(), storage->getStorageID());
+            addEmptySourceToQueryPlan(query_plan, header, query_info);
+        }
+
+        /// Extend lifetime of context, table lock, storage. Set limits and quota.
+        auto adding_limits_and_quota = std::make_unique<SettingQuotaAndLimitsStep>(
+            query_plan.getCurrentDataStream(),
+            storage,
+            std::move(table_lock),
+            limits,
+            leaf_limits,
+            std::move(quota),
+            context);
+        adding_limits_and_quota->setStepDescription("Set limits and quota after reading from storage");
+        query_plan.addStep(std::move(adding_limits_and_quota));
     }
     else
         throw Exception("Logical error in InterpreterSelectQuery: nowhere to read", ErrorCodes::LOGICAL_ERROR);
 
+    /// Specify the number of threads only if it wasn't specified in storage.
+    if (!query_plan.getMaxThreads())
+        query_plan.setMaxThreads(max_threads_execute_query);
+
     /// Aliases in table declaration.
     if (processing_stage == QueryProcessingStage::FetchColumns && alias_actions)
     {

src/Interpreters/InterpreterSelectQuery.h
@@ -87,6 +87,8 @@ public:
 
     const SelectQueryInfo & getQueryInfo() const { return query_info; }
 
+    static void addEmptySourceToQueryPlan(QueryPlan & query_plan, const Block & source_header, const SelectQueryInfo & query_info);
+
 private:
     InterpreterSelectQuery(
         const ASTPtr & query_ptr_,

src/Interpreters/MutationsInterpreter.cpp
@@ -5,6 +5,7 @@
 #include <Interpreters/MutationsInterpreter.h>
 #include <Interpreters/TreeRewriter.h>
 #include <Storages/MergeTree/MergeTreeData.h>
+#include <Storages/MergeTree/StorageFromMergeTreeDataPart.h>
 #include <Processors/Transforms/FilterTransform.h>
 #include <Processors/Transforms/ExpressionTransform.h>
 #include <Processors/Transforms/CreatingSetsTransform.h>
@@ -32,6 +33,7 @@ namespace DB
 namespace ErrorCodes
 {
+    extern const int NOT_IMPLEMENTED;
     extern const int BAD_ARGUMENTS;
     extern const int LOGICAL_ERROR;
     extern const int UNKNOWN_MUTATION_COMMAND;
@@ -92,6 +94,7 @@ std::optional<String> findFirstNonDeterministicFunctionName(const MutationComman
             if (finder_data.nondeterministic_function_name)
                 return finder_data.nondeterministic_function_name;
 
+            /// Currently UPDATE and DELETE both always have predicates so we can use fallthrough
             [[fallthrough]];
         }
@@ -110,7 +113,7 @@ std::optional<String> findFirstNonDeterministicFunctionName(const MutationComman
     return {};
 }
 
-ASTPtr prepareQueryAffectedAST(const std::vector<MutationCommand> & commands)
+ASTPtr prepareQueryAffectedAST(const std::vector<MutationCommand> & commands, const StoragePtr & storage, const Context & context)
 {
     /// Execute `SELECT count() FROM storage WHERE predicate1 OR predicate2 OR ...` query.
     /// The result can differ from the number of affected rows (e.g. if there is an UPDATE command that
@@ -125,20 +128,23 @@ ASTPtr prepareQueryAffectedAST(const std::vector<MutationCommand> & commands, co
     count_func->arguments = std::make_shared<ASTExpressionList>();
     select->select()->children.push_back(count_func);
 
-    if (commands.size() == 1)
-        select->setExpression(ASTSelectQuery::Expression::WHERE, commands[0].predicate->clone());
-    else
+    ASTs conditions;
+    for (const MutationCommand & command : commands)
     {
-        auto coalesced_predicates = std::make_shared<ASTFunction>();
-        coalesced_predicates->name = "or";
-        coalesced_predicates->arguments = std::make_shared<ASTExpressionList>();
-        coalesced_predicates->children.push_back(coalesced_predicates->arguments);
+        if (ASTPtr condition = getPartitionAndPredicateExpressionForMutationCommand(command, storage, context))
+            conditions.push_back(std::move(condition));
+    }
 
-        for (const MutationCommand & command : commands)
-            coalesced_predicates->arguments->children.push_back(command.predicate->clone());
-
+    if (conditions.size() > 1)
+    {
+        auto coalesced_predicates = makeASTFunction("or");
+        coalesced_predicates->arguments->children = std::move(conditions);
         select->setExpression(ASTSelectQuery::Expression::WHERE, std::move(coalesced_predicates));
     }
+    else if (conditions.size() == 1)
+    {
+        select->setExpression(ASTSelectQuery::Expression::WHERE, std::move(conditions.front()));
+    }
 
     return select;
 }
@@ -167,8 +173,9 @@ ColumnDependencies getAllColumnDependencies(const StorageMetadataPtr & metadata_
 }
 
 bool isStorageTouchedByMutations(
-    StoragePtr storage,
+    const StoragePtr & storage,
     const StorageMetadataPtr & metadata_snapshot,
     const std::vector<MutationCommand> & commands,
     Context context_copy)
@@ -176,16 +183,33 @@ bool isStorageTouchedByMutations(
     if (commands.empty())
         return false;
 
+    bool all_commands_can_be_skipped = true;
+    auto storage_from_merge_tree_data_part = std::dynamic_pointer_cast<StorageFromMergeTreeDataPart>(storage);
     for (const MutationCommand & command : commands)
     {
         if (!command.predicate) /// The command touches all rows.
             return true;
+
+        if (command.partition && !storage_from_merge_tree_data_part)
+            throw Exception("ALTER UPDATE/DELETE ... IN PARTITION is not supported for non-MergeTree tables", ErrorCodes::NOT_IMPLEMENTED);
+
+        if (command.partition && storage_from_merge_tree_data_part)
+        {
+            const String partition_id = storage_from_merge_tree_data_part->getPartitionIDFromQuery(command.partition, context_copy);
+            if (partition_id == storage_from_merge_tree_data_part->getPartitionId())
+                all_commands_can_be_skipped = false;
+        }
+        else
+            all_commands_can_be_skipped = false;
     }
 
+    if (all_commands_can_be_skipped)
+        return false;
+
     context_copy.setSetting("max_streams_to_max_threads_ratio", 1);
     context_copy.setSetting("max_threads", 1);
 
-    ASTPtr select_query = prepareQueryAffectedAST(commands);
+    ASTPtr select_query = prepareQueryAffectedAST(commands, storage, context_copy);
 
     /// Interpreter must be alive, when we use result of execute() method.
     /// For some reason it may copy context and give it into ExpressionBlockInputStream
@@ -202,9 +226,42 @@ bool isStorageTouchedByMutations(
     auto count = (*block.getByName("count()").column)[0].get<UInt64>();
     return count != 0;
 }
 
+ASTPtr getPartitionAndPredicateExpressionForMutationCommand(
+    const MutationCommand & command,
+    const StoragePtr & storage,
+    const Context & context
+)
+{
+    ASTPtr partition_predicate_as_ast_func;
+    if (command.partition)
+    {
+        String partition_id;
+
+        auto storage_merge_tree = std::dynamic_pointer_cast<MergeTreeData>(storage);
+        auto storage_from_merge_tree_data_part = std::dynamic_pointer_cast<StorageFromMergeTreeDataPart>(storage);
+        if (storage_merge_tree)
+            partition_id = storage_merge_tree->getPartitionIDFromQuery(command.partition, context);
+        else if (storage_from_merge_tree_data_part)
+            partition_id = storage_from_merge_tree_data_part->getPartitionIDFromQuery(command.partition, context);
+        else
+            throw Exception("ALTER UPDATE/DELETE ... IN PARTITION is not supported for non-MergeTree tables", ErrorCodes::NOT_IMPLEMENTED);
+
+        partition_predicate_as_ast_func = makeASTFunction("equals",
+            std::make_shared<ASTIdentifier>("_partition_id"),
+            std::make_shared<ASTLiteral>(partition_id)
+        );
+    }
+
+    if (command.predicate && command.partition)
+        return makeASTFunction("and", command.predicate->clone(), std::move(partition_predicate_as_ast_func));
+    else
+        return command.predicate ? command.predicate->clone() : partition_predicate_as_ast_func;
+}
+
 MutationsInterpreter::MutationsInterpreter(
     StoragePtr storage_,
     const StorageMetadataPtr & metadata_snapshot_,
@@ -349,7 +406,7 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run)
             if (stages.empty() || !stages.back().column_to_updated.empty())
                 stages.emplace_back(context);
 
-            auto negated_predicate = makeASTFunction("isZeroOrNull", command.predicate->clone());
+            auto negated_predicate = makeASTFunction("isZeroOrNull", getPartitionAndPredicateExpressionForMutationCommand(command));
             stages.back().filters.push_back(negated_predicate);
         }
         else if (command.type == MutationCommand::UPDATE)
@@ -387,7 +444,7 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run)
                 const auto & update_expr = kv.second;
                 auto updated_column = makeASTFunction("CAST",
                     makeASTFunction("if",
-                        command.predicate->clone(),
+                        getPartitionAndPredicateExpressionForMutationCommand(command),
                         makeASTFunction("CAST",
                             update_expr->clone(),
                             type_literal),
@@ -592,7 +649,7 @@ ASTPtr MutationsInterpreter::prepareInterpreterSelectQuery(std::vector<Stage> &
     for (const String & column : stage.output_columns)
         all_asts->children.push_back(std::make_shared<ASTIdentifier>(column));
 
-    auto syntax_result = TreeRewriter(context).analyze(all_asts, all_columns);
+    auto syntax_result = TreeRewriter(context).analyze(all_asts, all_columns, storage, metadata_snapshot);
     if (context.hasQueryContext())
         for (const auto & it : syntax_result->getScalars())
             context.getQueryContext().addScalar(it.first, it.second);
@@ -759,10 +816,10 @@ const Block & MutationsInterpreter::getUpdatedHeader() const
 size_t MutationsInterpreter::evaluateCommandsSize()
 {
     for (const MutationCommand & command : commands)
-        if (unlikely(!command.predicate)) /// The command touches all rows.
+        if (unlikely(!command.predicate && !command.partition)) /// The command touches all rows.
             return mutation_ast->size();
 
-    return std::max(prepareQueryAffectedAST(commands)->size(), mutation_ast->size());
+    return std::max(prepareQueryAffectedAST(commands, storage, context)->size(), mutation_ast->size());
 }
 
 std::optional<SortDescription> MutationsInterpreter::getStorageSortDescriptionIfPossible(const Block & header) const
@@ -783,6 +840,11 @@ std::optional<SortDescription> MutationsInterpreter::getStorageSortDescriptionIf
     return sort_description;
 }
 
+ASTPtr MutationsInterpreter::getPartitionAndPredicateExpressionForMutationCommand(const MutationCommand & command) const
+{
+    return DB::getPartitionAndPredicateExpressionForMutationCommand(command, storage, context);
+}
+
 bool MutationsInterpreter::Stage::isAffectingAllColumns(const Names & storage_columns) const
 {
     /// is subset
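The new `getPartitionAndPredicateExpressionForMutationCommand` builds the effective row filter for a mutation: an `IN PARTITION` clause becomes an equality check on the virtual `_partition_id` column, AND-ed with the user's `WHERE` predicate when both are present. A simplified string-based sketch of that combination (hypothetical `combineFilter` helper, not the AST code above):

```cpp
#include <iostream>
#include <optional>
#include <string>

/// Combine the two optional parts of a mutation command into one filter
/// expression, mirroring getPartitionAndPredicateExpressionForMutationCommand.
std::optional<std::string> combineFilter(
    const std::optional<std::string> & partition_id,
    const std::optional<std::string> & predicate)
{
    std::optional<std::string> partition_check;
    if (partition_id)
        partition_check = "equals(_partition_id, '" + *partition_id + "')";

    if (predicate && partition_check)
        return "and(" + *predicate + ", " + *partition_check + ")";

    return predicate ? predicate : partition_check;
}

int main()
{
    std::cout << combineFilter("202011", "x > 10").value() << '\n';
    // prints: and(x > 10, equals(_partition_id, '202011'))
    std::cout << combineFilter("202011", std::nullopt).value() << '\n';
    // prints: equals(_partition_id, '202011')
}
```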

src/Interpreters/MutationsInterpreter.h
@@ -20,7 +20,17 @@ using QueryPipelinePtr = std::unique_ptr<QueryPipeline>;
 
 /// Return false if the data isn't going to be changed by mutations.
 bool isStorageTouchedByMutations(
-    StoragePtr storage, const StorageMetadataPtr & metadata_snapshot, const std::vector<MutationCommand> & commands, Context context_copy);
+    const StoragePtr & storage,
+    const StorageMetadataPtr & metadata_snapshot,
+    const std::vector<MutationCommand> & commands,
+    Context context_copy
+);
+
+ASTPtr getPartitionAndPredicateExpressionForMutationCommand(
+    const MutationCommand & command,
+    const StoragePtr & storage,
+    const Context & context
+);
 
 /// Create an input stream that will read data from storage and apply mutation commands (UPDATEs, DELETEs, MATERIALIZEs)
 /// to this data.
@@ -59,6 +69,8 @@ private:
 
     std::optional<SortDescription> getStorageSortDescriptionIfPossible(const Block & header) const;
 
+    ASTPtr getPartitionAndPredicateExpressionForMutationCommand(const MutationCommand & command) const;
+
     StoragePtr storage;
     StorageMetadataPtr metadata_snapshot;
     MutationCommands commands;

src/Interpreters/SystemLog.cpp
@@ -25,6 +25,7 @@ namespace
 {
 
 constexpr size_t DEFAULT_SYSTEM_LOG_FLUSH_INTERVAL_MILLISECONDS = 7500;
+constexpr size_t DEFAULT_METRIC_LOG_COLLECT_INTERVAL_MILLISECONDS = 1000;
 
 /// Creates a system log with MergeTree engine using parameters from config
 template <typename TSystemLog>
@@ -125,7 +126,8 @@ SystemLogs::SystemLogs(Context & global_context, const Poco::Util::AbstractConfi
     if (metric_log)
     {
-        size_t collect_interval_milliseconds = config.getUInt64("metric_log.collect_interval_milliseconds");
+        size_t collect_interval_milliseconds = config.getUInt64("metric_log.collect_interval_milliseconds",
+                                                                DEFAULT_METRIC_LOG_COLLECT_INTERVAL_MILLISECONDS);
         metric_log->startCollectMetric(collect_interval_milliseconds);
     }

src/Interpreters/TreeRewriter.cpp
@@ -63,7 +63,7 @@ struct CustomizeFunctionsData
 
     const String & customized_func_name;
 
-    void visit(ASTFunction & func, ASTPtr &)
+    void visit(ASTFunction & func, ASTPtr &) const
     {
         if (Poco::toLower(func.name) == func_name)
         {
@@ -97,7 +97,7 @@ struct CustomizeFunctionsSuffixData
 
     const String & customized_func_suffix;
 
-    void visit(ASTFunction & func, ASTPtr &)
+    void visit(ASTFunction & func, ASTPtr &) const
     {
         if (endsWith(Poco::toLower(func.name), func_suffix))
         {
@@ -118,7 +118,7 @@ struct CustomizeAggregateFunctionsSuffixData
 
     const String & customized_func_suffix;
 
-    void visit(ASTFunction & func, ASTPtr &)
+    void visit(ASTFunction & func, ASTPtr &) const
     {
         const auto & instance = AggregateFunctionFactory::instance();
         if (instance.isAggregateFunctionName(func.name) && !endsWith(func.name, customized_func_suffix))

src/Parsers/ASTAlterQuery.cpp
@@ -90,7 +90,7 @@ void ASTAlterCommand::formatImpl(
         column->formatImpl(settings, state, frame);
         if (partition)
         {
-            settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str<< " IN PARTITION " << (settings.hilite ? hilite_none : "");
+            settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << " IN PARTITION " << (settings.hilite ? hilite_none : "");
             partition->formatImpl(settings, state, frame);
         }
     }
@@ -150,7 +150,7 @@ void ASTAlterCommand::formatImpl(
         index->formatImpl(settings, state, frame);
         if (partition)
        {
-            settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str<< " IN PARTITION " << (settings.hilite ? hilite_none : "");
+            settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << " IN PARTITION " << (settings.hilite ? hilite_none : "");
             partition->formatImpl(settings, state, frame);
         }
     }
@@ -161,7 +161,7 @@ void ASTAlterCommand::formatImpl(
         index->formatImpl(settings, state, frame);
        if (partition)
        {
-            settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str<< " IN PARTITION " << (settings.hilite ? hilite_none : "");
+            settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << " IN PARTITION " << (settings.hilite ? hilite_none : "");
             partition->formatImpl(settings, state, frame);
         }
     }
@@ -272,7 +272,15 @@ void ASTAlterCommand::formatImpl(
     }
     else if (type == ASTAlterCommand::DELETE)
     {
-        settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "DELETE WHERE " << (settings.hilite ? hilite_none : "");
+        settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "DELETE" << (settings.hilite ? hilite_none : "");
+
+        if (partition)
+        {
+            settings.ostr << (settings.hilite ? hilite_keyword : "") << " IN PARTITION " << (settings.hilite ? hilite_none : "");
+            partition->formatImpl(settings, state, frame);
+        }
+
+        settings.ostr << (settings.hilite ? hilite_keyword : "") << " WHERE " << (settings.hilite ? hilite_none : "");
         predicate->formatImpl(settings, state, frame);
     }
     else if (type == ASTAlterCommand::UPDATE)
@@ -280,6 +288,12 @@ void ASTAlterCommand::formatImpl(
         settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "UPDATE " << (settings.hilite ? hilite_none : "");
         update_assignments->formatImpl(settings, state, frame);
 
+        if (partition)
+        {
+            settings.ostr << (settings.hilite ? hilite_keyword : "") << " IN PARTITION " << (settings.hilite ? hilite_none : "");
+            partition->formatImpl(settings, state, frame);
+        }
+
         settings.ostr << (settings.hilite ? hilite_keyword : "") << " WHERE " << (settings.hilite ? hilite_none : "");
         predicate->formatImpl(settings, state, frame);
     }
@@ -298,7 +312,7 @@ void ASTAlterCommand::formatImpl(
                       << (settings.hilite ? hilite_none : "");
         if (partition)
        {
-            settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << " IN PARTITION " << (settings.hilite ? hilite_none : "");
+            settings.ostr << (settings.hilite ? hilite_keyword : "") << " IN PARTITION " << (settings.hilite ? hilite_none : "");
             partition->formatImpl(settings, state, frame);
         }
     }

View File

@ -103,7 +103,7 @@ public:
*/ */
ASTPtr constraint; ASTPtr constraint;
/** Used in DROP PARTITION and ATTACH PARTITION FROM queries. /** Used in DROP PARTITION, ATTACH PARTITION FROM, UPDATE, DELETE queries.
* The value or ID of the partition is stored here. * The value or ID of the partition is stored here.
*/ */
ASTPtr partition; ASTPtr partition;

View File

@ -55,6 +55,12 @@ const char * ParserComparisonExpression::operators[] =
nullptr nullptr
}; };
const char * ParserComparisonExpression::overlapping_operators_to_skip[] =
{
"IN PARTITION",
nullptr
};
const char * ParserLogicalNotExpression::operators[] = const char * ParserLogicalNotExpression::operators[] =
{ {
"NOT", "not", "NOT", "not",
@ -182,6 +188,14 @@ bool ParserLeftAssociativeBinaryOperatorList::parseImpl(Pos & pos, ASTPtr & node
/// try to find any of the valid operators /// try to find any of the valid operators
const char ** it; const char ** it;
Expected stub;
for (it = overlapping_operators_to_skip; *it; ++it)
if (ParserKeyword{*it}.checkWithoutMoving(pos, stub))
break;
if (*it)
break;
for (it = operators; *it; it += 2) for (it = operators; *it; it += 2)
if (parseOperator(pos, *it, expected)) if (parseOperator(pos, *it, expected))
break; break;
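
This skip is what lets ALTER ... DELETE IN PARTITION ... WHERE ... parse: without it, the comparison-operator loop would consume the IN of IN PARTITION as the IN operator and misparse the clause. A minimal standalone model of the check (toy string matching instead of ClickHouse's token-based ParserKeyword; the query fragment is invented):

#include <cstdio>
#include <cstring>

static bool startsWith(const char * s, const char * kw)
{
    return std::strncmp(s, kw, std::strlen(kw)) == 0;
}

int main()
{
    const char * rest = "IN PARTITION 200001 WHERE x = 1";
    const char * overlapping_to_skip[] = {"IN PARTITION", nullptr};

    const char ** it;
    for (it = overlapping_to_skip; *it; ++it)
        if (startsWith(rest, *it))
            break;

    if (*it)
    {
        /// Bail out: this "IN" belongs to the IN PARTITION clause,
        /// which ParserAlterCommand consumes separately.
        std::printf("skipped overlapping keyword: %s\n", *it);
        return 0;
    }
    /// ... otherwise the operator list ("IN", "LIKE", ...) would be tried here.
    return 0;
}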

View File

@ -129,6 +129,7 @@ class ParserLeftAssociativeBinaryOperatorList : public IParserBase
{ {
private: private:
Operators_t operators; Operators_t operators;
Operators_t overlapping_operators_to_skip = { (const char *[]){ nullptr } };
ParserPtr first_elem_parser; ParserPtr first_elem_parser;
ParserPtr remaining_elem_parser; ParserPtr remaining_elem_parser;
@ -140,6 +141,11 @@ public:
{ {
} }
ParserLeftAssociativeBinaryOperatorList(Operators_t operators_, Operators_t overlapping_operators_to_skip_, ParserPtr && first_elem_parser_)
: operators(operators_), overlapping_operators_to_skip(overlapping_operators_to_skip_), first_elem_parser(std::move(first_elem_parser_))
{
}
ParserLeftAssociativeBinaryOperatorList(Operators_t operators_, ParserPtr && first_elem_parser_, ParserLeftAssociativeBinaryOperatorList(Operators_t operators_, ParserPtr && first_elem_parser_,
ParserPtr && remaining_elem_parser_) ParserPtr && remaining_elem_parser_)
: operators(operators_), first_elem_parser(std::move(first_elem_parser_)), : operators(operators_), first_elem_parser(std::move(first_elem_parser_)),
@ -331,7 +337,8 @@ class ParserComparisonExpression : public IParserBase
{ {
private: private:
static const char * operators[]; static const char * operators[];
ParserLeftAssociativeBinaryOperatorList operator_parser {operators, std::make_unique<ParserBetweenExpression>()}; static const char * overlapping_operators_to_skip[];
ParserLeftAssociativeBinaryOperatorList operator_parser {operators, overlapping_operators_to_skip, std::make_unique<ParserBetweenExpression>()};
protected: protected:
const char * getName() const override{ return "comparison expression"; } const char * getName() const override{ return "comparison expression"; }

View File

@ -79,7 +79,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
ParserKeyword s_to_volume("TO VOLUME"); ParserKeyword s_to_volume("TO VOLUME");
ParserKeyword s_to_table("TO TABLE"); ParserKeyword s_to_table("TO TABLE");
ParserKeyword s_delete_where("DELETE WHERE"); ParserKeyword s_delete("DELETE");
ParserKeyword s_update("UPDATE"); ParserKeyword s_update("UPDATE");
ParserKeyword s_where("WHERE"); ParserKeyword s_where("WHERE");
ParserKeyword s_to("TO"); ParserKeyword s_to("TO");
@ -506,8 +506,17 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
command->type = ASTAlterCommand::MODIFY_SAMPLE_BY; command->type = ASTAlterCommand::MODIFY_SAMPLE_BY;
} }
else if (s_delete_where.ignore(pos, expected)) else if (s_delete.ignore(pos, expected))
{ {
if (s_in_partition.ignore(pos, expected))
{
if (!parser_partition.parse(pos, command->partition, expected))
return false;
}
if (!s_where.ignore(pos, expected))
return false;
if (!parser_exp_elem.parse(pos, command->predicate, expected)) if (!parser_exp_elem.parse(pos, command->predicate, expected))
return false; return false;
@ -518,6 +527,12 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
if (!parser_assignment_list.parse(pos, command->update_assignments, expected)) if (!parser_assignment_list.parse(pos, command->update_assignments, expected))
return false; return false;
if (s_in_partition.ignore(pos, expected))
{
if (!parser_partition.parse(pos, command->partition, expected))
return false;
}
if (!s_where.ignore(pos, expected)) if (!s_where.ignore(pos, expected))
return false; return false;

View File

@ -10,7 +10,7 @@ namespace DB
* ALTER TABLE [db.]name [ON CLUSTER cluster] * ALTER TABLE [db.]name [ON CLUSTER cluster]
* [ADD COLUMN [IF NOT EXISTS] col_name type [AFTER col_after],] * [ADD COLUMN [IF NOT EXISTS] col_name type [AFTER col_after],]
* [DROP COLUMN [IF EXISTS] col_to_drop, ...] * [DROP COLUMN [IF EXISTS] col_to_drop, ...]
* [CLEAR COLUMN [IF EXISTS] col_to_clear [IN PARTITION partition],] * [CLEAR COLUMN [IF EXISTS] col_to_clear[ IN PARTITION partition],]
* [MODIFY COLUMN [IF EXISTS] col_to_modify type, ...] * [MODIFY COLUMN [IF EXISTS] col_to_modify type, ...]
* [RENAME COLUMN [IF EXISTS] col_name TO col_name] * [RENAME COLUMN [IF EXISTS] col_name TO col_name]
* [MODIFY PRIMARY KEY (a, b, c...)] * [MODIFY PRIMARY KEY (a, b, c...)]
@ -19,8 +19,12 @@ namespace DB
* [DROP|DETACH|ATTACH PARTITION|PART partition, ...] * [DROP|DETACH|ATTACH PARTITION|PART partition, ...]
* [FETCH PARTITION partition FROM ...] * [FETCH PARTITION partition FROM ...]
* [FREEZE [PARTITION] [WITH NAME name]] * [FREEZE [PARTITION] [WITH NAME name]]
* [DELETE WHERE ...] * [DELETE[ IN PARTITION partition] WHERE ...]
* [UPDATE col_name = expr, ... WHERE ...] * [UPDATE col_name = expr, ...[ IN PARTITION partition] WHERE ...]
* [ADD INDEX [IF NOT EXISTS] index_name [AFTER index_name]]
* [DROP INDEX [IF EXISTS] index_name]
* [CLEAR INDEX [IF EXISTS] index_name IN PARTITION partition]
* [MATERIALIZE INDEX [IF EXISTS] index_name [IN PARTITION partition]]
* ALTER LIVE VIEW [db.name] * ALTER LIVE VIEW [db.name]
* [REFRESH] * [REFRESH]
*/ */
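
With the parser and formatImpl changes above, the two new clause positions should round-trip through parse and format. A hedged in-tree sketch (parseQuery and queryToString are existing helpers from src/Parsers; the table, column, and partition names are invented):

#include <Parsers/ParserAlterQuery.h>
#include <Parsers/parseQuery.h>
#include <Parsers/queryToString.h>

using namespace DB;

void checkInPartitionGrammar()
{
    ParserAlterQuery parser;
    for (const char * query : {
            "ALTER TABLE t DELETE IN PARTITION 200001 WHERE x = 1",
            "ALTER TABLE t UPDATE x = x + 1 IN PARTITION 200001 WHERE y != 0"})
    {
        ASTPtr ast = parseQuery(parser, query, /* max_query_size = */ 0, /* max_parser_depth = */ 0);
        queryToString(ast); /// ASTAlterCommand::formatImpl emits IN PARTITION back out
    }
}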

View File

@ -95,6 +95,12 @@ void QueryPipeline::addTransform(ProcessorPtr transform)
pipe.addTransform(std::move(transform)); pipe.addTransform(std::move(transform));
} }
void QueryPipeline::transform(const Transformer & transformer)
{
checkInitializedAndNotCompleted();
pipe.transform(transformer);
}
void QueryPipeline::setSinks(const Pipe::ProcessorGetterWithStreamKind & getter) void QueryPipeline::setSinks(const Pipe::ProcessorGetterWithStreamKind & getter)
{ {
checkInitializedAndNotCompleted(); checkInitializedAndNotCompleted();

View File

@ -53,6 +53,11 @@ public:
void addSimpleTransform(const Pipe::ProcessorGetterWithStreamKind & getter); void addSimpleTransform(const Pipe::ProcessorGetterWithStreamKind & getter);
/// Add transform with getNumStreams() input ports. /// Add transform with getNumStreams() input ports.
void addTransform(ProcessorPtr transform); void addTransform(ProcessorPtr transform);
using Transformer = std::function<Processors(OutputPortRawPtrs ports)>;
/// Transform the pipeline in a general way.
void transform(const Transformer & transformer);
/// Add TotalsHavingTransform. Resize pipeline to single input. Adds totals port. /// Add TotalsHavingTransform. Resize pipeline to single input. Adds totals port.
void addTotalsHavingTransform(ProcessorPtr transform); void addTotalsHavingTransform(ProcessorPtr transform);
/// Add transform which calculates extremes. This transform adds extremes port and doesn't change inputs number. /// Add transform which calculates extremes. This transform adds extremes port and doesn't change inputs number.
@ -105,6 +110,9 @@ public:
void addInterpreterContext(std::shared_ptr<Context> context) { pipe.addInterpreterContext(std::move(context)); } void addInterpreterContext(std::shared_ptr<Context> context) { pipe.addInterpreterContext(std::move(context)); }
void addStorageHolder(StoragePtr storage) { pipe.addStorageHolder(std::move(storage)); } void addStorageHolder(StoragePtr storage) { pipe.addStorageHolder(std::move(storage)); }
void addQueryPlan(std::unique_ptr<QueryPlan> plan) { pipe.addQueryPlan(std::move(plan)); } void addQueryPlan(std::unique_ptr<QueryPlan> plan) { pipe.addQueryPlan(std::move(plan)); }
void setLimits(const StreamLocalLimits & limits) { pipe.setLimits(limits); }
void setLeafLimits(const SizeLimits & limits) { pipe.setLeafLimits(limits); }
void setQuota(const std::shared_ptr<const EnabledQuota> & quota) { pipe.setQuota(quota); }
/// For compatibility with IBlockInputStream. /// For compatibility with IBlockInputStream.
void setProgressCallback(const ProgressCallback & callback); void setProgressCallback(const ProgressCallback & callback);

View File

@ -0,0 +1,41 @@
#include <Processors/QueryPlan/AddingConstColumnStep.h>
#include <Processors/QueryPipeline.h>
#include <Processors/Transforms/AddingConstColumnTransform.h>
#include <IO/Operators.h>
namespace DB
{
static ITransformingStep::Traits getTraits()
{
return ITransformingStep::Traits
{
{
.preserves_distinct_columns = true,
.returns_single_stream = false,
.preserves_number_of_streams = true,
.preserves_sorting = true,
},
{
.preserves_number_of_rows = true,
}
};
}
AddingConstColumnStep::AddingConstColumnStep(const DataStream & input_stream_, ColumnWithTypeAndName column_)
: ITransformingStep(input_stream_,
AddingConstColumnTransform::transformHeader(input_stream_.header, column_),
getTraits())
, column(std::move(column_))
{
}
void AddingConstColumnStep::transformPipeline(QueryPipeline & pipeline)
{
pipeline.addSimpleTransform([&](const Block & header)
{
return std::make_shared<AddingConstColumnTransform>(header, column);
});
}
}
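
Intended usage mirrors the _sample_factor wiring in the MergeTreeDataSelectExecutor hunk near the end of this diff (plan and used_sample_factor come from that surrounding code):

ColumnWithTypeAndName column;
column.name = "_sample_factor";
column.type = std::make_shared<DataTypeFloat64>();
column.column = column.type->createColumnConst(0, Field(used_sample_factor));

auto adding_column = std::make_unique<AddingConstColumnStep>(plan->getCurrentDataStream(), std::move(column));
adding_column->setStepDescription("Add _sample_factor column");
plan->addStep(std::move(adding_column));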

View File

@ -0,0 +1,22 @@
#pragma once
#include <Processors/QueryPlan/ITransformingStep.h>
namespace DB
{
/// Adds a materialized const column with a specified value.
class AddingConstColumnStep : public ITransformingStep
{
public:
AddingConstColumnStep(const DataStream & input_stream_, ColumnWithTypeAndName column_);
String getName() const override { return "AddingConstColumn"; }
void transformPipeline(QueryPipeline & pipeline) override;
private:
ColumnWithTypeAndName column;
};
}

View File

@ -0,0 +1,45 @@
#include <Processors/QueryPlan/AddingMissedStep.h>
#include <Processors/QueryPipeline.h>
#include <Processors/Transforms/AddingMissedTransform.h>
#include <IO/Operators.h>
namespace DB
{
static ITransformingStep::Traits getTraits()
{
return ITransformingStep::Traits
{
{
.preserves_distinct_columns = true,
.returns_single_stream = false,
.preserves_number_of_streams = true,
.preserves_sorting = true,
},
{
.preserves_number_of_rows = true,
}
};
}
AddingMissedStep::AddingMissedStep(
const DataStream & input_stream_,
Block result_header_,
ColumnsDescription columns_,
const Context & context_)
: ITransformingStep(input_stream_, result_header_, getTraits())
, columns(std::move(columns_))
, context(context_)
{
updateDistinctColumns(output_stream->header, output_stream->distinct_columns);
}
void AddingMissedStep::transformPipeline(QueryPipeline & pipeline)
{
pipeline.addSimpleTransform([&](const Block & header)
{
return std::make_shared<AddingMissedTransform>(header, output_stream->header, columns, context);
});
}
}

View File

@ -0,0 +1,26 @@
#pragma once
#include <Processors/QueryPlan/ITransformingStep.h>
#include <Storages/ColumnsDescription.h>
namespace DB
{
/// Adds columns that are missing from the input block, filled with default values. See AddingMissedTransform.
class AddingMissedStep : public ITransformingStep
{
public:
AddingMissedStep(const DataStream & input_stream_,
Block result_header_,
ColumnsDescription columns_,
const Context & context_);
String getName() const override { return "AddingMissed"; }
void transformPipeline(QueryPipeline & pipeline) override;
private:
ColumnsDescription columns;
const Context & context;
};
}

View File

@ -14,7 +14,8 @@ QueryPipelinePtr ISourceStep::updatePipeline(QueryPipelines)
auto pipeline = std::make_unique<QueryPipeline>(); auto pipeline = std::make_unique<QueryPipeline>();
QueryPipelineProcessorsCollector collector(*pipeline, this); QueryPipelineProcessorsCollector collector(*pipeline, this);
initializePipeline(*pipeline); initializePipeline(*pipeline);
processors = collector.detachProcessors(); auto added_processors = collector.detachProcessors();
processors.insert(processors.end(), added_processors.begin(), added_processors.end());
return pipeline; return pipeline;
} }

View File

@ -16,7 +16,7 @@ public:
void describePipeline(FormatSettings & settings) const override; void describePipeline(FormatSettings & settings) const override;
private: protected:
/// We collect processors got after pipeline transformation. /// We collect processors got after pipeline transformation.
Processors processors; Processors processors;
}; };

View File

@ -0,0 +1,39 @@
#include <Processors/QueryPlan/MaterializingStep.h>
#include <Processors/QueryPipeline.h>
#include <Processors/Transforms/MaterializingTransform.h>
#include <DataStreams/materializeBlock.h>
namespace DB
{
static ITransformingStep::Traits getTraits()
{
return ITransformingStep::Traits
{
{
.preserves_distinct_columns = true,
.returns_single_stream = false,
.preserves_number_of_streams = true,
.preserves_sorting = true,
},
{
.preserves_number_of_rows = true,
}
};
}
MaterializingStep::MaterializingStep(const DataStream & input_stream_)
: ITransformingStep(input_stream_, materializeBlock(input_stream_.header), getTraits())
{
}
void MaterializingStep::transformPipeline(QueryPipeline & pipeline)
{
pipeline.addSimpleTransform([&](const Block & header)
{
return std::make_shared<MaterializingTransform>(header);
});
}
}

View File

@ -0,0 +1,18 @@
#pragma once
#include <Processors/QueryPlan/ITransformingStep.h>
namespace DB
{
/// Materialize constants. See MaterializingTransform.
class MaterializingStep : public ITransformingStep
{
public:
explicit MaterializingStep(const DataStream & input_stream_);
String getName() const override { return "Materializing"; }
void transformPipeline(QueryPipeline & pipeline) override;
};
}

View File

@ -0,0 +1,164 @@
#include <Processors/QueryPlan/MergingFinal.h>
#include <Processors/QueryPipeline.h>
#include <Processors/Merges/AggregatingSortedTransform.h>
#include <Processors/Merges/CollapsingSortedTransform.h>
#include <Processors/Merges/MergingSortedTransform.h>
#include <Processors/Merges/ReplacingSortedTransform.h>
#include <Processors/Merges/SummingSortedTransform.h>
#include <Processors/Merges/VersionedCollapsingTransform.h>
#include <Processors/Transforms/AddingSelectorTransform.h>
#include <Processors/Transforms/CopyTransform.h>
#include <IO/Operators.h>
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
static ITransformingStep::Traits getTraits()
{
return ITransformingStep::Traits
{
{
.preserves_distinct_columns = true,
.returns_single_stream = false,
.preserves_number_of_streams = false,
.preserves_sorting = false,
},
{
.preserves_number_of_rows = true,
}
};
}
MergingFinal::MergingFinal(
const DataStream & input_stream,
size_t num_output_streams_,
SortDescription sort_description_,
MergeTreeData::MergingParams params_,
Names partition_key_columns_,
size_t max_block_size_)
: ITransformingStep(input_stream, input_stream.header, getTraits())
, num_output_streams(num_output_streams_)
, sort_description(std::move(sort_description_))
, merging_params(std::move(params_))
, partition_key_columns(std::move(partition_key_columns_))
, max_block_size(max_block_size_)
{
/// TODO: check input_stream is partially sorted (each port) by the same description.
// output_stream->sort_description = sort_description;
// output_stream->sort_mode = DataStream::SortMode::Stream;
}
void MergingFinal::transformPipeline(QueryPipeline & pipeline)
{
const auto & header = pipeline.getHeader();
size_t num_outputs = pipeline.getNumStreams();
auto get_merging_processor = [&]() -> MergingTransformPtr
{
switch (merging_params.mode)
{
case MergeTreeData::MergingParams::Ordinary:
{
return std::make_shared<MergingSortedTransform>(header, num_outputs,
sort_description, max_block_size);
}
case MergeTreeData::MergingParams::Collapsing:
return std::make_shared<CollapsingSortedTransform>(header, num_outputs,
sort_description, merging_params.sign_column, true, max_block_size);
case MergeTreeData::MergingParams::Summing:
return std::make_shared<SummingSortedTransform>(header, num_outputs,
sort_description, merging_params.columns_to_sum, partition_key_columns, max_block_size);
case MergeTreeData::MergingParams::Aggregating:
return std::make_shared<AggregatingSortedTransform>(header, num_outputs,
sort_description, max_block_size);
case MergeTreeData::MergingParams::Replacing:
return std::make_shared<ReplacingSortedTransform>(header, num_outputs,
sort_description, merging_params.version_column, max_block_size);
case MergeTreeData::MergingParams::VersionedCollapsing:
return std::make_shared<VersionedCollapsingTransform>(header, num_outputs,
sort_description, merging_params.sign_column, max_block_size);
case MergeTreeData::MergingParams::Graphite:
throw Exception("GraphiteMergeTree doesn't support FINAL", ErrorCodes::LOGICAL_ERROR);
}
__builtin_unreachable();
};
if (num_output_streams <= 1 || sort_description.empty())
{
pipeline.addTransform(get_merging_processor());
return;
}
ColumnNumbers key_columns;
key_columns.reserve(sort_description.size());
for (auto & desc : sort_description)
{
if (!desc.column_name.empty())
key_columns.push_back(header.getPositionByName(desc.column_name));
else
key_columns.emplace_back(desc.column_number);
}
pipeline.addSimpleTransform([&](const Block & stream_header)
{
return std::make_shared<AddingSelectorTransform>(stream_header, num_output_streams, key_columns);
});
pipeline.transform([&](OutputPortRawPtrs ports)
{
Processors transforms;
std::vector<OutputPorts::iterator> output_ports;
transforms.reserve(ports.size() + num_output_streams);
output_ports.reserve(ports.size());
for (auto & port : ports)
{
auto copier = std::make_shared<CopyTransform>(header, num_output_streams);
connect(*port, copier->getInputPort());
output_ports.emplace_back(copier->getOutputs().begin());
transforms.emplace_back(std::move(copier));
}
for (size_t i = 0; i < num_output_streams; ++i)
{
auto merge = get_merging_processor();
merge->setSelectorPosition(i);
auto input = merge->getInputs().begin();
/// Connect i-th merge with i-th input port of every copier.
for (size_t j = 0; j < ports.size(); ++j)
{
connect(*output_ports[j], *input);
++output_ports[j];
++input;
}
transforms.emplace_back(std::move(merge));
}
return transforms;
});
}
void MergingFinal::describeActions(FormatSettings & settings) const
{
String prefix(settings.offset, ' ');
settings.out << prefix << "Sort description: ";
dumpSortDescription(sort_description, input_streams.front().header, settings.out);
settings.out << '\n';
}
}
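
The copier/merge wiring above is easiest to see with small numbers. A standalone sketch (a toy printf model, not ClickHouse types) printing the connections for 3 upstream ports fanned out to 2 final merges: each port feeds a CopyTransform with one output per merge, and merge i consumes output i of every copier, so all rows the selector routed to position i meet in the same merge.

#include <cstdio>

int main()
{
    const int ports = 3;  /// pipeline.getNumStreams()
    const int merges = 2; /// num_output_streams
    for (int i = 0; i < merges; ++i)
        for (int j = 0; j < ports; ++j)
            std::printf("copier[%d].out[%d] -> merge[%d].in[%d]\n", j, i, i, j);
    return 0;
}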

View File

@ -0,0 +1,35 @@
#pragma once
#include <Processors/QueryPlan/ITransformingStep.h>
#include <Core/SortDescription.h>
#include <Storages/MergeTree/MergeTreeData.h>
namespace DB
{
/// Merge streams of data into a single sorted stream.
class MergingFinal : public ITransformingStep
{
public:
explicit MergingFinal(
const DataStream & input_stream,
size_t num_output_streams_,
SortDescription sort_description_,
MergeTreeData::MergingParams params_,
Names partition_key_columns_,
size_t max_block_size_);
String getName() const override { return "MergingFinal"; }
void transformPipeline(QueryPipeline & pipeline) override;
void describeActions(FormatSettings & settings) const override;
private:
size_t num_output_streams;
SortDescription sort_description;
MergeTreeData::MergingParams merging_params;
Names partition_key_columns;
size_t max_block_size;
};
}

View File

@ -185,6 +185,17 @@ QueryPipelinePtr QueryPlan::buildQueryPipeline()
return last_pipeline; return last_pipeline;
} }
Pipe QueryPlan::convertToPipe()
{
if (!isInitialized())
return {};
if (isCompleted())
throw Exception("Cannot convert completed QueryPlan to Pipe", ErrorCodes::LOGICAL_ERROR);
return QueryPipeline::getPipe(std::move(*buildQueryPipeline()));
}
void QueryPlan::addInterpreterContext(std::shared_ptr<Context> context) void QueryPlan::addInterpreterContext(std::shared_ptr<Context> context)
{ {
interpreter_context.emplace_back(std::move(context)); interpreter_context.emplace_back(std::move(context));

View File

@ -17,6 +17,11 @@ using QueryPipelinePtr = std::unique_ptr<QueryPipeline>;
class Context; class Context;
class WriteBuffer; class WriteBuffer;
class QueryPlan;
using QueryPlanPtr = std::unique_ptr<QueryPlan>;
class Pipe;
/// A tree of query steps. /// A tree of query steps.
/// The goal of QueryPlan is to build QueryPipeline. /// The goal of QueryPlan is to build QueryPipeline.
/// QueryPlan let delay pipeline creation which is helpful for pipeline-level optimisations. /// QueryPlan let delay pipeline creation which is helpful for pipeline-level optimisations.
@ -28,7 +33,7 @@ public:
QueryPlan(QueryPlan &&); QueryPlan(QueryPlan &&);
QueryPlan & operator=(QueryPlan &&); QueryPlan & operator=(QueryPlan &&);
void unitePlans(QueryPlanStepPtr step, std::vector<std::unique_ptr<QueryPlan>> plans); void unitePlans(QueryPlanStepPtr step, std::vector<QueryPlanPtr> plans);
void addStep(QueryPlanStepPtr step); void addStep(QueryPlanStepPtr step);
bool isInitialized() const { return root != nullptr; } /// Tree is not empty bool isInitialized() const { return root != nullptr; } /// Tree is not empty
@ -39,6 +44,9 @@ public:
QueryPipelinePtr buildQueryPipeline(); QueryPipelinePtr buildQueryPipeline();
/// If initialized, build pipeline and convert to pipe. Otherwise, return empty pipe.
Pipe convertToPipe();
struct ExplainPlanOptions struct ExplainPlanOptions
{ {
/// Add output header to step. /// Add output header to step.
@ -61,6 +69,7 @@ public:
/// Set upper limit for the recommend number of threads. Will be applied to the newly-created pipelines. /// Set upper limit for the recommend number of threads. Will be applied to the newly-created pipelines.
/// TODO: make it in a better way. /// TODO: make it in a better way.
void setMaxThreads(size_t max_threads_) { max_threads = max_threads_; } void setMaxThreads(size_t max_threads_) { max_threads = max_threads_; }
size_t getMaxThreads() const { return max_threads; }
void addInterpreterContext(std::shared_ptr<Context> context); void addInterpreterContext(std::shared_ptr<Context> context);

View File

@ -5,7 +5,7 @@ namespace DB
{ {
ReadFromPreparedSource::ReadFromPreparedSource(Pipe pipe_, std::shared_ptr<Context> context_) ReadFromPreparedSource::ReadFromPreparedSource(Pipe pipe_, std::shared_ptr<Context> context_)
: ISourceStep(DataStream{.header = pipe_.getHeader(), .has_single_port = true}) : ISourceStep(DataStream{.header = pipe_.getHeader()})
, pipe(std::move(pipe_)) , pipe(std::move(pipe_))
, context(std::move(context_)) , context(std::move(context_))
{ {
@ -13,7 +13,11 @@ ReadFromPreparedSource::ReadFromPreparedSource(Pipe pipe_, std::shared_ptr<Conte
void ReadFromPreparedSource::initializePipeline(QueryPipeline & pipeline) void ReadFromPreparedSource::initializePipeline(QueryPipeline & pipeline)
{ {
for (const auto & processor : pipe.getProcessors())
processors.emplace_back(processor);
pipeline.init(std::move(pipe)); pipeline.init(std::move(pipe));
if (context) if (context)
pipeline.addInterpreterContext(std::move(context)); pipeline.addInterpreterContext(std::move(context));
} }

View File

@ -11,7 +11,7 @@ class ReadFromPreparedSource : public ISourceStep
public: public:
explicit ReadFromPreparedSource(Pipe pipe_, std::shared_ptr<Context> context_ = nullptr); explicit ReadFromPreparedSource(Pipe pipe_, std::shared_ptr<Context> context_ = nullptr);
String getName() const override { return "ReadNothing"; } String getName() const override { return "ReadFromPreparedSource"; }
void initializePipeline(QueryPipeline & pipeline) override; void initializePipeline(QueryPipeline & pipeline) override;
@ -20,4 +20,16 @@ private:
std::shared_ptr<Context> context; std::shared_ptr<Context> context;
}; };
class ReadFromStorageStep : public ReadFromPreparedSource
{
public:
ReadFromStorageStep(Pipe pipe_, String storage_name)
: ReadFromPreparedSource(std::move(pipe_))
{
setStepDescription(storage_name);
}
String getName() const override { return "ReadFromStorage"; }
};
} }

View File

@ -1,114 +0,0 @@
#include <Processors/QueryPlan/ReadFromStorageStep.h>
#include <Interpreters/Context.h>
#include <Processors/Sources/NullSource.h>
#include <Processors/Transforms/ExpressionTransform.h>
#include <Processors/Transforms/FilterTransform.h>
#include <Processors/Pipe.h>
#include <Processors/QueryPipeline.h>
#include <Storages/IStorage.h>
namespace DB
{
ReadFromStorageStep::ReadFromStorageStep(
TableLockHolder table_lock,
StorageMetadataPtr metadata_snapshot,
StreamLocalLimits & limits,
SizeLimits & leaf_limits,
std::shared_ptr<const EnabledQuota> quota,
StoragePtr storage,
const Names & required_columns,
SelectQueryInfo & query_info,
std::shared_ptr<Context> context,
QueryProcessingStage::Enum processing_stage,
size_t max_block_size,
size_t max_streams)
{
/// Note: we read from storage in constructor of step because we don't know real header before reading.
/// It will be fixed when storage return QueryPlanStep itself.
Pipe pipe = storage->read(required_columns, metadata_snapshot, query_info, *context, processing_stage, max_block_size, max_streams);
if (pipe.empty())
{
pipe = Pipe(std::make_shared<NullSource>(metadata_snapshot->getSampleBlockForColumns(required_columns, storage->getVirtuals(), storage->getStorageID())));
if (query_info.prewhere_info)
{
if (query_info.prewhere_info->alias_actions)
{
pipe.addSimpleTransform([&](const Block & header)
{
return std::make_shared<ExpressionTransform>(header, query_info.prewhere_info->alias_actions);
});
}
pipe.addSimpleTransform([&](const Block & header)
{
return std::make_shared<FilterTransform>(
header,
query_info.prewhere_info->prewhere_actions,
query_info.prewhere_info->prewhere_column_name,
query_info.prewhere_info->remove_prewhere_column);
});
// To remove additional columns
// In some cases, we did not read any marks so that the pipeline.streams is empty
// Thus, some columns in prewhere are not removed as expected
// This leads to mismatched header in distributed table
if (query_info.prewhere_info->remove_columns_actions)
{
pipe.addSimpleTransform([&](const Block & header)
{
return std::make_shared<ExpressionTransform>(
header, query_info.prewhere_info->remove_columns_actions);
});
}
}
}
pipeline = std::make_unique<QueryPipeline>();
QueryPipelineProcessorsCollector collector(*pipeline, this);
pipe.setLimits(limits);
/**
* Leaf size limits should be applied only for local processing of distributed queries.
* Such limits allow to control the read stage on leaf nodes and exclude the merging stage.
* Consider the case when distributed query needs to read from multiple shards. Then leaf
* limits will be applied on the shards only (including the root node) but will be ignored
* on the results merging stage.
*/
if (!storage->isRemote())
pipe.setLeafLimits(leaf_limits);
if (quota)
pipe.setQuota(quota);
pipeline->init(std::move(pipe));
/// Add resources to pipeline. The order is important.
/// Add in reverse order of destruction. Pipeline will be destroyed at the end in case of exception.
pipeline->addInterpreterContext(std::move(context));
pipeline->addStorageHolder(std::move(storage));
pipeline->addTableLock(std::move(table_lock));
processors = collector.detachProcessors();
output_stream = DataStream{.header = pipeline->getHeader(), .has_single_port = pipeline->getNumStreams() == 1};
}
ReadFromStorageStep::~ReadFromStorageStep() = default;
QueryPipelinePtr ReadFromStorageStep::updatePipeline(QueryPipelines)
{
return std::move(pipeline);
}
void ReadFromStorageStep::describePipeline(FormatSettings & settings) const
{
IQueryPlanStep::describePipeline(processors, settings);
}
}

View File

@ -1,53 +0,0 @@
#pragma once
#include <Processors/QueryPlan/IQueryPlanStep.h>
#include <Core/QueryProcessingStage.h>
#include <Storages/TableLockHolder.h>
#include <DataStreams/StreamLocalLimits.h>
namespace DB
{
class IStorage;
using StoragePtr = std::shared_ptr<IStorage>;
struct StorageInMemoryMetadata;
using StorageMetadataPtr = std::shared_ptr<const StorageInMemoryMetadata>;
struct SelectQueryInfo;
struct PrewhereInfo;
class EnabledQuota;
/// Reads from storage.
class ReadFromStorageStep : public IQueryPlanStep
{
public:
ReadFromStorageStep(
TableLockHolder table_lock,
StorageMetadataPtr metadata_snapshot,
StreamLocalLimits & limits,
SizeLimits & leaf_limits,
std::shared_ptr<const EnabledQuota> quota,
StoragePtr storage,
const Names & required_columns,
SelectQueryInfo & query_info,
std::shared_ptr<Context> context,
QueryProcessingStage::Enum processing_stage,
size_t max_block_size,
size_t max_streams);
~ReadFromStorageStep() override;
String getName() const override { return "ReadFromStorage"; }
QueryPipelinePtr updatePipeline(QueryPipelines) override;
void describePipeline(FormatSettings & settings) const override;
private:
QueryPipelinePtr pipeline;
Processors processors;
};
}

View File

@ -0,0 +1,37 @@
#include <Processors/QueryPlan/ReverseRowsStep.h>
#include <Processors/QueryPipeline.h>
#include <Processors/Transforms/ReverseTransform.h>
namespace DB
{
static ITransformingStep::Traits getTraits()
{
return ITransformingStep::Traits
{
{
.preserves_distinct_columns = true,
.returns_single_stream = false,
.preserves_number_of_streams = true,
.preserves_sorting = false,
},
{
.preserves_number_of_rows = true,
}
};
}
ReverseRowsStep::ReverseRowsStep(const DataStream & input_stream_)
: ITransformingStep(input_stream_, input_stream_.header, getTraits())
{
}
void ReverseRowsStep::transformPipeline(QueryPipeline & pipeline)
{
pipeline.addSimpleTransform([&](const Block & header)
{
return std::make_shared<ReverseTransform>(header);
});
}
}

View File

@ -0,0 +1,18 @@
#pragma once
#include <Processors/QueryPlan/ITransformingStep.h>
namespace DB
{
/// Reverse rows in chunk.
class ReverseRowsStep : public ITransformingStep
{
public:
ReverseRowsStep(const DataStream & input_stream_);
String getName() const override { return "ReverseRows"; }
void transformPipeline(QueryPipeline & pipeline) override;
};
}

View File

@ -0,0 +1,71 @@
#include <Processors/QueryPlan/SettingQuotaAndLimitsStep.h>
#include <Processors/QueryPipeline.h>
#include <Storages/IStorage.h>
namespace DB
{
static ITransformingStep::Traits getTraits()
{
return ITransformingStep::Traits
{
{
.preserves_distinct_columns = true,
.returns_single_stream = false,
.preserves_number_of_streams = true,
.preserves_sorting = true,
},
{
.preserves_number_of_rows = true,
}
};
}
SettingQuotaAndLimitsStep::SettingQuotaAndLimitsStep(
const DataStream & input_stream_,
StoragePtr storage_,
TableLockHolder table_lock_,
StreamLocalLimits & limits_,
SizeLimits & leaf_limits_,
std::shared_ptr<const EnabledQuota> quota_,
std::shared_ptr<Context> context_)
: ITransformingStep(input_stream_, input_stream_.header, getTraits())
, context(std::move(context_))
, storage(std::move(storage_))
, table_lock(std::move(table_lock_))
, limits(limits_)
, leaf_limits(leaf_limits_)
, quota(std::move(quota_))
{
}
void SettingQuotaAndLimitsStep::transformPipeline(QueryPipeline & pipeline)
{
/// Table lock is stored inside pipeline here.
pipeline.setLimits(limits);
/**
* Leaf size limits should be applied only for local processing of distributed queries.
* Such limits allow to control the read stage on leaf nodes and exclude the merging stage.
* Consider the case when distributed query needs to read from multiple shards. Then leaf
* limits will be applied on the shards only (including the root node) but will be ignored
* on the results merging stage.
*/
if (!storage->isRemote())
pipeline.setLeafLimits(leaf_limits);
if (quota)
pipeline.setQuota(quota);
/// Order of resources below is important.
if (context)
pipeline.addInterpreterContext(std::move(context));
if (storage)
pipeline.addStorageHolder(std::move(storage));
if (table_lock)
pipeline.addTableLock(std::move(table_lock));
}
}
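
One way to read the leaf-limit comment above: with a Distributed table over three shards and a per-leaf cap of 1000 rows, each shard's local read is checked against the cap, while the node merging shard results is not. A standalone toy model (invented numbers; not ClickHouse's SizeLimits type):

#include <cstdio>

struct LeafLimits { unsigned long max_rows; };

/// Enforced only where data is read locally, i.e. when !storage->isRemote().
static bool leafReadOk(unsigned long rows_read, LeafLimits leaf)
{
    return rows_read <= leaf.max_rows;
}

int main()
{
    LeafLimits leaf{1000};
    std::printf("one shard reading 900 rows: %s\n", leafReadOk(900, leaf) ? "ok" : "limit hit");
    /// The merging node may still combine 3 * 900 = 2700 rows; that total is
    /// governed by the ordinary (non-leaf) limits, not by the leaf cap.
    return 0;
}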

View File

@ -0,0 +1,44 @@
#pragma once
#include <Processors/QueryPlan/ITransformingStep.h>
#include <Storages/TableLockHolder.h>
#include <DataStreams/StreamLocalLimits.h>
namespace DB
{
class IStorage;
using StoragePtr = std::shared_ptr<IStorage>;
struct StorageInMemoryMetadata;
using StorageMetadataPtr = std::shared_ptr<const StorageInMemoryMetadata>;
class EnabledQuota;
/// Adds limits, quota, table_lock and other resources to the pipeline.
/// Doesn't change DataStream.
class SettingQuotaAndLimitsStep : public ITransformingStep
{
public:
SettingQuotaAndLimitsStep(
const DataStream & input_stream_,
StoragePtr storage_,
TableLockHolder table_lock_,
StreamLocalLimits & limits_,
SizeLimits & leaf_limits_,
std::shared_ptr<const EnabledQuota> quota_,
std::shared_ptr<Context> context_);
String getName() const override { return "SettingQuotaAndLimits"; }
void transformPipeline(QueryPipeline & pipeline) override;
private:
std::shared_ptr<Context> context;
StoragePtr storage;
TableLockHolder table_lock;
StreamLocalLimits limits;
SizeLimits leaf_limits;
std::shared_ptr<const EnabledQuota> quota;
};
}

View File

@ -30,7 +30,7 @@ QueryPipelinePtr UnionStep::updatePipeline(QueryPipelines pipelines)
return pipeline; return pipeline;
} }
*pipeline = QueryPipeline::unitePipelines(std::move(pipelines), output_stream->header ,max_threads); *pipeline = QueryPipeline::unitePipelines(std::move(pipelines), output_stream->header, max_threads);
processors = collector.detachProcessors(); processors = collector.detachProcessors();
return pipeline; return pipeline;

View File

@ -9,7 +9,7 @@ class UnionStep : public IQueryPlanStep
{ {
public: public:
/// max_threads is used to limit the number of threads for result pipeline. /// max_threads is used to limit the number of threads for result pipeline.
UnionStep(DataStreams input_streams_, Block result_header, size_t max_threads_); UnionStep(DataStreams input_streams_, Block result_header, size_t max_threads_ = 0);
String getName() const override { return "Union"; } String getName() const override { return "Union"; }

View File

@ -4,33 +4,40 @@
namespace DB namespace DB
{ {
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
/// Adds a materialized const column to the chunk with a specified value. /// Adds a materialized const column to the chunk with a specified value.
template <typename T>
class AddingConstColumnTransform : public ISimpleTransform class AddingConstColumnTransform : public ISimpleTransform
{ {
public: public:
AddingConstColumnTransform(const Block & header, DataTypePtr data_type_, T value_, const String & column_name_) AddingConstColumnTransform(const Block & header, ColumnWithTypeAndName column_)
: ISimpleTransform(header, addColumn(header, data_type_, column_name_), false) : ISimpleTransform(header, transformHeader(header, column_), false)
, data_type(std::move(data_type_)), value(value_) {} , column(std::move(column_))
{
if (!column.column || !isColumnConst(*column.column) || !column.column->empty())
throw Exception("AddingConstColumnTransform expected empty const column", ErrorCodes::LOGICAL_ERROR);
}
String getName() const override { return "AddingConstColumnTransform"; } String getName() const override { return "AddingConstColumnTransform"; }
static Block transformHeader(Block header, ColumnWithTypeAndName & column_)
{
header.insert(column_);
return header;
}
protected: protected:
void transform(Chunk & chunk) override void transform(Chunk & chunk) override
{ {
auto num_rows = chunk.getNumRows(); auto num_rows = chunk.getNumRows();
chunk.addColumn(data_type->createColumnConst(num_rows, value)->convertToFullColumnIfConst()); chunk.addColumn(column.column->cloneResized(num_rows)->convertToFullColumnIfConst());
} }
private: private:
static Block addColumn(Block header, const DataTypePtr & data_type, const String & column_name) ColumnWithTypeAndName column;
{
header.insert({data_type->createColumn(), data_type, column_name});
return header;
}
DataTypePtr data_type;
T value;
}; };
} }
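
The reworked transform insists on an empty const column and stamps it to each chunk's row count via cloneResized. A hedged in-tree sketch of building a suitable column (same calls as the _sample_factor code later in this diff; the function name is invented):

#include <DataTypes/DataTypesNumber.h>
#include <Core/Field.h>

using namespace DB;

ColumnPtr makeConstColumnExpanded(size_t num_rows, Float64 value)
{
    auto type = std::make_shared<DataTypeFloat64>();
    auto empty_const = type->createColumnConst(0, Field(value)); /// size 0, as the constructor check requires
    return empty_const->cloneResized(num_rows)->convertToFullColumnIfConst();
}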

View File

@ -88,7 +88,9 @@ SRCS(
Pipe.cpp Pipe.cpp
Port.cpp Port.cpp
QueryPipeline.cpp QueryPipeline.cpp
QueryPlan/AddingConstColumnStep.cpp
QueryPlan/AddingDelayedSourceStep.cpp QueryPlan/AddingDelayedSourceStep.cpp
QueryPlan/AddingMissedStep.cpp
QueryPlan/AggregatingStep.cpp QueryPlan/AggregatingStep.cpp
QueryPlan/ArrayJoinStep.cpp QueryPlan/ArrayJoinStep.cpp
QueryPlan/ConvertingStep.cpp QueryPlan/ConvertingStep.cpp
@ -105,16 +107,19 @@ SRCS(
QueryPlan/ITransformingStep.cpp QueryPlan/ITransformingStep.cpp
QueryPlan/LimitByStep.cpp QueryPlan/LimitByStep.cpp
QueryPlan/LimitStep.cpp QueryPlan/LimitStep.cpp
QueryPlan/MaterializingStep.cpp
QueryPlan/MergeSortingStep.cpp QueryPlan/MergeSortingStep.cpp
QueryPlan/MergingAggregatedStep.cpp QueryPlan/MergingAggregatedStep.cpp
QueryPlan/MergingFinal.cpp
QueryPlan/MergingSortedStep.cpp QueryPlan/MergingSortedStep.cpp
QueryPlan/OffsetStep.cpp QueryPlan/OffsetStep.cpp
QueryPlan/PartialSortingStep.cpp QueryPlan/PartialSortingStep.cpp
QueryPlan/QueryPlan.cpp QueryPlan/QueryPlan.cpp
QueryPlan/ReadFromPreparedSource.cpp QueryPlan/ReadFromPreparedSource.cpp
QueryPlan/ReadFromStorageStep.cpp
QueryPlan/ReadNothingStep.cpp QueryPlan/ReadNothingStep.cpp
QueryPlan/ReverseRowsStep.cpp
QueryPlan/RollupStep.cpp QueryPlan/RollupStep.cpp
QueryPlan/SettingQuotaAndLimitsStep.cpp
QueryPlan/TotalsHavingStep.cpp QueryPlan/TotalsHavingStep.cpp
QueryPlan/UnionStep.cpp QueryPlan/UnionStep.cpp
ResizeProcessor.cpp ResizeProcessor.cpp

View File

@ -7,11 +7,12 @@
#include <Parsers/ASTCreateQuery.h> #include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTSetQuery.h> #include <Parsers/ASTSetQuery.h>
#include <Processors/Pipe.h> #include <Processors/Pipe.h>
#include <Processors/QueryPlan/ReadFromStorageStep.h> #include <Processors/QueryPlan/ReadFromPreparedSource.h>
#include <Interpreters/Context.h> #include <Interpreters/Context.h>
#include <Common/StringUtils/StringUtils.h> #include <Common/StringUtils/StringUtils.h>
#include <Common/quoteString.h> #include <Common/quoteString.h>
#include <Interpreters/ExpressionActions.h> #include <Interpreters/ExpressionActions.h>
#include <Interpreters/InterpreterSelectQuery.h>
namespace DB namespace DB
@ -94,24 +95,25 @@ Pipe IStorage::read(
void IStorage::read( void IStorage::read(
QueryPlan & query_plan, QueryPlan & query_plan,
TableLockHolder table_lock,
StorageMetadataPtr metadata_snapshot,
StreamLocalLimits & limits,
SizeLimits & leaf_limits,
std::shared_ptr<const EnabledQuota> quota,
const Names & column_names, const Names & column_names,
const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info, SelectQueryInfo & query_info,
std::shared_ptr<Context> context, const Context & context,
QueryProcessingStage::Enum processed_stage, QueryProcessingStage::Enum processed_stage,
size_t max_block_size, size_t max_block_size,
unsigned num_streams) unsigned num_streams)
{ {
auto read_step = std::make_unique<ReadFromStorageStep>( auto pipe = read(column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams);
std::move(table_lock), std::move(metadata_snapshot), limits, leaf_limits, std::move(quota), shared_from_this(), if (pipe.empty())
column_names, query_info, std::move(context), processed_stage, max_block_size, num_streams); {
auto header = metadata_snapshot->getSampleBlockForColumns(column_names, getVirtuals(), getStorageID());
read_step->setStepDescription("Read from " + getName()); InterpreterSelectQuery::addEmptySourceToQueryPlan(query_plan, header, query_info);
query_plan.addStep(std::move(read_step)); }
else
{
auto read_step = std::make_unique<ReadFromStorageStep>(std::move(pipe), getName());
query_plan.addStep(std::move(read_step));
}
} }
Pipe IStorage::alterPartition( Pipe IStorage::alterPartition(

View File

@ -48,6 +48,7 @@ using Processors = std::vector<ProcessorPtr>;
class Pipe; class Pipe;
class QueryPlan; class QueryPlan;
using QueryPlanPtr = std::unique_ptr<QueryPlan>;
class StoragePolicy; class StoragePolicy;
using StoragePolicyPtr = std::shared_ptr<const StoragePolicy>; using StoragePolicyPtr = std::shared_ptr<const StoragePolicy>;
@ -285,17 +286,13 @@ public:
/// Default implementation creates ReadFromStorageStep and uses usual read. /// Default implementation creates ReadFromStorageStep and uses usual read.
virtual void read( virtual void read(
QueryPlan & query_plan, QueryPlan & query_plan,
TableLockHolder table_lock, const Names & /*column_names*/,
StorageMetadataPtr metadata_snapshot, const StorageMetadataPtr & /*metadata_snapshot*/,
StreamLocalLimits & limits, SelectQueryInfo & /*query_info*/,
SizeLimits & leaf_limits, const Context & /*context*/,
std::shared_ptr<const EnabledQuota> quota, QueryProcessingStage::Enum /*processed_stage*/,
const Names & column_names, size_t /*max_block_size*/,
SelectQueryInfo & query_info, unsigned /*num_streams*/);
std::shared_ptr<Context> context,
QueryProcessingStage::Enum processed_stage,
size_t max_block_size,
unsigned num_streams);
/** Writes the data to a table. /** Writes the data to a table.
* Receives a description of the query, which can contain information about the data write method. * Receives a description of the query, which can contain information about the data write method.

View File

@ -9,17 +9,17 @@ struct BoolMask
BoolMask() {} BoolMask() {}
BoolMask(bool can_be_true_, bool can_be_false_) : can_be_true(can_be_true_), can_be_false(can_be_false_) {} BoolMask(bool can_be_true_, bool can_be_false_) : can_be_true(can_be_true_), can_be_false(can_be_false_) {}
BoolMask operator &(const BoolMask & m) BoolMask operator &(const BoolMask & m) const
{ {
return BoolMask(can_be_true && m.can_be_true, can_be_false || m.can_be_false); return {can_be_true && m.can_be_true, can_be_false || m.can_be_false};
} }
BoolMask operator |(const BoolMask & m) BoolMask operator |(const BoolMask & m) const
{ {
return BoolMask(can_be_true || m.can_be_true, can_be_false && m.can_be_false); return {can_be_true || m.can_be_true, can_be_false && m.can_be_false};
} }
BoolMask operator !() BoolMask operator !() const
{ {
return BoolMask(can_be_false, can_be_true); return {can_be_false, can_be_true};
} }
/// If mask is (true, true), then it can no longer change under operation |. /// If mask is (true, true), then it can no longer change under operation |.
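
The two flags form a small three-valued logic over key ranges: can_be_true / can_be_false record whether the condition may hold, respectively may fail, somewhere in the range. A standalone check of identities implied by the definitions above:

#include <cassert>

struct BoolMask
{
    bool can_be_true;
    bool can_be_false;
    BoolMask operator &(const BoolMask & m) const { return {can_be_true && m.can_be_true, can_be_false || m.can_be_false}; }
    BoolMask operator |(const BoolMask & m) const { return {can_be_true || m.can_be_true, can_be_false && m.can_be_false}; }
    BoolMask operator !() const { return {can_be_false, can_be_true}; }
};

int main()
{
    BoolMask always{true, false}, never{false, true};
    assert((always & BoolMask{true, true}).can_be_true);  /// may still match...
    assert((always & BoolMask{true, true}).can_be_false); /// ...but may also fail
    assert(!(never | never).can_be_true);                 /// OR of two "never" stays "never"
    BoolMask lhs = !(always & never), rhs = !always | !never; /// De Morgan holds for the masks
    assert(lhs.can_be_true == rhs.can_be_true && lhs.can_be_false == rhs.can_be_false);
    return 0;
}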

View File

@ -1,6 +1,7 @@
#include <Storages/MergeTree/EphemeralLockInZooKeeper.h> #include <Storages/MergeTree/EphemeralLockInZooKeeper.h>
#include <Common/ZooKeeper/KeeperException.h> #include <Common/ZooKeeper/KeeperException.h>
#include <common/logger_useful.h> #include <common/logger_useful.h>
#include <common/types.h>
namespace DB namespace DB
@ -71,13 +72,13 @@ EphemeralLockInZooKeeper::~EphemeralLockInZooKeeper()
EphemeralLocksInAllPartitions::EphemeralLocksInAllPartitions( EphemeralLocksInAllPartitions::EphemeralLocksInAllPartitions(
const String & block_numbers_path, const String & path_prefix, const String & temp_path, const String & block_numbers_path, const String & path_prefix, const String & temp_path,
zkutil::ZooKeeper & zookeeper_) zkutil::ZooKeeper & zookeeper_)
: zookeeper(zookeeper_) : zookeeper(&zookeeper_)
{ {
std::vector<String> holders; std::vector<String> holders;
while (true) while (true)
{ {
Coordination::Stat partitions_stat; Coordination::Stat partitions_stat;
Strings partitions = zookeeper.getChildren(block_numbers_path, &partitions_stat); Strings partitions = zookeeper->getChildren(block_numbers_path, &partitions_stat);
if (holders.size() < partitions.size()) if (holders.size() < partitions.size())
{ {
@ -85,7 +86,7 @@ EphemeralLocksInAllPartitions::EphemeralLocksInAllPartitions(
for (size_t i = 0; i < partitions.size() - holders.size(); ++i) for (size_t i = 0; i < partitions.size() - holders.size(); ++i)
{ {
String path = temp_path + "/abandonable_lock-"; String path = temp_path + "/abandonable_lock-";
holder_futures.push_back(zookeeper.asyncCreate(path, {}, zkutil::CreateMode::EphemeralSequential)); holder_futures.push_back(zookeeper->asyncCreate(path, {}, zkutil::CreateMode::EphemeralSequential));
} }
for (auto & future : holder_futures) for (auto & future : holder_futures)
{ {
@ -104,7 +105,7 @@ EphemeralLocksInAllPartitions::EphemeralLocksInAllPartitions(
lock_ops.push_back(zkutil::makeCheckRequest(block_numbers_path, partitions_stat.version)); lock_ops.push_back(zkutil::makeCheckRequest(block_numbers_path, partitions_stat.version));
Coordination::Responses lock_responses; Coordination::Responses lock_responses;
Coordination::Error rc = zookeeper.tryMulti(lock_ops, lock_responses); Coordination::Error rc = zookeeper->tryMulti(lock_ops, lock_responses);
if (rc == Coordination::Error::ZBADVERSION) if (rc == Coordination::Error::ZBADVERSION)
{ {
LOG_TRACE(&Poco::Logger::get("EphemeralLocksInAllPartitions"), "Someone has inserted a block in a new partition while we were creating locks. Retry."); LOG_TRACE(&Poco::Logger::get("EphemeralLocksInAllPartitions"), "Someone has inserted a block in a new partition while we were creating locks. Retry.");
@ -131,13 +132,16 @@ EphemeralLocksInAllPartitions::EphemeralLocksInAllPartitions(
void EphemeralLocksInAllPartitions::unlock() void EphemeralLocksInAllPartitions::unlock()
{ {
if (!zookeeper)
return;
std::vector<zkutil::ZooKeeper::FutureMulti> futures; std::vector<zkutil::ZooKeeper::FutureMulti> futures;
for (const auto & lock : locks) for (const auto & lock : locks)
{ {
Coordination::Requests unlock_ops; Coordination::Requests unlock_ops;
unlock_ops.emplace_back(zkutil::makeRemoveRequest(lock.path, -1)); unlock_ops.emplace_back(zkutil::makeRemoveRequest(lock.path, -1));
unlock_ops.emplace_back(zkutil::makeRemoveRequest(lock.holder_path, -1)); unlock_ops.emplace_back(zkutil::makeRemoveRequest(lock.holder_path, -1));
futures.push_back(zookeeper.asyncMulti(unlock_ops)); futures.push_back(zookeeper->asyncMulti(unlock_ops));
} }
for (auto & future : futures) for (auto & future : futures)

View File

@ -1,9 +1,14 @@
#pragma once #pragma once
#include "ReplicatedMergeTreeMutationEntry.h"
#include <Common/ZooKeeper/ZooKeeper.h> #include <Common/ZooKeeper/ZooKeeper.h>
#include <Common/Exception.h> #include <Common/Exception.h>
#include <IO/ReadHelpers.h> #include <IO/ReadHelpers.h>
#include <map>
#include <optional>
namespace DB namespace DB
{ {
@ -87,13 +92,30 @@ private:
/// Acquires block number locks in all partitions. /// Acquires block number locks in all partitions.
class EphemeralLocksInAllPartitions : private boost::noncopyable class EphemeralLocksInAllPartitions : public boost::noncopyable
{ {
public: public:
EphemeralLocksInAllPartitions( EphemeralLocksInAllPartitions(
const String & block_numbers_path, const String & path_prefix, const String & temp_path, const String & block_numbers_path, const String & path_prefix, const String & temp_path,
zkutil::ZooKeeper & zookeeper_); zkutil::ZooKeeper & zookeeper_);
EphemeralLocksInAllPartitions() = default;
EphemeralLocksInAllPartitions(EphemeralLocksInAllPartitions && rhs) noexcept
: zookeeper(rhs.zookeeper)
, locks(std::move(rhs.locks))
{
rhs.zookeeper = nullptr;
}
EphemeralLocksInAllPartitions & operator=(EphemeralLocksInAllPartitions && rhs) noexcept
{
zookeeper = rhs.zookeeper;
rhs.zookeeper = nullptr;
locks = std::move(rhs.locks);
return *this;
}
struct LockInfo struct LockInfo
{ {
String path; String path;
@ -110,8 +132,51 @@ public:
~EphemeralLocksInAllPartitions(); ~EphemeralLocksInAllPartitions();
private: private:
zkutil::ZooKeeper & zookeeper; zkutil::ZooKeeper * zookeeper = nullptr;
std::vector<LockInfo> locks; std::vector<LockInfo> locks;
}; };
/// This class allows scoped manipulation of block numbers locked in certain partitions.
/// See StorageReplicatedMergeTree::allocateBlockNumbersInAffectedPartitions and the alter()/mutate() methods.
class PartitionBlockNumbersHolder
{
public:
PartitionBlockNumbersHolder(const PartitionBlockNumbersHolder &) = delete;
PartitionBlockNumbersHolder & operator=(const PartitionBlockNumbersHolder &) = delete;
using BlockNumbersType = ReplicatedMergeTreeMutationEntry::BlockNumbersType;
PartitionBlockNumbersHolder() = default;
PartitionBlockNumbersHolder(
BlockNumbersType block_numbers_, std::optional<EphemeralLocksInAllPartitions> locked_block_numbers_holder)
: block_numbers(std::move(block_numbers_))
, multiple_partitions_holder(std::move(locked_block_numbers_holder))
{
}
PartitionBlockNumbersHolder(
BlockNumbersType block_numbers_, std::optional<EphemeralLockInZooKeeper> locked_block_numbers_holder)
: block_numbers(std::move(block_numbers_))
, single_partition_holder(std::move(locked_block_numbers_holder))
{
}
PartitionBlockNumbersHolder & operator=(PartitionBlockNumbersHolder &&) = default;
const BlockNumbersType & getBlockNumbers() const { return block_numbers; }
void reset()
{
multiple_partitions_holder.reset();
single_partition_holder.reset();
block_numbers.clear();
}
private:
BlockNumbersType block_numbers;
std::optional<EphemeralLocksInAllPartitions> multiple_partitions_holder;
std::optional<EphemeralLockInZooKeeper> single_partition_holder;
};
} }
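
The reference-to-pointer switch above is what makes EphemeralLocksInAllPartitions movable: a T & member pins the object (move assignment is implicitly deleted, and a defaulted move would still alias the old session). The same pattern in a standalone sketch (toy Session type; the real class releases ZooKeeper nodes in unlock()):

#include <utility>

struct Session {};

struct Locks
{
    Session * session = nullptr; /// was: Session & session;

    Locks() = default;
    explicit Locks(Session & s) : session(&s) {}

    Locks(Locks && rhs) noexcept : session(rhs.session) { rhs.session = nullptr; }
    Locks & operator=(Locks && rhs) noexcept
    {
        session = rhs.session;
        rhs.session = nullptr;
        return *this;
    }

    void unlock()
    {
        if (!session)
            return; /// moved-from or already unlocked: nothing to release
        /// ... release resources via *session ...
        session = nullptr;
    }

    ~Locks() { unlock(); }
};

int main()
{
    Session s;
    Locks a(s);
    Locks b = std::move(a); /// a.session is now nullptr; only b releases
    b.unlock();
    return 0;
}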

View File

@ -23,24 +23,20 @@
#include <Parsers/parseIdentifierOrStringLiteral.h> #include <Parsers/parseIdentifierOrStringLiteral.h>
#include <Interpreters/ExpressionAnalyzer.h> #include <Interpreters/ExpressionAnalyzer.h>
#include <Interpreters/Context.h> #include <Interpreters/Context.h>
#include <Processors/ConcatProcessor.h>
#include <Processors/QueryPlan/QueryPlan.h>
#include <Processors/QueryPlan/FilterStep.h>
#include <Processors/QueryPlan/ExpressionStep.h>
#include <Processors/QueryPlan/ReadFromPreparedSource.h>
#include <Processors/QueryPlan/AddingConstColumnStep.h>
#include <Processors/QueryPlan/ReverseRowsStep.h>
#include <Processors/QueryPlan/MergingSortedStep.h>
#include <Processors/QueryPlan/UnionStep.h>
#include <Processors/QueryPlan/MergingFinal.h>
#include <DataTypes/DataTypeDate.h> #include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeEnum.h> #include <DataTypes/DataTypeEnum.h>
#include <DataTypes/DataTypesNumber.h> #include <DataTypes/DataTypesNumber.h>
#include <Processors/ConcatProcessor.h>
#include <Processors/Merges/AggregatingSortedTransform.h>
#include <Processors/Merges/CollapsingSortedTransform.h>
#include <Processors/Merges/MergingSortedTransform.h>
#include <Processors/Merges/ReplacingSortedTransform.h>
#include <Processors/Merges/SummingSortedTransform.h>
#include <Processors/Merges/VersionedCollapsingTransform.h>
#include <Processors/Sources/SourceFromInputStream.h>
#include <Processors/Transforms/AddingConstColumnTransform.h>
#include <Processors/Transforms/AddingSelectorTransform.h>
#include <Processors/Transforms/CopyTransform.h>
#include <Processors/Transforms/ExpressionTransform.h>
#include <Processors/Transforms/FilterTransform.h>
#include <Processors/Transforms/ReverseTransform.h>
#include <Storages/VirtualColumnUtils.h> #include <Storages/VirtualColumnUtils.h>
namespace ProfileEvents namespace ProfileEvents
@ -83,17 +79,6 @@ static Block getBlockWithPartColumn(const MergeTreeData::DataPartsVector & parts
return Block{ColumnWithTypeAndName(std::move(column), std::make_shared<DataTypeString>(), "_part")}; return Block{ColumnWithTypeAndName(std::move(column), std::make_shared<DataTypeString>(), "_part")};
} }
/// Check if ORDER BY clause of the query has some expression.
static bool sortingDescriptionHasExpressions(const SortDescription & sort_description, const StorageMetadataPtr & metadata_snapshot)
{
auto all_columns = metadata_snapshot->getColumns();
for (const auto & sort_column : sort_description)
{
if (!all_columns.has(sort_column.column_name))
return true;
}
return false;
}
size_t MergeTreeDataSelectExecutor::getApproximateTotalRowsToRead( size_t MergeTreeDataSelectExecutor::getApproximateTotalRowsToRead(
const MergeTreeData::DataPartsVector & parts, const MergeTreeData::DataPartsVector & parts,
@ -144,7 +129,7 @@ static RelativeSize convertAbsoluteSampleSizeToRelative(const ASTPtr & node, siz
} }
Pipe MergeTreeDataSelectExecutor::read( QueryPlanPtr MergeTreeDataSelectExecutor::read(
const Names & column_names_to_return, const Names & column_names_to_return,
const StorageMetadataPtr & metadata_snapshot, const StorageMetadataPtr & metadata_snapshot,
const SelectQueryInfo & query_info, const SelectQueryInfo & query_info,
@ -159,7 +144,7 @@ Pipe MergeTreeDataSelectExecutor::read(
max_block_numbers_to_read); max_block_numbers_to_read);
} }
Pipe MergeTreeDataSelectExecutor::readFromParts( QueryPlanPtr MergeTreeDataSelectExecutor::readFromParts(
MergeTreeData::DataPartsVector parts, MergeTreeData::DataPartsVector parts,
const Names & column_names_to_return, const Names & column_names_to_return,
const StorageMetadataPtr & metadata_snapshot, const StorageMetadataPtr & metadata_snapshot,
@ -555,7 +540,7 @@ Pipe MergeTreeDataSelectExecutor::readFromParts(
if (no_data) if (no_data)
{ {
LOG_DEBUG(log, "Sampling yields no data."); LOG_DEBUG(log, "Sampling yields no data.");
return {}; return std::make_unique<QueryPlan>();
} }
LOG_DEBUG(log, "Key condition: {}", key_condition.toString()); LOG_DEBUG(log, "Key condition: {}", key_condition.toString());
@ -725,13 +710,13 @@ Pipe MergeTreeDataSelectExecutor::readFromParts(
LOG_DEBUG(log, "Selected {} parts by partition key, {} parts by primary key, {} marks by primary key, {} marks to read from {} ranges", parts.size(), parts_with_ranges.size(), sum_marks_pk.load(std::memory_order_relaxed), sum_marks, sum_ranges); LOG_DEBUG(log, "Selected {} parts by partition key, {} parts by primary key, {} marks by primary key, {} marks to read from {} ranges", parts.size(), parts_with_ranges.size(), sum_marks_pk.load(std::memory_order_relaxed), sum_marks, sum_ranges);
if (parts_with_ranges.empty()) if (parts_with_ranges.empty())
return {}; return std::make_unique<QueryPlan>();
ProfileEvents::increment(ProfileEvents::SelectedParts, parts_with_ranges.size()); ProfileEvents::increment(ProfileEvents::SelectedParts, parts_with_ranges.size());
ProfileEvents::increment(ProfileEvents::SelectedRanges, sum_ranges); ProfileEvents::increment(ProfileEvents::SelectedRanges, sum_ranges);
ProfileEvents::increment(ProfileEvents::SelectedMarks, sum_marks); ProfileEvents::increment(ProfileEvents::SelectedMarks, sum_marks);
Pipe res; QueryPlanPtr plan;
/// Projection, that needed to drop columns, which have appeared by execution /// Projection, that needed to drop columns, which have appeared by execution
/// of some extra expressions, and to allow execute the same expressions later. /// of some extra expressions, and to allow execute the same expressions later.
@ -752,7 +737,7 @@ Pipe MergeTreeDataSelectExecutor::readFromParts(
std::sort(column_names_to_read.begin(), column_names_to_read.end()); std::sort(column_names_to_read.begin(), column_names_to_read.end());
column_names_to_read.erase(std::unique(column_names_to_read.begin(), column_names_to_read.end()), column_names_to_read.end()); column_names_to_read.erase(std::unique(column_names_to_read.begin(), column_names_to_read.end()), column_names_to_read.end());
res = spreadMarkRangesAmongStreamsFinal( plan = spreadMarkRangesAmongStreamsFinal(
std::move(parts_with_ranges), std::move(parts_with_ranges),
num_streams, num_streams,
column_names_to_read, column_names_to_read,
@ -774,7 +759,7 @@ Pipe MergeTreeDataSelectExecutor::readFromParts(
auto syntax_result = TreeRewriter(context).analyze(order_key_prefix_ast, metadata_snapshot->getColumns().getAllPhysical()); auto syntax_result = TreeRewriter(context).analyze(order_key_prefix_ast, metadata_snapshot->getColumns().getAllPhysical());
auto sorting_key_prefix_expr = ExpressionAnalyzer(order_key_prefix_ast, syntax_result, context).getActions(false); auto sorting_key_prefix_expr = ExpressionAnalyzer(order_key_prefix_ast, syntax_result, context).getActions(false);
res = spreadMarkRangesAmongStreamsWithOrder( plan = spreadMarkRangesAmongStreamsWithOrder(
std::move(parts_with_ranges), std::move(parts_with_ranges),
num_streams, num_streams,
column_names_to_read, column_names_to_read,
@ -790,7 +775,7 @@ Pipe MergeTreeDataSelectExecutor::readFromParts(
} }
else else
{ {
res = spreadMarkRangesAmongStreams( plan = spreadMarkRangesAmongStreams(
std::move(parts_with_ranges), std::move(parts_with_ranges),
num_streams, num_streams,
column_names_to_read, column_names_to_read,
@ -803,42 +788,52 @@ Pipe MergeTreeDataSelectExecutor::readFromParts(
reader_settings); reader_settings);
} }
if (!plan)
return std::make_unique<QueryPlan>();
if (use_sampling) if (use_sampling)
{ {
res.addSimpleTransform([&filter_expression, &filter_function](const Block & header) auto sampling_step = std::make_unique<FilterStep>(
{ plan->getCurrentDataStream(),
return std::make_shared<FilterTransform>( filter_expression,
header, filter_expression, filter_function->getColumnName(), false); filter_function->getColumnName(),
}); false);
sampling_step->setStepDescription("Sampling");
plan->addStep(std::move(sampling_step));
} }
if (result_projection) if (result_projection)
{ {
res.addSimpleTransform([&result_projection](const Block & header) auto projection_step = std::make_unique<ExpressionStep>(plan->getCurrentDataStream(), result_projection);
{ projection_step->setStepDescription("Remove unused columns after reading from storage");
return std::make_shared<ExpressionTransform>(header, result_projection); plan->addStep(std::move(projection_step));
});
} }
/// By the way, if a distributed query or query to a Merge table is made, then the `_sample_factor` column can have different values. /// By the way, if a distributed query or query to a Merge table is made, then the `_sample_factor` column can have different values.
if (sample_factor_column_queried) if (sample_factor_column_queried)
{ {
res.addSimpleTransform([used_sample_factor](const Block & header) ColumnWithTypeAndName column;
{ column.name = "_sample_factor";
return std::make_shared<AddingConstColumnTransform<Float64>>( column.type = std::make_shared<DataTypeFloat64>();
header, std::make_shared<DataTypeFloat64>(), used_sample_factor, "_sample_factor"); column.column = column.type->createColumnConst(0, Field(used_sample_factor));
});
auto adding_column = std::make_unique<AddingConstColumnStep>(plan->getCurrentDataStream(), std::move(column));
adding_column->setStepDescription("Add _sample_factor column");
plan->addStep(std::move(adding_column));
} }
if (query_info.prewhere_info && query_info.prewhere_info->remove_columns_actions) if (query_info.prewhere_info && query_info.prewhere_info->remove_columns_actions)
{ {
res.addSimpleTransform([&query_info](const Block & header) auto expression_step = std::make_unique<ExpressionStep>(
{ plan->getCurrentDataStream(),
return std::make_shared<ExpressionTransform>(header, query_info.prewhere_info->remove_columns_actions); query_info.prewhere_info->remove_columns_actions);
});
expression_step->setStepDescription("Remove unused columns after PREWHERE");
plan->addStep(std::move(expression_step));
} }
return res; return plan;
} }
namespace namespace
@ -863,8 +858,20 @@ size_t roundRowsOrBytesToMarks(
} }
static QueryPlanPtr createPlanFromPipe(Pipe pipe, const std::string & description = "")
{
auto plan = std::make_unique<QueryPlan>();
Pipe MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreams( std::string storage_name = "MergeTree";
if (!description.empty())
storage_name += ' ' + description;
auto step = std::make_unique<ReadFromStorageStep>(std::move(pipe), storage_name);
plan->addStep(std::move(step));
return plan;
}
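The helper above is the glue between the old Pipe-based reading code and the new QueryPlan API: a ready pipe becomes a plan with a single read step. A minimal standalone sketch of the same shape, with toy Pipe/Step/QueryPlan types standing in for the ClickHouse classes (all names below are illustrative):

#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Pipe { std::string source; };

struct Step
{
    std::string description;
    virtual ~Step() = default;
};

struct ReadFromPipeStep : Step
{
    Pipe pipe;
    ReadFromPipeStep(Pipe pipe_, std::string name) : pipe(std::move(pipe_)) { description = std::move(name); }
};

struct QueryPlan
{
    std::vector<std::unique_ptr<Step>> steps;
    void addStep(std::unique_ptr<Step> step) { steps.push_back(std::move(step)); }
};

using QueryPlanPtr = std::unique_ptr<QueryPlan>;

/// Same shape as createPlanFromPipe above: the plan's only step reads the prepared pipe.
static QueryPlanPtr planFromPipe(Pipe pipe, const std::string & description = "")
{
    auto plan = std::make_unique<QueryPlan>();
    std::string storage_name = "MergeTree";
    if (!description.empty())
        storage_name += ' ' + description;
    plan->addStep(std::make_unique<ReadFromPipeStep>(std::move(pipe), storage_name));
    return plan;
}

int main()
{
    auto plan = planFromPipe(Pipe{"marks 0..100"}, "with order");
    std::cout << plan->steps.front()->description << '\n';  /// prints "MergeTree with order"
}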
QueryPlanPtr MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreams(
RangesInDataParts && parts, RangesInDataParts && parts,
size_t num_streams, size_t num_streams,
const Names & column_names, const Names & column_names,
@ -958,7 +965,7 @@ Pipe MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreams(
res.emplace_back(std::move(source)); res.emplace_back(std::move(source));
} }
return Pipe::unitePipes(std::move(res)); return createPlanFromPipe(Pipe::unitePipes(std::move(res)));
} }
else else
{ {
@ -982,19 +989,18 @@ Pipe MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreams(
if (pipe.numOutputPorts() > 1) if (pipe.numOutputPorts() > 1)
pipe.addTransform(std::make_shared<ConcatProcessor>(pipe.getHeader(), pipe.numOutputPorts())); pipe.addTransform(std::make_shared<ConcatProcessor>(pipe.getHeader(), pipe.numOutputPorts()));
return pipe; return createPlanFromPipe(std::move(pipe));
} }
} }
static ExpressionActionsPtr createProjection(const Pipe & pipe, const MergeTreeData & data) static ExpressionActionsPtr createProjection(const Block & header, const MergeTreeData & data)
{ {
const auto & header = pipe.getHeader();
auto projection = std::make_shared<ExpressionActions>(header.getNamesAndTypesList(), data.global_context); auto projection = std::make_shared<ExpressionActions>(header.getNamesAndTypesList(), data.global_context);
projection->add(ExpressionAction::project(header.getNames())); projection->add(ExpressionAction::project(header.getNames()));
return projection; return projection;
} }
Pipe MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsWithOrder( QueryPlanPtr MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsWithOrder(
RangesInDataParts && parts, RangesInDataParts && parts,
size_t num_streams, size_t num_streams,
const Names & column_names, const Names & column_names,
@ -1095,7 +1101,8 @@ Pipe MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsWithOrder(
const size_t min_marks_per_stream = (sum_marks - 1) / num_streams + 1; const size_t min_marks_per_stream = (sum_marks - 1) / num_streams + 1;
bool need_preliminary_merge = (parts.size() > settings.read_in_order_two_level_merge_threshold); bool need_preliminary_merge = (parts.size() > settings.read_in_order_two_level_merge_threshold);
size_t max_output_ports = 0;
std::vector<QueryPlanPtr> plans;
for (size_t i = 0; i < num_streams && !parts.empty(); ++i) for (size_t i = 0; i < num_streams && !parts.empty(); ++i)
{ {
@ -1195,60 +1202,64 @@ Pipe MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsWithOrder(
} }
} }
auto pipe = Pipe::unitePipes(std::move(pipes)); auto plan = createPlanFromPipe(Pipe::unitePipes(std::move(pipes)), "with order");
if (input_order_info->direction != 1) if (input_order_info->direction != 1)
{ {
pipe.addSimpleTransform([](const Block & header) auto reverse_step = std::make_unique<ReverseRowsStep>(plan->getCurrentDataStream());
{ plan->addStep(std::move(reverse_step));
return std::make_shared<ReverseTransform>(header);
});
} }
max_output_ports = std::max(pipe.numOutputPorts(), max_output_ports); plans.emplace_back(std::move(plan));
res.emplace_back(std::move(pipe));
} }
if (need_preliminary_merge) if (need_preliminary_merge)
{ {
/// If ORDER BY clause of the query contains some expression, SortDescription sort_description;
/// then those new columns should be added for the merge step, for (size_t j = 0; j < input_order_info->order_key_prefix_descr.size(); ++j)
/// and this should be done always, if there is at least one pipe that sort_description.emplace_back(metadata_snapshot->getSortingKey().column_names[j],
/// has multiple output ports. input_order_info->direction, 1);
bool sorting_key_has_expression = sortingDescriptionHasExpressions(input_order_info->order_key_prefix_descr, metadata_snapshot);
bool force_sorting_key_transform = res.size() > 1 && max_output_ports > 1 && sorting_key_has_expression;
for (auto & pipe : res) for (auto & plan : plans)
{ {
SortDescription sort_description; /// Drop temporary columns, added by 'sorting_key_prefix_expr'
out_projection = createProjection(plan->getCurrentDataStream().header, data);
if (pipe.numOutputPorts() > 1 || force_sorting_key_transform) auto expression_step = std::make_unique<ExpressionStep>(
{ plan->getCurrentDataStream(),
for (size_t j = 0; j < input_order_info->order_key_prefix_descr.size(); ++j) sorting_key_prefix_expr);
sort_description.emplace_back(metadata_snapshot->getSortingKey().column_names[j],
input_order_info->direction, 1);
/// Drop temporary columns, added by 'sorting_key_prefix_expr' expression_step->setStepDescription("Calculate sorting key prefix");
out_projection = createProjection(pipe, data); plan->addStep(std::move(expression_step));
pipe.addSimpleTransform([sorting_key_prefix_expr](const Block & header)
{
return std::make_shared<ExpressionTransform>(header, sorting_key_prefix_expr);
});
}
if (pipe.numOutputPorts() > 1) auto merging_sorted = std::make_unique<MergingSortedStep>(
{ plan->getCurrentDataStream(),
pipe.addTransform(std::make_shared<MergingSortedTransform>( sort_description,
pipe.getHeader(), pipe.numOutputPorts(), sort_description, max_block_size)); max_block_size);
}
merging_sorted->setStepDescription("Merge sorting mark ranges");
plan->addStep(std::move(merging_sorted));
} }
} }
return Pipe::unitePipes(std::move(res)); if (plans.size() == 1)
return std::move(plans.front());
DataStreams input_streams;
for (const auto & plan : plans)
input_streams.emplace_back(plan->getCurrentDataStream());
const auto & common_header = plans.front()->getCurrentDataStream().header;
auto union_step = std::make_unique<UnionStep>(std::move(input_streams), common_header);
auto plan = std::make_unique<QueryPlan>();
plan->unitePlans(std::move(union_step), std::move(plans));
return plan;
} }
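The tail of spreadMarkRangesAmongStreamsWithOrder shows the recurring "one plan passes through unchanged, many plans get united under a UnionStep" shape. A minimal sketch of that branch, assuming a toy QueryPlan type (the real code also collects the input streams and a common header for the UnionStep):

#include <cassert>
#include <memory>
#include <utility>
#include <vector>

struct QueryPlan;
using QueryPlanPtr = std::unique_ptr<QueryPlan>;

struct QueryPlan
{
    std::vector<QueryPlanPtr> children;  /// sub-plans united under a (notional) union step

    /// Mirrors QueryPlan::unitePlans(step, plans): this plan becomes the union of the inputs.
    void unitePlans(std::vector<QueryPlanPtr> plans) { children = std::move(plans); }
};

QueryPlanPtr unitePlansOrReturnSingle(std::vector<QueryPlanPtr> plans)
{
    assert(!plans.empty());
    if (plans.size() == 1)
        return std::move(plans.front());  /// one stream: no union step is needed

    auto plan = std::make_unique<QueryPlan>();
    plan->unitePlans(std::move(plans));
    return plan;
}

int main()
{
    std::vector<QueryPlanPtr> plans;
    plans.push_back(std::make_unique<QueryPlan>());
    plans.push_back(std::make_unique<QueryPlan>());
    auto united = unitePlansOrReturnSingle(std::move(plans));
    assert(united->children.size() == 2);
}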
Pipe MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsFinal( QueryPlanPtr MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsFinal(
RangesInDataParts && parts, RangesInDataParts && parts,
size_t num_streams, size_t num_streams,
const Names & column_names, const Names & column_names,
@ -1316,11 +1327,11 @@ Pipe MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsFinal(
parts_to_merge_ranges.push_back(parts.end()); parts_to_merge_ranges.push_back(parts.end());
} }
Pipes partition_pipes; std::vector<QueryPlanPtr> partition_plans;
for (size_t range_index = 0; range_index < parts_to_merge_ranges.size() - 1; ++range_index) for (size_t range_index = 0; range_index < parts_to_merge_ranges.size() - 1; ++range_index)
{ {
Pipe pipe; QueryPlanPtr plan;
{ {
Pipes pipes; Pipes pipes;
@ -1346,12 +1357,17 @@ Pipe MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsFinal(
pipes.emplace_back(std::move(source_processor)); pipes.emplace_back(std::move(source_processor));
} }
pipe = Pipe::unitePipes(std::move(pipes)); if (pipes.empty())
} continue;
/// Drop temporary columns, added by 'sorting_key_expr' auto pipe = Pipe::unitePipes(std::move(pipes));
if (!out_projection)
out_projection = createProjection(pipe, data); /// Drop temporary columns, added by 'sorting_key_expr'
if (!out_projection)
out_projection = createProjection(pipe.getHeader(), data);
plan = createPlanFromPipe(std::move(pipe), "with final");
}
/// If do_not_merge_across_partitions_select_final is true and there is only one part in partition /// If do_not_merge_across_partitions_select_final is true and there is only one part in partition
/// with level > 0 then we won't postprocess this part /// with level > 0 then we won't postprocess this part
@ -1359,14 +1375,16 @@ Pipe MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsFinal(
std::distance(parts_to_merge_ranges[range_index], parts_to_merge_ranges[range_index + 1]) == 1 && std::distance(parts_to_merge_ranges[range_index], parts_to_merge_ranges[range_index + 1]) == 1 &&
parts_to_merge_ranges[range_index]->data_part->info.level > 0) parts_to_merge_ranges[range_index]->data_part->info.level > 0)
{ {
partition_pipes.emplace_back(std::move(pipe)); partition_plans.emplace_back(std::move(plan));
continue; continue;
} }
pipe.addSimpleTransform([&metadata_snapshot](const Block & header) auto expression_step = std::make_unique<ExpressionStep>(
{ plan->getCurrentDataStream(),
return std::make_shared<ExpressionTransform>(header, metadata_snapshot->getSortingKey().expression); metadata_snapshot->getSortingKey().expression);
});
expression_step->setStepDescription("Calculate sorting key expression");
plan->addStep(std::move(expression_step));
Names sort_columns = metadata_snapshot->getSortingKeyColumns(); Names sort_columns = metadata_snapshot->getSortingKeyColumns();
SortDescription sort_description; SortDescription sort_description;
@ -1375,111 +1393,40 @@ Pipe MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsFinal(
Names partition_key_columns = metadata_snapshot->getPartitionKey().column_names; Names partition_key_columns = metadata_snapshot->getPartitionKey().column_names;
Block header = pipe.getHeader(); const auto & header = plan->getCurrentDataStream().header;
for (size_t i = 0; i < sort_columns_size; ++i) for (size_t i = 0; i < sort_columns_size; ++i)
sort_description.emplace_back(header.getPositionByName(sort_columns[i]), 1, 1); sort_description.emplace_back(header.getPositionByName(sort_columns[i]), 1, 1);
auto get_merging_processor = [&]() -> MergingTransformPtr auto final_step = std::make_unique<MergingFinal>(
{ plan->getCurrentDataStream(),
switch (data.merging_params.mode) std::min<size_t>(num_streams, settings.max_final_threads),
{ sort_description,
case MergeTreeData::MergingParams::Ordinary: data.merging_params,
{ partition_key_columns,
return std::make_shared<MergingSortedTransform>(header, pipe.numOutputPorts(), sort_description, max_block_size); max_block_size);
}
case MergeTreeData::MergingParams::Collapsing: final_step->setStepDescription("Merge rows for FINAL");
return std::make_shared<CollapsingSortedTransform>( plan->addStep(std::move(final_step));
header, pipe.numOutputPorts(), sort_description, data.merging_params.sign_column, true, max_block_size);
case MergeTreeData::MergingParams::Summing: partition_plans.emplace_back(std::move(plan));
return std::make_shared<SummingSortedTransform>(
header,
pipe.numOutputPorts(),
sort_description,
data.merging_params.columns_to_sum,
partition_key_columns,
max_block_size);
case MergeTreeData::MergingParams::Aggregating:
return std::make_shared<AggregatingSortedTransform>(header, pipe.numOutputPorts(), sort_description, max_block_size);
case MergeTreeData::MergingParams::Replacing:
return std::make_shared<ReplacingSortedTransform>(
header, pipe.numOutputPorts(), sort_description, data.merging_params.version_column, max_block_size);
case MergeTreeData::MergingParams::VersionedCollapsing:
return std::make_shared<VersionedCollapsingTransform>(
header, pipe.numOutputPorts(), sort_description, data.merging_params.sign_column, max_block_size);
case MergeTreeData::MergingParams::Graphite:
throw Exception("GraphiteMergeTree doesn't support FINAL", ErrorCodes::LOGICAL_ERROR);
}
__builtin_unreachable();
};
if (num_streams <= 1 || sort_description.empty())
{
pipe.addTransform(get_merging_processor());
partition_pipes.emplace_back(std::move(pipe));
continue;
}
ColumnNumbers key_columns;
key_columns.reserve(sort_description.size());
for (auto & desc : sort_description)
{
if (!desc.column_name.empty())
key_columns.push_back(header.getPositionByName(desc.column_name));
else
key_columns.emplace_back(desc.column_number);
}
pipe.addSimpleTransform([&](const Block & stream_header)
{
return std::make_shared<AddingSelectorTransform>(stream_header, num_streams, key_columns);
});
pipe.transform([&](OutputPortRawPtrs ports)
{
Processors processors;
std::vector<OutputPorts::iterator> output_ports;
processors.reserve(ports.size() + num_streams);
output_ports.reserve(ports.size());
for (auto & port : ports)
{
auto copier = std::make_shared<CopyTransform>(header, num_streams);
connect(*port, copier->getInputPort());
output_ports.emplace_back(copier->getOutputs().begin());
processors.emplace_back(std::move(copier));
}
for (size_t i = 0; i < num_streams; ++i)
{
auto merge = get_merging_processor();
merge->setSelectorPosition(i);
auto input = merge->getInputs().begin();
/// Connect i-th merge with i-th input port of every copier.
for (size_t j = 0; j < ports.size(); ++j)
{
connect(*output_ports[j], *input);
++output_ports[j];
++input;
}
processors.emplace_back(std::move(merge));
}
return processors;
});
partition_pipes.emplace_back(std::move(pipe));
} }
return Pipe::unitePipes(std::move(partition_pipes)); if (partition_plans.empty())
return {};
if (partition_plans.size() == 1)
return std::move(partition_plans.front());
auto result_header = partition_plans.front()->getCurrentDataStream().header;
DataStreams input_streams;
for (const auto & partition_plan : partition_plans)
input_streams.push_back(partition_plan->getCurrentDataStream());
auto union_step = std::make_unique<UnionStep>(std::move(input_streams), result_header);
union_step->setStepDescription("Unite sources after FINAL");
QueryPlanPtr plan = std::make_unique<QueryPlan>();
plan->unitePlans(std::move(union_step), std::move(partition_plans));
return plan;
} }
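The per-algorithm switch deleted above does not disappear: it moves behind the new MergingFinal step. As a rough standalone illustration of the dispatch it encapsulates, with a toy enum and transform type rather than the real transforms:

#include <memory>
#include <stdexcept>
#include <string>

/// Toy counterpart of MergeTreeData::MergingParams::Mode.
enum class MergingMode { Ordinary, Collapsing, Summing, Aggregating, Replacing, VersionedCollapsing, Graphite };

struct MergingTransform { std::string name; };
using MergingTransformPtr = std::shared_ptr<MergingTransform>;

/// Same dispatch shape as the removed get_merging_processor lambda.
MergingTransformPtr makeMergingTransform(MergingMode mode)
{
    switch (mode)
    {
        case MergingMode::Ordinary:            return std::make_shared<MergingTransform>(MergingTransform{"MergingSorted"});
        case MergingMode::Collapsing:          return std::make_shared<MergingTransform>(MergingTransform{"CollapsingSorted"});
        case MergingMode::Summing:             return std::make_shared<MergingTransform>(MergingTransform{"SummingSorted"});
        case MergingMode::Aggregating:         return std::make_shared<MergingTransform>(MergingTransform{"AggregatingSorted"});
        case MergingMode::Replacing:           return std::make_shared<MergingTransform>(MergingTransform{"ReplacingSorted"});
        case MergingMode::VersionedCollapsing: return std::make_shared<MergingTransform>(MergingTransform{"VersionedCollapsingSorted"});
        case MergingMode::Graphite:            throw std::runtime_error("GraphiteMergeTree doesn't support FINAL");
    }
    __builtin_unreachable();
}

int main()
{
    return makeMergingTransform(MergingMode::Summing)->name == "SummingSorted" ? 0 : 1;
}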
/// Calculates a set of mark ranges, that could possibly contain keys, required by condition. /// Calculates a set of mark ranges, that could possibly contain keys, required by condition.

View File

@ -24,7 +24,7 @@ public:
*/ */
using PartitionIdToMaxBlock = std::unordered_map<String, Int64>; using PartitionIdToMaxBlock = std::unordered_map<String, Int64>;
Pipe read( QueryPlanPtr read(
const Names & column_names, const Names & column_names,
const StorageMetadataPtr & metadata_snapshot, const StorageMetadataPtr & metadata_snapshot,
const SelectQueryInfo & query_info, const SelectQueryInfo & query_info,
@ -33,7 +33,7 @@ public:
unsigned num_streams, unsigned num_streams,
const PartitionIdToMaxBlock * max_block_numbers_to_read = nullptr) const; const PartitionIdToMaxBlock * max_block_numbers_to_read = nullptr) const;
Pipe readFromParts( QueryPlanPtr readFromParts(
MergeTreeData::DataPartsVector parts, MergeTreeData::DataPartsVector parts,
const Names & column_names, const Names & column_names,
const StorageMetadataPtr & metadata_snapshot, const StorageMetadataPtr & metadata_snapshot,
@ -48,7 +48,7 @@ private:
Poco::Logger * log; Poco::Logger * log;
Pipe spreadMarkRangesAmongStreams( QueryPlanPtr spreadMarkRangesAmongStreams(
RangesInDataParts && parts, RangesInDataParts && parts,
size_t num_streams, size_t num_streams,
const Names & column_names, const Names & column_names,
@ -61,7 +61,7 @@ private:
const MergeTreeReaderSettings & reader_settings) const; const MergeTreeReaderSettings & reader_settings) const;
/// out_projection - save projection only with columns, requested to read /// out_projection - save projection only with columns, requested to read
Pipe spreadMarkRangesAmongStreamsWithOrder( QueryPlanPtr spreadMarkRangesAmongStreamsWithOrder(
RangesInDataParts && parts, RangesInDataParts && parts,
size_t num_streams, size_t num_streams,
const Names & column_names, const Names & column_names,
@ -75,7 +75,7 @@ private:
const MergeTreeReaderSettings & reader_settings, const MergeTreeReaderSettings & reader_settings,
ExpressionActionsPtr & out_projection) const; ExpressionActionsPtr & out_projection) const;
Pipe spreadMarkRangesAmongStreamsFinal( QueryPlanPtr spreadMarkRangesAmongStreamsFinal(
RangesInDataParts && parts, RangesInDataParts && parts,
size_t num_streams, size_t num_streams,
const Names & column_names, const Names & column_names,

View File

@ -35,9 +35,10 @@ struct ReplicatedMergeTreeMutationEntry
/// Replica which initiated mutation /// Replica which initiated mutation
String source_replica; String source_replica;
/// Accured numbers of blocks /// Acquired block numbers
/// partition_id -> block_number /// partition_id -> block_number
std::map<String, Int64> block_numbers; using BlockNumbersType = std::map<String, Int64>;
BlockNumbersType block_numbers;
/// Mutation commands which will give to MUTATE_PART entries /// Mutation commands which will give to MUTATE_PART entries
MutationCommands commands; MutationCommands commands;

View File

@ -3,6 +3,8 @@
#include <Storages/IStorage.h> #include <Storages/IStorage.h>
#include <Storages/MergeTree/IMergeTreeDataPart.h> #include <Storages/MergeTree/IMergeTreeDataPart.h>
#include <Storages/MergeTree/MergeTreeDataSelectExecutor.h> #include <Storages/MergeTree/MergeTreeDataSelectExecutor.h>
#include <Processors/QueryPlan/QueryPlan.h>
#include <Processors/QueryPipeline.h>
#include <Core/Defines.h> #include <Core/Defines.h>
#include <ext/shared_ptr_helper.h> #include <ext/shared_ptr_helper.h>
@ -27,8 +29,11 @@ public:
size_t max_block_size, size_t max_block_size,
unsigned num_streams) override unsigned num_streams) override
{ {
return MergeTreeDataSelectExecutor(part->storage) QueryPlan query_plan =
.readFromParts({part}, column_names, metadata_snapshot, query_info, context, max_block_size, num_streams); std::move(*MergeTreeDataSelectExecutor(part->storage)
.readFromParts({part}, column_names, metadata_snapshot, query_info, context, max_block_size, num_streams));
return query_plan.convertToPipe();
} }
@ -45,6 +50,16 @@ public:
return part->storage.getVirtuals(); return part->storage.getVirtuals();
} }
String getPartitionId() const
{
return part->info.partition_id;
}
String getPartitionIDFromQuery(const ASTPtr & ast, const Context & context) const
{
return part->storage.getPartitionIDFromQuery(ast, context);
}
protected: protected:
StorageFromMergeTreeDataPart(const MergeTreeData::DataPartPtr & part_) StorageFromMergeTreeDataPart(const MergeTreeData::DataPartPtr & part_)
: IStorage(getIDFromPart(part_)) : IStorage(getIDFromPart(part_))

View File

@ -2,11 +2,13 @@
#include <IO/Operators.h> #include <IO/Operators.h>
#include <Parsers/formatAST.h> #include <Parsers/formatAST.h>
#include <Parsers/ExpressionListParsers.h> #include <Parsers/ExpressionListParsers.h>
#include <Parsers/ASTColumnDeclaration.h>
#include <Parsers/ParserAlterQuery.h> #include <Parsers/ParserAlterQuery.h>
#include <Parsers/parseQuery.h> #include <Parsers/parseQuery.h>
#include <Parsers/ASTAssignment.h> #include <Parsers/ASTAssignment.h>
#include <Parsers/ASTColumnDeclaration.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h> #include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTLiteral.h>
#include <Common/typeid_cast.h> #include <Common/typeid_cast.h>
#include <Common/quoteString.h> #include <Common/quoteString.h>
#include <Core/Defines.h> #include <Core/Defines.h>
@ -32,6 +34,7 @@ std::optional<MutationCommand> MutationCommand::parse(ASTAlterCommand * command,
res.ast = command->ptr(); res.ast = command->ptr();
res.type = DELETE; res.type = DELETE;
res.predicate = command->predicate; res.predicate = command->predicate;
res.partition = command->partition;
return res; return res;
} }
else if (command->type == ASTAlterCommand::UPDATE) else if (command->type == ASTAlterCommand::UPDATE)
@ -40,6 +43,7 @@ std::optional<MutationCommand> MutationCommand::parse(ASTAlterCommand * command,
res.ast = command->ptr(); res.ast = command->ptr();
res.type = UPDATE; res.type = UPDATE;
res.predicate = command->predicate; res.predicate = command->predicate;
res.partition = command->partition;
for (const ASTPtr & assignment_ast : command->update_assignments->children) for (const ASTPtr & assignment_ast : command->update_assignments->children)
{ {
const auto & assignment = assignment_ast->as<ASTAssignment &>(); const auto & assignment = assignment_ast->as<ASTAssignment &>();
@ -124,6 +128,7 @@ std::shared_ptr<ASTAlterCommandList> MutationCommands::ast() const
return res; return res;
} }
void MutationCommands::writeText(WriteBuffer & out) const void MutationCommands::writeText(WriteBuffer & out) const
{ {
std::stringstream commands_ss; std::stringstream commands_ss;

View File

@ -43,8 +43,10 @@ struct MutationCommand
/// Columns with corresponding actions /// Columns with corresponding actions
std::unordered_map<String, ASTPtr> column_to_update_expression; std::unordered_map<String, ASTPtr> column_to_update_expression;
/// For MATERIALIZE INDEX /// For MATERIALIZE INDEX.
String index_name; String index_name;
/// For MATERIALIZE INDEX, UPDATE and DELETE.
ASTPtr partition; ASTPtr partition;
/// For reads, drops, etc. /// For reads, drops, etc.

View File

@ -4,7 +4,7 @@
#include <Interpreters/InterpreterAlterQuery.h> #include <Interpreters/InterpreterAlterQuery.h>
#include <Interpreters/castColumn.h> #include <Interpreters/castColumn.h>
#include <Interpreters/evaluateConstantExpression.h> #include <Interpreters/evaluateConstantExpression.h>
#include <Processors/Transforms/AddingMissedTransform.h> #include <Processors/QueryPlan/AddingMissedStep.h>
#include <DataStreams/IBlockInputStream.h> #include <DataStreams/IBlockInputStream.h>
#include <Storages/StorageBuffer.h> #include <Storages/StorageBuffer.h>
#include <Storages/StorageFactory.h> #include <Storages/StorageFactory.h>
@ -22,10 +22,13 @@
#include <common/logger_useful.h> #include <common/logger_useful.h>
#include <common/getThreadId.h> #include <common/getThreadId.h>
#include <ext/range.h> #include <ext/range.h>
#include <Processors/Transforms/ConvertingTransform.h> #include <Processors/QueryPlan/ConvertingStep.h>
#include <Processors/Transforms/FilterTransform.h> #include <Processors/Transforms/FilterTransform.h>
#include <Processors/Transforms/ExpressionTransform.h> #include <Processors/Transforms/ExpressionTransform.h>
#include <Processors/Sources/SourceFromInputStream.h> #include <Processors/Sources/SourceFromInputStream.h>
#include <Processors/QueryPlan/SettingQuotaAndLimitsStep.h>
#include <Processors/QueryPlan/ReadFromPreparedSource.h>
#include <Processors/QueryPlan/UnionStep.h>
namespace ProfileEvents namespace ProfileEvents
@ -147,6 +150,21 @@ QueryProcessingStage::Enum StorageBuffer::getQueryProcessingStage(const Context
Pipe StorageBuffer::read( Pipe StorageBuffer::read(
const Names & column_names,
const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
const size_t max_block_size,
const unsigned num_streams)
{
QueryPlan plan;
read(plan, column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams);
return plan.convertToPipe();
}
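This wrapper shape repeats for every storage touched below (Distributed, MaterializedView, MergeTree, ReplicatedMergeTree): the plan-filling overload becomes the primary entry point, and the Pipe overload is reduced to an adapter over QueryPlan::convertToPipe(). A minimal sketch of the pattern with toy Pipe/QueryPlan types (names are illustrative):

#include <cassert>

struct Pipe { int num_output_ports = 0; };

struct QueryPlan
{
    Pipe pipe;
    bool initialized = false;

    bool isInitialized() const { return initialized; }
    /// Mirrors QueryPlan::convertToPipe(): build the pipeline and hand back its pipe.
    Pipe convertToPipe() { return pipe; }
};

/// New-style entry point: fills a QueryPlan instead of returning a Pipe.
void read(QueryPlan & query_plan)
{
    query_plan.pipe = Pipe{4};
    query_plan.initialized = true;
}

/// Old-style entry point kept for callers that still expect a Pipe.
Pipe read()
{
    QueryPlan plan;
    read(plan);
    return plan.convertToPipe();
}

int main()
{
    assert(read().num_output_ports == 4);
}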
void StorageBuffer::read(
QueryPlan & query_plan,
const Names & column_names, const Names & column_names,
const StorageMetadataPtr & metadata_snapshot, const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info, SelectQueryInfo & query_info,
@ -155,8 +173,6 @@ Pipe StorageBuffer::read(
size_t max_block_size, size_t max_block_size,
unsigned num_streams) unsigned num_streams)
{ {
Pipe pipe_from_dst;
if (destination_id) if (destination_id)
{ {
auto destination = DatabaseCatalog::instance().getTable(destination_id, context); auto destination = DatabaseCatalog::instance().getTable(destination_id, context);
@ -182,8 +198,8 @@ Pipe StorageBuffer::read(
query_info.input_order_info = query_info.order_optimizer->getInputOrder(destination_metadata_snapshot); query_info.input_order_info = query_info.order_optimizer->getInputOrder(destination_metadata_snapshot);
/// The destination table has the same structure of the requested columns and we can simply read blocks from there. /// The destination table has the same structure of the requested columns and we can simply read blocks from there.
pipe_from_dst = destination->read( destination->read(
column_names, destination_metadata_snapshot, query_info, query_plan, column_names, destination_metadata_snapshot, query_info,
context, processed_stage, max_block_size, num_streams); context, processed_stage, max_block_size, num_streams);
} }
else else
@ -217,29 +233,49 @@ Pipe StorageBuffer::read(
} }
else else
{ {
pipe_from_dst = destination->read( destination->read(
columns_intersection, destination_metadata_snapshot, query_info, query_plan, columns_intersection, destination_metadata_snapshot, query_info,
context, processed_stage, max_block_size, num_streams); context, processed_stage, max_block_size, num_streams);
if (!pipe_from_dst.empty()) if (query_plan.isInitialized())
{ {
pipe_from_dst.addSimpleTransform([&](const Block & stream_header)
{
return std::make_shared<AddingMissedTransform>(stream_header, header_after_adding_defaults,
metadata_snapshot->getColumns(), context);
});
pipe_from_dst.addSimpleTransform([&](const Block & stream_header) auto adding_missed = std::make_unique<AddingMissedStep>(
{ query_plan.getCurrentDataStream(),
return std::make_shared<ConvertingTransform>( header_after_adding_defaults,
stream_header, header, ConvertingTransform::MatchColumnsMode::Name); metadata_snapshot->getColumns(), context);
});
adding_missed->setStepDescription("Add columns missing in destination table");
query_plan.addStep(std::move(adding_missed));
auto converting = std::make_unique<ConvertingStep>(
query_plan.getCurrentDataStream(),
header);
converting->setStepDescription("Convert destination table columns to Buffer table structure");
query_plan.addStep(std::move(converting));
} }
} }
} }
pipe_from_dst.addTableLock(destination_lock); if (query_plan.isInitialized())
pipe_from_dst.addStorageHolder(destination); {
StreamLocalLimits limits;
SizeLimits leaf_limits;
/// Add table lock for destination table.
auto adding_limits_and_quota = std::make_unique<SettingQuotaAndLimitsStep>(
query_plan.getCurrentDataStream(),
destination,
std::move(destination_lock),
limits,
leaf_limits,
nullptr,
nullptr);
adding_limits_and_quota->setStepDescription("Lock destination table for Buffer");
query_plan.addStep(std::move(adding_limits_and_quota));
}
} }
Pipe pipe_from_buffers; Pipe pipe_from_buffers;
@ -252,49 +288,73 @@ Pipe StorageBuffer::read(
pipe_from_buffers = Pipe::unitePipes(std::move(pipes_from_buffers)); pipe_from_buffers = Pipe::unitePipes(std::move(pipes_from_buffers));
} }
/// Convert pipes from table to structure from buffer. if (pipe_from_buffers.empty())
if (!pipe_from_buffers.empty() && !pipe_from_dst.empty() return;
&& !blocksHaveEqualStructure(pipe_from_buffers.getHeader(), pipe_from_dst.getHeader()))
{ QueryPlan buffers_plan;
pipe_from_dst.addSimpleTransform([&](const Block & header)
{
return std::make_shared<ConvertingTransform>(
header,
pipe_from_buffers.getHeader(),
ConvertingTransform::MatchColumnsMode::Name);
});
}
/** If the sources from the table were processed before some non-initial stage of query execution, /** If the sources from the table were processed before some non-initial stage of query execution,
* then sources from the buffers must also be wrapped in the processing pipeline before the same stage. * then sources from the buffers must also be wrapped in the processing pipeline before the same stage.
*/ */
if (processed_stage > QueryProcessingStage::FetchColumns) if (processed_stage > QueryProcessingStage::FetchColumns)
pipe_from_buffers = QueryPipeline::getPipe(
InterpreterSelectQuery(query_info.query, context, std::move(pipe_from_buffers),
SelectQueryOptions(processed_stage)).execute().pipeline);
if (query_info.prewhere_info)
{ {
pipe_from_buffers.addSimpleTransform([&](const Block & header) auto interpreter = InterpreterSelectQuery(
{ query_info.query, context, std::move(pipe_from_buffers),
return std::make_shared<FilterTransform>( SelectQueryOptions(processed_stage));
header, query_info.prewhere_info->prewhere_actions, interpreter.buildQueryPlan(buffers_plan);
query_info.prewhere_info->prewhere_column_name, query_info.prewhere_info->remove_prewhere_column); }
}); else
{
if (query_info.prewhere_info->alias_actions) if (query_info.prewhere_info)
{ {
pipe_from_buffers.addSimpleTransform([&](const Block & header) pipe_from_buffers.addSimpleTransform([&](const Block & header)
{ {
return std::make_shared<ExpressionTransform>(header, query_info.prewhere_info->alias_actions); return std::make_shared<FilterTransform>(
header, query_info.prewhere_info->prewhere_actions,
query_info.prewhere_info->prewhere_column_name, query_info.prewhere_info->remove_prewhere_column);
}); });
if (query_info.prewhere_info->alias_actions)
{
pipe_from_buffers.addSimpleTransform([&](const Block & header)
{
return std::make_shared<ExpressionTransform>(header, query_info.prewhere_info->alias_actions);
});
}
} }
auto read_from_buffers = std::make_unique<ReadFromPreparedSource>(std::move(pipe_from_buffers));
read_from_buffers->setStepDescription("Read from buffers of Buffer table");
buffers_plan.addStep(std::move(read_from_buffers));
} }
Pipes pipes; if (!query_plan.isInitialized())
pipes.emplace_back(std::move(pipe_from_dst)); {
pipes.emplace_back(std::move(pipe_from_buffers)); query_plan = std::move(buffers_plan);
return Pipe::unitePipes(std::move(pipes)); return;
}
auto result_header = buffers_plan.getCurrentDataStream().header;
/// Convert structure from table to structure from buffer.
if (!blocksHaveEqualStructure(query_plan.getCurrentDataStream().header, result_header))
{
auto converting = std::make_unique<ConvertingStep>(query_plan.getCurrentDataStream(), result_header);
query_plan.addStep(std::move(converting));
}
DataStreams input_streams;
input_streams.emplace_back(query_plan.getCurrentDataStream());
input_streams.emplace_back(buffers_plan.getCurrentDataStream());
std::vector<std::unique_ptr<QueryPlan>> plans;
plans.emplace_back(std::make_unique<QueryPlan>(std::move(query_plan)));
plans.emplace_back(std::make_unique<QueryPlan>(std::move(buffers_plan)));
query_plan = QueryPlan();
auto union_step = std::make_unique<UnionStep>(std::move(input_streams), result_header);
union_step->setStepDescription("Unite sources from Buffer table");
query_plan.unitePlans(std::move(union_step), std::move(plans));
} }
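Note that the final ConvertingStep is only inserted when the destination branch and the buffers branch disagree on block structure. A standalone sketch of that check, with a toy header type standing in for Block (convertByName is an illustrative approximation of by-name conversion, not the real ConvertingStep):

#include <algorithm>
#include <iostream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

/// Toy header: an ordered list of (column name, type name).
using Header = std::vector<std::pair<std::string, std::string>>;

bool headersHaveEqualStructure(const Header & lhs, const Header & rhs) { return lhs == rhs; }

/// Reorder the columns of `header` by name to match `target`.
Header convertByName(const Header & header, const Header & target)
{
    Header result;
    for (const auto & target_column : target)
    {
        auto it = std::find_if(header.begin(), header.end(),
                               [&](const auto & column) { return column.first == target_column.first; });
        if (it == header.end() || it->second != target_column.second)
            throw std::runtime_error("Cannot convert column " + target_column.first);
        result.push_back(target_column);
    }
    return result;
}

int main()
{
    Header from_destination = {{"x", "UInt64"}, {"y", "String"}};
    Header from_buffers = {{"y", "String"}, {"x", "UInt64"}};

    /// Only insert the conversion when the structures actually differ.
    if (!headersHaveEqualStructure(from_destination, from_buffers))
        from_destination = convertByName(from_destination, from_buffers);

    std::cout << from_destination.front().first << '\n';  /// "y"
}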

View File

@ -65,6 +65,16 @@ public:
size_t max_block_size, size_t max_block_size,
unsigned num_streams) override; unsigned num_streams) override;
void read(
QueryPlan & query_plan,
const Names & column_names,
const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
size_t max_block_size,
unsigned num_streams) override;
bool supportsParallelInsert() const override { return true; } bool supportsParallelInsert() const override { return true; }
BlockOutputStreamPtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, const Context & context) override; BlockOutputStreamPtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, const Context & context) override;

View File

@ -482,6 +482,21 @@ QueryProcessingStage::Enum StorageDistributed::getQueryProcessingStage(
} }
Pipe StorageDistributed::read( Pipe StorageDistributed::read(
const Names & column_names,
const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
const size_t max_block_size,
const unsigned num_streams)
{
QueryPlan plan;
read(plan, column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams);
return plan.convertToPipe();
}
void StorageDistributed::read(
QueryPlan & query_plan,
const Names & column_names, const Names & column_names,
const StorageMetadataPtr & metadata_snapshot, const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info, SelectQueryInfo & query_info,
@ -508,7 +523,8 @@ Pipe StorageDistributed::read(
: ClusterProxy::SelectStreamFactory( : ClusterProxy::SelectStreamFactory(
header, processed_stage, StorageID{remote_database, remote_table}, scalars, has_virtual_shard_num_column, context.getExternalTables()); header, processed_stage, StorageID{remote_database, remote_table}, scalars, has_virtual_shard_num_column, context.getExternalTables());
return ClusterProxy::executeQuery(select_stream_factory, log, modified_query_ast, context, query_info); ClusterProxy::executeQuery(query_plan, select_stream_factory, log,
modified_query_ast, context, query_info);
} }

View File

@ -62,6 +62,16 @@ public:
size_t max_block_size, size_t max_block_size,
unsigned num_streams) override; unsigned num_streams) override;
void read(
QueryPlan & query_plan,
const Names & column_names,
const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
size_t /*max_block_size*/,
unsigned /*num_streams*/) override;
bool supportsParallelInsert() const override { return true; } bool supportsParallelInsert() const override { return true; }
BlockOutputStreamPtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, const Context & context) override; BlockOutputStreamPtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, const Context & context) override;

View File

@ -22,6 +22,7 @@
#include <Common/typeid_cast.h> #include <Common/typeid_cast.h>
#include <Processors/Sources/SourceFromInputStream.h> #include <Processors/Sources/SourceFromInputStream.h>
#include <Processors/QueryPlan/SettingQuotaAndLimitsStep.h>
namespace DB namespace DB
@ -107,6 +108,21 @@ QueryProcessingStage::Enum StorageMaterializedView::getQueryProcessingStage(cons
} }
Pipe StorageMaterializedView::read( Pipe StorageMaterializedView::read(
const Names & column_names,
const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
const size_t max_block_size,
const unsigned num_streams)
{
QueryPlan plan;
read(plan, column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams);
return plan.convertToPipe();
}
void StorageMaterializedView::read(
QueryPlan & query_plan,
const Names & column_names, const Names & column_names,
const StorageMetadataPtr & /*metadata_snapshot*/, const StorageMetadataPtr & /*metadata_snapshot*/,
SelectQueryInfo & query_info, SelectQueryInfo & query_info,
@ -122,11 +138,26 @@ Pipe StorageMaterializedView::read(
if (query_info.order_optimizer) if (query_info.order_optimizer)
query_info.input_order_info = query_info.order_optimizer->getInputOrder(metadata_snapshot); query_info.input_order_info = query_info.order_optimizer->getInputOrder(metadata_snapshot);
Pipe pipe = storage->read(column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams); storage->read(query_plan, column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams);
pipe.addTableLock(lock);
pipe.addStorageHolder(storage);
return pipe; if (query_plan.isInitialized())
{
StreamLocalLimits limits;
SizeLimits leaf_limits;
/// Add table lock for the target table.
auto adding_limits_and_quota = std::make_unique<SettingQuotaAndLimitsStep>(
query_plan.getCurrentDataStream(),
storage,
std::move(lock),
limits,
leaf_limits,
nullptr,
nullptr);
adding_limits_and_quota->setStepDescription("Lock destination table for Buffer");
query_plan.addStep(std::move(adding_limits_and_quota));
}
} }
BlockOutputStreamPtr StorageMaterializedView::write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, const Context & context) BlockOutputStreamPtr StorageMaterializedView::write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, const Context & context)

View File

@ -81,6 +81,16 @@ public:
size_t max_block_size, size_t max_block_size,
unsigned num_streams) override; unsigned num_streams) override;
void read(
QueryPlan & query_plan,
const Names & column_names,
const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
size_t max_block_size,
unsigned num_streams) override;
Strings getDataPaths() const override; Strings getDataPaths() const override;
private: private:

View File

@ -332,10 +332,13 @@ Pipe StorageMerge::createSources(
if (has_table_virtual_column) if (has_table_virtual_column)
{ {
pipe.addSimpleTransform([name = table_name](const Block & stream_header) ColumnWithTypeAndName column;
column.name = "_table";
column.type = std::make_shared<DataTypeString>();
column.column = column.type->createColumnConst(0, Field(table_name));
pipe.addSimpleTransform([&](const Block & stream_header)
{ {
return std::make_shared<AddingConstColumnTransform<String>>( return std::make_shared<AddingConstColumnTransform>(stream_header, column);
stream_header, std::make_shared<DataTypeString>(), name, "_table");
}); });
} }

View File

@ -173,17 +173,32 @@ StorageMergeTree::~StorageMergeTree()
shutdown(); shutdown();
} }
Pipe StorageMergeTree::read( void StorageMergeTree::read(
QueryPlan & query_plan,
const Names & column_names, const Names & column_names,
const StorageMetadataPtr & metadata_snapshot, const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info, SelectQueryInfo & query_info,
const Context & context, const Context & context,
QueryProcessingStage::Enum /*processed_stage*/, QueryProcessingStage::Enum /*processed_stage*/,
size_t max_block_size,
unsigned num_streams)
{
if (auto plan = reader.read(column_names, metadata_snapshot, query_info, context, max_block_size, num_streams))
query_plan = std::move(*plan);
}
Pipe StorageMergeTree::read(
const Names & column_names,
const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
const size_t max_block_size, const size_t max_block_size,
const unsigned num_streams) const unsigned num_streams)
{ {
return reader.read(column_names, metadata_snapshot, query_info, QueryPlan plan;
context, max_block_size, num_streams); read(plan, column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams);
return plan.convertToPipe();
} }
std::optional<UInt64> StorageMergeTree::totalRows() const std::optional<UInt64> StorageMergeTree::totalRows() const

View File

@ -46,6 +46,16 @@ public:
size_t max_block_size, size_t max_block_size,
unsigned num_streams) override; unsigned num_streams) override;
void read(
QueryPlan & query_plan,
const Names & column_names,
const StorageMetadataPtr & /*metadata_snapshot*/,
SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
size_t max_block_size,
unsigned num_streams) override;
std::optional<UInt64> totalRows() const override; std::optional<UInt64> totalRows() const override;
std::optional<UInt64> totalRowsByPartitionPredicate(const SelectQueryInfo &, const Context &) const override; std::optional<UInt64> totalRowsByPartitionPredicate(const SelectQueryInfo &, const Context &) const override;
std::optional<UInt64> totalBytes() const override; std::optional<UInt64> totalBytes() const override;

View File

@ -3643,7 +3643,9 @@ ReplicatedMergeTreeQuorumAddedParts::PartitionIdToMaxBlock StorageReplicatedMerg
return max_added_blocks; return max_added_blocks;
} }
Pipe StorageReplicatedMergeTree::read(
void StorageReplicatedMergeTree::read(
QueryPlan & query_plan,
const Names & column_names, const Names & column_names,
const StorageMetadataPtr & metadata_snapshot, const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info, SelectQueryInfo & query_info,
@ -3660,10 +3662,27 @@ Pipe StorageReplicatedMergeTree::read(
if (context.getSettingsRef().select_sequential_consistency) if (context.getSettingsRef().select_sequential_consistency)
{ {
auto max_added_blocks = getMaxAddedBlocks(); auto max_added_blocks = getMaxAddedBlocks();
return reader.read(column_names, metadata_snapshot, query_info, context, max_block_size, num_streams, &max_added_blocks); if (auto plan = reader.read(column_names, metadata_snapshot, query_info, context, max_block_size, num_streams, &max_added_blocks))
query_plan = std::move(*plan);
return;
} }
return reader.read(column_names, metadata_snapshot, query_info, context, max_block_size, num_streams); if (auto plan = reader.read(column_names, metadata_snapshot, query_info, context, max_block_size, num_streams))
query_plan = std::move(*plan);
}
Pipe StorageReplicatedMergeTree::read(
const Names & column_names,
const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
const size_t max_block_size,
const unsigned num_streams)
{
QueryPlan plan;
read(plan, column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams);
return plan.convertToPipe();
} }
@ -3915,6 +3934,60 @@ bool StorageReplicatedMergeTree::executeMetadataAlter(const StorageReplicatedMer
} }
std::set<String> StorageReplicatedMergeTree::getPartitionIdsAffectedByCommands(
const MutationCommands & commands, const Context & query_context) const
{
std::set<String> affected_partition_ids;
for (const auto & command : commands)
{
if (!command.partition)
{
affected_partition_ids.clear();
break;
}
affected_partition_ids.insert(
getPartitionIDFromQuery(command.partition, query_context)
);
}
return affected_partition_ids;
}
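The semantics here: an empty result set means the mutation is unrestricted and touches all partitions, because any single command without an IN PARTITION clause makes the whole mutation table-wide. A standalone sketch of the same logic with a toy command type:

#include <iostream>
#include <optional>
#include <set>
#include <string>
#include <vector>

/// Toy command: partition is empty when the command applies to the whole table.
struct MutationCommand { std::optional<std::string> partition; };

/// Mirrors getPartitionIdsAffectedByCommands(): an empty result means "all partitions".
std::set<std::string> affectedPartitionIds(const std::vector<MutationCommand> & commands)
{
    std::set<std::string> ids;
    for (const auto & command : commands)
    {
        if (!command.partition)
        {
            ids.clear();  /// one unrestricted command makes the whole mutation unrestricted
            break;
        }
        ids.insert(*command.partition);
    }
    return ids;
}

int main()
{
    std::vector<MutationCommand> commands{{std::string("202011")}, {std::nullopt}};
    std::cout << affectedPartitionIds(commands).size() << '\n';  /// 0, i.e. all partitions
}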
PartitionBlockNumbersHolder StorageReplicatedMergeTree::allocateBlockNumbersInAffectedPartitions(
const MutationCommands & commands, const Context & query_context, const zkutil::ZooKeeperPtr & zookeeper) const
{
const std::set<String> mutation_affected_partition_ids = getPartitionIdsAffectedByCommands(commands, query_context);
if (mutation_affected_partition_ids.size() == 1)
{
const auto & affected_partition_id = *mutation_affected_partition_ids.cbegin();
auto block_number_holder = allocateBlockNumber(affected_partition_id, zookeeper);
if (!block_number_holder.has_value())
return {};
auto block_number = block_number_holder->getNumber(); /// Avoid possible UB due to std::move
return {{{affected_partition_id, block_number}}, std::move(block_number_holder)};
}
else
{
/// TODO: Implement an optimal block number acquisition algorithm in multiple (but not all) partitions
EphemeralLocksInAllPartitions lock_holder(
zookeeper_path + "/block_numbers", "block-", zookeeper_path + "/temp", *zookeeper);
PartitionBlockNumbersHolder::BlockNumbersType block_numbers;
for (const auto & lock : lock_holder.getLocks())
{
if (mutation_affected_partition_ids.empty() || mutation_affected_partition_ids.count(lock.partition_id))
block_numbers[lock.partition_id] = lock.number;
}
return {std::move(block_numbers), std::move(lock_holder)};
}
}
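A minimal sketch of the holder contract this function returns: the block numbers are copied into the mutation entry while the underlying ephemeral locks stay held, and reset() is called only after the ZooKeeper commit (toy struct; the real PartitionBlockNumbersHolder owns the ephemeral lock objects):

#include <cassert>
#include <map>
#include <string>

/// Toy stand-in for PartitionBlockNumbersHolder: the allocated numbers plus a flag
/// representing the ephemeral lock(s) that keep them reserved.
struct PartitionBlockNumbersHolder
{
    using BlockNumbersType = std::map<std::string, long>;

    BlockNumbersType block_numbers;
    bool locks_held = false;

    const BlockNumbersType & getBlockNumbers() const { return block_numbers; }
    void reset() { locks_held = false; block_numbers.clear(); }  /// release only after the ZK commit
};

int main()
{
    PartitionBlockNumbersHolder holder{{{"202011", 7}}, true};
    auto numbers = holder.getBlockNumbers();  /// copied into mutation_entry.block_numbers
    /// ... zookeeper->tryMulti(ops, results) commits the mutation entry here ...
    holder.reset();                           /// now the block-number locks may be released
    assert(numbers.at("202011") == 7 && !holder.locks_held);
}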
void StorageReplicatedMergeTree::alter( void StorageReplicatedMergeTree::alter(
const AlterCommands & commands, const Context & query_context, TableLockHolder & table_lock_holder) const AlterCommands & commands, const Context & query_context, TableLockHolder & table_lock_holder)
{ {
@ -3942,7 +4015,7 @@ void StorageReplicatedMergeTree::alter(
return queryToString(query); return queryToString(query);
}; };
auto zookeeper = getZooKeeper(); const auto zookeeper = getZooKeeper();
std::optional<ReplicatedMergeTreeLogEntryData> alter_entry; std::optional<ReplicatedMergeTreeLogEntryData> alter_entry;
std::optional<String> mutation_znode; std::optional<String> mutation_znode;
@ -3953,10 +4026,6 @@ void StorageReplicatedMergeTree::alter(
alter_entry.emplace(); alter_entry.emplace();
mutation_znode.reset(); mutation_znode.reset();
/// We can safely read structure, because we guarded with alter_intention_lock
if (is_readonly)
throw Exception("Can't ALTER readonly table", ErrorCodes::TABLE_IS_READ_ONLY);
auto current_metadata = getInMemoryMetadataPtr(); auto current_metadata = getInMemoryMetadataPtr();
StorageInMemoryMetadata future_metadata = *current_metadata; StorageInMemoryMetadata future_metadata = *current_metadata;
@ -4029,27 +4098,23 @@ void StorageReplicatedMergeTree::alter(
ops.emplace_back(zkutil::makeCreateRequest( ops.emplace_back(zkutil::makeCreateRequest(
zookeeper_path + "/log/log-", alter_entry->toString(), zkutil::CreateMode::PersistentSequential)); zookeeper_path + "/log/log-", alter_entry->toString(), zkutil::CreateMode::PersistentSequential));
std::optional<EphemeralLocksInAllPartitions> lock_holder; PartitionBlockNumbersHolder partition_block_numbers_holder;
/// Now we will prepare mutations record.
/// This code pretty same with mutate() function but process results slightly differently.
if (alter_entry->have_mutation) if (alter_entry->have_mutation)
{ {
String mutations_path = zookeeper_path + "/mutations"; const String mutations_path(zookeeper_path + "/mutations");
ReplicatedMergeTreeMutationEntry mutation_entry; ReplicatedMergeTreeMutationEntry mutation_entry;
mutation_entry.source_replica = replica_name;
mutation_entry.commands = maybe_mutation_commands;
mutation_entry.alter_version = new_metadata_version; mutation_entry.alter_version = new_metadata_version;
mutation_entry.source_replica = replica_name;
mutation_entry.commands = std::move(maybe_mutation_commands);
Coordination::Stat mutations_stat; Coordination::Stat mutations_stat;
zookeeper->get(mutations_path, &mutations_stat); zookeeper->get(mutations_path, &mutations_stat);
lock_holder.emplace( partition_block_numbers_holder =
zookeeper_path + "/block_numbers", "block-", zookeeper_path + "/temp", *zookeeper); allocateBlockNumbersInAffectedPartitions(mutation_entry.commands, query_context, zookeeper);
for (const auto & lock : lock_holder->getLocks())
mutation_entry.block_numbers[lock.partition_id] = lock.number;
mutation_entry.block_numbers = partition_block_numbers_holder.getBlockNumbers();
mutation_entry.create_time = time(nullptr); mutation_entry.create_time = time(nullptr);
ops.emplace_back(zkutil::makeSetRequest(mutations_path, String(), mutations_stat.version)); ops.emplace_back(zkutil::makeSetRequest(mutations_path, String(), mutations_stat.version));
@ -4060,6 +4125,11 @@ void StorageReplicatedMergeTree::alter(
Coordination::Responses results; Coordination::Responses results;
Coordination::Error rc = zookeeper->tryMulti(ops, results); Coordination::Error rc = zookeeper->tryMulti(ops, results);
/// For consistency with the mechanics of the concurrent background process that assigns merge tasks to parts,
/// this holder must be kept alive until the moment the mutation entry is committed to ZooKeeper.
/// See ReplicatedMergeTreeMergePredicate::canMergeTwoParts()
partition_block_numbers_holder.reset();
if (rc == Coordination::Error::ZOK) if (rc == Coordination::Error::ZOK)
{ {
if (alter_entry->have_mutation) if (alter_entry->have_mutation)
@ -4398,7 +4468,7 @@ void StorageReplicatedMergeTree::rename(const String & new_path_to_table_data, c
} }
bool StorageReplicatedMergeTree::existsNodeCached(const std::string & path) bool StorageReplicatedMergeTree::existsNodeCached(const std::string & path) const
{ {
{ {
std::lock_guard lock(existing_nodes_cache_mutex); std::lock_guard lock(existing_nodes_cache_mutex);
@ -4420,7 +4490,7 @@ bool StorageReplicatedMergeTree::existsNodeCached(const std::string & path)
std::optional<EphemeralLockInZooKeeper> std::optional<EphemeralLockInZooKeeper>
StorageReplicatedMergeTree::allocateBlockNumber( StorageReplicatedMergeTree::allocateBlockNumber(
const String & partition_id, zkutil::ZooKeeperPtr & zookeeper, const String & zookeeper_block_id_path) const String & partition_id, const zkutil::ZooKeeperPtr & zookeeper, const String & zookeeper_block_id_path) const
{ {
/// Lets check for duplicates in advance, to avoid superfluous block numbers allocation /// Lets check for duplicates in advance, to avoid superfluous block numbers allocation
Coordination::Requests deduplication_check_ops; Coordination::Requests deduplication_check_ops;
@ -5063,44 +5133,46 @@ void StorageReplicatedMergeTree::mutate(const MutationCommands & commands, const
/// After all needed parts are mutated (i.e. all active parts have the mutation version greater than /// After all needed parts are mutated (i.e. all active parts have the mutation version greater than
/// the version of this mutation), the mutation is considered done and can be deleted. /// the version of this mutation), the mutation is considered done and can be deleted.
ReplicatedMergeTreeMutationEntry entry; ReplicatedMergeTreeMutationEntry mutation_entry;
entry.source_replica = replica_name; mutation_entry.source_replica = replica_name;
entry.commands = commands; mutation_entry.commands = commands;
String mutations_path = zookeeper_path + "/mutations"; const String mutations_path = zookeeper_path + "/mutations";
const auto zookeeper = getZooKeeper();
/// Update the mutations_path node when creating the mutation and check its version to ensure that /// Update the mutations_path node when creating the mutation and check its version to ensure that
/// nodes for mutations are created in the same order as the corresponding block numbers. /// nodes for mutations are created in the same order as the corresponding block numbers.
/// Should work well if the number of concurrent mutation requests is small. /// Should work well if the number of concurrent mutation requests is small.
while (true) while (true)
{ {
auto zookeeper = getZooKeeper();
Coordination::Stat mutations_stat; Coordination::Stat mutations_stat;
zookeeper->get(mutations_path, &mutations_stat); zookeeper->get(mutations_path, &mutations_stat);
EphemeralLocksInAllPartitions block_number_locks( PartitionBlockNumbersHolder partition_block_numbers_holder =
zookeeper_path + "/block_numbers", "block-", zookeeper_path + "/temp", *zookeeper); allocateBlockNumbersInAffectedPartitions(mutation_entry.commands, query_context, zookeeper);
for (const auto & lock : block_number_locks.getLocks()) mutation_entry.block_numbers = partition_block_numbers_holder.getBlockNumbers();
entry.block_numbers[lock.partition_id] = lock.number; mutation_entry.create_time = time(nullptr);
entry.create_time = time(nullptr);
/// The following version check guarantees the linearizability property for any pair of mutations:
/// a mutation with a higher sequence number is guaranteed to have higher block numbers in every partition
/// (and thus mutations are applied strictly in the order of their sequence numbers)
Coordination::Requests requests; Coordination::Requests requests;
requests.emplace_back(zkutil::makeSetRequest(mutations_path, String(), mutations_stat.version)); requests.emplace_back(zkutil::makeSetRequest(mutations_path, String(), mutations_stat.version));
requests.emplace_back(zkutil::makeCreateRequest( requests.emplace_back(zkutil::makeCreateRequest(
mutations_path + "/", entry.toString(), zkutil::CreateMode::PersistentSequential)); mutations_path + "/", mutation_entry.toString(), zkutil::CreateMode::PersistentSequential));
Coordination::Responses responses; Coordination::Responses responses;
Coordination::Error rc = zookeeper->tryMulti(requests, responses); Coordination::Error rc = zookeeper->tryMulti(requests, responses);
partition_block_numbers_holder.reset();
if (rc == Coordination::Error::ZOK) if (rc == Coordination::Error::ZOK)
{ {
const String & path_created = const String & path_created =
dynamic_cast<const Coordination::CreateResponse *>(responses[1].get())->path_created; dynamic_cast<const Coordination::CreateResponse *>(responses[1].get())->path_created;
entry.znode_name = path_created.substr(path_created.find_last_of('/') + 1); mutation_entry.znode_name = path_created.substr(path_created.find_last_of('/') + 1);
LOG_TRACE(log, "Created mutation with ID {}", entry.znode_name); LOG_TRACE(log, "Created mutation with ID {}", mutation_entry.znode_name);
break; break;
} }
else if (rc == Coordination::Error::ZBADVERSION) else if (rc == Coordination::Error::ZBADVERSION)
@ -5112,7 +5184,7 @@ void StorageReplicatedMergeTree::mutate(const MutationCommands & commands, const
throw Coordination::Exception("Unable to create a mutation znode", rc); throw Coordination::Exception("Unable to create a mutation znode", rc);
} }
waitMutation(entry.znode_name, query_context.getSettingsRef().mutations_sync); waitMutation(mutation_entry.znode_name, query_context.getSettingsRef().mutations_sync);
} }
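The surrounding loop is a classic optimistic compare-and-set on the mutations node version: observe the version, allocate block numbers, try the multi-op, and retry on ZBADVERSION. A toy sketch of that retry shape (illustrative stand-ins for the ZooKeeper calls):

#include <iostream>

/// Toy stand-in for the versioned mutations znode.
struct VersionedNode { int version = 0; };

enum class Error { OK, BADVERSION };

/// A set request that succeeds only against the version the caller observed
/// (the zkutil::makeSetRequest(mutations_path, ..., version) part of the multi-op).
Error trySetWithVersion(VersionedNode & node, int expected_version)
{
    if (node.version != expected_version)
        return Error::BADVERSION;
    ++node.version;
    return Error::OK;
}

int main()
{
    VersionedNode mutations_node;
    while (true)
    {
        int observed = mutations_node.version;      /// zookeeper->get(mutations_path, &mutations_stat)
        /// ... allocate block numbers for the mutation here ...
        if (trySetWithVersion(mutations_node, observed) == Error::OK)
            break;                                  /// entry is created in order with its block numbers
        std::cout << "ZBADVERSION, retrying\n";     /// another mutation won the race
    }
}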
void StorageReplicatedMergeTree::waitMutation(const String & znode_name, size_t mutations_sync) const void StorageReplicatedMergeTree::waitMutation(const String & znode_name, size_t mutations_sync) const

View File

@ -96,6 +96,16 @@ public:
size_t max_block_size, size_t max_block_size,
unsigned num_streams) override; unsigned num_streams) override;
void read(
QueryPlan & query_plan,
const Names & column_names,
const StorageMetadataPtr & /*metadata_snapshot*/,
SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
size_t max_block_size,
unsigned num_streams) override;
std::optional<UInt64> totalRows() const override; std::optional<UInt64> totalRows() const override;
std::optional<UInt64> totalRowsByPartitionPredicate(const SelectQueryInfo & query_info, const Context & context) const override; std::optional<UInt64> totalRowsByPartitionPredicate(const SelectQueryInfo & query_info, const Context & context) const override;
std::optional<UInt64> totalBytes() const override; std::optional<UInt64> totalBytes() const override;
@ -506,8 +516,8 @@ private:
/// Creates a new block number if a block with such a block_id does not exist /// Creates a new block number if a block with such a block_id does not exist
std::optional<EphemeralLockInZooKeeper> allocateBlockNumber( std::optional<EphemeralLockInZooKeeper> allocateBlockNumber(
const String & partition_id, zkutil::ZooKeeperPtr & zookeeper, const String & partition_id, const zkutil::ZooKeeperPtr & zookeeper,
const String & zookeeper_block_id_path = ""); const String & zookeeper_block_id_path = "") const;
/** Wait until all replicas, including this one, execute the specified action from the log. /** Wait until all replicas, including this one, execute the specified action from the log.
* If replicas are added at the same time, it may not wait for the newly added replica. * If replicas are added at the same time, it may not wait for the newly added replica.
@ -531,9 +541,9 @@ private:
bool getFakePartCoveringAllPartsInPartition(const String & partition_id, MergeTreePartInfo & part_info, bool for_replace_partition = false); bool getFakePartCoveringAllPartsInPartition(const String & partition_id, MergeTreePartInfo & part_info, bool for_replace_partition = false);
/// Check for a node in ZK. If it exists, remember this information, and then immediately answer true on subsequent checks. /// Check for a node in ZK. If it exists, remember this information, and then immediately answer true on subsequent checks.
std::unordered_set<std::string> existing_nodes_cache; mutable std::unordered_set<std::string> existing_nodes_cache;
std::mutex existing_nodes_cache_mutex; mutable std::mutex existing_nodes_cache_mutex;
bool existsNodeCached(const std::string & path); bool existsNodeCached(const std::string & path) const;
void getClearBlocksInPartitionOps(Coordination::Requests & ops, zkutil::ZooKeeper & zookeeper, const String & partition_id, Int64 min_block_num, Int64 max_block_num); void getClearBlocksInPartitionOps(Coordination::Requests & ops, zkutil::ZooKeeper & zookeeper, const String & partition_id, Int64 min_block_num, Int64 max_block_num);
/// Remove block IDs from `blocks/` in ZooKeeper for the given partition ID in the given block number range. /// Remove block IDs from `blocks/` in ZooKeeper for the given partition ID in the given block number range.
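The const-qualification of existsNodeCached works because the cache and its mutex become mutable, the standard way to memoize inside a logically const method. A minimal self-contained illustration of the pattern (plain C++, not ClickHouse code; NodeChecker and checkRemote are hypothetical stand-ins):

    #include <mutex>
    #include <string>
    #include <unordered_set>

    class NodeChecker
    {
    public:
        /// Logically const: callers only ask a question, but we may memoize.
        bool existsNodeCached(const std::string & path) const
        {
            {
                std::lock_guard<std::mutex> lock(cache_mutex);
                if (existing_nodes_cache.count(path))
                    return true;  /// answered from the memoized set
            }
            bool exists = checkRemote(path);
            if (exists)
            {
                std::lock_guard<std::mutex> lock(cache_mutex);
                existing_nodes_cache.insert(path);
            }
            return exists;
        }

    private:
        /// Hypothetical stand-in for the actual ZooKeeper round-trip.
        bool checkRemote(const std::string &) const { return true; }

        /// 'mutable' is what lets the const method above update the memoized state.
        mutable std::unordered_set<std::string> existing_nodes_cache;
        mutable std::mutex cache_mutex;
    };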
@ -565,6 +575,11 @@ private:
MutationCommands getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const override; MutationCommands getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const override;
void startBackgroundMovesIfNeeded() override; void startBackgroundMovesIfNeeded() override;
std::set<String> getPartitionIdsAffectedByCommands(const MutationCommands & commands, const Context & query_context) const;
PartitionBlockNumbersHolder allocateBlockNumbersInAffectedPartitions(
const MutationCommands & commands, const Context & query_context, const zkutil::ZooKeeperPtr & zookeeper) const;
protected: protected:
/** If not 'attach', either creates a new table in ZK, or adds a replica to an existing table. /** If not 'attach', either creates a new table in ZK, or adds a replica to an existing table.
*/ */
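The bodies of getPartitionIdsAffectedByCommands and allocateBlockNumbersInAffectedPartitions are not part of this hunk. As a rough, self-contained sketch of the policy their names and the mutate() call site suggest (every identifier below is hypothetical, not ClickHouse API): when every command carries an IN PARTITION clause, block numbers are allocated only in those partitions; otherwise they are allocated in all partitions.

    #include <map>
    #include <set>
    #include <string>

    using BlockNumbers = std::map<std::string, long>;  // partition_id -> block number

    /// Illustration only: the real allocator takes ephemeral ZooKeeper locks;
    /// a plain counter stands in for the per-partition sequence node here.
    BlockNumbers allocateInAffectedPartitions(
        const std::set<std::string> & affected_partitions,  // empty => not restricted
        const std::set<std::string> & all_partitions,
        long & next_block_number)
    {
        const auto & to_lock =
            affected_partitions.empty() ? all_partitions : affected_partitions;

        BlockNumbers result;
        for (const auto & partition_id : to_lock)
            result[partition_id] = next_block_number++;  // one fresh number per partition
        return result;
    }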

View File

@ -16,6 +16,9 @@
#include <Processors/Pipe.h> #include <Processors/Pipe.h>
#include <Processors/Transforms/MaterializingTransform.h> #include <Processors/Transforms/MaterializingTransform.h>
#include <Processors/Transforms/ConvertingTransform.h> #include <Processors/Transforms/ConvertingTransform.h>
#include <Processors/QueryPlan/MaterializingStep.h>
#include <Processors/QueryPlan/ConvertingStep.h>
#include <Processors/QueryPlan/SettingQuotaAndLimitsStep.h>
namespace DB namespace DB
{ {
@ -52,12 +55,25 @@ Pipe StorageView::read(
const StorageMetadataPtr & metadata_snapshot, const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info, SelectQueryInfo & query_info,
const Context & context, const Context & context,
QueryProcessingStage::Enum /*processed_stage*/, QueryProcessingStage::Enum processed_stage,
const size_t /*max_block_size*/, const size_t max_block_size,
const unsigned /*num_streams*/) const unsigned num_streams)
{ {
Pipes pipes; QueryPlan plan;
read(plan, column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams);
return plan.convertToPipe();
}
void StorageView::read(
QueryPlan & query_plan,
const Names & column_names,
const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum /*processed_stage*/,
const size_t /*max_block_size*/,
const unsigned /*num_streams*/)
{
ASTPtr current_inner_query = metadata_snapshot->getSelectQuery().inner_query; ASTPtr current_inner_query = metadata_snapshot->getSelectQuery().inner_query;
if (query_info.view_query) if (query_info.view_query)
@ -68,25 +84,19 @@ Pipe StorageView::read(
} }
InterpreterSelectWithUnionQuery interpreter(current_inner_query, context, {}, column_names); InterpreterSelectWithUnionQuery interpreter(current_inner_query, context, {}, column_names);
interpreter.buildQueryPlan(query_plan);
auto pipeline = interpreter.execute().pipeline;
/// It's expected that the columns read from storage are not constant, /// It's expected that the columns read from storage are not constant,
/// because the method 'getSampleBlockForColumns' is used to obtain the structure of the result in InterpreterSelectQuery. /// because the method 'getSampleBlockForColumns' is used to obtain the structure of the result in InterpreterSelectQuery.
pipeline.addSimpleTransform([](const Block & header) auto materializing = std::make_unique<MaterializingStep>(query_plan.getCurrentDataStream());
{ materializing->setStepDescription("Materialize constants after VIEW subquery");
return std::make_shared<MaterializingTransform>(header); query_plan.addStep(std::move(materializing));
});
/// And also convert to expected structure. /// And also convert to expected structure.
pipeline.addSimpleTransform([&](const Block & header) auto header = metadata_snapshot->getSampleBlockForColumns(column_names, getVirtuals(), getStorageID());
{ auto converting = std::make_unique<ConvertingStep>(query_plan.getCurrentDataStream(), header);
return std::make_shared<ConvertingTransform>( converting->setStepDescription("Convert VIEW subquery result to VIEW table structure");
header, metadata_snapshot->getSampleBlockForColumns( query_plan.addStep(std::move(converting));
column_names, getVirtuals(), getStorageID()), ConvertingTransform::MatchColumnsMode::Name);
});
return QueryPipeline::getPipe(std::move(pipeline));
} }
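Condensed from the interleaved diff above, the new QueryPlan-based body of StorageView::read reads as one piece like this (it simply restates the right-hand column and assumes the surrounding ClickHouse context):

    InterpreterSelectWithUnionQuery interpreter(current_inner_query, context, {}, column_names);
    interpreter.buildQueryPlan(query_plan);

    /// Constants must be materialized, because InterpreterSelectQuery derives
    /// the result structure via getSampleBlockForColumns, which assumes
    /// non-constant columns coming from storage.
    auto materializing = std::make_unique<MaterializingStep>(query_plan.getCurrentDataStream());
    materializing->setStepDescription("Materialize constants after VIEW subquery");
    query_plan.addStep(std::move(materializing));

    /// Then convert the subquery result to the structure the VIEW declares.
    auto header = metadata_snapshot->getSampleBlockForColumns(column_names, getVirtuals(), getStorageID());
    auto converting = std::make_unique<ConvertingStep>(query_plan.getCurrentDataStream(), header);
    converting->setStepDescription("Convert VIEW subquery result to VIEW table structure");
    query_plan.addStep(std::move(converting));

The Pipe overload higher up is now just a thin shim that builds this plan and returns plan.convertToPipe(), a pattern repeated for each storage gaining QueryPlan support in this commit.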
static ASTTableExpression * getFirstTableExpression(ASTSelectQuery & select_query) static ASTTableExpression * getFirstTableExpression(ASTSelectQuery & select_query)

View File

@ -30,6 +30,16 @@ public:
size_t max_block_size, size_t max_block_size,
unsigned num_streams) override; unsigned num_streams) override;
void read(
QueryPlan & query_plan,
const Names & column_names,
const StorageMetadataPtr & metadata_snapshot,
SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
size_t max_block_size,
unsigned num_streams) override;
void replaceWithSubquery(ASTSelectQuery & select_query, ASTPtr & view_name, const StorageMetadataPtr & metadata_snapshot) const void replaceWithSubquery(ASTSelectQuery & select_query, ASTPtr & view_name, const StorageMetadataPtr & metadata_snapshot) const
{ {
replaceWithSubquery(select_query, metadata_snapshot->getSelectQuery().inner_query->clone(), view_name); replaceWithSubquery(select_query, metadata_snapshot->getSelectQuery().inner_query->clone(), view_name);

View File

@ -0,0 +1,16 @@
<yandex>
<remote_servers>
<test_cluster>
<shard>
<replica>
<host>node1</host>
<port>9000</port>
</replica>
<replica>
<host>node2</host>
<port>9000</port>
</replica>
</shard>
</test_cluster>
</remote_servers>
</yandex>

View File

@ -0,0 +1,17 @@
<yandex>
<shutdown_wait_unfinished>3</shutdown_wait_unfinished>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-server/log.log</log>
<errorlog>/var/log/clickhouse-server/log.err.log</errorlog>
<size>1000M</size>
<count>10</count>
<stderr>/var/log/clickhouse-server/stderr.log</stderr>
<stdout>/var/log/clickhouse-server/stdout.log</stdout>
</logger>
<part_log>
<database>system</database>
<table>part_log</table>
<flush_interval_milliseconds>500</flush_interval_milliseconds>
</part_log>
</yandex>

View File

@ -0,0 +1,98 @@
import pytest
import helpers.client
import helpers.cluster
cluster = helpers.cluster.ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', main_configs=['configs/logs_config.xml', 'configs/cluster.xml'],
with_zookeeper=True, stay_alive=True)
node2 = cluster.add_instance('node2', main_configs=['configs/logs_config.xml', 'configs/cluster.xml'],
with_zookeeper=True, stay_alive=True)
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_trivial_alter_in_partition_merge_tree_without_where(started_cluster):
try:
name = "test_trivial_alter_in_partition_merge_tree_without_where"
node1.query("DROP TABLE IF EXISTS {}".format(name))
node1.query("CREATE TABLE {} (p Int64, x Int64) ENGINE=MergeTree() ORDER BY tuple() PARTITION BY p".format(name))
node1.query("INSERT INTO {} VALUES (1, 2), (2, 3)".format(name))
with pytest.raises(helpers.client.QueryRuntimeException):
node1.query("ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 1 SETTINGS mutations_sync = 2".format(name))
assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["5"]
with pytest.raises(helpers.client.QueryRuntimeException):
node1.query("ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 2 SETTINGS mutations_sync = 2".format(name))
assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["5"]
with pytest.raises(helpers.client.QueryRuntimeException):
node1.query("ALTER TABLE {} DELETE IN PARTITION 1 SETTINGS mutations_sync = 2".format(name))
assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["5"]
with pytest.raises(helpers.client.QueryRuntimeException):
node1.query("ALTER TABLE {} DELETE IN PARTITION 2 SETTINGS mutations_sync = 2".format(name))
assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["5"]
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
def test_trivial_alter_in_partition_merge_tree_with_where(started_cluster):
try:
name = "test_trivial_alter_in_partition_merge_tree_with_where"
node1.query("DROP TABLE IF EXISTS {}".format(name))
node1.query("CREATE TABLE {} (p Int64, x Int64) ENGINE=MergeTree() ORDER BY tuple() PARTITION BY p".format(name))
node1.query("INSERT INTO {} VALUES (1, 2), (2, 3)".format(name))
node1.query("ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 2 WHERE p = 2 SETTINGS mutations_sync = 2".format(name))
assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["6"]
node1.query("ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 1 WHERE p = 2 SETTINGS mutations_sync = 2".format(name))
assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["6"]
node1.query("ALTER TABLE {} DELETE IN PARTITION 2 WHERE p = 2 SETTINGS mutations_sync = 2".format(name))
assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["2"]
node1.query("ALTER TABLE {} DELETE IN PARTITION 1 WHERE p = 2 SETTINGS mutations_sync = 2".format(name))
assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["2"]
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
def test_trivial_alter_in_partition_replicated_merge_tree(started_cluster):
try:
name = "test_trivial_alter_in_partition_replicated_merge_tree"
node1.query("DROP TABLE IF EXISTS {}".format(name))
node2.query("DROP TABLE IF EXISTS {}".format(name))
for node in (node1, node2):
node.query(
"CREATE TABLE {name} (p Int64, x Int64) ENGINE=ReplicatedMergeTree('/clickhouse/{name}', '{{instance}}') ORDER BY tuple() PARTITION BY p"
.format(name=name))
node1.query("INSERT INTO {} VALUES (1, 2)".format(name))
node2.query("INSERT INTO {} VALUES (2, 3)".format(name))
node1.query("ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 2 WHERE 1 SETTINGS mutations_sync = 2".format(name))
for node in (node1, node2):
assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["6"]
node1.query("ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 1 WHERE p = 2 SETTINGS mutations_sync = 2".format(name))
for node in (node1, node2):
assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["6"]
with pytest.raises(helpers.client.QueryRuntimeException):
node1.query("ALTER TABLE {} DELETE IN PARTITION 2 SETTINGS mutations_sync = 2".format(name))
for node in (node1, node2):
assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["6"]
node1.query("ALTER TABLE {} DELETE IN PARTITION 2 WHERE p = 2 SETTINGS mutations_sync = 2".format(name))
for node in (node1, node2):
assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["2"]
node1.query("ALTER TABLE {} DELETE IN PARTITION 1 WHERE p = 2 SETTINGS mutations_sync = 2".format(name))
for node in (node1, node2):
assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["2"]
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
node2.query("DROP TABLE IF EXISTS {}".format(name))

View File

@ -1,4 +1,8 @@
<test> <test>
<query>select number from (select number from numbers(10000000) order by -number) limit 10</query> <create_query>CREATE VIEW numbers_view AS SELECT number from numbers_mt(100000000) order by number desc</create_query>
<query>select number from (select number from numbers_mt(100000000) order by -number) limit 10</query>
<query>select number from (select number from numbers(1500000000) order by -number) limit 10</query>
<query>select number from (select number from numbers_mt(1500000000) order by -number) limit 10</query>
<query>select number from numbers_view limit 100</query>
</test> </test>

View File

@ -10,9 +10,9 @@ SET max_threads=1;
SET optimize_move_functions_out_of_any=0; SET optimize_move_functions_out_of_any=0;
SELECT 'LIMIT'; SELECT 'LIMIT';
SELECT any(_shard_num) shard_num, count(), uniq(dummy) FROM remote('127.0.0.{2,3}', system.one) LIMIT 1 SETTINGS distributed_group_by_no_merge=2; SELECT * FROM (SELECT any(_shard_num) shard_num, count(), uniq(dummy) FROM remote('127.0.0.{2,3}', system.one)) ORDER BY shard_num LIMIT 1 SETTINGS distributed_group_by_no_merge=2;
SELECT 'OFFSET'; SELECT 'OFFSET';
SELECT any(_shard_num) shard_num, count(), uniq(dummy) FROM remote('127.0.0.{2,3}', system.one) LIMIT 1, 1 SETTINGS distributed_group_by_no_merge=2; SELECT * FROM (SELECT any(_shard_num) shard_num, count(), uniq(dummy) FROM remote('127.0.0.{2,3}', system.one)) ORDER BY shard_num LIMIT 1, 1 SETTINGS distributed_group_by_no_merge=2;
SELECT 'ALIAS'; SELECT 'ALIAS';
SELECT dummy AS d FROM remote('127.0.0.{2,3}', system.one) ORDER BY d SETTINGS distributed_group_by_no_merge=2; SELECT dummy AS d FROM remote('127.0.0.{2,3}', system.one) ORDER BY d SETTINGS distributed_group_by_no_merge=2;

View File

@ -3,5 +3,7 @@ Header: x UInt8
Expression (Before ORDER BY and SELECT) Expression (Before ORDER BY and SELECT)
Header: _dummy UInt8 Header: _dummy UInt8
1 UInt8 1 UInt8
ReadFromStorage (Read from SystemOne) SettingQuotaAndLimits (Set limits and quota after reading from storage)
Header: dummy UInt8 Header: dummy UInt8
ReadFromStorage (SystemOne)
Header: dummy UInt8

View File

@ -3,7 +3,7 @@
5 5
1 1 1 1
2 1 2 1
3 4 3 3
1 1 1 1
2 1 2 1
3 4 3 4

View File

@ -8,4 +8,23 @@ ExpressionTransform
AggregatingInOrderTransform × 3 AggregatingInOrderTransform × 3
(Expression) (Expression)
ExpressionTransform × 3 ExpressionTransform × 3
(ReadFromStorage) (SettingQuotaAndLimits)
(Expression)
ExpressionTransform × 3
(Union)
(MergingSorted)
(Expression)
ExpressionTransform
(ReadFromStorage)
MergeTree 0 → 1
(MergingSorted)
MergingSortedTransform 2 → 1
(Expression)
ExpressionTransform × 2
(ReadFromStorage)
MergeTree × 2 0 → 1
(MergingSorted)
(Expression)
ExpressionTransform
(ReadFromStorage)
MergeTree 0 → 1

View File

@ -159,9 +159,10 @@
01533_collate_in_nullable 01533_collate_in_nullable
01542_collate_in_array 01542_collate_in_array
01543_collate_in_tuple 01543_collate_in_tuple
01545_url_file_format_settings
01546_log_queries_min_query_duration_ms 01546_log_queries_min_query_duration_ms
01547_query_log_current_database 01547_query_log_current_database
01548_query_log_query_execution_ms 01548_query_log_query_execution_ms
01552_dict_fixedstring 01552_dict_fixedstring
01555_system_distribution_queue_mask 01555_system_distribution_queue_mask
01557_max_parallel_replicas_no_sample.sql 01557_max_parallel_replicas_no_sample.sql

View File

@ -19,6 +19,7 @@ inc="-I. \
-I./contrib/double-conversion \ -I./contrib/double-conversion \
-I./contrib/cityhash102/include \ -I./contrib/cityhash102/include \
-I./contrib/croaring \ -I./contrib/croaring \
-I./contrib/miniselect/include \
-I./contrib/murmurhash/include \ -I./contrib/murmurhash/include \
-I./contrib/zookeeper/src/c/include \ -I./contrib/zookeeper/src/c/include \
-I./contrib/zookeeper/src/c/generated \ -I./contrib/zookeeper/src/c/generated \