Merge branch 'master' into pr-distributed-prefer-localhost-replica
This commit is contained in: commit 081fa19a97
2
contrib/abseil-cpp
vendored
@@ -1 +1 @@
Subproject commit 215105818dfde3174fe799600bb0f3cae233d0bf
Subproject commit 5655528c41830f733160de4fb0b99073841bae9e
@@ -1,5 +1,5 @@
set(ABSL_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp")
set(BUILD_TESTING OFF)

set(ABSL_PROPAGATE_CXX_STD ON)
add_subdirectory("${ABSL_ROOT_DIR}" "${ClickHouse_BINARY_DIR}/contrib/abseil-cpp")
2
contrib/s2geometry
vendored
@@ -1 +1 @@
Subproject commit 4a7ebd5da04cb6c9ea38bbf5914a9f8f3c768564
Subproject commit 0547c38371777a1c1c8be263a6f05c3bf71bb05b
@@ -7,12 +7,6 @@ endif()
|
||||
|
||||
set(S2_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/s2geometry/src")
|
||||
|
||||
set(ABSL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp")
|
||||
if(NOT EXISTS "${ABSL_SOURCE_DIR}/CMakeLists.txt")
|
||||
message(FATAL_ERROR " submodule contrib/abseil-cpp is missing. To fix try run: \n git submodule update --init --recursive")
|
||||
endif()
|
||||
|
||||
|
||||
set(S2_SRCS
|
||||
"${S2_SOURCE_DIR}/s2/encoded_s2cell_id_vector.cc"
|
||||
"${S2_SOURCE_DIR}/s2/encoded_s2point_vector.cc"
|
||||
@@ -58,7 +52,9 @@ set(S2_SRCS
|
||||
"${S2_SOURCE_DIR}/s2/s2edge_crossings.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2edge_distances.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2edge_tessellator.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2error.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2furthest_edge_query.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2hausdorff_distance_query.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2latlng.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2latlng_rect.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2latlng_rect_bounder.cc"
|
||||
@@ -93,33 +89,28 @@ set(S2_SRCS
|
||||
"${S2_SOURCE_DIR}/s2/s2shape_index_buffered_region.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shape_index_measures.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shape_measures.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shape_nesting_query.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_build_polygon_boundaries.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_coding.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_contains_brute_force.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_conversion.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_edge_iterator.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_get_reference_point.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_range_iterator.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2shapeutil_visit_crossing_edge_pairs.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2text_format.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2wedge_relations.cc"
|
||||
"${S2_SOURCE_DIR}/s2/s2winding_operation.cc"
|
||||
"${S2_SOURCE_DIR}/s2/strings/serialize.cc"
|
||||
"${S2_SOURCE_DIR}/s2/util/bits/bit-interleave.cc"
|
||||
"${S2_SOURCE_DIR}/s2/util/bits/bits.cc"
|
||||
"${S2_SOURCE_DIR}/s2/util/coding/coder.cc"
|
||||
"${S2_SOURCE_DIR}/s2/util/coding/varint.cc"
|
||||
"${S2_SOURCE_DIR}/s2/util/math/exactfloat/exactfloat.cc"
|
||||
"${S2_SOURCE_DIR}/s2/util/math/mathutil.cc"
|
||||
"${S2_SOURCE_DIR}/s2/util/units/length-units.cc"
|
||||
|
||||
)
|
||||
|
||||
add_library(_s2 ${S2_SRCS})
|
||||
add_library(ch_contrib::s2 ALIAS _s2)
|
||||
|
||||
set_property(TARGET _s2 PROPERTY CXX_STANDARD 17)
|
||||
|
||||
if (TARGET OpenSSL::SSL)
|
||||
target_link_libraries(_s2 PRIVATE OpenSSL::Crypto OpenSSL::SSL)
|
||||
endif()
|
||||
@@ -128,24 +119,28 @@ endif()
|
||||
target_link_libraries(_s2 PRIVATE
|
||||
absl::base
|
||||
absl::btree
|
||||
absl::check
|
||||
absl::config
|
||||
absl::core_headers
|
||||
absl::dynamic_annotations
|
||||
absl::endian
|
||||
absl::fixed_array
|
||||
absl::flags
|
||||
absl::flat_hash_map
|
||||
absl::flat_hash_set
|
||||
absl::hash
|
||||
absl::inlined_vector
|
||||
absl::int128
|
||||
absl::log
|
||||
absl::log_severity
|
||||
absl::memory
|
||||
absl::span
|
||||
absl::status
|
||||
absl::str_format
|
||||
absl::strings
|
||||
absl::type_traits
|
||||
absl::utility
|
||||
)
|
||||
)
|
||||
|
||||
target_include_directories(_s2 SYSTEM BEFORE PUBLIC "${S2_SOURCE_DIR}/")
|
||||
target_include_directories(_s2 SYSTEM PUBLIC "${ABSL_SOURCE_DIR}")
|
||||
|
@@ -1222,7 +1222,6 @@ Configuration markup:
|
||||
<account_name>account</account_name>
|
||||
<account_key>pass123</account_key>
|
||||
<metadata_path>/var/lib/clickhouse/disks/blob_storage_disk/</metadata_path>
|
||||
<cache_enabled>true</cache_enabled>
|
||||
<cache_path>/var/lib/clickhouse/disks/blob_storage_disk/cache/</cache_path>
|
||||
<skip_access_check>false</skip_access_check>
|
||||
</blob_storage_disk>
|
||||
@@ -1250,8 +1249,6 @@ Limit parameters (mainly for internal usage):

Other parameters:
* `metadata_path` - Path on local FS to store metadata files for Blob Storage. Default value is `/var/lib/clickhouse/disks/<disk_name>/`.
* `cache_enabled` - Allows to cache mark and index files on local FS. Default value is `true`.
* `cache_path` - Path on local FS where to store cached mark and index files. Default value is `/var/lib/clickhouse/disks/<disk_name>/cache/`.
* `skip_access_check` - If true, disk access checks will not be performed on disk start-up. Default value is `false`.
* `read_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of read requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk).
* `write_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of write requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk).
@@ -90,7 +90,7 @@ In ClickHouse Cloud, by default, passwords must meet the following complexity re
CREATE USER name3 IDENTIFIED WITH sha256_password BY 'my_password'
```

The `name3` user can now login using `my_password`, but the password is stored as the hashed value above. THe following SQL file was created in `/var/lib/clickhouse/access` and gets executed at server startup:
The `name3` user can now login using `my_password`, but the password is stored as the hashed value above. The following SQL file was created in `/var/lib/clickhouse/access` and gets executed at server startup:

```bash
/var/lib/clickhouse/access $ cat 3843f510-6ebd-a52d-72ac-e021686d8a93.sql
@@ -805,8 +805,6 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
|
||||
<single_read_retries>4</single_read_retries>
|
||||
<min_bytes_for_seek>1000</min_bytes_for_seek>
|
||||
<metadata_path>/var/lib/clickhouse/disks/s3/</metadata_path>
|
||||
<cache_enabled>true</cache_enabled>
|
||||
<cache_path>/var/lib/clickhouse/disks/s3/cache/</cache_path>
|
||||
<skip_access_check>false</skip_access_check>
|
||||
</s3>
|
||||
</disks>
|
||||
@@ -832,8 +830,6 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
- `single_read_retries` — the number of attempts to retry a request if an error occurs during reading. Default value: `4`.
- `min_bytes_for_seek` — the minimum number of bytes for which a seek operation is used instead of sequential reading. Default value: 1 MB.
- `metadata_path` — the path in the local file system where metadata files for S3 are stored. Default value: `/var/lib/clickhouse/disks/<disk_name>/`.
- `cache_enabled` — whether caching of marks and index files in the local file system is allowed. Default value: `true`.
- `cache_path` — the path in the local file system where cached marks and index files are stored. Default value: `/var/lib/clickhouse/disks/<disk_name>/cache/`.
- `skip_access_check` — whether to run an access check when the disk starts. If set to `true`, the check is not performed. Default value: `false`.

The S3 disk can be configured as `main` or `cold`:
@@ -745,8 +745,6 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
|
||||
<single_read_retries>4</single_read_retries>
|
||||
<min_bytes_for_seek>1000</min_bytes_for_seek>
|
||||
<metadata_path>/var/lib/clickhouse/disks/s3/</metadata_path>
|
||||
<cache_enabled>true</cache_enabled>
|
||||
<cache_path>/var/lib/clickhouse/disks/s3/cache/</cache_path>
|
||||
<skip_access_check>false</skip_access_check>
|
||||
</s3>
|
||||
</disks>
|
||||
@@ -772,8 +770,6 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
- `single_read_retries` - the number of retries after a connection loss during reading. Default value: 4.
- `min_bytes_for_seek` - the minimum number of bytes for which a seek operation is used instead of sequential reads. Default value: 1000.
- `metadata_path` - the local path for storing S3 metadata files. Default value: `/var/lib/clickhouse/disks/<disk_name>/`.
- `cache_enabled` - whether caching of mark and index files is allowed. Default value: `true`.
- `cache_path` - the local path where cached mark and index files are stored. Default value: `/var/lib/clickhouse/disks/<disk_name>/cache/`.
- `skip_access_check` - if `true`, ClickHouse does not check whether the disk is available at startup. Default value: `false`.
- `server_side_encryption_customer_key_base64` - if specified, the headers required to access SSE-C encrypted data are added to requests.

@@ -823,4 +819,3 @@ S3 disks can also be configured as hot/cold storage:
- `_part_uuid` - unique part identifier (if the MergeTree setting `assign_part_uuids` is enabled).
- `_partition_value` - the value (a tuple) of the `partition by` expression.
- `_sample_factor` - the sampling factor (from the query).
@@ -5,7 +5,7 @@ sidebar_position: 31

# stddevSamp {#stddevsamp}

The result is equal to the square root of [varSamp] (../../../sql-reference/aggregate-functions/reference/varsamp.md).
The result is equal to the square root of [varSamp](../../../sql-reference/aggregate-functions/reference/varsamp.md).

:::note
This function uses a numerically unstable algorithm. If you need [numerical stability](https://en.wikipedia.org/wiki/Numerical_stability) in your calculations, use the `stddevSampStable` function. It works more slowly but provides a lower computational error.
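A quick way to see the trade-off described in the note (an illustrative sketch, not part of this diff; the large offset is chosen only to stress floating-point cancellation):

```sql
-- Both aggregate functions exist in ClickHouse; stddevSampStable is slower
-- but numerically more robust on data with a large common offset.
SELECT
    stddevSamp(x),
    stddevSampStable(x)
FROM
(
    SELECT number + 1e9 AS x
    FROM numbers(1000000)
);
```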
@@ -32,10 +32,10 @@ contents:
|
||||
dst: /usr/bin/clickhouse-keeper
|
||||
- src: clickhouse-keeper.service
|
||||
dst: /lib/systemd/system/clickhouse-keeper.service
|
||||
- src: clickhouse
|
||||
- src: clickhouse-keeper
|
||||
dst: /usr/bin/clickhouse-keeper-client
|
||||
type: symlink
|
||||
- src: clickhouse
|
||||
- src: clickhouse-keeper
|
||||
dst: /usr/bin/clickhouse-keeper-converter
|
||||
type: symlink
|
||||
# docs
|
||||
|
@@ -1209,8 +1209,6 @@
|
||||
<single_read_retries>4</single_read_retries>
|
||||
<min_bytes_for_seek>1000</min_bytes_for_seek>
|
||||
<metadata_path>/var/lib/clickhouse/disks/s3/</metadata_path>
|
||||
<cache_enabled>true</cache_enabled>
|
||||
<cache_path>/var/lib/clickhouse/disks/s3/cache/</cache_path>
|
||||
<skip_access_check>false</skip_access_check>
|
||||
</s3>
|
||||
</disks>
|
||||
|
@@ -448,8 +448,6 @@
|
||||
<account_name>account</account_name>
|
||||
<account_key>pass123</account_key>
|
||||
<metadata_path>/var/lib/clickhouse/disks/blob_storage_disk/</metadata_path>
|
||||
<cache_enabled>true</cache_enabled>
|
||||
<cache_path>/var/lib/clickhouse/disks/blob_storage_disk/cache/</cache_path>
|
||||
<skip_access_check>false</skip_access_check>
|
||||
</blob_storage_disk>
|
||||
</disks>
|
||||
|
@@ -701,7 +701,7 @@ void ZooKeeper::receiveThread()

if (in->poll(max_wait_us))
{
if (requests_queue.isFinished())
if (finalization_started.test())
break;

receiveEvent();
@@ -1319,26 +1319,32 @@ TEST_P(CoordinationTest, SnapshotableHashMapDataSize)
|
||||
n2.setData("123456");
|
||||
n2.addChild("");
|
||||
|
||||
/// Note: Below, we check in many cases only that getApproximateDataSize() > 0. This is because
|
||||
/// the SnapshotableHashTable's approximate data size includes Node's sizeInBytes(). The
|
||||
/// latter includes sizeof(absl::flat_hash_set) which is surprisingly not constant across
|
||||
/// different runs. The approximate size is only used for statistics accounting, so this
|
||||
/// should be okay.
|
||||
|
||||
world.disableSnapshotMode();
|
||||
world.insert("world", n1);
|
||||
EXPECT_EQ(world.getApproximateDataSize(), 193);
|
||||
EXPECT_GT(world.getApproximateDataSize(), 0);
|
||||
world.updateValue("world", [&](Node & value) { value = n2; });
|
||||
EXPECT_EQ(world.getApproximateDataSize(), 211);
|
||||
EXPECT_GT(world.getApproximateDataSize(), 0);
|
||||
|
||||
world.erase("world");
|
||||
EXPECT_EQ(world.getApproximateDataSize(), 0);
|
||||
|
||||
world.enableSnapshotMode(100000);
|
||||
world.insert("world", n1);
|
||||
EXPECT_EQ(world.getApproximateDataSize(), 193);
|
||||
EXPECT_GT(world.getApproximateDataSize(), 0);
|
||||
world.updateValue("world", [&](Node & value) { value = n2; });
|
||||
EXPECT_EQ(world.getApproximateDataSize(), 404);
|
||||
EXPECT_GT(world.getApproximateDataSize(), 0);
|
||||
|
||||
world.clearOutdatedNodes();
|
||||
EXPECT_EQ(world.getApproximateDataSize(), 211);
|
||||
EXPECT_GT(world.getApproximateDataSize(), 0);
|
||||
|
||||
world.erase("world");
|
||||
EXPECT_EQ(world.getApproximateDataSize(), 211);
|
||||
EXPECT_GT(world.getApproximateDataSize(), 0);
|
||||
|
||||
world.clear();
|
||||
EXPECT_EQ(world.getApproximateDataSize(), 0);
|
||||
|
@@ -663,6 +663,7 @@ class IColumn;
|
||||
M(SetOperationMode, except_default_mode, SetOperationMode::ALL, "Set default mode in EXCEPT query. Possible values: empty string, 'ALL', 'DISTINCT'. If empty, query without mode will throw exception.", 0) \
|
||||
M(Bool, optimize_aggregators_of_group_by_keys, true, "Eliminates min/max/any/anyLast aggregators of GROUP BY keys in SELECT section", 0) \
|
||||
M(Bool, optimize_group_by_function_keys, true, "Eliminates functions of other keys in GROUP BY section", 0) \
|
||||
M(Bool, optimize_group_by_constant_keys, true, "Optimize GROUP BY when all keys in block are constant", 0) \
|
||||
M(Bool, legacy_column_name_of_tuple_literal, false, "List all names of element of large tuple literals in their column names instead of hash. This settings exists only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher.", 0) \
|
||||
\
|
||||
M(Bool, query_plan_enable_optimizations, true, "Apply optimizations to query plan", 0) \
|
||||
|
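The new `optimize_group_by_constant_keys` setting above targets queries where every GROUP BY key is a constant; a sketch of the intended usage (it mirrors the stateless test added later in this diff):

```sql
-- With the optimization on, all rows aggregate into a single hash-table entry.
SELECT count(number), 1 AS k1, 2 AS k2, 3 AS k3
FROM numbers_mt(10000000)
GROUP BY k1, k2, k3
SETTINGS optimize_group_by_constant_keys = 1;

-- Disabling it must produce the same result, just without the shortcut.
SELECT count(number), 1 AS k1, 2 AS k2, 3 AS k3
FROM numbers_mt(10000000)
GROUP BY k1, k2, k3
SETTINGS optimize_group_by_constant_keys = 0;
```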
@@ -80,6 +80,7 @@ namespace SettingsChangesHistory
|
||||
/// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
|
||||
static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
|
||||
{
|
||||
{"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}}},
|
||||
{"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}},
|
||||
{"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}},
|
||||
{"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."},
|
||||
|
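Because the flipped default is registered in the settings-changes history above, the `compatibility` setting is expected to restore the old behaviour; a hedged sketch (exact output depends on the server version):

```sql
-- Pinning compatibility to a pre-23.9 release should surface the old default.
SET compatibility = '23.8';
SELECT name, value
FROM system.settings
WHERE name = 'optimize_group_by_constant_keys';
```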
@@ -1156,12 +1156,12 @@ class FunctionBinaryArithmetic : public IFunction
|
||||
return function->execute(arguments, result_type, input_rows_count);
|
||||
}
|
||||
|
||||
ColumnPtr executeArrayImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const
|
||||
ColumnPtr executeArraysImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const
|
||||
{
|
||||
const auto * return_type_array = checkAndGetDataType<DataTypeArray>(result_type.get());
|
||||
|
||||
if (!return_type_array)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Return type for function {} must be array.", getName());
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Return type for function {} must be array", getName());
|
||||
|
||||
auto num_args = arguments.size();
|
||||
DataTypes data_types;
|
||||
@@ -1211,6 +1211,72 @@ class FunctionBinaryArithmetic : public IFunction
|
||||
return ColumnArray::create(res, typeid_cast<const ColumnArray *>(arguments[0].column.get())->getOffsetsPtr());
|
||||
}
|
||||
|
||||
ColumnPtr executeArrayWithNumericImpl(const ColumnsWithTypeAndName & args, const DataTypePtr & result_type, size_t input_rows_count) const
|
||||
{
|
||||
ColumnsWithTypeAndName arguments = args;
|
||||
bool is_swapped = isNumber(args[0].type); /// Defines the order of arguments (If array is first argument - is_swapped = false)
|
||||
|
||||
const auto * return_type_array = checkAndGetDataType<DataTypeArray>(result_type.get());
|
||||
if (!return_type_array)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Return type for function {} must be array", getName());
|
||||
|
||||
auto num_args = arguments.size();
|
||||
DataTypes data_types;
|
||||
|
||||
ColumnsWithTypeAndName new_arguments {num_args};
|
||||
DataTypePtr result_array_type;
|
||||
|
||||
const auto * left_const = typeid_cast<const ColumnConst *>(arguments[0].column.get());
|
||||
const auto * right_const = typeid_cast<const ColumnConst *>(arguments[1].column.get());
|
||||
|
||||
if (left_const && right_const)
|
||||
{
|
||||
new_arguments[0] = {left_const->getDataColumnPtr(), arguments[0].type, arguments[0].name};
|
||||
new_arguments[1] = {right_const->getDataColumnPtr(), arguments[1].type, arguments[1].name};
|
||||
auto col = executeImpl(new_arguments, result_type, 1);
|
||||
return ColumnConst::create(std::move(col), input_rows_count);
|
||||
}
|
||||
|
||||
if (right_const && is_swapped)
|
||||
{
|
||||
new_arguments[0] = {arguments[0].column.get()->getPtr(), arguments[0].type, arguments[0].name};
|
||||
new_arguments[1] = {right_const->convertToFullColumnIfConst(), arguments[1].type, arguments[1].name};
|
||||
return executeImpl(new_arguments, result_type, input_rows_count);
|
||||
}
|
||||
else if (left_const && !is_swapped)
|
||||
{
|
||||
new_arguments[0] = {left_const->convertToFullColumnIfConst(), arguments[0].type, arguments[0].name};
|
||||
new_arguments[1] = {arguments[1].column.get()->getPtr(), arguments[1].type, arguments[1].name};
|
||||
return executeImpl(new_arguments, result_type, input_rows_count);
|
||||
}
|
||||
|
||||
if (is_swapped)
|
||||
std::swap(arguments[1], arguments[0]);
|
||||
|
||||
const auto * left_array_col = typeid_cast<const ColumnArray *>(arguments[0].column.get());
|
||||
const auto & left_array_elements_type = typeid_cast<const DataTypeArray *>(arguments[0].type.get())->getNestedType();
|
||||
const auto & right_col = arguments[1].column.get()->cloneResized(left_array_col->size());
|
||||
|
||||
size_t rows_count = 0;
|
||||
const auto & left_offsets = left_array_col->getOffsets();
|
||||
if (!left_offsets.empty())
|
||||
rows_count = left_offsets.back();
|
||||
|
||||
new_arguments[0] = {left_array_col->getDataPtr(), left_array_elements_type, arguments[0].name};
|
||||
if (right_const)
|
||||
new_arguments[1] = {right_col->cloneResized(rows_count), arguments[1].type, arguments[1].name};
|
||||
else
|
||||
new_arguments[1] = {right_col->replicate(left_array_col->getOffsets()), arguments[1].type, arguments[1].name};
|
||||
|
||||
result_array_type = left_array_elements_type;
|
||||
|
||||
if (is_swapped)
|
||||
std::swap(new_arguments[1], new_arguments[0]);
|
||||
auto res = executeImpl(new_arguments, result_array_type, rows_count);
|
||||
|
||||
return ColumnArray::create(res, left_array_col->getOffsetsPtr());
|
||||
}
|
||||
|
||||
ColumnPtr executeTupleNumberOperator(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type,
|
||||
size_t input_rows_count, const FunctionOverloadResolverPtr & function_builder) const
|
||||
{
|
||||
@@ -1425,6 +1491,25 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
if constexpr (is_multiply || is_division)
|
||||
{
|
||||
if (isArray(arguments[0]) && isNumber(arguments[1]))
|
||||
{
|
||||
DataTypes new_arguments {
|
||||
static_cast<const DataTypeArray &>(*arguments[0]).getNestedType(),
|
||||
arguments[1],
|
||||
};
|
||||
return std::make_shared<DataTypeArray>(getReturnTypeImplStatic(new_arguments, context));
|
||||
}
|
||||
if (isNumber(arguments[0]) && isArray(arguments[1]))
|
||||
{
|
||||
DataTypes new_arguments {
|
||||
arguments[0],
|
||||
static_cast<const DataTypeArray &>(*arguments[1]).getNestedType(),
|
||||
};
|
||||
return std::make_shared<DataTypeArray>(getReturnTypeImplStatic(new_arguments, context));
|
||||
}
|
||||
}
|
||||
|
||||
/// Special case when the function is plus or minus, one of arguments is Date/DateTime and another is Interval.
|
||||
if (auto function_builder = getFunctionForIntervalArithmetic(arguments[0], arguments[1], context))
|
||||
@@ -2132,7 +2217,11 @@ ColumnPtr executeStringInteger(const ColumnsWithTypeAndName & arguments, const A
|
||||
});
|
||||
|
||||
if (isArray(result_type))
|
||||
return executeArrayImpl(arguments, result_type, input_rows_count);
|
||||
{
|
||||
if (!isArray(arguments[0].type) || !isArray(arguments[1].type))
|
||||
return executeArrayWithNumericImpl(arguments, result_type, input_rows_count);
|
||||
return executeArraysImpl(arguments, result_type, input_rows_count);
|
||||
}
|
||||
|
||||
if (!valid)
|
||||
{
|
||||
|
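The new `executeArrayWithNumericImpl` path above handles multiplication and division of an array by a number in either argument order, including constant and nested-array cases; a few illustrative queries (the same cases appear in the stateless test added below):

```sql
SELECT [2, 3, 5] * 7;               -- [14, 21, 35]
SELECT 7 * materialize([2, 3, 5]);  -- non-constant array argument
SELECT [[[2, 3, 5, 5]]] / 2;        -- nested arrays are processed element-wise
SELECT range(number) * 42 FROM numbers(3);
```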
@@ -1035,11 +1035,12 @@ void Aggregator::executeImpl(
|
||||
ColumnRawPtrs & key_columns,
|
||||
AggregateFunctionInstruction * aggregate_instructions,
|
||||
bool no_more_keys,
|
||||
bool all_keys_are_const,
|
||||
AggregateDataPtr overflow_row) const
|
||||
{
|
||||
#define M(NAME, IS_TWO_LEVEL) \
|
||||
else if (result.type == AggregatedDataVariants::Type::NAME) \
|
||||
executeImpl(*result.NAME, result.aggregates_pool, row_begin, row_end, key_columns, aggregate_instructions, no_more_keys, overflow_row);
|
||||
executeImpl(*result.NAME, result.aggregates_pool, row_begin, row_end, key_columns, aggregate_instructions, no_more_keys, all_keys_are_const, overflow_row);
|
||||
|
||||
if (false) {} // NOLINT
|
||||
APPLY_FOR_AGGREGATED_VARIANTS(M)
|
||||
@@ -1059,6 +1060,7 @@ void NO_INLINE Aggregator::executeImpl(
|
||||
ColumnRawPtrs & key_columns,
|
||||
AggregateFunctionInstruction * aggregate_instructions,
|
||||
bool no_more_keys,
|
||||
bool all_keys_are_const,
|
||||
AggregateDataPtr overflow_row) const
|
||||
{
|
||||
typename Method::State state(key_columns, key_sizes, aggregation_state_cache);
|
||||
@@ -1074,25 +1076,25 @@ void NO_INLINE Aggregator::executeImpl(
|
||||
{
|
||||
if (prefetch)
|
||||
executeImplBatch<false, true, true>(
|
||||
method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, overflow_row);
|
||||
method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, all_keys_are_const, overflow_row);
|
||||
else
|
||||
executeImplBatch<false, true, false>(
|
||||
method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, overflow_row);
|
||||
method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, all_keys_are_const, overflow_row);
|
||||
}
|
||||
else
|
||||
#endif
|
||||
{
|
||||
if (prefetch)
|
||||
executeImplBatch<false, false, true>(
|
||||
method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, overflow_row);
|
||||
method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, all_keys_are_const, overflow_row);
|
||||
else
|
||||
executeImplBatch<false, false, false>(
|
||||
method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, overflow_row);
|
||||
method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, all_keys_are_const, overflow_row);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
executeImplBatch<true, false, false>(method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, overflow_row);
|
||||
executeImplBatch<true, false, false>(method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, all_keys_are_const, overflow_row);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1104,6 +1106,7 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
size_t row_begin,
|
||||
size_t row_end,
|
||||
AggregateFunctionInstruction * aggregate_instructions,
|
||||
bool all_keys_are_const,
|
||||
AggregateDataPtr overflow_row) const
|
||||
{
|
||||
using KeyHolder = decltype(state.getKeyHolder(0, std::declval<Arena &>()));
|
||||
@@ -1120,6 +1123,12 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
|
||||
/// For all rows.
|
||||
AggregateDataPtr place = aggregates_pool->alloc(0);
|
||||
if (all_keys_are_const)
|
||||
{
|
||||
state.emplaceKey(method.data, 0, *aggregates_pool).setMapped(place);
|
||||
}
|
||||
else
|
||||
{
|
||||
for (size_t i = row_begin; i < row_end; ++i)
|
||||
{
|
||||
if constexpr (prefetch && HasPrefetchMemberFunc<decltype(method.data), KeyHolder>)
|
||||
@@ -1136,6 +1145,7 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
|
||||
state.emplaceKey(method.data, i, *aggregates_pool).setMapped(place);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1153,7 +1163,7 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
}
|
||||
}
|
||||
|
||||
if (!has_arrays && !hasSparseArguments(aggregate_instructions))
|
||||
if (!has_arrays && !hasSparseArguments(aggregate_instructions) && !all_keys_are_const)
|
||||
{
|
||||
for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst)
|
||||
{
|
||||
@@ -1179,10 +1189,23 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
/// - this affects only optimize_aggregation_in_order,
|
||||
/// - this is just a pointer, so it should not be significant,
|
||||
/// - and plus this will require other changes in the interface.
|
||||
std::unique_ptr<AggregateDataPtr[]> places(new AggregateDataPtr[row_end]);
|
||||
std::unique_ptr<AggregateDataPtr[]> places(new AggregateDataPtr[all_keys_are_const ? 1 : row_end]);
|
||||
|
||||
/// For all rows.
|
||||
for (size_t i = row_begin; i < row_end; ++i)
|
||||
size_t start, end;
|
||||
/// If all keys are const, key columns contain only 1 row.
|
||||
if (all_keys_are_const)
|
||||
{
|
||||
start = 0;
|
||||
end = 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
start = row_begin;
|
||||
end = row_end;
|
||||
}
|
||||
|
||||
for (size_t i = start; i < end; ++i)
|
||||
{
|
||||
AggregateDataPtr aggregate_data = nullptr;
|
||||
|
||||
@@ -1253,10 +1276,14 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
/// Add only if the key already exists.
|
||||
auto find_result = state.findKey(method.data, i, *aggregates_pool);
|
||||
if (find_result.isFound())
|
||||
{
|
||||
aggregate_data = find_result.getMapped();
|
||||
}
|
||||
else
|
||||
{
|
||||
aggregate_data = overflow_row;
|
||||
}
|
||||
}
|
||||
|
||||
places[i] = aggregate_data;
|
||||
}
|
||||
@@ -1278,9 +1305,17 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
columns_data.emplace_back(getColumnData(inst->batch_arguments[argument_index]));
|
||||
}
|
||||
|
||||
if (all_keys_are_const)
|
||||
{
|
||||
auto add_into_aggregate_states_function_single_place = compiled_aggregate_functions_holder->compiled_aggregate_functions.add_into_aggregate_states_function_single_place;
|
||||
add_into_aggregate_states_function_single_place(row_begin, row_end, columns_data.data(), places[0]);
|
||||
}
|
||||
else
|
||||
{
|
||||
auto add_into_aggregate_states_function = compiled_aggregate_functions_holder->compiled_aggregate_functions.add_into_aggregate_states_function;
|
||||
add_into_aggregate_states_function(row_begin, row_end, columns_data.data(), places.get());
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/// Add values to the aggregate functions.
|
||||
@@ -1294,6 +1329,17 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
|
||||
AggregateFunctionInstruction * inst = aggregate_instructions + i;
|
||||
|
||||
if (all_keys_are_const)
|
||||
{
|
||||
if (inst->offsets)
|
||||
inst->batch_that->addBatchSinglePlace(inst->offsets[static_cast<ssize_t>(row_begin) - 1], inst->offsets[row_end - 1], places[0] + inst->state_offset, inst->batch_arguments, aggregates_pool);
|
||||
else if (inst->has_sparse_arguments)
|
||||
inst->batch_that->addBatchSparseSinglePlace(row_begin, row_end, places[0] + inst->state_offset, inst->batch_arguments, aggregates_pool);
|
||||
else
|
||||
inst->batch_that->addBatchSinglePlace(row_begin, row_end, places[0] + inst->state_offset, inst->batch_arguments, aggregates_pool);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (inst->offsets)
|
||||
inst->batch_that->addBatchArray(row_begin, row_end, places.get(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool);
|
||||
else if (inst->has_sparse_arguments)
|
||||
@@ -1301,6 +1347,7 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
else
|
||||
inst->batch_that->addBatch(row_begin, row_end, places.get(), inst->state_offset, inst->batch_arguments, aggregates_pool);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1539,12 +1586,27 @@ bool Aggregator::executeOnBlock(Columns columns,
|
||||
* To make them work anyway, we materialize them.
|
||||
*/
|
||||
Columns materialized_columns;
|
||||
bool all_keys_are_const = false;
|
||||
if (params.optimize_group_by_constant_keys)
|
||||
{
|
||||
all_keys_are_const = true;
|
||||
for (size_t i = 0; i < params.keys_size; ++i)
|
||||
all_keys_are_const &= isColumnConst(*columns.at(keys_positions[i]));
|
||||
}
|
||||
|
||||
/// Remember the columns we will work with
|
||||
for (size_t i = 0; i < params.keys_size; ++i)
|
||||
{
|
||||
if (all_keys_are_const)
|
||||
{
|
||||
key_columns[i] = assert_cast<const ColumnConst &>(*columns.at(keys_positions[i])).getDataColumnPtr().get();
|
||||
}
|
||||
else
|
||||
{
|
||||
materialized_columns.push_back(recursiveRemoveSparse(columns.at(keys_positions[i]))->convertToFullColumnIfConst());
|
||||
key_columns[i] = materialized_columns.back().get();
|
||||
}
|
||||
|
||||
|
||||
if (!result.isLowCardinality())
|
||||
{
|
||||
@@ -1589,7 +1651,7 @@ bool Aggregator::executeOnBlock(Columns columns,
|
||||
{
|
||||
/// This is where data is written that does not fit in `max_rows_to_group_by` with `group_by_overflow_mode = any`.
|
||||
AggregateDataPtr overflow_row_ptr = params.overflow_row ? result.without_key : nullptr;
|
||||
executeImpl(result, row_begin, row_end, key_columns, aggregate_functions_instructions.data(), no_more_keys, overflow_row_ptr);
|
||||
executeImpl(result, row_begin, row_end, key_columns, aggregate_functions_instructions.data(), no_more_keys, all_keys_are_const, overflow_row_ptr);
|
||||
}
|
||||
|
||||
size_t result_size = result.sizeWithoutOverflowRow();
|
||||
|
@@ -1023,6 +1023,8 @@ public:
|
||||
|
||||
bool enable_prefetch;
|
||||
|
||||
bool optimize_group_by_constant_keys;
|
||||
|
||||
struct StatsCollectingParams
|
||||
{
|
||||
StatsCollectingParams();
|
||||
@@ -1060,6 +1062,7 @@ public:
|
||||
size_t max_block_size_,
|
||||
bool enable_prefetch_,
|
||||
bool only_merge_, // true for projections
|
||||
bool optimize_group_by_constant_keys_,
|
||||
const StatsCollectingParams & stats_collecting_params_ = {})
|
||||
: keys(keys_)
|
||||
, aggregates(aggregates_)
|
||||
@@ -1080,6 +1083,7 @@ public:
|
||||
, max_block_size(max_block_size_)
|
||||
, only_merge(only_merge_)
|
||||
, enable_prefetch(enable_prefetch_)
|
||||
, optimize_group_by_constant_keys(optimize_group_by_constant_keys_)
|
||||
, stats_collecting_params(stats_collecting_params_)
|
||||
{
|
||||
}
|
||||
@@ -1280,6 +1284,7 @@ private:
|
||||
ColumnRawPtrs & key_columns,
|
||||
AggregateFunctionInstruction * aggregate_instructions,
|
||||
bool no_more_keys = false,
|
||||
bool all_keys_are_const = false,
|
||||
AggregateDataPtr overflow_row = nullptr) const;
|
||||
|
||||
/// Process one data block, aggregate the data into a hash table.
|
||||
@@ -1292,6 +1297,7 @@ private:
|
||||
ColumnRawPtrs & key_columns,
|
||||
AggregateFunctionInstruction * aggregate_instructions,
|
||||
bool no_more_keys,
|
||||
bool all_keys_are_const,
|
||||
AggregateDataPtr overflow_row) const;
|
||||
|
||||
/// Specialization for a particular value no_more_keys.
|
||||
@@ -1303,6 +1309,7 @@ private:
|
||||
size_t row_begin,
|
||||
size_t row_end,
|
||||
AggregateFunctionInstruction * aggregate_instructions,
|
||||
bool all_keys_are_const,
|
||||
AggregateDataPtr overflow_row) const;
|
||||
|
||||
/// For case when there are no keys (all aggregate into one row).
|
||||
|
@@ -2574,6 +2574,7 @@ static Aggregator::Params getAggregatorParams(
|
||||
settings.max_block_size,
|
||||
settings.enable_software_prefetch_in_aggregation,
|
||||
/* only_merge */ false,
|
||||
settings.optimize_group_by_constant_keys,
|
||||
stats_collecting_params
|
||||
};
|
||||
}
|
||||
|
@@ -290,6 +290,7 @@ Aggregator::Params getAggregatorParams(const PlannerContextPtr & planner_context
|
||||
settings.max_block_size,
|
||||
settings.enable_software_prefetch_in_aggregation,
|
||||
/* only_merge */ false,
|
||||
settings.optimize_group_by_constant_keys,
|
||||
stats_collecting_params);
|
||||
|
||||
return aggregator_params;
|
||||
|
@@ -230,6 +230,7 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B
|
||||
transform_params->params.max_block_size,
|
||||
transform_params->params.enable_prefetch,
|
||||
/* only_merge */ false,
|
||||
transform_params->params.optimize_group_by_constant_keys,
|
||||
transform_params->params.stats_collecting_params};
|
||||
auto transform_params_for_set = std::make_shared<AggregatingTransformParams>(src_header, std::move(params_for_set), final);
|
||||
|
||||
|
@@ -41,7 +41,8 @@ TTLAggregationAlgorithm::TTLAggregationAlgorithm(
|
||||
settings.min_count_to_compile_aggregate_expression,
|
||||
settings.max_block_size,
|
||||
settings.enable_software_prefetch_in_aggregation,
|
||||
false /* only_merge */);
|
||||
false /* only_merge */,
|
||||
settings.optimize_group_by_constant_keys);
|
||||
|
||||
aggregator = std::make_unique<Aggregator>(header, params);
|
||||
|
||||
|
@@ -421,7 +421,12 @@ const KeyCondition::AtomMap KeyCondition::atom_map
|
||||
if (value.getType() != Field::Types::String)
|
||||
return false;
|
||||
|
||||
String prefix = extractFixedPrefixFromRegularExpression(value.get<const String &>());
|
||||
const String & expression = value.get<const String &>();
|
||||
// This optimization can't process alternation - this would require a comprehensive parsing of regular expression.
|
||||
if (expression.contains('|'))
|
||||
return false;
|
||||
|
||||
String prefix = extractFixedPrefixFromRegularExpression(expression);
|
||||
if (prefix.empty())
|
||||
return false;
|
||||
|
||||
|
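The early return above means fixed-prefix extraction for `match` is skipped as soon as the pattern contains alternation; illustratively (table name and error tag as in the stateless test updated below):

```sql
-- A plain prefix pattern can still be converted into a primary-key range:
SELECT count() FROM mt_match_pk WHERE match(v, '^ab');
-- Alternation has no single fixed prefix, so the index cannot be used:
SELECT count() FROM mt_match_pk WHERE match(v, '^a$|^b'); -- { serverError INDEX_NOT_USED }
```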
@@ -329,7 +329,8 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read(
|
||||
settings.min_count_to_compile_aggregate_expression,
|
||||
settings.max_block_size,
|
||||
settings.enable_software_prefetch_in_aggregation,
|
||||
only_merge);
|
||||
only_merge,
|
||||
settings.optimize_group_by_constant_keys);
|
||||
|
||||
return std::make_pair(params, only_merge);
|
||||
};
|
||||
|
@@ -101,6 +101,19 @@ ContextPtr getViewContext(ContextPtr context)
|
||||
return view_context;
|
||||
}
|
||||
|
||||
ASTTableExpression * getFirstTableExpression(ASTSelectQuery & select_query)
|
||||
{
|
||||
if (!select_query.tables() || select_query.tables()->children.empty())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: no table expression in view select AST");
|
||||
|
||||
auto * select_element = select_query.tables()->children[0]->as<ASTTablesInSelectQueryElement>();
|
||||
|
||||
if (!select_element->table_expression)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: incorrect table expression");
|
||||
|
||||
return select_element->table_expression->as<ASTTableExpression>();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
StorageView::StorageView(
|
||||
@@ -148,6 +161,21 @@ void StorageView::read(
|
||||
current_inner_query = query_info.view_query->clone();
|
||||
}
|
||||
|
||||
const auto & select_query = query_info.query->as<ASTSelectQuery &>();
|
||||
if (auto sample_size = select_query.sampleSize(), sample_offset = select_query.sampleOffset(); sample_size || sample_offset)
|
||||
{
|
||||
for (auto & inner_select_query : current_inner_query->as<ASTSelectWithUnionQuery &>().list_of_selects->children)
|
||||
{
|
||||
if (auto * select = inner_select_query->as<ASTSelectQuery>(); select)
|
||||
{
|
||||
ASTTableExpression * table_expression = getFirstTableExpression(*select);
|
||||
|
||||
table_expression->sample_offset = sample_offset;
|
||||
table_expression->sample_size = sample_size;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
auto options = SelectQueryOptions(QueryProcessingStage::Complete, 0, false, query_info.settings_limit_offset_done);
|
||||
|
||||
if (context->getSettingsRef().allow_experimental_analyzer)
|
||||
@@ -196,19 +224,6 @@ void StorageView::read(
|
||||
query_plan.addStep(std::move(converting));
|
||||
}
|
||||
|
||||
static ASTTableExpression * getFirstTableExpression(ASTSelectQuery & select_query)
|
||||
{
|
||||
if (!select_query.tables() || select_query.tables()->children.empty())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: no table expression in view select AST");
|
||||
|
||||
auto * select_element = select_query.tables()->children[0]->as<ASTTablesInSelectQueryElement>();
|
||||
|
||||
if (!select_element->table_expression)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: incorrect table expression");
|
||||
|
||||
return select_element->table_expression->as<ASTTableExpression>();
|
||||
}
|
||||
|
||||
void StorageView::replaceQueryParametersIfParametrizedView(ASTPtr & outer_query, const NameToNameMap & parameter_values)
|
||||
{
|
||||
ReplaceQueryParameterVisitor visitor(parameter_values);
|
||||
|
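Moving `getFirstTableExpression` ahead of `StorageView::read` and pushing the outer SAMPLE clause into the view's inner select is what the new 02881_view_sampling test checks; a condensed sketch of the user-visible effect (table and view names as in that test):

```sql
-- Sampling through a view over a MergeTree table with a SAMPLE BY key now
-- behaves like sampling the underlying table directly.
SELECT * FROM view_mt SAMPLE 0.1 ORDER BY key;
-- Views over engines without sampling support still raise SAMPLING_NOT_SUPPORTED.
```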
@@ -7,3 +7,4 @@ SELECT count() FROM mt_match_pk WHERE match(v, '^ab');
|
||||
SELECT count() FROM mt_match_pk WHERE match(v, '^a.');
|
||||
SELECT count() FROM mt_match_pk WHERE match(v, '^ab*');
|
||||
SELECT count() FROM mt_match_pk WHERE match(v, '^ac?');
|
||||
SELECT count() FROM mt_match_pk WHERE match(v, '^a$|^b'); -- {serverError INDEX_NOT_USED}
|
||||
|
32
tests/queries/0_stateless/02681_undrop_query.reference
Normal file
@@ -0,0 +1,32 @@
|
||||
test MergeTree undrop
|
||||
02681_undrop_mergetree
|
||||
1
|
||||
2
|
||||
3
|
||||
test detach
|
||||
UPDATE num = 2 WHERE id = 1
|
||||
test MergeTree with cluster
|
||||
02681_undrop_uuid_on_cluster
|
||||
1
|
||||
2
|
||||
3
|
||||
test MergeTree without uuid on cluster
|
||||
02681_undrop_no_uuid_on_cluster
|
||||
1
|
||||
2
|
||||
3
|
||||
test ReplicatedMergeTree undrop
|
||||
02681_undrop_replicatedmergetree
|
||||
1
|
||||
2
|
||||
3
|
||||
test Log undrop
|
||||
02681_undrop_log
|
||||
1
|
||||
2
|
||||
3
|
||||
test Distributed undrop
|
||||
02681_undrop_distributed
|
||||
test MergeTree drop and undrop multiple times
|
||||
02681_undrop_multiple
|
||||
3
|
90
tests/queries/0_stateless/02681_undrop_query.sql
Normal file
@@ -0,0 +1,90 @@
|
||||
-- Tags: no-ordinary-database, no-replicated-database, distributed, zookeeper
|
||||
|
||||
set database_atomic_wait_for_drop_and_detach_synchronously = 0;
|
||||
set allow_experimental_undrop_table_query = 1;
|
||||
|
||||
select 'test MergeTree undrop';
|
||||
drop table if exists 02681_undrop_mergetree sync;
|
||||
create table 02681_undrop_mergetree (id Int32) Engine=MergeTree() order by id;
|
||||
insert into 02681_undrop_mergetree values (1),(2),(3);
|
||||
drop table 02681_undrop_mergetree;
|
||||
select table from system.dropped_tables where table = '02681_undrop_mergetree' limit 1;
|
||||
undrop table 02681_undrop_mergetree;
|
||||
select * from 02681_undrop_mergetree order by id;
|
||||
drop table 02681_undrop_mergetree sync;
|
||||
|
||||
select 'test detach';
|
||||
drop table if exists 02681_undrop_detach sync;
|
||||
create table 02681_undrop_detach (id Int32, num Int32) Engine=MergeTree() order by id;
|
||||
insert into 02681_undrop_detach values (1, 1);
|
||||
detach table 02681_undrop_detach sync;
|
||||
undrop table 02681_undrop_detach; -- { serverError 57 }
|
||||
attach table 02681_undrop_detach;
|
||||
alter table 02681_undrop_detach update num = 2 where id = 1;
|
||||
select command from system.mutations where table='02681_undrop_detach' and database=currentDatabase() limit 1;
|
||||
drop table 02681_undrop_detach sync;
|
||||
|
||||
select 'test MergeTree with cluster';
|
||||
drop table if exists 02681_undrop_uuid_on_cluster on cluster test_shard_localhost sync format Null;
|
||||
create table 02681_undrop_uuid_on_cluster on cluster test_shard_localhost (id Int32) Engine=MergeTree() order by id format Null;
|
||||
insert into 02681_undrop_uuid_on_cluster values (1),(2),(3);
|
||||
drop table 02681_undrop_uuid_on_cluster on cluster test_shard_localhost format Null;
|
||||
select table from system.dropped_tables where table = '02681_undrop_uuid_on_cluster' limit 1;
|
||||
undrop table 02681_undrop_uuid_on_cluster on cluster test_shard_localhost format Null;
|
||||
select * from 02681_undrop_uuid_on_cluster order by id;
|
||||
drop table 02681_undrop_uuid_on_cluster sync;
|
||||
|
||||
select 'test MergeTree without uuid on cluster';
|
||||
drop table if exists 02681_undrop_no_uuid_on_cluster on cluster test_shard_localhost sync format Null;
|
||||
create table 02681_undrop_no_uuid_on_cluster on cluster test_shard_localhost (id Int32) Engine=MergeTree() order by id format Null;
|
||||
insert into 02681_undrop_no_uuid_on_cluster values (1),(2),(3);
|
||||
drop table 02681_undrop_no_uuid_on_cluster on cluster test_shard_localhost format Null;
|
||||
select table from system.dropped_tables where table = '02681_undrop_no_uuid_on_cluster' limit 1;
|
||||
undrop table 02681_undrop_no_uuid_on_cluster on cluster test_shard_localhost format Null;
|
||||
select * from 02681_undrop_no_uuid_on_cluster order by id;
|
||||
drop table 02681_undrop_no_uuid_on_cluster on cluster test_shard_localhost sync format Null;
|
||||
|
||||
select 'test ReplicatedMergeTree undrop';
|
||||
drop table if exists 02681_undrop_replicatedmergetree sync;
|
||||
create table 02681_undrop_replicatedmergetree (id Int32) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/02681_undrop_replicatedmergetree', 'test_undrop') order by id;
|
||||
insert into 02681_undrop_replicatedmergetree values (1),(2),(3);
|
||||
drop table 02681_undrop_replicatedmergetree;
|
||||
select table from system.dropped_tables where table = '02681_undrop_replicatedmergetree' limit 1;
|
||||
undrop table 02681_undrop_replicatedmergetree;
|
||||
select * from 02681_undrop_replicatedmergetree order by id;
|
||||
drop table 02681_undrop_replicatedmergetree sync;
|
||||
|
||||
select 'test Log undrop';
|
||||
drop table if exists 02681_undrop_log sync;
|
||||
create table 02681_undrop_log (id Int32) Engine=Log();
|
||||
insert into 02681_undrop_log values (1),(2),(3);
|
||||
drop table 02681_undrop_log;
|
||||
select table from system.dropped_tables where table = '02681_undrop_log' limit 1;
|
||||
undrop table 02681_undrop_log;
|
||||
select * from 02681_undrop_log order by id;
|
||||
drop table 02681_undrop_log sync;
|
||||
|
||||
select 'test Distributed undrop';
|
||||
drop table if exists 02681_undrop_distributed sync;
|
||||
create table 02681_undrop_distributed (id Int32) Engine = Distributed(test_shard_localhost, currentDatabase(), 02681_undrop, rand());
|
||||
drop table 02681_undrop_distributed;
|
||||
select table from system.dropped_tables where table = '02681_undrop_distributed' limit 1;
|
||||
undrop table 02681_undrop_distributed;
|
||||
drop table 02681_undrop_distributed sync;
|
||||
|
||||
select 'test MergeTree drop and undrop multiple times';
|
||||
drop table if exists 02681_undrop_multiple sync;
|
||||
create table 02681_undrop_multiple (id Int32) Engine=MergeTree() order by id;
|
||||
insert into 02681_undrop_multiple values (1);
|
||||
drop table 02681_undrop_multiple;
|
||||
create table 02681_undrop_multiple (id Int32) Engine=MergeTree() order by id;
|
||||
insert into 02681_undrop_multiple values (2);
|
||||
drop table 02681_undrop_multiple;
|
||||
create table 02681_undrop_multiple (id Int32) Engine=MergeTree() order by id;
|
||||
insert into 02681_undrop_multiple values (3);
|
||||
drop table 02681_undrop_multiple;
|
||||
select table from system.dropped_tables where table = '02681_undrop_multiple' limit 1;
|
||||
undrop table 02681_undrop_multiple;
|
||||
select * from 02681_undrop_multiple order by id;
|
||||
undrop table 02681_undrop_multiple; -- { serverError 57 }
|
||||
drop table 02681_undrop_multiple sync;
|
@@ -0,0 +1,6 @@
|
||||
test MergeTree with uuid
|
||||
02681_undrop_uuid
|
||||
OK
|
||||
1
|
||||
2
|
||||
3
|
19
tests/queries/0_stateless/02681_undrop_query_uuid.sh
Executable file
@@ -0,0 +1,19 @@
|
||||
#!/usr/bin/env bash
|
||||
# Tags: no-ordinary-database, no-replicated-database
|
||||
|
||||
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
# shellcheck source=../shell_config.sh
|
||||
. "$CURDIR"/../shell_config.sh
|
||||
|
||||
echo 'test MergeTree with uuid'
|
||||
${CLICKHOUSE_CLIENT} -q "drop table if exists 02681_undrop_uuid sync;"
|
||||
uuid=$(${CLICKHOUSE_CLIENT} --query "SELECT generateUUIDv4()")
|
||||
uuid2=$(${CLICKHOUSE_CLIENT} --query "SELECT generateUUIDv4()")
|
||||
${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none -q "create table 02681_undrop_uuid UUID '$uuid' on cluster test_shard_localhost (id Int32) Engine=MergeTree() order by id;"
|
||||
${CLICKHOUSE_CLIENT} -q "insert into 02681_undrop_uuid values (1),(2),(3);"
|
||||
${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none -q "drop table 02681_undrop_uuid on cluster test_shard_localhost settings database_atomic_wait_for_drop_and_detach_synchronously = 0;"
|
||||
${CLICKHOUSE_CLIENT} -q "select table from system.dropped_tables where table = '02681_undrop_uuid' limit 1;"
|
||||
${CLICKHOUSE_CLIENT} -q "undrop table 02681_undrop_uuid UUID '$uuid2' settings allow_experimental_undrop_table_query = 1;" 2>&1| grep -Faq "UNKNOWN_TABLE" && echo OK
|
||||
${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none -q "undrop table 02681_undrop_uuid UUID '$uuid' on cluster test_shard_localhost settings allow_experimental_undrop_table_query = 1;"
|
||||
${CLICKHOUSE_CLIENT} -q "select * from 02681_undrop_uuid order by id;"
|
||||
${CLICKHOUSE_CLIENT} -q "drop table 02681_undrop_uuid sync;"
|
@@ -0,0 +1,40 @@
|
||||
10000000 1 2 3
|
||||
10000000 1 2 3
|
||||
10000000 1 2 3
|
||||
10000000 1 2 3
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10 data.1.JSON
|
||||
10 data.2.JSON
|
||||
10 data.JSON
|
||||
10 data.1.JSON
|
||||
10 data.2.JSON
|
||||
10 data.JSON
|
||||
10 data.1.JSON
|
||||
10 data.2.JSON
|
||||
10 data.JSON
|
||||
10 data.1.JSON
|
||||
10 data.2.JSON
|
||||
10 data.JSON
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
||||
10
|
28
tests/queries/0_stateless/02845_group_by_constant_keys.sql
Normal file
@@ -0,0 +1,28 @@
|
||||
select count(number), 1 AS k1, 2 as k2, 3 as k3 from numbers_mt(10000000) group by k1, k2, k3 settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions=0;
|
||||
select count(number), 1 AS k1, 2 as k2, 3 as k3 from numbers_mt(10000000) group by k1, k2, k3 settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions = 0;
|
||||
select count(number), 1 AS k1, 2 as k2, 3 as k3 from numbers_mt(10000000) group by k1, k2, k3 settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions = 1;
|
||||
select count(number), 1 AS k1, 2 as k2, 3 as k3 from numbers_mt(10000000) group by k1, k2, k3 settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions = 1;
|
||||
|
||||
drop table if exists test;
|
||||
create table test (x UInt64) engine=File(JSON);
|
||||
set engine_file_allow_create_multiple_files = 1;
|
||||
insert into test select * from numbers(10);
|
||||
insert into test select * from numbers(10);
|
||||
insert into test select * from numbers(10);
|
||||
|
||||
select count() from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions=0;
|
||||
select count() from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions=0;
|
||||
select count() from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions=1;
|
||||
select count() from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions=1;
|
||||
|
||||
select count(), _file from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions=0;
|
||||
select count(), _file from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions=0;
|
||||
select count(), _file from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions=1;
|
||||
select count(), _file from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions=1;
|
||||
|
||||
select count() from test group by _file, _path order by _file, _path settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions=0;
|
||||
select count() from test group by _file, _path order by _file, _path settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions=0;
|
||||
select count() from test group by _file, _path order by _file, _path settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions=1;
|
||||
select count() from test group by _file, _path order by _file, _path settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions=1;
|
||||
|
||||
drop table test;
|
7
tests/queries/0_stateless/02881_view_sampling.reference
Normal file
@@ -0,0 +1,7 @@
|
||||
-- { echo }
|
||||
select * from data_mt sample 0.1 order by key;
|
||||
9
|
||||
select * from view_mt sample 0.1 order by key;
|
||||
9
|
||||
select * from data_mem sample 0.1 order by key; -- { serverError SAMPLING_NOT_SUPPORTED }
|
||||
select * from view_mem sample 0.1 order by key; -- { serverError SAMPLING_NOT_SUPPORTED }
|
18
tests/queries/0_stateless/02881_view_sampling.sql
Normal file
@@ -0,0 +1,18 @@
|
||||
drop table if exists data_mt;
|
||||
drop table if exists view_mt;
|
||||
drop table if exists data_mem;
|
||||
drop table if exists view_mem;
|
||||
|
||||
create table data_mt (key Int) engine=MergeTree() order by (key, sipHash64(key)) sample by sipHash64(key);
|
||||
insert into data_mt select * from numbers(10);
|
||||
create view view_mt as select * from data_mt;
|
||||
|
||||
create table data_mem (key Int) engine=Memory();
|
||||
insert into data_mem select * from numbers(10);
|
||||
create view view_mem as select * from data_mem;
|
||||
|
||||
-- { echo }
|
||||
select * from data_mt sample 0.1 order by key;
|
||||
select * from view_mt sample 0.1 order by key;
|
||||
select * from data_mem sample 0.1 order by key; -- { serverError SAMPLING_NOT_SUPPORTED }
|
||||
select * from view_mem sample 0.1 order by key; -- { serverError SAMPLING_NOT_SUPPORTED }
|
@@ -0,0 +1 @@
|
||||
Can't get data for node '/test-keeper-client-default': node doesn't exist
|
13
tests/queries/0_stateless/02882_clickhouse_keeper_client_no_confirmation.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
# shellcheck source=../shell_config.sh
|
||||
. "$CUR_DIR"/../shell_config.sh
|
||||
|
||||
path="/test-keeper-client-$CLICKHOUSE_DATABASE"
|
||||
|
||||
$CLICKHOUSE_KEEPER_CLIENT -q "rm $path" >& /dev/null
|
||||
|
||||
$CLICKHOUSE_KEEPER_CLIENT -q "create $path 'foobar'"
|
||||
$CLICKHOUSE_KEEPER_CLIENT -q "rmr $path"
|
||||
$CLICKHOUSE_KEEPER_CLIENT -q "get $path" 2>&1
|
@@ -0,0 +1,33 @@
|
||||
[14,21,35]
|
||||
[14,21,35]
|
||||
[14,21,35]
|
||||
[14,21,35]
|
||||
[14,21,35]
|
||||
[14,21,35]
|
||||
[14,21,35]
|
||||
[[[14,21,35,35]]]
|
||||
[[[14,21,35,35]]]
|
||||
[[[1,1.5,2.5,2.5]]]
|
||||
[[[1,0.6666666666666666,0.4,0.4]]]
|
||||
[(7,14),(14,14)]
|
||||
[(NULL,14),(14,NULL)]
|
||||
[(NULL,2),(2,NULL)]
|
||||
[(7,700000000000000000000),(NULL,7340039)]
|
||||
[14,0]
|
||||
[14,7]
|
||||
[14,14]
|
||||
[14,21]
|
||||
[14,28]
|
||||
[0,0,0]
|
||||
[2,3,5]
|
||||
[4,6,10]
|
||||
[6,9,15]
|
||||
[8,12,20]
|
||||
[]
|
||||
[0]
|
||||
[0,42]
|
||||
[0,42,84]
|
||||
[0,42,84,126]
|
||||
[60,15,5]
|
||||
[0,0,1]
|
||||
[2.4,2.4,1.2]
|
@@ -0,0 +1,25 @@
|
||||
SELECT materialize([2, 3, 5]) * materialize(7);
|
||||
SELECT materialize(7) * materialize([2, 3, 5]);
|
||||
SELECT [2, 3, 5] * materialize(7);
|
||||
SELECT materialize(7) * [2, 3, 5];
|
||||
SELECT materialize([2, 3, 5]) * 7;
|
||||
SELECT 7 * materialize([2, 3, 5]);
|
||||
SELECT [2, 3, 5] * 7;
|
||||
SELECT [[[2, 3, 5, 5]]] * 7;
|
||||
SELECT 7 * [[[2, 3, 5, 5]]];
|
||||
SELECT [[[2, 3, 5, 5]]] / 2;
|
||||
SELECT 2 / [[[2, 3, 5, 5]]];
|
||||
SELECT [(1, 2), (2, 2)] * 7;
|
||||
SELECT [(NULL, 2), (2, NULL)] * 7;
|
||||
SELECT [(NULL, 2), (2, NULL)] / 1;
|
||||
SELECT [(1., 100000000000000000000.), (NULL, 1048577)] * 7;
|
||||
SELECT [CAST('2', 'UInt64'), number] * 7 FROM numbers(5);
|
||||
SELECT [2, 3, 5] * number FROM numbers(5);
|
||||
SELECT range(number) * 42 FROM numbers(5);
|
||||
CREATE TABLE my_table (values Array(Int32)) ENGINE = MergeTree() ORDER BY values;
|
||||
INSERT INTO my_table (values) VALUES ([12, 3, 1]);
|
||||
SELECT values * 5 FROM my_table WHERE arrayExists(x -> x > 5, values);
|
||||
DROP TABLE my_table;
|
||||
SELECT [6, 6, 3] % 2;
|
||||
SELECT [6, 6, 3] / 2.5::Decimal(1, 1);
|
||||
SELECT [1] / 'a'; -- { serverError 43 }
|
10
tests/queries/0_stateless/02883_zookeeper_finalize_stress.sh
Executable file
@@ -0,0 +1,10 @@
|
||||
#!/usr/bin/env bash
|
||||
# Tags: long
|
||||
|
||||
# Regression test for possible CANNOT_READ_ALL_DATA during client termination
|
||||
|
||||
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
# shellcheck source=../shell_config.sh
|
||||
. "$CUR_DIR"/../shell_config.sh
|
||||
|
||||
yes /keeper/api_version | head -n1000 | xargs -P30 -i $CLICKHOUSE_KEEPER_CLIENT -q 'get {}' > /dev/null
|
@@ -19,13 +19,13 @@
|
||||
59183 85379
|
||||
33010362 77807
|
||||
800784 77492
|
||||
1704509 523264
|
||||
732797 475698
|
||||
598875 337212
|
||||
792887 252197
|
||||
3807842 196036
|
||||
25703952 147211
|
||||
716829 90109
|
||||
59183 85379
|
||||
33010362 77807
|
||||
800784 77492
|
||||
732797 46144
|
||||
1704509 40129
|
||||
598875 30482
|
||||
792887 22585
|
||||
3807842 18891
|
||||
11312316 13181
|
||||
25703952 11177
|
||||
59183 9935
|
||||
4379238 8593
|
||||
716829 8411
|
||||
|
@@ -79,6 +79,11 @@ export CLICKHOUSE_PORT_POSTGRESQL=${CLICKHOUSE_PORT_POSTGRESQL:="9005"}
|
||||
export CLICKHOUSE_PORT_KEEPER=${CLICKHOUSE_PORT_KEEPER:=$(${CLICKHOUSE_EXTRACT_CONFIG} --try --key=keeper_server.tcp_port 2>/dev/null)} 2>/dev/null
|
||||
export CLICKHOUSE_PORT_KEEPER=${CLICKHOUSE_PORT_KEEPER:="9181"}
|
||||
|
||||
# keeper-client
|
||||
[ -x "${CLICKHOUSE_BINARY}-keeper-client" ] && CLICKHOUSE_KEEPER_CLIENT=${CLICKHOUSE_KEEPER_CLIENT:="${CLICKHOUSE_BINARY}-keeper-client"}
|
||||
[ -x "${CLICKHOUSE_BINARY}" ] && CLICKHOUSE_KEEPER_CLIENT=${CLICKHOUSE_KEEPER_CLIENT:="${CLICKHOUSE_BINARY} keeper-client"}
|
||||
export CLICKHOUSE_KEEPER_CLIENT=${CLICKHOUSE_KEEPER_CLIENT:="${CLICKHOUSE_BINARY}-keeper-client --port $CLICKHOUSE_PORT_KEEPER"}
|
||||
|
||||
export CLICKHOUSE_CLIENT_SECURE=${CLICKHOUSE_CLIENT_SECURE:=$(echo "${CLICKHOUSE_CLIENT}" | sed 's/--secure //' | sed 's/'"--port=${CLICKHOUSE_PORT_TCP}"'//g; s/$/'"--secure --accept-invalid-certificate --port=${CLICKHOUSE_PORT_TCP_SECURE}"'/g')}
|
||||
|
||||
# Add database and log comment to url params
|
||||
|