Merge branch 'ClickHouse:master' into zvonand-globs-small-fix

Andrey Zvonov 2023-08-28 17:15:33 +05:00 committed by GitHub
commit c9b0db9c61
60 changed files with 347 additions and 150 deletions


@@ -35,10 +35,6 @@ find_package(Threads REQUIRED)
include (cmake/unwind.cmake)
include (cmake/cxx.cmake)
# Delay the call to link the global interface after the libc++ libraries are included to avoid circular dependencies
# which are ok with static libraries but not with dynamic ones
link_libraries(global-group)
if (NOT OS_ANDROID)
if (NOT USE_MUSL)
# Our compatibility layer doesn't build under Android, many errors in musl.
@@ -47,6 +43,8 @@ if (NOT OS_ANDROID)
add_subdirectory(base/harmful)
endif ()
link_libraries(global-group)
target_link_libraries(global-group INTERFACE
-Wl,--start-group
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>


@@ -334,20 +334,36 @@ set(ARROW_SRCS
"${LIBRARY_DIR}/compute/api_vector.cc"
"${LIBRARY_DIR}/compute/cast.cc"
"${LIBRARY_DIR}/compute/exec.cc"
"${LIBRARY_DIR}/compute/exec/accumulation_queue.cc"
"${LIBRARY_DIR}/compute/exec/accumulation_queue.h"
"${LIBRARY_DIR}/compute/exec/aggregate.cc"
"${LIBRARY_DIR}/compute/exec/aggregate_node.cc"
"${LIBRARY_DIR}/compute/exec/asof_join_node.cc"
"${LIBRARY_DIR}/compute/exec/bloom_filter.cc"
"${LIBRARY_DIR}/compute/exec/exec_plan.cc"
"${LIBRARY_DIR}/compute/exec/expression.cc"
"${LIBRARY_DIR}/compute/exec/filter_node.cc"
"${LIBRARY_DIR}/compute/exec/project_node.cc"
"${LIBRARY_DIR}/compute/exec/source_node.cc"
"${LIBRARY_DIR}/compute/exec/sink_node.cc"
"${LIBRARY_DIR}/compute/exec/hash_join.cc"
"${LIBRARY_DIR}/compute/exec/hash_join_dict.cc"
"${LIBRARY_DIR}/compute/exec/hash_join_node.cc"
"${LIBRARY_DIR}/compute/exec/key_hash.cc"
"${LIBRARY_DIR}/compute/exec/key_map.cc"
"${LIBRARY_DIR}/compute/exec/map_node.cc"
"${LIBRARY_DIR}/compute/exec/options.cc"
"${LIBRARY_DIR}/compute/exec/order_by_impl.cc"
"${LIBRARY_DIR}/compute/exec/partition_util.cc"
"${LIBRARY_DIR}/compute/exec/project_node.cc"
"${LIBRARY_DIR}/compute/exec/query_context.cc"
"${LIBRARY_DIR}/compute/exec/sink_node.cc"
"${LIBRARY_DIR}/compute/exec/source_node.cc"
"${LIBRARY_DIR}/compute/exec/swiss_join.cc"
"${LIBRARY_DIR}/compute/exec/task_util.cc"
"${LIBRARY_DIR}/compute/exec/tpch_node.cc"
"${LIBRARY_DIR}/compute/exec/union_node.cc"
"${LIBRARY_DIR}/compute/exec/util.cc"
"${LIBRARY_DIR}/compute/function.cc"
"${LIBRARY_DIR}/compute/function_internal.cc"
"${LIBRARY_DIR}/compute/kernel.cc"
"${LIBRARY_DIR}/compute/light_array.cc"
"${LIBRARY_DIR}/compute/registry.cc"
"${LIBRARY_DIR}/compute/kernels/aggregate_basic.cc"
"${LIBRARY_DIR}/compute/kernels/aggregate_mode.cc"
"${LIBRARY_DIR}/compute/kernels/aggregate_quantile.cc"
@@ -355,49 +371,43 @@ set(ARROW_SRCS
"${LIBRARY_DIR}/compute/kernels/aggregate_var_std.cc"
"${LIBRARY_DIR}/compute/kernels/codegen_internal.cc"
"${LIBRARY_DIR}/compute/kernels/hash_aggregate.cc"
"${LIBRARY_DIR}/compute/kernels/row_encoder.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_arithmetic.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_boolean.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_cast_boolean.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_cast_dictionary.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_cast_internal.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_cast_extension.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_cast_internal.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_cast_nested.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_cast_numeric.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_cast_string.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_cast_temporal.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_compare.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_if_else.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_nested.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_random.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_round.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_set_lookup.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_string_ascii.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_string_utf8.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_temporal_binary.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_temporal_unary.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_validity.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_if_else.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_string_ascii.cc"
"${LIBRARY_DIR}/compute/kernels/scalar_string_utf8.cc"
"${LIBRARY_DIR}/compute/kernels/util_internal.cc"
"${LIBRARY_DIR}/compute/kernels/vector_array_sort.cc"
"${LIBRARY_DIR}/compute/kernels/vector_cumulative_ops.cc"
"${LIBRARY_DIR}/compute/kernels/vector_hash.cc"
"${LIBRARY_DIR}/compute/kernels/vector_rank.cc"
"${LIBRARY_DIR}/compute/kernels/vector_select_k.cc"
"${LIBRARY_DIR}/compute/kernels/vector_nested.cc"
"${LIBRARY_DIR}/compute/kernels/vector_rank.cc"
"${LIBRARY_DIR}/compute/kernels/vector_replace.cc"
"${LIBRARY_DIR}/compute/kernels/vector_select_k.cc"
"${LIBRARY_DIR}/compute/kernels/vector_selection.cc"
"${LIBRARY_DIR}/compute/kernels/vector_sort.cc"
"${LIBRARY_DIR}/compute/kernels/row_encoder.cc"
"${LIBRARY_DIR}/compute/exec/union_node.cc"
"${LIBRARY_DIR}/compute/exec/key_hash.cc"
"${LIBRARY_DIR}/compute/exec/key_map.cc"
"${LIBRARY_DIR}/compute/exec/util.cc"
"${LIBRARY_DIR}/compute/exec/hash_join_dict.cc"
"${LIBRARY_DIR}/compute/exec/hash_join.cc"
"${LIBRARY_DIR}/compute/exec/hash_join_node.cc"
"${LIBRARY_DIR}/compute/exec/task_util.cc"
"${LIBRARY_DIR}/compute/light_array.cc"
"${LIBRARY_DIR}/compute/registry.cc"
"${LIBRARY_DIR}/compute/row/compare_internal.cc"
"${LIBRARY_DIR}/compute/row/encode_internal.cc"
"${LIBRARY_DIR}/compute/row/grouper.cc"
"${LIBRARY_DIR}/compute/row/compare_internal.cc"
"${LIBRARY_DIR}/compute/row/row_internal.cc"
"${LIBRARY_DIR}/ipc/dictionary.cc"

contrib/boost vendored

@@ -1 +1 @@
Subproject commit 5a3fb87e67cc67ffadfc1990b3665fc3b260fcf4
Subproject commit ae94606a70f1e298ce2a5718db858079185c4d9c


@@ -25,18 +25,16 @@ if (OS_LINUX)
)
endif ()
# headers-only
# headers-only
add_library (_boost_headers_only INTERFACE)
add_library (boost::headers_only ALIAS _boost_headers_only)
target_include_directories (_boost_headers_only SYSTEM BEFORE INTERFACE ${LIBRARY_DIR})
# asio
target_compile_definitions (_boost_headers_only INTERFACE
BOOST_ASIO_STANDALONE=1
# Avoid using of deprecated in c++ > 17 std::result_of
BOOST_ASIO_HAS_STD_INVOKE_RESULT=1
BOOST_ASIO_HAS_STD_INVOKE_RESULT=1 # Avoid using of deprecated in c++ > 17 std::result_of
BOOST_TIMER_ENABLE_DEPRECATED=1 # wordnet-blast (enabled via USE_NLP) uses Boost legacy timer classes
)
# iostreams


@@ -32,7 +32,6 @@ set(RE2_SOURCES
${SRC_DIR}/re2/tostring.cc
${SRC_DIR}/re2/unicode_casefold.cc
${SRC_DIR}/re2/unicode_groups.cc
${SRC_DIR}/util/pcre.cc
${SRC_DIR}/util/rune.cc
${SRC_DIR}/util/strutil.cc
)


@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
esac
ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
ARG VERSION="23.7.4.5"
ARG VERSION="23.7.5.30"
ARG PACKAGES="clickhouse-keeper"
# user/group precreated explicitly with fixed uid/gid on purpose.


@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.7.4.5"
ARG VERSION="23.7.5.30"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# user/group precreated explicitly with fixed uid/gid on purpose.


@@ -23,7 +23,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.7.4.5"
ARG VERSION="23.7.5.30"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# set non-empty deb_location_url url to create a docker image


@@ -41,7 +41,7 @@ do
echo "Creating destination table ${table}_${hash}" >&2
echo "$statement" | clickhouse-client --distributed_ddl_task_timeout=10 $CONNECTION_PARAMETERS || continue
echo "$statement" | clickhouse-client --distributed_ddl_task_timeout=10 --receive_timeout=10 --send_timeout=10 $CONNECTION_PARAMETERS || continue
echo "Creating table system.${table}_sender" >&2


@@ -0,0 +1,31 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v23.7.5.30-stable (e86c21fb922) FIXME as compared to v23.7.4.5-stable (bd2fcd44553)
#### Build/Testing/Packaging Improvement
* Backported in [#53291](https://github.com/ClickHouse/ClickHouse/issues/53291): The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud; this is the second attempt after [#53100](https://github.com/ClickHouse/ClickHouse/issues/53100). [#53213](https://github.com/ClickHouse/ClickHouse/pull/53213) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#53467](https://github.com/ClickHouse/ClickHouse/issues/53467): Preserve environment parameters in `clickhouse start` command. Fixes [#51962](https://github.com/ClickHouse/ClickHouse/issues/51962). [#53418](https://github.com/ClickHouse/ClickHouse/pull/53418) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Not-ready Set [#53162](https://github.com/ClickHouse/ClickHouse/pull/53162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix incorrect normal projection AST format [#53347](https://github.com/ClickHouse/ClickHouse/pull/53347) ([Amos Bird](https://github.com/amosbird)).
* Fix loading lazy database during system.table select query [#53372](https://github.com/ClickHouse/ClickHouse/pull/53372) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix wrong columns order for queries with parallel FINAL. [#53489](https://github.com/ClickHouse/ClickHouse/pull/53489) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix: interpolate expression takes source column instead of same name aliased from select expression. [#53572](https://github.com/ClickHouse/ClickHouse/pull/53572) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Correctly handle totals and extremes with `DelayedSource` [#53644](https://github.com/ClickHouse/ClickHouse/pull/53644) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix: sorted distinct with sparse columns [#53711](https://github.com/ClickHouse/ClickHouse/pull/53711) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix fuzzer crash in parseDateTime() [#53764](https://github.com/ClickHouse/ClickHouse/pull/53764) ([Robert Schulze](https://github.com/rschu1ze)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Disable test_reverse_dns_query/test.py [#53195](https://github.com/ClickHouse/ClickHouse/pull/53195) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_host_regexp_multiple_ptr_records/test.py [#53211](https://github.com/ClickHouse/ClickHouse/pull/53211) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix broken `02862_sorted_distinct_sparse_fix` [#53738](https://github.com/ClickHouse/ClickHouse/pull/53738) ([Antonio Andelic](https://github.com/antonio2368)).
* Get rid of describe_parameters for the best robot token [#53833](https://github.com/ClickHouse/ClickHouse/pull/53833) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@@ -0,0 +1,103 @@
---
slug: /zh/engines/table-engines/mergetree-family/sharedmergetree
---
# SharedMergeTree {#sharedmergetree}
Available only in ClickHouse Cloud (and first-party partner cloud services).

The SharedMergeTree family of table engines is the cloud-native replacement of the ReplicatedMergeTree engines, optimized for working on top of shared object storage (for example Amazon S3, Google Cloud Storage, MinIO, Azure Blob Storage). Each specific MergeTree engine type has a corresponding SharedMergeTree engine, e.g. ReplacingSharedMergeTree replaces ReplacingReplicatedMergeTree.

The SharedMergeTree family of table engines brings significant performance improvements to ClickHouse Cloud. End users do not need to change anything to start using the SharedMergeTree engine family instead of the ReplicatedMergeTree-based engines. The benefits it provides include:

- Higher insert throughput
- Improved throughput of background merges
- Improved throughput of mutations
- Faster scale-up and scale-down operations
- More lightweight strong consistency for SELECT queries

A significant improvement that SharedMergeTree brings is a more complete separation of compute and storage compared to ReplicatedMergeTree. The diagram below shows how ReplicatedMergeTree separates compute and storage:
![ReplicatedMergeTree Diagram](../../../images/shared-merge-tree-1.png)
As you can see, even though the data stored in ReplicatedMergeTree resides in object storage, the metadata still lives on each of the clickhouse-servers. This means that for every replicated operation, the metadata also has to be replicated on all replicas.
![ReplicatedMergeTree Diagram](../../../images/shared-merge-tree-2.png)
Unlike ReplicatedMergeTree, SharedMergeTree does not require replicas to communicate with each other. Instead, all communication happens through shared storage and clickhouse-keeper. SharedMergeTree implements asynchronous leaderless replication and uses clickhouse-keeper for coordination and metadata storage. This means that metadata does not need to be replicated as the service scales, which speeds up replication, mutations, merges and scale-up operations. SharedMergeTree allows for hundreds of replicas per table, making it possible to scale dynamically without sharding. It also means that in ClickHouse Cloud, the distributed query execution approach can use more compute resources for a query.

## System monitoring

Most of the ReplicatedMergeTree system tables used for system monitoring also exist for SharedMergeTree, except for `system.replication_queue` and `system.replicated_fetches`, since there is no replication of data and metadata. However, SharedMergeTree has corresponding replacements for these two tables.
`system.virtual_parts`
This table serves as SharedMergeTree's replacement for `system.replication_queue`. It stores information about the most recent set of current data parts, as well as future parts that are in progress, such as merges, mutations and dropped parts.
`system.shared_merge_tree_fetches`
This table is SharedMergeTree's replacement for `system.replicated_fetches`. It contains information about in-progress fetches of primary keys and checksums into memory.
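Both replacement tables can be queried directly. A minimal sketch (the exact column sets are version-dependent, so `SELECT *` is used here rather than specific columns):

```sql
-- Current data parts plus in-progress merges, mutations and drops
SELECT * FROM system.virtual_parts LIMIT 10;

-- In-progress fetches of primary keys and checksums
SELECT * FROM system.shared_merge_tree_fetches LIMIT 10;
```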
## Using SharedMergeTree

SharedMergeTree is already the default table engine on all development tier services, and it can be enabled on production tier services by opening a support ticket: https://clickhouse.cloud/support.

For services that support the SharedMergeTree table engine, you do not need to change anything. You can create tables as before, and they will automatically use a SharedMergeTree-based table engine corresponding to the engine specified in your CREATE TABLE query.

For example, the following creates the table my_table using the SharedMergeTree table engine.
```sql
CREATE TABLE my_table(
key UInt64,
value String
)
ENGINE = MergeTree
ORDER BY key
```
In ClickHouse Cloud, since `default_table_engine=MergeTree`, users do not even have to specify `ENGINE=MergeTree`. The following query is identical to the query above.
```sql
CREATE TABLE my_table(
key UInt64,
value String
)
ORDER BY key
```
If you use Replacing, Collapsing, Aggregating, Summing, VersionedCollapsing or Graphite MergeTree tables, they will automatically be converted to the corresponding SharedMergeTree-based table engine.
```sql
CREATE TABLE myFirstReplacingMT
(
`key` Int64,
`someCol` String,
`eventTime` DateTime
)
ENGINE = ReplacingMergeTree
ORDER BY key;
```
You can check which statement was used to create a table with SHOW CREATE TABLE.
``` sql
SHOW CREATE TABLE myFirstReplacingMT;
```
```sql
CREATE TABLE default.myFirstReplacingMT
( `key` Int64, `someCol` String, `eventTime` DateTime )
ENGINE = SharedReplacingMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')
ORDER BY key
SETTINGS index_granularity = 8192
```
## Settings

The behavior of some settings has changed significantly:

- `insert_quorum` -- all inserts into SharedMergeTree are quorum inserts (written to shared object storage), so this setting is not needed when using the SharedMergeTree table engine.
- `insert_quorum_parallel` -- all inserts into SharedMergeTree are quorum inserts (written to shared object storage).
- `select_sequential_consistency` -- does not require quorum inserts; it only triggers additional requests to clickhouse-keeper on SELECT queries.
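For illustration only, a sketch of how such a setting would be applied per query, reusing `my_table` from the examples above:

```sql
-- With SharedMergeTree this is unnecessary (inserts are already quorum inserts);
-- enabling it would only add extra requests to clickhouse-keeper.
SELECT count()
FROM my_table
SETTINGS select_sequential_consistency = 1;
```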

Binary file not shown (image added, 695 KiB).

Binary file not shown (image added, 700 KiB).


@@ -100,8 +100,8 @@ private:
struct Constraint
{
SettingConstraintWritability writability = SettingConstraintWritability::WRITABLE;
Field min_value;
Field max_value;
Field min_value{};
Field max_value{};
bool operator ==(const Constraint & other) const;
bool operator !=(const Constraint & other) const { return !(*this == other); }


@@ -559,6 +559,7 @@ FieldInfo ColumnObject::Subcolumn::getFieldInfo() const
.have_nulls = base_type->isNullable(),
.need_convert = false,
.num_dimensions = least_common_type.getNumberOfDimensions(),
.need_fold_dimension = false,
};
}


@@ -182,6 +182,7 @@ AsyncLoader::AsyncLoader(std::vector<PoolInitializer> pool_initializers, bool lo
init.max_threads,
/* max_free_threads = */ 0,
init.max_threads),
.ready_queue = {},
.max_threads = init.max_threads
});
}


@@ -69,11 +69,11 @@ struct FunctionDocumentation
using Related = std::string;
Description description; /// E.g. "Returns the position (in bytes, starting at 1) of a substring needle in a string haystack."
Syntax syntax; /// E.g. "position(haystack, needle)"
Arguments arguments; /// E.g. ["haystack — String in which the search is performed. String.", "needle — Substring to be searched. String."]
ReturnedValue returned_value; /// E.g. "Starting position in bytes and counting from 1, if the substring was found."
Examples examples; ///
Categories categories; /// E.g. {"String Search"}
Syntax syntax = {}; /// E.g. "position(haystack, needle)"
Arguments arguments {}; /// E.g. ["haystack — String in which the search is performed. String.", "needle — Substring to be searched. String."]
ReturnedValue returned_value {};/// E.g. "Starting position in bytes and counting from 1, if the substring was found."
Examples examples {}; ///
Categories categories {}; /// E.g. {"String Search"}
std::string argumentsAsString() const;
std::string examplesAsString() const;


@@ -34,8 +34,8 @@ public:
StorageID table_id = StorageID::createEmpty();
bool ignore_unknown = false;
bool expand_special_macros_only = false;
std::optional<String> shard;
std::optional<String> replica;
std::optional<String> shard = {};
std::optional<String> replica = {};
/// Information about macro expansion
size_t level = 0;


@@ -863,7 +863,9 @@ void TestKeeper::reconfig(
.callback = [callback](const Response & response)
{
callback(dynamic_cast<const ReconfigResponse &>(response));
}
},
.watch = nullptr,
.time = {}
});
}


@@ -49,7 +49,7 @@ struct AsyncLoaderTest
}
explicit AsyncLoaderTest(size_t max_threads = 1)
: AsyncLoaderTest({{.max_threads = max_threads}})
: AsyncLoaderTest({{.max_threads = max_threads, .priority = {}}})
{}
std::vector<AsyncLoader::PoolInitializer> getPoolInitializers(std::vector<Initializer> initializers)


@@ -480,6 +480,7 @@ void KeeperDispatcher::shutdown()
.session_id = session,
.time = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count(),
.request = std::move(request),
.digest = std::nullopt
};
close_requests.push_back(std::move(request_info));
@@ -576,6 +577,7 @@ void KeeperDispatcher::sessionCleanerTask()
.session_id = dead_session,
.time = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count(),
.request = std::move(request),
.digest = std::nullopt
};
if (!requests_queue->push(std::move(request_info)))
LOG_INFO(log, "Cannot push close request to queue while cleaning outdated sessions");


@@ -2127,7 +2127,7 @@ void KeeperStorage::preprocessRequest(
}
std::vector<Delta> new_deltas;
TransactionInfo transaction{.zxid = new_last_zxid};
TransactionInfo transaction{.zxid = new_last_zxid, .nodes_digest = {}};
uint64_t new_digest = getNodesDigest(false).value;
SCOPE_EXIT({
if (keeper_context->digestEnabled())


@@ -16,7 +16,6 @@
#include <base/IPv4andIPv6.h>
#include <base/DayNum.h>
namespace DB
{
@@ -285,6 +284,11 @@ decltype(auto) castToNearestFieldType(T && x)
return U(x);
}
template <typename T>
concept not_field_or_bool_or_stringlike
= (!std::is_same_v<std::decay_t<T>, Field> && !std::is_same_v<std::decay_t<T>, bool>
&& !std::is_same_v<NearestFieldType<std::decay_t<T>>, String>);
/** 32 is enough. Round number is used for alignment and for better arithmetic inside std::vector.
* NOTE: Actually, sizeof(std::string) is 32 when using libc++, so Field is 40 bytes.
*/
@@ -347,13 +351,6 @@ public:
|| which == Types::Decimal256;
}
/// Templates to avoid ambiguity.
template <typename T, typename Z = void *>
using enable_if_not_field_or_bool_or_stringlike_t = std::enable_if_t<
!std::is_same_v<std::decay_t<T>, Field> &&
!std::is_same_v<std::decay_t<T>, bool> &&
!std::is_same_v<NearestFieldType<std::decay_t<T>>, String>, Z>;
Field() : Field(Null{}) {}
/** Despite the presence of a template constructor, this constructor is still needed,
@@ -370,7 +367,8 @@
}
template <typename T>
Field(T && rhs, enable_if_not_field_or_bool_or_stringlike_t<T> = nullptr); /// NOLINT
requires not_field_or_bool_or_stringlike<T>
Field(T && rhs); /// NOLINT
Field(bool rhs) : Field(castToNearestFieldType(rhs)) /// NOLINT
{
@@ -425,7 +423,8 @@ public:
/// 1. float <--> int needs explicit cast
/// 2. customized types needs explicit cast
template <typename T>
enable_if_not_field_or_bool_or_stringlike_t<T, Field> & /// NOLINT
requires not_field_or_bool_or_stringlike<T>
Field & /// NOLINT
operator=(T && rhs);
Field & operator= (bool rhs)
@@ -896,14 +895,16 @@ auto & Field::safeGet()
template <typename T>
Field::Field(T && rhs, enable_if_not_field_or_bool_or_stringlike_t<T>)
requires not_field_or_bool_or_stringlike<T>
Field::Field(T && rhs)
{
auto && val = castToNearestFieldType(std::forward<T>(rhs));
createConcrete(std::forward<decltype(val)>(val));
}
template <typename T>
Field::enable_if_not_field_or_bool_or_stringlike_t<T, Field> & /// NOLINT
requires not_field_or_bool_or_stringlike<T>
Field & /// NOLINT
Field::operator=(T && rhs)
{
auto && val = castToNearestFieldType(std::forward<T>(rhs));
@@ -1005,7 +1006,6 @@ void writeFieldText(const Field & x, WriteBuffer & buf);
String toString(const Field & x);
std::string_view fieldTypeToString(Field::Types::Which type);
}
template <>


@@ -228,7 +228,7 @@ void RegExpTreeDictionary::initRegexNodes(Block & block)
else
{
Field field = parseStringToField(value, attr.type);
node->attributes[name_] = RegexTreeNode::AttributeValue{.field = std::move(field), .original_value = value};
node->attributes[name_] = RegexTreeNode::AttributeValue{.field = std::move(field), .pieces = {}, .original_value = value};
}
}
}


@@ -165,6 +165,7 @@ void FileSegmentRangeWriter::appendFilesystemCacheLog(const FileSegment & file_s
.file_segment_range = { file_segment_range.left, file_segment_right_bound },
.requested_range = {},
.cache_type = FilesystemCacheLogElement::CacheType::WRITE_THROUGH_CACHE,
.file_segment_key = {},
.file_segment_size = file_segment_range.size(),
.read_from_cache_attempted = false,
.read_buffer_id = {},


@@ -62,8 +62,16 @@ IOUringReader::IOUringReader(uint32_t entries_)
struct io_uring_params params =
{
.sq_entries = 0, // filled by the kernel, initializing to silence warning
.cq_entries = 0, // filled by the kernel, initializing to silence warning
.flags = 0,
.sq_thread_cpu = 0, // Unused (IORING_SETUP_SQ_AFF isn't set). Silences warning
.sq_thread_idle = 0, // Unused (IORING_SETUP_SQPOL isn't set). Silences warning
.features = 0, // filled by the kernel, initializing to silence warning
.wq_fd = 0, // Unused (IORING_SETUP_ATTACH_WQ isn't set). Silences warning.
.resv = {0, 0, 0}, // "The resv array must be initialized to zero."
.sq_off = {}, // filled by the kernel, initializing to silence warning
.cq_off = {}, // filled by the kernel, initializing to silence warning
};
int ret = io_uring_queue_init_params(entries_, &ring, &params);


@@ -3,12 +3,10 @@
#include <Functions/FunctionHelpers.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <Columns/ColumnConst.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnTuple.h>
#include <Columns/ColumnSet.h>
#include <Columns/ColumnLowCardinality.h>
#include <Interpreters/Set.h>
@@ -70,12 +68,6 @@ public:
return 2;
}
/// Do not use default implementation for LowCardinality.
/// For now, Set may be const or non const column, depending on how it was created.
/// But we will return UInt8 for any case.
/// TODO: we could use special implementation later.
bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments*/) const override
{
return std::make_shared<DataTypeUInt8>();
@@ -143,8 +135,6 @@ public:
else
columns_of_key_columns.emplace_back(left_arg);
/// Replace single LowCardinality column to it's dictionary if possible.
ColumnPtr lc_indexes = nullptr;
bool is_const = false;
if (columns_of_key_columns.size() == 1)
{
@@ -155,20 +145,10 @@ public:
col = &const_col->getDataColumn();
is_const = true;
}
if (const auto * lc = typeid_cast<const ColumnLowCardinality *>(col))
{
lc_indexes = lc->getIndexesPtr();
arg.column = lc->getDictionary().getNestedColumn();
arg.type = removeLowCardinality(arg.type);
}
}
auto res = set->execute(columns_of_key_columns, negative);
if (lc_indexes)
res = res->index(*lc_indexes, 0);
if (is_const)
res = ColumnUInt8::create(input_rows_count, res->getUInt(0));


@@ -64,7 +64,7 @@ public:
/// Optional. Useful when implementation needs to do ignore().
size_t offset = 0;
std::unique_ptr<Stopwatch> execution_watch;
std::unique_ptr<Stopwatch> execution_watch = {};
operator std::tuple<size_t &, size_t &>() { return {size, offset}; }
};


@@ -16,7 +16,7 @@ struct ObjectInfo
size_t size = 0;
time_t last_modification_time = 0;
std::map<String, String> metadata; /// Set only if getObjectInfo() is called with `with_metadata = true`.
std::map<String, String> metadata = {}; /// Set only if getObjectInfo() is called with `with_metadata = true`.
};
ObjectInfo getObjectInfo(


@@ -2191,8 +2191,8 @@ ActionsDAGPtr ActionsDAG::cloneActionsForFilterPushDown(
/// Replace predicate result to constant 1.
Node node;
node.type = ActionType::COLUMN;
node.result_name = std::move(predicate->result_name);
node.result_type = std::move(predicate->result_type);
node.result_name = predicate->result_name;
node.result_type = predicate->result_type;
node.column = node.result_type->createColumnConst(0, 1);
if (predicate->type != ActionType::INPUT)


@@ -251,6 +251,7 @@ AsynchronousInsertQueue::push(ASTPtr query, ContextPtr query_context)
return PushResult
{
.status = PushResult::TOO_MUCH_DATA,
.future = {},
.insert_data_buffer = std::make_unique<ConcatReadBuffer>(std::move(buffers)),
};
}
@@ -318,6 +319,7 @@ AsynchronousInsertQueue::push(ASTPtr query, ContextPtr query_context)
{
.status = PushResult::OK,
.future = std::move(insert_future),
.insert_data_buffer = nullptr,
};
}


@@ -895,6 +895,12 @@ FileSegments LockedKey::sync()
FileSegments broken;
for (auto it = key_metadata->begin(); it != key_metadata->end();)
{
if (it->second->evicting() || !it->second->releasable())
{
++it;
continue;
}
auto file_segment = it->second->file_segment;
if (file_segment->isDetached())
{


@@ -318,20 +318,11 @@ void executeQueryWithParallelReplicas(
}
auto coordinator = std::make_shared<ParallelReplicasReadingCoordinator>(all_replicas_count);
/// This is a little bit weird, but we construct an "empty" coordinator without
/// any specified reading/coordination method (like Default, InOrder, InReverseOrder)
/// Because we will understand it later during QueryPlan optimization
/// So we place a reference to the coordinator to some common plane like QueryInfo
/// to then tell it about the reading method we chose.
query_info.coordinator = coordinator;
auto external_tables = new_context->getExternalTables();
auto read_from_remote = std::make_unique<ReadFromParallelRemoteReplicasStep>(
query_ast,
new_cluster,
coordinator,
std::move(coordinator),
stream_factory.header,
stream_factory.processed_stage,
main_table,


@@ -30,12 +30,12 @@ struct FilesystemCacheLogElement
std::pair<size_t, size_t> file_segment_range{};
std::pair<size_t, size_t> requested_range{};
CacheType cache_type{};
std::string file_segment_key;
size_t file_segment_offset;
size_t file_segment_size;
std::string file_segment_key{};
size_t file_segment_offset = 0;
size_t file_segment_size = 0;
bool read_from_cache_attempted;
String read_buffer_id;
std::shared_ptr<ProfileEvents::Counters::Snapshot> profile_counters;
String read_buffer_id{};
std::shared_ptr<ProfileEvents::Counters::Snapshot> profile_counters = nullptr;
static std::string name() { return "FilesystemCacheLog"; }


@@ -23,11 +23,9 @@ public:
{
const char * assert_no_aggregates = nullptr;
const char * assert_no_windows = nullptr;
// Explicit empty initializers are needed to make designated initializers
// work on GCC 10.
std::unordered_set<String> uniq_names {};
ASTs aggregates;
ASTs window_functions;
ASTs aggregates{};
ASTs window_functions{};
};
static bool needChildVisit(const ASTPtr & node, const ASTPtr & child)


@@ -20,9 +20,9 @@ struct QueryStatusInfo;
struct QueryResultDetails
{
String query_id;
std::optional<String> content_type;
std::optional<String> format;
std::optional<String> timezone;
std::optional<String> content_type = {};
std::optional<String> format = {};
std::optional<String> timezone = {};
};
using SetResultDetailsFunc = std::function<void(const QueryResultDetails &)>;


@@ -605,7 +605,7 @@ void writeColumnImpl(
if (use_dictionary)
{
dict_encoded_pages.push_back({.header = std::move(header)});
dict_encoded_pages.push_back({.header = std::move(header), .data = {}});
std::swap(dict_encoded_pages.back().data, compressed);
}
else


@@ -596,7 +596,13 @@ void ParquetBlockInputFormat::decodeOneChunk(size_t row_group_batch_idx, std::un
auto tmp_table = arrow::Table::FromRecordBatches({*batch});
size_t approx_chunk_original_size = static_cast<size_t>(std::ceil(static_cast<double>(row_group_batch.total_bytes_compressed) / row_group_batch.total_rows * (*tmp_table)->num_rows()));
PendingChunk res = {.chunk_idx = row_group_batch.next_chunk_idx, .row_group_batch_idx = row_group_batch_idx, .approx_original_chunk_size = approx_chunk_original_size};
PendingChunk res = {
.chunk = {},
.block_missing_values = {},
.chunk_idx = row_group_batch.next_chunk_idx,
.row_group_batch_idx = row_group_batch_idx,
.approx_original_chunk_size = approx_chunk_original_size
};
/// If defaults_for_omitted_fields is true, calculate the default values from default expression for omitted fields.
/// Otherwise fill the missing columns with zero values of its type.


@@ -201,5 +201,9 @@ void CreateSetAndFilterOnTheFlyStep::updateOutputStream()
output_stream = createOutputStream(input_streams.front(), input_streams.front().header, getDataStreamTraits());
}
bool CreateSetAndFilterOnTheFlyStep::isColumnPartOfSetKey(const String & column_name) const
{
return std::find(column_names.begin(), column_names.end(), column_name) != column_names.end();
}
}


@@ -35,6 +35,8 @@ public:
SetWithStatePtr getSet() const { return own_set; }
bool isColumnPartOfSetKey(const String & column_name) const;
/// Set for another stream.
void setFiltering(SetWithStatePtr filtering_set_) { filtering_set = filtering_set_; }


@@ -428,8 +428,15 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes
return updated_steps;
}
if (auto updated_steps = simplePushDownOverStep<CreateSetAndFilterOnTheFlyStep>(parent_node, nodes, child))
return updated_steps;
if (const auto * join_filter_set_step = typeid_cast<CreateSetAndFilterOnTheFlyStep *>(child.get()))
{
const auto & filter_column_name = assert_cast<const FilterStep *>(parent_node->step.get())->getFilterColumnName();
bool can_remove_filter = !join_filter_set_step->isColumnPartOfSetKey(filter_column_name);
Names allowed_inputs = child->getOutputStream().header.getNames();
if (auto updated_steps = tryAddNewFilterStep(parent_node, nodes, allowed_inputs, can_remove_filter))
return updated_steps;
}
if (auto * union_step = typeid_cast<UnionStep *>(child.get()))
{


@@ -78,10 +78,10 @@ public:
struct IndexStat
{
IndexType type;
std::string name;
std::string description;
std::string condition;
std::vector<std::string> used_keys;
std::string name = {};
std::string description = {};
std::string condition = {};
std::vector<std::string> used_keys = {};
size_t num_parts_after;
size_t num_granules_after;
};


@@ -46,9 +46,9 @@ public:
/// decide whether to deny or to accept that request.
struct Extension
{
std::shared_ptr<TaskIterator> task_iterator;
std::shared_ptr<ParallelReplicasReadingCoordinator> parallel_reading_coordinator;
std::optional<IConnections::ReplicaInfo> replica_info;
std::shared_ptr<TaskIterator> task_iterator = nullptr;
std::shared_ptr<ParallelReplicasReadingCoordinator> parallel_reading_coordinator = nullptr;
std::optional<IConnections::ReplicaInfo> replica_info = {};
};
/// Takes already set connection.


@@ -130,7 +130,7 @@ std::optional<ExternalDataSourceInfo> getExternalDataSourceConfiguration(
"Named collection of connection parameters is missing some "
"of the parameters and dictionary parameters are not added");
}
return ExternalDataSourceInfo{ .configuration = configuration, .specific_args = {}, .settings_changes = config_settings };
return ExternalDataSourceInfo{.configuration = configuration, .settings_changes = config_settings};
}
return std::nullopt;
}


@@ -39,7 +39,6 @@ using StorageSpecificArgs = std::vector<std::pair<String, ASTPtr>>;
struct ExternalDataSourceInfo
{
ExternalDataSourceConfiguration configuration;
StorageSpecificArgs specific_args;
SettingsChanges settings_changes;
};
@@ -85,7 +84,6 @@ struct URLBasedDataSourceConfig
struct URLBasedDataSourceConfig
{
URLBasedDataSourceConfiguration configuration;
StorageSpecificArgs specific_args;
};
std::optional<URLBasedDataSourceConfig> getURLBasedDataSourceConfiguration(


@@ -232,6 +232,7 @@ PartitionCommandsResultInfo Unfreezer::unfreezePartitionsFromTableDirectory(Merg
bool keep_shared = removeFreezedPart(disk, path, partition_directory, local_context, zookeeper);
result.push_back(PartitionCommandResultInfo{
.command_type = "UNFREEZE PART",
.partition_id = partition_id,
.part_name = partition_directory,
.backup_path = disk->getPath() + table_directory.generic_string(),
@@ -239,11 +240,11 @@
.backup_name = backup_name,
});
LOG_DEBUG(log, "Unfreezed part by path {}, keep shared data: {}", disk->getPath() + path, keep_shared);
LOG_DEBUG(log, "Unfrozen part by path {}, keep shared data: {}", disk->getPath() + path, keep_shared);
}
}
LOG_DEBUG(log, "Unfreezed {} parts", result.size());
LOG_DEBUG(log, "Unfrozen {} parts", result.size());
return result;
}


@@ -242,7 +242,7 @@ public:
MergeTreeTransactionPtr txn = NO_TRANSACTION_PTR;
HardlinkedFiles * hardlinked_files = nullptr;
bool copy_instead_of_hardlink = false;
NameSet files_to_copy_instead_of_hardlinks;
NameSet files_to_copy_instead_of_hardlinks = {};
bool keep_metadata_version = false;
bool make_source_readonly = false;
DiskTransactionPtr external_transaction = nullptr;


@@ -7720,6 +7720,7 @@ PartitionCommandsResultInfo MergeTreeData::freezePartitionsByMatcher(
part->is_frozen.store(true, std::memory_order_relaxed);
result.push_back(PartitionCommandResultInfo{
.command_type = "FREEZE PART",
.partition_id = part->info.partition_id,
.part_name = part->name,
.backup_path = new_storage->getFullRootPath(),
@@ -7729,7 +7730,7 @@
++parts_processed;
}
LOG_DEBUG(log, "Freezed {} parts", parts_processed);
LOG_DEBUG(log, "Froze {} parts", parts_processed);
return result;
}


@@ -13,20 +13,20 @@ namespace DB
struct MergeTreeMutationStatus
{
String id;
String command;
String id = "";
String command = "";
time_t create_time = 0;
std::map<String, Int64> block_numbers;
std::map<String, Int64> block_numbers{};
/// Parts that should be mutated/merged or otherwise moved to Obsolete state for this mutation to complete.
Names parts_to_do_names;
Names parts_to_do_names = {};
/// If the mutation is done. Note that in case of ReplicatedMergeTree parts_to_do == 0 doesn't imply is_done == true.
bool is_done = false;
String latest_failed_part;
String latest_failed_part = "";
time_t latest_fail_time = 0;
String latest_fail_reason;
String latest_fail_reason = "";
/// FIXME: currently unused, but would be much better to report killed mutations with this flag.
bool is_killed = false;


@@ -21,7 +21,7 @@ class ReadBuffer;
/// to values from set of columns which satisfy predicate.
struct MutationCommand
{
ASTPtr ast; /// The AST of the whole command
ASTPtr ast = {}; /// The AST of the whole command
enum Type
{
@@ -43,27 +43,27 @@ struct MutationCommand
Type type = EMPTY;
/// WHERE part of mutation
ASTPtr predicate;
ASTPtr predicate = {};
/// Columns with corresponding actions
std::unordered_map<String, ASTPtr> column_to_update_expression;
std::unordered_map<String, ASTPtr> column_to_update_expression = {};
/// For MATERIALIZE INDEX and PROJECTION
String index_name;
String projection_name;
String index_name = {};
String projection_name = {};
/// For MATERIALIZE INDEX, UPDATE and DELETE.
ASTPtr partition;
ASTPtr partition = {};
/// For reads, drops and etc.
String column_name;
DataTypePtr data_type; /// Maybe empty if we just want to drop column
String column_name = {};
DataTypePtr data_type = {}; /// Maybe empty if we just want to drop column
/// We need just clear column, not drop from metadata.
bool clear = false;
/// Column rename_to
String rename_to;
String rename_to = {};
/// If parse_alter_commands, than consider more Alter commands as mutation commands
static std::optional<MutationCommand> parse(ASTAlterCommand * command, bool parse_alter_commands = false);


@@ -80,7 +80,7 @@ struct PartitionCommand
using PartitionCommands = std::vector<PartitionCommand>;
/// Result of exectuin of a single partition commands. Partition commands quite
/// Result of executing of a single partition commands. Partition commands quite
/// different, so some fields will be empty for some commands. Currently used in
/// ATTACH and FREEZE commands.
struct PartitionCommandResultInfo
@@ -92,14 +92,14 @@
/// Part name, always filled
String part_name;
/// Part name in /detached directory, filled in ATTACH
String old_part_name;
String old_part_name = {};
/// Absolute path to backup directory, filled in FREEZE
String backup_path;
String backup_path = {};
/// Absolute path part backup, filled in FREEZE
String part_backup_path;
String part_backup_path = {};
/// Name of the backup (specified by user or increment value), filled in
/// FREEZE
String backup_name;
String backup_name = {};
};
using PartitionCommandsResultInfo = std::vector<PartitionCommandResultInfo>;


@@ -10,7 +10,6 @@
#include <Planner/PlannerContext.h>
#include <QueryPipeline/StreamLocalLimits.h>
#include <Storages/ProjectionsDescription.h>
#include <Storages/MergeTree/ParallelReplicasReadingCoordinator.h>
#include <memory>
@@ -211,8 +210,6 @@ struct SelectQueryInfo
/// should we use custom key with the cluster
bool use_custom_key = false;
mutable ParallelReplicasReadingCoordinatorPtr coordinator;
TreeRewriterResultPtr syntax_analyzer_result;
/// This is an additional filer applied to current table.


@@ -1989,6 +1989,7 @@ PartitionCommandsResultInfo StorageMergeTree::attachPartition(
renamed_parts.old_and_new_names[i].old_name.clear();
results.push_back(PartitionCommandResultInfo{
.command_type = "ATTACH_PART",
.partition_id = loaded_parts[i]->info.partition_id,
.part_name = loaded_parts[i]->name,
.old_part_name = old_name,


@@ -6154,6 +6154,7 @@ PartitionCommandsResultInfo StorageReplicatedMergeTree::attachPartition(
LOG_DEBUG(log, "Attached part {} as {}", old_name, loaded_parts[i]->name);
results.push_back(PartitionCommandResultInfo{
.command_type = "ATTACH PART",
.partition_id = loaded_parts[i]->info.partition_id,
.part_name = loaded_parts[i]->name,
.old_part_name = old_name,


@@ -166,4 +166,4 @@ if (TARGET ch_contrib::libarchive)
set(USE_LIBARCHIVE 1)
endif()
set(SOURCE_DIR ${CMAKE_SOURCE_DIR})
set(SOURCE_DIR ${PROJECT_SOURCE_DIR})


@@ -0,0 +1,8 @@
1
1
1
1 1
1
1
1
1 1


@@ -0,0 +1,18 @@
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (key UInt8) ENGINE = Memory;
INSERT INTO t1 VALUES (1),(2);
SET join_algorithm = 'full_sorting_merge';
SELECT key FROM ( SELECT key FROM t1 ) AS t1 JOIN ( SELECT key FROM t1 ) AS t2 ON t1.key = t2.key WHERE key;
SELECT key FROM ( SELECT 1 AS key ) AS t1 JOIN ( SELECT 1 AS key ) AS t2 ON t1.key = t2.key WHERE key;
SELECT * FROM ( SELECT 1 AS key GROUP BY NULL ) AS t1 INNER JOIN (SELECT 1 AS key) AS t2 ON t1.key = t2.key WHERE t1.key ORDER BY key;
SET max_rows_in_set_to_optimize_join = 0;
SELECT key FROM ( SELECT key FROM t1 ) AS t1 JOIN ( SELECT key FROM t1 ) AS t2 ON t1.key = t2.key WHERE key;
SELECT key FROM ( SELECT 1 AS key ) AS t1 JOIN ( SELECT 1 AS key ) AS t2 ON t1.key = t2.key WHERE key;
SELECT * FROM ( SELECT 1 AS key GROUP BY NULL ) AS t1 INNER JOIN (SELECT 1 AS key) AS t2 ON t1.key = t2.key WHERE t1.key ORDER BY key;
DROP TABLE IF EXISTS t1;


@@ -0,0 +1,4 @@
pure nullable result:
qwe
wrapping in LC:
qwe


@@ -0,0 +1,17 @@
-- https://github.com/ClickHouse/ClickHouse/issues/50570
DROP TABLE IF EXISTS tnul SYNC;
DROP TABLE IF EXISTS tlc SYNC;
CREATE TABLE tnul (lc Nullable(String)) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO tnul VALUES (NULL), ('qwe');
SELECT 'pure nullable result:';
SELECT lc FROM tnul WHERE notIn(lc, ('rty', 'uiop'));
DROP TABLE tnul SYNC;
CREATE TABLE tlc (lc LowCardinality(Nullable(String))) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO tlc VALUES (NULL), ('qwe');
SELECT 'wrapping in LC:';
SELECT lc FROM tlc WHERE notIn(lc, ('rty', 'uiop'));
DROP TABLE tlc SYNC;


@@ -1,3 +1,4 @@
v23.7.5.30-stable 2023-08-28
v23.7.4.5-stable 2023-08-08
v23.7.3.14-stable 2023-08-05
v23.7.2.25-stable 2023-08-03
