Merge branch 'master' into vdimir/residual_join_conditions

vdimir 2024-10-07 15:10:07 +00:00
commit 20f88f424f
No known key found for this signature in database
GPG Key ID: 6EE4CE2BEDC51862
1016 changed files with 30320 additions and 18675 deletions

View File

@ -16,3 +16,6 @@
# Applied Black formatter for Python code
e6f5a3f98b21ba99cf274a9833797889e020a2b3
# Enabling clang-tidy readability-else-no-return rule
67c1e89d90ef576e62f8b1c68269742a3c6f9b1e

View File

@ -4,7 +4,6 @@ self-hosted-runner:
- func-tester
- func-tester-aarch64
- fuzzer-unit-tester
- stress-tester
- style-checker
- style-checker-aarch64
- release-maker

View File

@ -229,18 +229,26 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsRelease:
needs: [RunConfig, BuilderDebRelease]
IntegrationTestsAsanOldAnalyzer:
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
test_name: Integration tests (asan, old analyzer)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsTsan:
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (tsan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FinishCheck:
if: ${{ !cancelled() }}
@ -250,7 +258,8 @@ jobs:
- FunctionalStatelessTestAsan
- FunctionalStatefulTestDebug
- StressTestTsan
- IntegrationTestsRelease
- IntegrationTestsTsan
- IntegrationTestsAsanOldAnalyzer
- CompatibilityCheckX86
- CompatibilityCheckAarch64
runs-on: [self-hosted, style-checker]

View File

@ -374,7 +374,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (asan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
StressTestTsan:
needs: [RunConfig, BuilderDebTsan]
@ -382,7 +382,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
StressTestMsan:
needs: [RunConfig, BuilderDebMsan]
@ -390,7 +390,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (msan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
StressTestUBsan:
needs: [RunConfig, BuilderDebUBsan]
@ -398,7 +398,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (ubsan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
StressTestDebug:
needs: [RunConfig, BuilderDebDebug]
@ -406,7 +406,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (debug)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
@ -417,7 +417,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsAnalyzerAsan:
needs: [RunConfig, BuilderDebAsan]
@ -425,7 +425,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan, old analyzer)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsTsan:
needs: [RunConfig, BuilderDebTsan]
@ -433,7 +433,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (tsan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsRelease:
needs: [RunConfig, BuilderDebRelease]
@ -441,7 +441,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FinishCheck:
if: ${{ !cancelled() }}

View File

@ -339,7 +339,6 @@ set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
if (OS_DARWIN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-U,_inside_main")
endif()

View File

@ -110,8 +110,7 @@ struct DecomposedFloat
{
if (!isNegative())
return rhs > 0 ? -1 : 1;
else
return rhs >= 0 ? -1 : 1;
return rhs >= 0 ? -1 : 1;
}
/// The case of the most negative integer
@ -128,8 +127,7 @@ struct DecomposedFloat
if (mantissa() == 0)
return 0;
else
return -1;
return -1;
}
}
@ -169,9 +167,8 @@ struct DecomposedFloat
/// Float has no fractional part means that the numbers are equal.
if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0)
return 0;
else
/// Float has fractional part means its abs value is larger.
return isNegative() ? -1 : 1;
/// Float has fractional part means its abs value is larger.
return isNegative() ? -1 : 1;
}

View File

@ -205,8 +205,7 @@ JSON::ElementType JSON::getType() const
Pos after_string = skipString();
if (after_string < ptr_end && *after_string == ':')
return TYPE_NAME_VALUE_PAIR;
else
return TYPE_STRING;
return TYPE_STRING;
}
default:
throw JSONException(std::string("JSON: unexpected char ") + *ptr_begin + ", expected one of '{[tfn-0123456789\"'");
@ -474,8 +473,7 @@ JSON::Pos JSON::searchField(const char * data, size_t size) const
if (it == end())
return nullptr;
else
return it->data();
return it->data();
}
@ -487,7 +485,7 @@ bool JSON::hasEscapes() const
if (*pos == '"')
return false;
else if (*pos == '\\')
if (*pos == '\\')
return true;
throw JSONException("JSON: unexpected end of data.");
}
@ -503,7 +501,7 @@ bool JSON::hasSpecialChars() const
if (*pos == '"')
return false;
else if (pos < ptr_end)
if (pos < ptr_end)
return true;
throw JSONException("JSON: unexpected end of data.");
}
@ -682,10 +680,9 @@ double JSON::toDouble() const
if (type == TYPE_NUMBER)
return getDouble();
else if (type == TYPE_STRING)
if (type == TYPE_STRING)
return JSON(ptr_begin + 1, ptr_end, level + 1).getDouble();
else
throw JSONException("JSON: cannot convert value to double.");
throw JSONException("JSON: cannot convert value to double.");
}
Int64 JSON::toInt() const
@ -694,10 +691,9 @@ Int64 JSON::toInt() const
if (type == TYPE_NUMBER)
return getInt();
else if (type == TYPE_STRING)
if (type == TYPE_STRING)
return JSON(ptr_begin + 1, ptr_end, level + 1).getInt();
else
throw JSONException("JSON: cannot convert value to signed integer.");
throw JSONException("JSON: cannot convert value to signed integer.");
}
UInt64 JSON::toUInt() const
@ -706,10 +702,9 @@ UInt64 JSON::toUInt() const
if (type == TYPE_NUMBER)
return getUInt();
else if (type == TYPE_STRING)
if (type == TYPE_STRING)
return JSON(ptr_begin + 1, ptr_end, level + 1).getUInt();
else
throw JSONException("JSON: cannot convert value to unsigned integer.");
throw JSONException("JSON: cannot convert value to unsigned integer.");
}
std::string JSON::toString() const
@ -718,11 +713,9 @@ std::string JSON::toString() const
if (type == TYPE_STRING)
return getString();
else
{
Pos pos = skipElement();
return std::string(ptr_begin, pos - ptr_begin);
}
Pos pos = skipElement();
return std::string(ptr_begin, pos - ptr_begin);
}

View File

@ -203,9 +203,7 @@ T JSON::getWithDefault(const std::string & key, const T & default_) const
if (key_json.isType<T>())
return key_json.get<T>();
else
return default_;
}
else
return default_;
}
return default_;
}

View File

@ -151,19 +151,19 @@ inline bool memequalWide(const char * p1, const char * p2, size_t size)
return unalignedLoad<uint64_t>(p1) == unalignedLoad<uint64_t>(p2)
&& unalignedLoad<uint64_t>(p1 + size - 8) == unalignedLoad<uint64_t>(p2 + size - 8);
}
else if (size >= 4)
if (size >= 4)
{
/// Chunks of 4..7 bytes.
return unalignedLoad<uint32_t>(p1) == unalignedLoad<uint32_t>(p2)
&& unalignedLoad<uint32_t>(p1 + size - 4) == unalignedLoad<uint32_t>(p2 + size - 4);
}
else if (size >= 2)
if (size >= 2)
{
/// Chunks of 2..3 bytes.
return unalignedLoad<uint16_t>(p1) == unalignedLoad<uint16_t>(p2)
&& unalignedLoad<uint16_t>(p1 + size - 2) == unalignedLoad<uint16_t>(p2 + size - 2);
}
else if (size >= 1)
if (size >= 1)
{
/// A single byte.
return *p1 == *p2;

View File

@ -53,10 +53,9 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv,
key = arg.substr(key_start);
continue;
}
else
{
key = "";
}
key = "";
if (key_start == std::string::npos)
continue;

View File

@ -330,9 +330,8 @@ inline const char * find_first_symbols_dispatch(const char * begin, const char *
#if defined(__SSE4_2__)
if (sizeof...(symbols) >= 5)
return find_first_symbols_sse42<positive, return_mode, sizeof...(symbols), symbols...>(begin, end);
else
#endif
return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end);
return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end);
}
template <bool positive, ReturnMode return_mode>
@ -341,9 +340,8 @@ inline const char * find_first_symbols_dispatch(const std::string_view haystack,
#if defined(__SSE4_2__)
if (symbols.str.size() >= 5)
return find_first_symbols_sse42<positive, return_mode>(haystack.begin(), haystack.end(), symbols);
else
#endif
return find_first_symbols_sse2<positive, return_mode>(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size());
return find_first_symbols_sse2<positive, return_mode>(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size());
}
}

View File

@ -33,8 +33,7 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
uint64_t value;
if (setting_file >> value)
return {value};
else
return {}; /// e.g. the cgroups default "max"
return {}; /// e.g. the cgroups default "max"
}
current_cgroup = current_cgroup.parent_path();
}

View File

@ -1420,8 +1420,6 @@ config
configs
conformant
congruential
conjuction
conjuctive
connectionId
const
contrib
@ -1698,7 +1696,6 @@ formatReadableSize
formatReadableTimeDelta
formatRow
formatRowNoNewline
formated
formatschema
formatter
formatters
@ -3048,3 +3045,89 @@ znode
znodes
zookeeperSessionUptime
zstd
ArrowCompression
CapnProtoEnumComparingMode
DateTimeInputFormat
DateTimeOutputFormat
DateTimeOverflowBehavior
deserialize
dotall
EachRow
EscapingRule
IdentifierQuotingRule
IdentifierQuotingStyle
IntervalOutputFormat
MsgPackUUIDRepresentation
ORCCompression
ParquetCompression
ParquetVersion
SchemaInferenceMode
alloc
CacheWarmer
conjuctive
cors
CORS
countIf
DefaultTableEngine
dereference
DistributedDDLOutputMode
DistributedProductMode
formatdatetime
inequal
INVOKER
ITION
JoinAlgorithm
JoinStrictness
keepalive
ListObject
ListObjects
LoadBalancing
LocalFSReadMethod
LogQueriesType
LogsLevel
MaxThreads
MemorySample
multibuffer
multiif
multiread
multithreading
MySQLDataTypesSupport
nonconst
NonZeroUInt
nullptr
OverflowMode
OverflowModeGroupBy
ParallelReplicasMode
param
parsedatetime
perf
PerfEventInfo
perkey
prefetched
prefetches
prefetching
preimage
QueryCacheNondeterministicFunctionHandling
QueryCacheSystemTableHandling
remerge
replcase
rerange
RetryStrategy
rowlist
SetOperationMode
ShortCircuitFunctionEvaluation
SQLSecurityType
sumIf
TCPHandler
throwif
TotalsMode
TransactionsWaitCSNMode
undelete
unmerged
DataPacket
DDLs
DistributedCacheLogMode
DistributedCachePoolBehaviourOnLimit
SharedJoin
ShareSet
unacked

View File

@ -48,6 +48,8 @@ if (NOT LINKER_NAME)
find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "ld.lld")
elseif (OS_DARWIN)
find_program (LLD_PATH NAMES "ld")
# Duplicate libraries passed to the linker is not a problem.
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-no_warn_duplicate_libraries")
endif ()
if (LLD_PATH)
if (OS_LINUX OR OS_DARWIN)

contrib/grpc vendored

@ -1 +1 @@
Subproject commit 7bc3abe952aba1dc7bce7f2f790dc781cb51a41e
Subproject commit 62e871c36fa93c0af939bd31762845265214fe3d

contrib/libdivide vendored

@ -1 +1 @@
Subproject commit 3bd34388573681ce563348cdf04fe15d24770d04
Subproject commit 01526031eb79375dc85e0212c966d2c514a01234

contrib/simdjson vendored

@ -1 +1 @@
Subproject commit 6060be2fdf62edf4a8f51a8b0883d57d09397b30
Subproject commit e341c8b43861b43de29c48ab65f292d997096953

View File

@ -0,0 +1 @@
[rabbitmq_consistent_hash_exchange].

View File

@ -13,3 +13,5 @@ ssl_options.fail_if_no_peer_cert = false
ssl_options.cacertfile = /etc/rabbitmq/ca-cert.pem
ssl_options.certfile = /etc/rabbitmq/server-cert.pem
ssl_options.keyfile = /etc/rabbitmq/server-key.pem
vm_memory_high_watermark.absolute = 2GB

View File

@ -41,7 +41,7 @@ sidebar_label: 2022
* Backported in [#25364](https://github.com/ClickHouse/ClickHouse/issues/25364): On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#25387](https://github.com/ClickHouse/ClickHouse/issues/25387): Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#25455](https://github.com/ClickHouse/ClickHouse/issues/25455): Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#25406](https://github.com/ClickHouse/ClickHouse/issues/25406): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#25406](https://github.com/ClickHouse/ClickHouse/issues/25406): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#25505](https://github.com/ClickHouse/ClickHouse/issues/25505): Fix segfault when sharding_key is absent in task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### NO CL ENTRY

View File

@ -40,7 +40,7 @@ sidebar_label: 2022
* Backported in [#25362](https://github.com/ClickHouse/ClickHouse/issues/25362): On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#25386](https://github.com/ClickHouse/ClickHouse/issues/25386): Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#25456](https://github.com/ClickHouse/ClickHouse/issues/25456): Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#25408](https://github.com/ClickHouse/ClickHouse/issues/25408): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#25408](https://github.com/ClickHouse/ClickHouse/issues/25408): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#25504](https://github.com/ClickHouse/ClickHouse/issues/25504): Fix segfault when sharding_key is absent in task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### NO CL ENTRY

View File

@ -24,7 +24,7 @@ sidebar_label: 2022
* Backported in [#25363](https://github.com/ClickHouse/ClickHouse/issues/25363): On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#25388](https://github.com/ClickHouse/ClickHouse/issues/25388): Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#25448](https://github.com/ClickHouse/ClickHouse/issues/25448): Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#25407](https://github.com/ClickHouse/ClickHouse/issues/25407): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#25407](https://github.com/ClickHouse/ClickHouse/issues/25407): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
#### NOT FOR CHANGELOG / INSIGNIFICANT

View File

@ -133,7 +133,7 @@ sidebar_label: 2022
* On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Fix segfault when sharding_key is absent in task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix excessive underscore before the names of the preprocessed configuration files. [#25431](https://github.com/ClickHouse/ClickHouse/pull/25431) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix conversion of datetime with timezone for MySQL, PostgreSQL, ODBC. Closes [#5057](https://github.com/ClickHouse/ClickHouse/issues/5057). [#25528](https://github.com/ClickHouse/ClickHouse/pull/25528) ([Kseniia Sumarokova](https://github.com/kssenii)).

View File

@ -195,6 +195,9 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
- `--print-profile-events` Print `ProfileEvents` packets.
- `--profile-events-delay-ms` Delay between printing `ProfileEvents` packets (-1 - print only totals, 0 - print every single packet).
- `--jwt` If specified, enables authorization via JSON Web Token. Server JWT authorization is available only in ClickHouse Cloud.
- `--progress` Print progress of query execution. Possible values: 'tty|on|1|true|yes' - outputs to TTY in interactive mode; 'err' - outputs to STDERR in non-interactive mode; 'off|0|false|no' - disables the progress printing. Default: TTY in interactive mode, disabled in non-interactive.
- `--progress-table` Print a progress table with changing metrics during query execution. Possible values: 'tty|on|1|true|yes' - outputs to TTY in interactive mode; 'err' - outputs to STDERR in non-interactive mode; 'off|0|false|no' - disables the progress table. Default: TTY in interactive mode, disabled in non-interactive.
- `--enable-progress-table-toggle` Enable toggling of the progress table by pressing the control key (Space). Only applicable in interactive mode with the progress table printing enabled. Default: 'true'.
Instead of `--host`, `--port`, `--user` and `--password` options, ClickHouse client also supports connection strings (see next section).

View File

@ -1057,12 +1057,12 @@ Default value: throw
## deduplicate_merge_projection_mode
Whether to allow create projection for the table with non-classic MergeTree, that is not (Replicated, Shared) MergeTree. If allowed, what is the action when merge projections, either drop or rebuild. So classic MergeTree would ignore this setting.
Whether to allow create projection for the table with non-classic MergeTree, that is not (Replicated, Shared) MergeTree. Ignore option is purely for compatibility which might result in incorrect answer. Otherwise, if allowed, what is the action when merge projections, either drop or rebuild. So classic MergeTree would ignore this setting.
It also controls `OPTIMIZE DEDUPLICATE` as well, but has effect on all MergeTree family members. Similar to the option `lightweight_mutation_projection_mode`, it is also part level.
Possible values:
- throw, drop, rebuild
- ignore, throw, drop, rebuild
Default value: throw
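As a sketch of how this setting might be applied (the table and column names below are hypothetical), a non-classic engine such as `ReplacingMergeTree` could opt into rebuilding its projections on merge instead of throwing:

```sql
-- Hypothetical table with a projection on a deduplicating (non-classic) MergeTree engine.
-- With 'rebuild', projection parts are rebuilt when background merges deduplicate rows.
CREATE TABLE events
(
    id UInt64,
    val String,
    PROJECTION p_by_val (SELECT val, count() GROUP BY val)
)
ENGINE = ReplacingMergeTree
ORDER BY id
SETTINGS deduplicate_merge_projection_mode = 'rebuild';
```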

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -10,21 +10,21 @@ Columns:
- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the table is in.
- `view` ([String](../../sql-reference/data-types/string.md)) — Table name.
- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Table uuid (Atomic database).
- `status` ([String](../../sql-reference/data-types/string.md)) — Current state of the refresh.
- `last_refresh_result` ([String](../../sql-reference/data-types/string.md)) — Outcome of the latest refresh attempt.
- `last_refresh_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time of the last refresh attempt. `NULL` if no refresh attempts happened since server startup or table creation.
- `last_success_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time of the last successful refresh. `NULL` if no successful refreshes happened since server startup or table creation.
- `duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md)) — How long the last refresh attempt took.
- `next_refresh_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time at which the next refresh is scheduled to start.
- `remaining_dependencies` ([Array(String)](../../sql-reference/data-types/array.md)) — If the view has [refresh dependencies](../../sql-reference/statements/create/view.md#refresh-dependencies), this array contains the subset of those dependencies that are not satisfied for the current refresh yet. If `status = 'WaitingForDependencies'`, a refresh is ready to start as soon as these dependencies are fulfilled.
- `exception` ([String](../../sql-reference/data-types/string.md)) — if `last_refresh_result = 'Error'`, i.e. the last refresh attempt failed, this column contains the corresponding error message and stack trace.
- `retry` ([UInt64](../../sql-reference/data-types/int-uint.md)) — If nonzero, the current or next refresh is a retry (see `refresh_retries` refresh setting), and `retry` is the 1-based index of that retry.
- `refresh_count` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of successful refreshes since last server restart or table creation.
- `progress` ([Float64](../../sql-reference/data-types/float.md)) — Progress of the current refresh, between 0 and 1.
- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows read by the current refresh so far.
- `total_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Estimated total number of rows that need to be read by the current refresh.
(There are additional columns related to current refresh progress, but they are currently unreliable.)
- `last_success_time` ([Nullable](../../sql-reference/data-types/nullable.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Time when the latest successful refresh started. NULL if no successful refreshes happened since server startup or table creation.
- `last_success_duration_ms` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — How long the latest refresh took.
- `last_refresh_time` ([Nullable](../../sql-reference/data-types/nullable.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Time when the latest refresh attempt finished (if known) or started (if unknown or still running). NULL if no refresh attempts happened since server startup or table creation.
- `last_refresh_replica` ([String](../../sql-reference/data-types/string.md)) — If coordination is enabled, name of the replica that made the current (if running) or previous (if not running) refresh attempt.
- `next_refresh_time` ([Nullable](../../sql-reference/data-types/nullable.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Time at which the next refresh is scheduled to start, if status = Scheduled.
- `exception` ([String](../../sql-reference/data-types/string.md)) — Error message from previous attempt if it failed.
- `retry` ([UInt64](../../sql-reference/data-types/int-uint.md)) — How many failed attempts there were so far, for the current refresh.
- `progress` ([Float64](../../sql-reference/data-types/float.md)) — Progress of the current refresh, between 0 and 1. Not available if status is `RunningOnAnotherReplica`.
- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows read by the current refresh so far. Not available if status is `RunningOnAnotherReplica`.
- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of bytes read during the current refresh. Not available if status is `RunningOnAnotherReplica`.
- `total_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Estimated total number of rows that need to be read by the current refresh. Not available if status is `RunningOnAnotherReplica`.
- `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows written during the current refresh. Not available if status is `RunningOnAnotherReplica`.
- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of bytes written during the current refresh. Not available if status is `RunningOnAnotherReplica`.
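A hypothetical monitoring query over the columns described above, for example to find views whose last attempt failed, might look like this:

```sql
-- Hypothetical query: list refreshable views with a recorded error
SELECT database, view, status, last_success_time, next_refresh_time, exception
FROM system.view_refreshes
WHERE exception != ''
ORDER BY view;
```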
**Example**

View File

@ -226,9 +226,9 @@ Result:
## bitTestAll
Returns result of [logical conjuction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND operator) of all bits at given positions. Counting is right-to-left, starting at 0.
Returns result of [logical conjunction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND operator) of all bits at given positions. Counting is right-to-left, starting at 0.
The conjuction for bit-wise operations:
The conjunction for bit-wise operations:
0 AND 0 = 0
@ -251,7 +251,7 @@ SELECT bitTestAll(number, index1, index2, index3, index4, ...)
**Returned value**
- Result of the logical conjuction. [UInt8](../data-types/int-uint.md).
- Result of the logical conjunction. [UInt8](../data-types/int-uint.md).
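As a quick, hypothetical illustration of the right-to-left numbering described above: 43 is `0b101011`, so bits 0, 1, 3 and 5 are all set and the call returns 1.

```sql
SELECT bitTestAll(43, 0, 1, 3, 5) AS all_set; -- 43 = 0b101011; bits 0, 1, 3 and 5 are all 1, so the result is 1
```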
**Example**

View File

@ -135,15 +135,15 @@ To change SQL security for an existing view, use
ALTER TABLE MODIFY SQL SECURITY { DEFINER | INVOKER | NONE } [DEFINER = { user | CURRENT_USER }]
```
### Examples sql security
### Examples
```sql
CREATE test_view
CREATE VIEW test_view
DEFINER = alice SQL SECURITY DEFINER
AS SELECT ...
```
```sql
CREATE test_view
CREATE VIEW test_view
SQL SECURITY INVOKER
AS SELECT ...
```
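To change the mode of an existing view, the `ALTER TABLE ... MODIFY SQL SECURITY` form shown earlier applies; a hypothetical follow-up to the statements above could be:

```sql
-- Hypothetical: drop the SQL security mode of the previously created view
ALTER TABLE test_view MODIFY SQL SECURITY NONE;
```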
@ -184,14 +184,6 @@ Differences from regular non-refreshable materialized views:
The settings in the `REFRESH ... SETTINGS` part of the query are refresh settings (e.g. `refresh_retries`), distinct from regular settings (e.g. `max_threads`). Regular settings can be specified using `SETTINGS` at the end of the query.
:::
:::note
Refreshable materialized views are a work in progress. Setting `allow_experimental_refreshable_materialized_view = 1` is required for creating one. Current limitations:
* not compatible with Replicated database or table engines
* It is not supported in ClickHouse Cloud
* require [Atomic database engine](../../../engines/database-engines/atomic.md),
* no limit on number of concurrent refreshes.
:::
### Refresh Schedule
Example refresh schedules:
@ -203,6 +195,10 @@ REFRESH EVERY 2 WEEK OFFSET 5 DAY 15 HOUR 10 MINUTE -- every other Saturday, at
REFRESH EVERY 30 MINUTE -- at 00:00, 00:30, 01:00, 01:30, etc
REFRESH AFTER 30 MINUTE -- 30 minutes after the previous refresh completes, no alignment with time of day
-- REFRESH AFTER 1 HOUR OFFSET 1 MINUTE -- syntax error, OFFSET is not allowed with AFTER
REFRESH EVERY 1 WEEK 2 DAYS -- every 9 days, not on any particular day of the week or month;
-- specifically, when day number (since 1969-12-29) is divisible by 9
REFRESH EVERY 5 MONTHS -- every 5 months, different months each year (as 12 is not divisible by 5);
-- specifically, when month number (since 1970-01) is divisible by 5
```
`RANDOMIZE FOR` randomly adjusts the time of each refresh, e.g.:
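For instance, a hypothetical schedule of the following shape spreads a daily refresh over a one-hour window (assuming `RANDOMIZE FOR` follows the optional `OFFSET` clause):

```sql
REFRESH EVERY 1 DAY OFFSET 2 HOUR RANDOMIZE FOR 1 HOUR -- each day, at a random time between 02:00 and 03:00
```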
@ -214,6 +210,16 @@ At most one refresh may be running at a time, for a given view. E.g. if a view w
Additionally, a refresh is started immediately after the materialized view is created, unless `EMPTY` is specified in the `CREATE` query. If `EMPTY` is specified, the first refresh happens according to schedule.
### In Replicated DB
If the refreshable materialized view is in a [Replicated database](../../../engines/database-engines/replicated.md), the replicas coordinate with each other such that only one replica performs the refresh at each scheduled time. [ReplicatedMergeTree](../../../engines/table-engines/mergetree-family/replication.md) table engine is required, so that all replicas see the data produced by the refresh.
In `APPEND` mode, coordination can be disabled using `SETTINGS all_replicas = 1`. This makes replicas do refreshes independently of each other. In this case ReplicatedMergeTree is not required.
In non-`APPEND` mode, only coordinated refreshing is supported. For uncoordinated, use `Atomic` database and `CREATE ... ON CLUSTER` query to create refreshable materialized views on all replicas.
The coordination is done through Keeper. The znode path is determined by [default_replica_path](../../../operations/server-configuration-parameters/settings.md#default_replica_path) server setting.
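A rough sketch of an `APPEND`-mode view with coordination disabled might look like the following; the view, table, and column names are hypothetical, and the exact clause order should be checked against the full `CREATE VIEW` reference:

```sql
-- Hypothetical APPEND-mode view with coordination disabled: every replica refreshes
-- independently, so ReplicatedMergeTree is not required for the target table.
CREATE MATERIALIZED VIEW hourly_hits
REFRESH EVERY 1 HOUR SETTINGS all_replicas = 1
APPEND
ENGINE = MergeTree ORDER BY hour
AS SELECT toStartOfHour(event_time) AS hour, count() AS hits
FROM hits
GROUP BY hour;
```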
### Dependencies {#refresh-dependencies}
`DEPENDS ON` synchronizes refreshes of different tables. By way of example, suppose there's a chain of two refreshable materialized views:
@ -277,6 +283,8 @@ The status of all refreshable materialized views is available in table [`system.
To manually stop, start, trigger, or cancel refreshes use [`SYSTEM STOP|START|REFRESH|CANCEL VIEW`](../system.md#refreshable-materialized-views).
To wait for a refresh to complete, use [`SYSTEM WAIT VIEW`](../system.md#refreshable-materialized-views). In particular, useful for waiting for initial refresh after creating a view.
:::note
Fun fact: the refresh query is allowed to read from the view that's being refreshed, seeing pre-refresh version of the data. This means you can implement Conway's game of life: https://pastila.nl/?00021a4b/d6156ff819c83d490ad2dcec05676865#O0LGWTO7maUQIA4AcGUtlA==
:::

View File

@ -233,15 +233,20 @@ Hierarchy of privileges:
- `addressToSymbol`
- `demangle`
- [SOURCES](#sources)
- `AZURE`
- `FILE`
- `URL`
- `REMOTE`
- `YSQL`
- `ODBC`
- `JDBC`
- `HDFS`
- `S3`
- `HIVE`
- `JDBC`
- `MONGO`
- `MYSQL`
- `ODBC`
- `POSTGRES`
- `REDIS`
- `REMOTE`
- `S3`
- `SQLITE`
- `URL`
- [dictGet](#dictget)
- [displaySecretsInShowAndSelect](#displaysecretsinshowandselect)
- [NAMED COLLECTION ADMIN](#named-collection-admin)
@ -510,15 +515,20 @@ Allows using [introspection](../../operations/optimizing-performance/sampling-qu
Allows using external data sources. Applies to [table engines](../../engines/table-engines/index.md) and [table functions](../../sql-reference/table-functions/index.md#table-functions).
- `SOURCES`. Level: `GROUP`
- `AZURE`. Level: `GLOBAL`
- `FILE`. Level: `GLOBAL`
- `URL`. Level: `GLOBAL`
- `REMOTE`. Level: `GLOBAL`
- `YSQL`. Level: `GLOBAL`
- `ODBC`. Level: `GLOBAL`
- `JDBC`. Level: `GLOBAL`
- `HDFS`. Level: `GLOBAL`
- `S3`. Level: `GLOBAL`
- `HIVE`. Level: `GLOBAL`
- `JDBC`. Level: `GLOBAL`
- `MONGO`. Level: `GLOBAL`
- `MYSQL`. Level: `GLOBAL`
- `ODBC`. Level: `GLOBAL`
- `POSTGRES`. Level: `GLOBAL`
- `REDIS`. Level: `GLOBAL`
- `REMOTE`. Level: `GLOBAL`
- `S3`. Level: `GLOBAL`
- `SQLITE`. Level: `GLOBAL`
- `URL`. Level: `GLOBAL`
The `SOURCES` privilege enables use of all the sources. Also you can grant a privilege for each source individually. To use sources, you need additional privileges.
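For example, hypothetical users could be granted either individual sources or the whole group; source privileges are global, so they are granted `ON *.*`:

```sql
-- Hypothetical grants: one user gets only the S3 and URL sources, another gets the whole SOURCES group
GRANT S3, URL ON *.* TO etl_user;
GRANT SOURCES ON *.* TO data_admin;
```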

View File

@ -565,3 +565,13 @@ If there's a refresh in progress for the given view, interrupt and cancel it. Ot
```sql
SYSTEM CANCEL VIEW [db.]name
```
### SYSTEM WAIT VIEW
Waits for the running refresh to complete. If no refresh is running, returns immediately. If the latest refresh attempt failed, reports an error.
Can be used right after creating a new refreshable materialized view (without EMPTY keyword) to wait for the initial refresh to complete.
```sql
SYSTEM WAIT VIEW [db.]name
```
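A hypothetical end-to-end use: create a view without `EMPTY` (so the initial refresh starts immediately) and then block until that refresh has finished. The names below are made up for illustration.

```sql
-- Hypothetical refreshable view; the initial refresh starts right after creation
CREATE MATERIALIZED VIEW db.daily_totals
REFRESH EVERY 1 DAY
ENGINE = MergeTree ORDER BY day
AS SELECT toDate(event_time) AS day, count() AS total
FROM db.events
GROUP BY day;

-- Block until the initial refresh has completed (or report its error)
SYSTEM WAIT VIEW db.daily_totals;
```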

View File

@ -220,7 +220,7 @@ SELECT bitTest(43, 2);
## bitTestAll {#bittestall}
返回给定位置所有位的 [logical conjuction](https://en.wikipedia.org/wiki/Logical_conjunction) 进行与操作的结果。位值从右到左数从0开始计数。
返回给定位置所有位的 [logical conjunction](https://en.wikipedia.org/wiki/Logical_conjunction) 进行与操作的结果。位值从右到左数从0开始计数。
与运算的结果:

View File

@ -445,15 +445,13 @@ private:
shutdown = true;
throw;
}
else
{
std::cerr << getCurrentExceptionMessage(print_stacktrace,
true /*check embedded stack trace*/) << std::endl;
size_t info_index = round_robin ? 0 : connection_index;
++comparison_info_per_interval[info_index]->errors;
++comparison_info_total[info_index]->errors;
}
std::cerr << getCurrentExceptionMessage(print_stacktrace,
true /*check embedded stack trace*/) << std::endl;
size_t info_index = round_robin ? 0 : connection_index;
++comparison_info_per_interval[info_index]->errors;
++comparison_info_total[info_index]->errors;
}
// Count failed queries toward executed, so that we'd reach
// max_iterations even if every run fails.

View File

@ -346,7 +346,9 @@ try
processConfig();
adjustSettings();
initTTYBuffer(toProgressOption(config().getString("progress", "default")));
initTTYBuffer(toProgressOption(config().getString("progress", "default")),
toProgressOption(config().getString("progress-table", "default")));
initKeystrokeInterceptor();
ASTAlterCommand::setFormatAlterCommandsWithParentheses(true);
{
@ -478,27 +480,23 @@ void Client::connect()
}
catch (const Exception & e)
{
/// This problem can't be fixed with reconnection so it is not attempted
if (e.code() == DB::ErrorCodes::AUTHENTICATION_FAILED)
{
/// This problem can't be fixed with reconnection so it is not attempted
throw;
}
else
{
if (attempted_address_index == hosts_and_ports.size() - 1)
throw;
if (is_interactive)
{
std::cerr << "Connection attempt to database at "
<< connection_parameters.host << ":" << connection_parameters.port
<< " resulted in failure"
<< std::endl
<< getExceptionMessage(e, false)
<< std::endl
<< "Attempting connection to the next provided address"
<< std::endl;
}
if (attempted_address_index == hosts_and_ports.size() - 1)
throw;
if (is_interactive)
{
std::cerr << "Connection attempt to database at "
<< connection_parameters.host << ":" << connection_parameters.port
<< " resulted in failure"
<< std::endl
<< getExceptionMessage(e, false)
<< std::endl
<< "Attempting connection to the next provided address"
<< std::endl;
}
}
}
@ -772,7 +770,7 @@ bool Client::processWithFuzzing(const String & full_query)
else
this_query_runs = 1;
}
else if (const auto * insert = orig_ast->as<ASTInsertQuery>())
else if (const auto * /*insert*/ _ = orig_ast->as<ASTInsertQuery>())
{
this_query_runs = 1;
queries_for_fuzzed_tables = fuzzer.getInsertQueriesForFuzzedTables(full_query);

View File

@ -46,7 +46,7 @@ public:
path_from,
disk_from.getDisk()->getName());
}
else if (disk_from.getDisk()->isFile(path_from))
if (disk_from.getDisk()->isFile(path_from))
{
auto target_location = getTargetLocation(path_from, disk_to, path_to);
if (!disk_to.getDisk()->exists(target_location) || disk_to.getDisk()->isFile(target_location))
@ -77,7 +77,7 @@ public:
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot overwrite non-directory {} with directory {}", path_to, target_location);
}
else if (!disk_to.getDisk()->exists(target_location))
if (!disk_to.getDisk()->exists(target_location))
{
disk_to.getDisk()->createDirectory(target_location);
}

View File

@ -72,13 +72,9 @@ private:
auto path = [&]() -> String
{
if (relative_path.ends_with("/"))
{
return relative_path + file_name;
}
else
{
return relative_path + "/" + file_name;
}
return relative_path + "/" + file_name;
}();
if (disk.isDirectory(path))
{

View File

@ -53,10 +53,8 @@ public:
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot move '{}' to '{}': Directory not empty", path_from, target_location);
}
else
{
disk.getDisk()->moveDirectory(path_from, target_location);
}
disk.getDisk()->moveDirectory(path_from, target_location);
}
}
else if (!disk.getDisk()->exists(path_from))

View File

@ -32,16 +32,14 @@ public:
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path {} on disk {} doesn't exist", path, disk.getDisk()->getName());
}
else if (disk.getDisk()->isDirectory(path))
if (disk.getDisk()->isDirectory(path))
{
if (!recursive)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot remove '{}': Is a directory", path);
}
else
{
disk.getDisk()->removeRecursive(path);
}
disk.getDisk()->removeRecursive(path);
}
else
{

View File

@ -33,14 +33,10 @@ public:
auto in = [&]() -> std::unique_ptr<ReadBufferFromFileBase>
{
if (!path_from.has_value())
{
return std::make_unique<ReadBufferFromFileDescriptor>(STDIN_FILENO);
}
else
{
String relative_path_from = disk.getRelativeFromRoot(path_from.value());
return disk.getDisk()->readFile(relative_path_from, getReadSettings());
}
String relative_path_from = disk.getRelativeFromRoot(path_from.value());
return disk.getDisk()->readFile(relative_path_from, getReadSettings());
}();
auto out = disk.getDisk()->writeFile(path_to);

View File

@ -127,68 +127,58 @@ std::vector<String> DisksApp::getCompletions(const String & prefix) const
}
return getEmptyCompletion(command->command_name);
}
else if (arguments.size() == 1)
if (arguments.size() == 1)
{
String command_prefix = arguments[0];
return getCommandsToComplete(command_prefix);
}
else
{
String last_token = arguments.back();
CommandPtr command;
try
{
command = getCommandByName(arguments[0]);
}
catch (...)
{
return {last_token};
}
std::vector<String> answer = {};
if (command->command_name == "help")
{
return getCommandsToComplete(last_token);
}
else
{
answer = [&]() -> std::vector<String>
{
if (multidisk_commands.contains(command->command_name))
{
return client->getAllFilesByPatternFromAllDisks(last_token);
}
else
{
return client->getCurrentDiskWithPath().getAllFilesByPattern(last_token);
}
}();
for (const auto & disk_name : client->getAllDiskNames())
{
if (disk_name.starts_with(last_token))
{
answer.push_back(disk_name);
}
}
for (const auto & option : command->options_description.options())
{
String option_sign = "--" + option->long_name();
if (option_sign.starts_with(last_token))
{
answer.push_back(option_sign);
}
}
}
if (!answer.empty())
String last_token = arguments.back();
CommandPtr command;
try
{
command = getCommandByName(arguments[0]);
}
catch (...)
{
return {last_token};
}
std::vector<String> answer = {};
if (command->command_name == "help")
return getCommandsToComplete(last_token);
answer = [&]() -> std::vector<String>
{
if (multidisk_commands.contains(command->command_name))
return client->getAllFilesByPatternFromAllDisks(last_token);
return client->getCurrentDiskWithPath().getAllFilesByPattern(last_token);
}();
for (const auto & disk_name : client->getAllDiskNames())
{
if (disk_name.starts_with(last_token))
{
std::sort(answer.begin(), answer.end());
return answer;
}
else
{
return {last_token};
answer.push_back(disk_name);
}
}
for (const auto & option : command->options_description.options())
{
String option_sign = "--" + option->long_name();
if (option_sign.starts_with(last_token))
{
answer.push_back(option_sign);
}
}
if (!answer.empty())
{
std::sort(answer.begin(), answer.end());
return answer;
}
return {last_token};
}
bool DisksApp::processQueryText(const String & text)
@ -210,11 +200,11 @@ bool DisksApp::processQueryText(const String & text)
catch (DB::Exception & err)
{
int code = getCurrentExceptionCode();
if (code == ErrorCodes::LOGICAL_ERROR)
{
throw std::move(err);
}
else if (code == ErrorCodes::BAD_ARGUMENTS)
if (code == ErrorCodes::BAD_ARGUMENTS)
{
std::cerr << err.message() << "\n"
<< "\n";

View File

@ -49,10 +49,8 @@ std::vector<String> DiskWithPath::listAllFilesByPath(const String & any_path) co
disk->listFiles(getRelativeFromRoot(any_path), file_names);
return file_names;
}
else
{
return {};
}
return {};
}
std::vector<String> DiskWithPath::getAllFilesByPattern(const String & pattern) const
@ -61,39 +59,30 @@ std::vector<String> DiskWithPath::getAllFilesByPattern(const String & pattern) c
{
auto slash_pos = pattern.find_last_of('/');
if (slash_pos >= pattern.size())
{
return {"", pattern};
}
else
{
return {pattern.substr(0, slash_pos + 1), pattern.substr(slash_pos + 1, pattern.size() - slash_pos - 1)};
}
return {pattern.substr(0, slash_pos + 1), pattern.substr(slash_pos + 1, pattern.size() - slash_pos - 1)};
}();
if (!isDirectory(path_before))
{
return {};
}
else
std::vector<String> file_names = listAllFilesByPath(path_before);
std::vector<String> answer;
for (const auto & file_name : file_names)
{
std::vector<String> file_names = listAllFilesByPath(path_before);
std::vector<String> answer;
for (const auto & file_name : file_names)
if (file_name.starts_with(path_after))
{
if (file_name.starts_with(path_after))
String file_pattern = path_before + file_name;
if (isDirectory(file_pattern))
{
String file_pattern = path_before + file_name;
if (isDirectory(file_pattern))
{
file_pattern = file_pattern + "/";
}
answer.push_back(file_pattern);
file_pattern = file_pattern + "/";
}
answer.push_back(file_pattern);
}
return answer;
}
return answer;
};
void DiskWithPath::setPath(const String & any_path)

View File

@ -39,13 +39,9 @@ DiskWithPath & ICommand::getDiskWithPath(DisksClient & client, const CommandLine
{
auto disk_name = getValueFromCommandLineOptionsWithOptional<String>(options, name);
if (disk_name.has_value())
{
return client.getDiskWithPath(disk_name.value());
}
else
{
return client.getCurrentDiskWithPath();
}
return client.getCurrentDiskWithPath();
}
}

View File

@ -63,39 +63,27 @@ protected:
static T getValueFromCommandLineOptionsThrow(const CommandLineOptions & options, const String & name)
{
if (options.count(name))
{
return getValueFromCommandLineOptions<T>(options, name);
}
else
{
throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Mandatory argument '{}' is missing", name);
}
throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Mandatory argument '{}' is missing", name);
}
template <typename T>
static T getValueFromCommandLineOptionsWithDefault(const CommandLineOptions & options, const String & name, const T & default_value)
{
if (options.count(name))
{
return getValueFromCommandLineOptions<T>(options, name);
}
else
{
return default_value;
}
return default_value;
}
template <typename T>
static std::optional<T> getValueFromCommandLineOptionsWithOptional(const CommandLineOptions & options, const String & name)
{
if (options.count(name))
{
return std::optional{getValueFromCommandLineOptions<T>(options, name)};
}
else
{
return std::nullopt;
}
return std::nullopt;
}
DiskWithPath & getDiskWithPath(DisksClient & client, const CommandLineOptions & options, const String & name);

View File

@ -110,8 +110,8 @@ static auto executeScript(const std::string & command, bool throw_on_error = fal
sh->wait();
return 0;
}
else
return sh->tryWait();
return sh->tryWait();
}
static bool ask(std::string question)

View File

@ -26,17 +26,15 @@ CatBoostLibraryHandlerPtr CatBoostLibraryHandlerFactory::tryGetModel(const Strin
if (found)
return handler->second;
else
if (create_if_not_found)
{
if (create_if_not_found)
{
auto new_handler = std::make_shared<CatBoostLibraryHandler>(library_path, model_path);
library_handlers.emplace(model_path, new_handler);
LOG_DEBUG(log, "Loaded catboost library handler for model path '{}'", model_path);
return new_handler;
}
return nullptr;
auto new_handler = std::make_shared<CatBoostLibraryHandler>(library_path, model_path);
library_handlers.emplace(model_path, new_handler);
LOG_DEBUG(log, "Loaded catboost library handler for model path '{}'", model_path);
return new_handler;
}
return nullptr;
}
void CatBoostLibraryHandlerFactory::removeModel(const String & model_path)

View File

@ -25,7 +25,7 @@ std::unique_ptr<HTTPRequestHandler> LibraryBridgeHandlerFactory::createRequestHa
{
if (uri.getPath() == "/extdict_ping")
return std::make_unique<ExternalDictionaryLibraryBridgeExistsHandler>(getContext());
else if (uri.getPath() == "/catboost_ping")
if (uri.getPath() == "/catboost_ping")
return std::make_unique<CatBoostLibraryBridgeExistsHandler>(getContext());
}
@ -33,7 +33,7 @@ std::unique_ptr<HTTPRequestHandler> LibraryBridgeHandlerFactory::createRequestHa
{
if (uri.getPath() == "/extdict_request")
return std::make_unique<ExternalDictionaryLibraryBridgeRequestHandler>(getContext());
else if (uri.getPath() == "/catboost_request")
if (uri.getPath() == "/catboost_request")
return std::make_unique<CatBoostLibraryBridgeRequestHandler>(getContext());
}

View File

@ -158,11 +158,9 @@ void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequ
out.finalize();
return;
}
else
{
LOG_TRACE(log, "Cannot clone from dictionary with id: {}, will call extDict_libNew instead", from_dictionary_id);
lib_new = true;
}
LOG_TRACE(log, "Cannot clone from dictionary with id: {}, will call extDict_libNew instead", from_dictionary_id);
lib_new = true;
}
if (lib_new)
{

View File

@ -518,7 +518,9 @@ try
SCOPE_EXIT({ cleanup(); });
initTTYBuffer(toProgressOption(getClientConfiguration().getString("progress", "default")));
initTTYBuffer(toProgressOption(getClientConfiguration().getString("progress", "default")),
toProgressOption(config().getString("progress-table", "default")));
initKeystrokeInterceptor();
ASTAlterCommand::setFormatAlterCommandsWithParentheses(true);
/// try to load user defined executable functions, throw on error and die

View File

@ -231,8 +231,8 @@ static Int64 transformSigned(Int64 x, UInt64 seed)
{
if (x >= 0)
return transform(x, seed);
else
return -transform(-x, seed); /// It works Ok even for minimum signed number.
return -transform(-x, seed); /// It works Ok even for minimum signed number.
}
@ -1105,8 +1105,8 @@ public:
{
if (isUInt(data_type))
return std::make_unique<UnsignedIntegerModel>(seed);
else
return std::make_unique<SignedIntegerModel>(seed);
return std::make_unique<SignedIntegerModel>(seed);
}
if (typeid_cast<const DataTypeFloat32 *>(&data_type))

View File

@ -39,13 +39,13 @@ IdentifierQuotingStyle getQuotingStyle(nanodbc::ConnectionHolderPtr connection)
auto identifier_quote = getIdentifierQuote(connection);
if (identifier_quote.empty())
return IdentifierQuotingStyle::Backticks;
else if (identifier_quote[0] == '`')
if (identifier_quote[0] == '`')
return IdentifierQuotingStyle::Backticks;
else if (identifier_quote[0] == '"')
if (identifier_quote[0] == '"')
return IdentifierQuotingStyle::DoubleQuotes;
else
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Can not map quote identifier '{}' to IdentifierQuotingStyle value", identifier_quote);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Can not map quote identifier '{}' to IdentifierQuotingStyle value", identifier_quote);
}
}

View File

@ -127,8 +127,8 @@ std::string validateODBCConnectionString(const std::string & connection_string)
if (*pos == '{')
return read_escaped_value();
else
return read_plain_value();
return read_plain_value();
};
std::map<std::string, std::string> parameters;
@ -206,12 +206,10 @@ std::string validateODBCConnectionString(const std::string & connection_string)
reconstructed_connection_string.append(value_pos, next_pos - value_pos);
break;
}
else
{
reconstructed_connection_string.append(value_pos, next_pos - value_pos);
reconstructed_connection_string.append("}}");
value_pos = next_pos + 1;
}
reconstructed_connection_string.append(value_pos, next_pos - value_pos);
reconstructed_connection_string.append("}}");
value_pos = next_pos + 1;
}
reconstructed_connection_string += '}';

View File

@ -158,6 +158,11 @@ namespace Setting
extern const SettingsSeconds send_timeout;
}
namespace MergeTreeSetting
{
extern const MergeTreeSettingsBool allow_remote_fs_zero_copy_replication;
}
}
namespace CurrentMetrics
@ -599,7 +604,7 @@ void sanityChecks(Server & server)
{
}
if (server.context()->getMergeTreeSettings().allow_remote_fs_zero_copy_replication)
if (server.context()->getMergeTreeSettings()[MergeTreeSetting::allow_remote_fs_zero_copy_replication])
{
server.context()->addWarningMessage("The setting 'allow_remote_fs_zero_copy_replication' is enabled for MergeTree tables."
" But the feature of 'zero-copy replication' is under development and is not ready for production."
@ -628,7 +633,9 @@ void loadStartupScripts(const Poco::Util::AbstractConfiguration & config, Contex
auto condition_write_buffer = WriteBufferFromOwnString();
LOG_DEBUG(log, "Checking startup query condition `{}`", condition);
executeQuery(condition_read_buffer, condition_write_buffer, true, context, callback, QueryFlags{ .internal = true }, std::nullopt, {});
auto startup_context = Context::createCopy(context);
startup_context->makeQueryContext();
executeQuery(condition_read_buffer, condition_write_buffer, true, startup_context, callback, QueryFlags{ .internal = true }, std::nullopt, {});
auto result = condition_write_buffer.str();
@ -648,7 +655,9 @@ void loadStartupScripts(const Poco::Util::AbstractConfiguration & config, Contex
auto write_buffer = WriteBufferFromOwnString();
LOG_DEBUG(log, "Executing query `{}`", query);
executeQuery(read_buffer, write_buffer, true, context, callback, QueryFlags{ .internal = true }, std::nullopt, {});
auto startup_context = Context::createCopy(context);
startup_context->makeQueryContext();
executeQuery(read_buffer, write_buffer, true, startup_context, callback, QueryFlags{ .internal = true }, std::nullopt, {});
}
}
catch (...)
@ -1125,9 +1134,6 @@ try
/// We need to reload server settings because config could be updated via zookeeper.
server_settings.loadSettingsFromConfig(config());
/// NOTE: Do sanity checks after we loaded all possible substitutions (for the configuration) from ZK
sanityChecks(*this);
#if defined(OS_LINUX)
std::string executable_path = getExecutablePath();
@ -2019,6 +2025,11 @@ try
if (!filesystem_caches_path.empty())
global_context->setFilesystemCachesPath(filesystem_caches_path);
/// NOTE: Do sanity checks after we loaded all possible substitutions (for the configuration) from ZK
/// Additionally, making the check after the default profile is initialized.
/// It is important to initialize MergeTreeSettings after Settings, to support compatibility for MergeTreeSettings.
sanityChecks(*this);
/// Check sanity of MergeTreeSettings on server startup
{
size_t background_pool_tasks = global_context->getMergeMutateExecutor()->getMaxTasksCount();

View File

@ -116,16 +116,14 @@ namespace
reading_dependents = false;
continue;
}
else if (line == "DEPENDENTS")
if (line == "DEPENDENTS")
{
reading_dependents = true;
reading_dependencies = false;
continue;
}
else if (line.empty())
{
if (line.empty())
continue;
}
size_t separator1 = line.find('\t');
size_t separator2 = line.find('\t', separator1 + 1);

View File

@ -22,8 +22,10 @@
#include <Backups/RestorerFromBackup.h>
#include <Core/Settings.h>
#include <base/defines.h>
#include <base/range.h>
#include <IO/Operators.h>
#include <Common/re2.h>
#include <Poco/AccessExpireCache.h>
#include <boost/algorithm/string/join.hpp>
#include <filesystem>
@ -132,8 +134,8 @@ public:
"' registered for user-defined settings",
String{setting_name}, boost::algorithm::join(registered_prefixes, "' or '"));
}
else
BaseSettingsHelpers::throwSettingNotFound(setting_name);
throw Exception(ErrorCodes::UNKNOWN_SETTING, "Unknown setting '{}'", String{setting_name});
}
private:
@ -828,8 +830,7 @@ std::shared_ptr<const EnabledQuota> AccessControl::getAuthenticationQuota(
quota_key,
throw_if_client_key_empty);
}
else
return nullptr;
return nullptr;
}

View File

@ -1483,17 +1483,17 @@ bool AccessRights::isGrantedImplHelper(const AccessRightsElement & element) cons
{
if (element.anyParameter())
return isGrantedImpl<grant_option, wildcard>(element.access_flags);
else
return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.parameter);
return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.parameter);
}
else if (element.anyDatabase())
if (element.anyDatabase())
return isGrantedImpl<grant_option, wildcard>(element.access_flags);
else if (element.anyTable())
if (element.anyTable())
return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.database);
else if (element.anyColumn())
if (element.anyColumn())
return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.database, element.table);
else
return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.database, element.table, element.columns);
return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.database, element.table, element.columns);
}
template <bool grant_option, bool wildcard>
@ -1503,16 +1503,14 @@ bool AccessRights::isGrantedImpl(const AccessRightsElement & element) const
{
if (element.grant_option)
return isGrantedImplHelper<true, true>(element);
else
return isGrantedImplHelper<grant_option, true>(element);
}
else
{
if (element.grant_option)
return isGrantedImplHelper<true, wildcard>(element);
else
return isGrantedImplHelper<grant_option, wildcard>(element);
return isGrantedImplHelper<grant_option, true>(element);
}
if (element.grant_option)
return isGrantedImplHelper<true, wildcard>(element);
return isGrantedImplHelper<grant_option, wildcard>(element);
}
template <bool grant_option, bool wildcard>

View File

@ -501,10 +501,9 @@ AuthenticationData AuthenticationData::fromAST(const ASTAuthenticationData & que
auth_data.setPasswordHashBinary(AuthenticationData::Util::stringToDigest(value));
return auth_data;
}
else
{
auth_data.setPasswordHashHex(value);
}
auth_data.setPasswordHashHex(value);
if (query.type == AuthenticationType::SHA256_PASSWORD && args_size == 2)
{

View File

@ -247,8 +247,7 @@ namespace
const auto & unused_node = *(owned_nodes.begin()->second);
if (unused_node.node_type == UNKNOWN)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Parent group '{}' not found", unused_node.keyword);
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Access type '{}' should have parent group", unused_node.keyword);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Access type '{}' should have parent group", unused_node.keyword);
}
}

View File

@ -61,7 +61,7 @@ namespace
{
if (addr.family() == IPAddress::Family::IPv4 && addr_v6 == toIPv6(addr))
return true;
else if (addr.family() == IPAddress::Family::IPv6 && addr_v6 == addr)
if (addr.family() == IPAddress::Family::IPv6 && addr_v6 == addr)
return true;
}
@ -267,10 +267,9 @@ String AllowedClientHosts::IPSubnet::toString() const
unsigned int prefix_length = mask.prefixLength();
if (isMaskAllBitsOne())
return prefix.toString();
else if (IPAddress{prefix_length, mask.family()} == mask)
if (IPAddress{prefix_length, mask.family()} == mask)
return fs::path(prefix.toString()) / std::to_string(prefix_length);
else
return fs::path(prefix.toString()) / mask.toString();
return fs::path(prefix.toString()) / mask.toString();
}
bool AllowedClientHosts::IPSubnet::isMaskAllBitsOne() const
@ -575,12 +574,11 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const
parseLikePattern(pattern, subnet, name, name_regexp);
if (subnet)
return check_subnet(*subnet);
else if (name)
if (name)
return check_name(*name);
else if (name_regexp)
if (name_regexp)
return check_name_regexp(*name_regexp);
else
return false;
return false;
};
for (const String & like_pattern : like_patterns)

View File

@ -28,16 +28,14 @@ String QuotaTypeInfo::valueToString(QuotaValue value) const
{
if (!(value % output_denominator))
return std::to_string(value / output_denominator);
else
return toString(static_cast<double>(value) / output_denominator);
return toString(static_cast<double>(value) / output_denominator);
}
QuotaValue QuotaTypeInfo::stringToValue(const String & str) const
{
if (output_denominator == 1)
return static_cast<QuotaValue>(parse<UInt64>(str));
else
return static_cast<QuotaValue>(parse<Float64>(str) * output_denominator);
return static_cast<QuotaValue>(parse<Float64>(str) * output_denominator);
}
String QuotaTypeInfo::valueToStringWithName(QuotaValue value) const

View File

@ -728,8 +728,7 @@ bool ContextAccess::checkAccessImplHelper(const ContextPtr & context, AccessFlag
"For queries over HTTP, method GET implies readonly. "
"You should use method POST for modifying queries");
}
else
return access_denied(ErrorCodes::READONLY, "{}: Cannot execute query in readonly mode");
return access_denied(ErrorCodes::READONLY, "{}: Cannot execute query in readonly mode");
}
}
@ -770,17 +769,17 @@ bool ContextAccess::checkAccessImplHelper(const ContextPtr & context, const Acce
{
if (element.anyParameter())
return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags);
else
return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags, element.parameter);
return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags, element.parameter);
}
else if (element.anyDatabase())
if (element.anyDatabase())
return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags);
else if (element.anyTable())
if (element.anyTable())
return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags, element.database);
else if (element.anyColumn())
if (element.anyColumn())
return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags, element.database, element.table);
else
return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags, element.database, element.table, element.columns);
return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags, element.database, element.table, element.columns);
}
template <bool throw_if_denied, bool grant_option, bool wildcard>
@ -790,16 +789,14 @@ bool ContextAccess::checkAccessImpl(const ContextPtr & context, const AccessRigh
{
if (element.grant_option)
return checkAccessImplHelper<throw_if_denied, true, true>(context, element);
else
return checkAccessImplHelper<throw_if_denied, grant_option, true>(context, element);
}
else
{
if (element.grant_option)
return checkAccessImplHelper<throw_if_denied, true, wildcard>(context, element);
else
return checkAccessImplHelper<throw_if_denied, grant_option, wildcard>(context, element);
return checkAccessImplHelper<throw_if_denied, grant_option, true>(context, element);
}
if (element.grant_option)
return checkAccessImplHelper<throw_if_denied, true, wildcard>(context, element);
return checkAccessImplHelper<throw_if_denied, grant_option, wildcard>(context, element);
}
template <bool throw_if_denied, bool grant_option, bool wildcard>

View File

@ -1,18 +1,17 @@
#pragma once
#include <Access/AccessRights.h>
#include <Access/ContextAccessParams.h>
#include <Access/EnabledRowPolicies.h>
#include <Interpreters/ClientInfo.h>
#include <Access/QuotaUsage.h>
#include <Common/SettingsChanges.h>
#include <Core/UUID.h>
#include <base/scope_guard.h>
#include <boost/container/flat_set.hpp>
#include <memory>
#include <mutex>
#include <optional>
#include <unordered_map>
#include <Access/AccessRights.h>
#include <Access/ContextAccessParams.h>
#include <Access/EnabledRowPolicies.h>
#include <Access/QuotaUsage.h>
#include <Core/UUID.h>
#include <Interpreters/ClientInfo.h>
#include <base/scope_guard.h>
#include <Common/SettingsChanges.h>
namespace Poco { class Logger; }

View File

@ -89,10 +89,9 @@ bool operator ==(const ContextAccessParams & left, const ContextAccessParams & r
{
if (!x)
return !y;
else if (!y)
if (!y)
return false;
else
return *x == *y;
return *x == *y;
}
else
{
@ -132,23 +131,21 @@ bool operator <(const ContextAccessParams & left, const ContextAccessParams & ri
{
if (!x)
return y ? -1 : 0;
else if (!y)
if (!y)
return 1;
else if (*x == *y)
if (*x == *y)
return 0;
else if (*x < *y)
if (*x < *y)
return -1;
else
return 1;
return 1;
}
else
{
if (x == y)
return 0;
else if (x < y)
if (x < y)
return -1;
else
return 1;
return 1;
}
};

View File

@ -1,9 +1,8 @@
#pragma once
#include <base/types.h>
#include <boost/container/flat_set.hpp>
#include <Access/Common/SSLCertificateSubjects.h>
#include <memory>
#include <Access/Common/SSLCertificateSubjects.h>
#include <base/types.h>
#include "config.h"

View File

@ -166,8 +166,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
ret = krb5_get_init_creds_keytab(k5.ctx, &my_creds, k5.me, keytab, 0, nullptr, options);
if (ret)
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in getting initial credentials: {}", fmtError(ret));
else
LOG_TRACE(log,"Got initial credentials");
LOG_TRACE(log, "Got initial credentials");
}
else
{

View File

@ -330,10 +330,7 @@ std::set<String> LDAPAccessStorage::mapExternalRolesNoLock(const LDAPClient::Sea
for (const auto & external_role : external_role_set)
{
if (
prefix.size() < external_role.size() &&
external_role.compare(0, prefix.size(), prefix) == 0
)
if (prefix.size() < external_role.size() && external_role.starts_with(prefix))
{
role_names.emplace(external_role, prefix.size());
}

View File

@ -62,8 +62,7 @@ String QuotaCache::QuotaInfo::calculateKey(const EnabledQuota & enabled, bool th
"Quota {} (for user {}) requires a client supplied key.",
quota->getName(),
params.user_name);
else
return ""; // Authentication quota has no client key at time of authentication.
return ""; // Authentication quota has no client key at time of authentication.
}
case QuotaKeyType::CLIENT_KEY_OR_USER_NAME:
{

View File

@ -1,10 +1,9 @@
#pragma once
#include <Access/EnabledRoles.h>
#include <Poco/AccessExpireCache.h>
#include <boost/container/flat_set.hpp>
#include <map>
#include <mutex>
#include <Access/EnabledRoles.h>
#include <Poco/AccessExpireCache.h>
namespace DB

View File

@ -80,15 +80,13 @@ void RolesOrUsersSet::init(const ASTRolesOrUsersSet & ast, const AccessControl *
return *id;
return access_control->getID<Role>(name);
}
else if (ast.allow_users)
if (ast.allow_users)
{
return access_control->getID<User>(name);
}
else
{
assert(ast.allow_roles);
return access_control->getID<Role>(name);
}
assert(ast.allow_roles);
return access_control->getID<Role>(name);
};
if (!ast.names.empty() && !all)

View File

@ -28,7 +28,7 @@ SettingsAuthResponseParser::parse(const Poco::Net::HTTPResponse & response, std:
try
{
Poco::Dynamic::Var json = parser.parse(*body_stream);
Poco::JSON::Object::Ptr obj = json.extract<Poco::JSON::Object::Ptr>();
const Poco::JSON::Object::Ptr & obj = json.extract<Poco::JSON::Object::Ptr>();
Poco::JSON::Object::Ptr settings_obj = obj->getObject(settings_key);
if (settings_obj)

View File

@ -54,8 +54,7 @@ SettingSourceRestrictions getSettingSourceRestrictions(std::string_view name)
auto settingConstraintIter = SETTINGS_SOURCE_RESTRICTIONS.find(name);
if (settingConstraintIter != SETTINGS_SOURCE_RESTRICTIONS.end())
return settingConstraintIter->second;
else
return SettingSourceRestrictions(); // allows everything
return SettingSourceRestrictions(); // allows everything
}
}
@ -310,8 +309,7 @@ bool SettingsConstraints::Checker::check(SettingChange & change,
{
if (reaction == THROW_ON_VIOLATION)
throw Exception(explain, code);
else
return false;
return false;
}
std::string_view setting_name = setting_name_resolver(change.name);
@ -335,8 +333,7 @@ bool SettingsConstraints::Checker::check(SettingChange & change,
{
if (reaction == THROW_ON_VIOLATION)
throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} should not be changed", setting_name);
else
return false;
return false;
}
const auto & min_value = constraint.min_value;
@ -351,8 +348,7 @@ bool SettingsConstraints::Checker::check(SettingChange & change,
max_value,
min_value,
setting_name);
else
return false;
return false;
}
if (!min_value.isNull() && less_or_cannot_compare(new_value, min_value))
@ -362,8 +358,7 @@ bool SettingsConstraints::Checker::check(SettingChange & change,
throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} shouldn't be less than {}",
setting_name, applyVisitor(FieldVisitorToString(), min_value));
}
else
change.value = min_value;
change.value = min_value;
}
if (!max_value.isNull() && less_or_cannot_compare(max_value, new_value))
@ -373,16 +368,14 @@ bool SettingsConstraints::Checker::check(SettingChange & change,
throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} shouldn't be greater than {}",
setting_name, applyVisitor(FieldVisitorToString(), max_value));
}
else
change.value = max_value;
change.value = max_value;
}
if (!getSettingSourceRestrictions(setting_name).isSourceAllowed(source))
{
if (reaction == THROW_ON_VIOLATION)
throw Exception(ErrorCodes::READONLY, "Setting {} is not allowed to be set by {}", setting_name, toString(source));
else
return false;
return false;
}
return true;
@ -431,8 +424,8 @@ SettingsConstraints::Checker SettingsConstraints::getMergeTreeChecker(std::strin
auto full_name = settingFullName<MergeTreeSettings>(short_name);
auto it = constraints.find(resolveSettingNameWithCache(full_name));
if (it == constraints.end())
return Checker(MergeTreeSettings::Traits::resolveName); // Allowed
return Checker(it->second, MergeTreeSettings::Traits::resolveName);
return Checker(MergeTreeSettings::resolveName); // Allowed
return Checker(it->second, MergeTreeSettings::resolveName);
}
bool SettingsConstraints::Constraint::operator==(const Constraint & other) const

View File

@ -28,6 +28,7 @@
#include <cstring>
#include <filesystem>
#include <base/FnTraits.h>
#include <base/range.h>
namespace DB
@ -648,8 +649,7 @@ namespace
{
if (users_without_row_policies_can_read_rows)
continue;
else
filter = "1";
filter = "1";
}
auto policy = std::make_shared<RowPolicy>();

View File

@ -157,13 +157,13 @@ public:
d.status = static_cast<Data::Status>(k);
if (d.status == Data::Status::NotSet)
return;
else if (d.status == Data::Status::SetNull)
if (d.status == Data::Status::SetNull)
{
if (!returns_nullable_type)
throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect type (NULL) in non-nullable {}State", getName());
return;
}
else if (d.status == Data::Status::SetOther)
if (d.status == Data::Status::SetOther)
{
serialization->deserializeBinary(d.value, buf, {});
return;

View File

@ -148,9 +148,8 @@ AggregateFunctionPtr createAggregateFunctionDeltaSum(
if (isInteger(data_type) || isFloat(data_type))
return AggregateFunctionPtr(createWithNumericType<AggregationFunctionDeltaSum>(
*data_type, arguments, params));
else
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}",
arguments[0]->getName(), name);
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}", arguments[0]->getName(), name);
}
}

View File

@ -137,13 +137,12 @@ AggregateFunctionFactory::getAssociatedFunctionByNullsAction(const String & name
{
if (action == NullsAction::RESPECT_NULLS)
{
if (auto it = respect_nulls.find(name); it == respect_nulls.end())
auto it = respect_nulls.find(name);
if (it == respect_nulls.end())
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} does not support RESPECT NULLS", name);
else if (auto associated_it = aggregate_functions.find(it->second); associated_it != aggregate_functions.end())
if (auto associated_it = aggregate_functions.find(it->second); associated_it != aggregate_functions.end())
return {associated_it->second};
else
throw Exception(
ErrorCodes::LOGICAL_ERROR, "Unable to find the function {} (equivalent to '{} RESPECT NULLS')", it->second, name);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unable to find the function {} (equivalent to '{} RESPECT NULLS')", it->second, name);
}
if (action == NullsAction::IGNORE_NULLS)
@ -152,9 +151,8 @@ AggregateFunctionFactory::getAssociatedFunctionByNullsAction(const String & name
{
if (auto associated_it = aggregate_functions.find(it->second); associated_it != aggregate_functions.end())
return {associated_it->second};
else
throw Exception(
ErrorCodes::LOGICAL_ERROR, "Unable to find the function {} (equivalent to '{} IGNORE NULLS')", it->second, name);
throw Exception(
ErrorCodes::LOGICAL_ERROR, "Unable to find the function {} (equivalent to '{} IGNORE NULLS')", it->second, name);
}
/// We don't throw for IGNORE NULLS of other functions because that's the default in CH
}
@ -263,8 +261,7 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl(
if (!hints.empty())
throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION,
"Unknown aggregate function {}{}. Maybe you meant: {}", name, extra_info, toString(hints));
else
throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION, "Unknown aggregate function {}{}", name, extra_info);
throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION, "Unknown aggregate function {}{}", name, extra_info);
}
std::optional<AggregateFunctionProperties> AggregateFunctionFactory::tryGetProperties(String name, NullsAction action) const

View File

@ -328,21 +328,19 @@ struct AggregateFunctionFlameGraphData
list = list->next;
return entry;
}
else
Entry * parent = list;
while (parent->next && parent->next->size != size)
parent = parent->next;
if (parent->next && parent->next->size == size)
{
Entry * parent = list;
while (parent->next && parent->next->size != size)
parent = parent->next;
if (parent->next && parent->next->size == size)
{
Entry * entry = parent->next;
parent->next = entry->next;
return entry;
}
return nullptr;
Entry * entry = parent->next;
parent->next = entry->next;
return entry;
}
return nullptr;
}
void add(UInt64 ptr, Int64 size, const UInt64 * stack, size_t stack_size, Arena * arena)

View File

@ -90,8 +90,7 @@ struct GroupArraySamplerData
/// With a large number of values, we will generate random numbers several times slower.
if (lim <= static_cast<UInt64>(pcg32_fast::max()))
return rng() % lim;
else
return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32::max()) + 1ULL) + static_cast<UInt64>(rng())) % lim;
return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32::max()) + 1ULL) + static_cast<UInt64>(rng())) % lim;
}
void randomShuffle()
@ -797,8 +796,8 @@ AggregateFunctionPtr createAggregateFunctionGroupArray(
throw Exception(ErrorCodes::BAD_ARGUMENTS, "groupArrayLast make sense only with max_elems (groupArrayLast(max_elems)())");
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait</* Thas_limit= */ false, Tlast, /* Tsampler= */ Sampler::NONE>>(argument_types[0], parameters, max_elems, std::nullopt);
}
else
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait</* Thas_limit= */ true, Tlast, /* Tsampler= */ Sampler::NONE>>(argument_types[0], parameters, max_elems, std::nullopt);
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait</* Thas_limit= */ true, Tlast, /* Tsampler= */ Sampler::NONE>>(
argument_types[0], parameters, max_elems, std::nullopt);
}
AggregateFunctionPtr createAggregateFunctionGroupArraySample(

View File

@ -381,23 +381,22 @@ IAggregateFunction * createWithExtraTypes(const DataTypePtr & argument_type, con
{
WhichDataType which(argument_type);
if (which.idx == TypeIndex::Date) return new AggregateFunctionGroupArrayIntersectDate(argument_type, parameters);
else if (which.idx == TypeIndex::DateTime) return new AggregateFunctionGroupArrayIntersectDateTime(argument_type, parameters);
else if (which.idx == TypeIndex::Date32) return new AggregateFunctionGroupArrayIntersectDate32(argument_type, parameters);
else if (which.idx == TypeIndex::DateTime64)
if (which.idx == TypeIndex::DateTime)
return new AggregateFunctionGroupArrayIntersectDateTime(argument_type, parameters);
if (which.idx == TypeIndex::Date32)
return new AggregateFunctionGroupArrayIntersectDate32(argument_type, parameters);
if (which.idx == TypeIndex::DateTime64)
{
const auto * datetime64_type = dynamic_cast<const DataTypeDateTime64 *>(argument_type.get());
const auto return_type = std::make_shared<DataTypeArray>(std::make_shared<DataTypeDateTime64>(datetime64_type->getScale()));
return new AggregateFunctionGroupArrayIntersectGeneric<true>(argument_type, parameters, return_type);
}
else
{
/// Check that we can use plain version of AggregateFunctionGroupArrayIntersectGeneric
if (argument_type->isValueUnambiguouslyRepresentedInContiguousMemoryRegion())
return new AggregateFunctionGroupArrayIntersectGeneric<true>(argument_type, parameters);
else
return new AggregateFunctionGroupArrayIntersectGeneric<false>(argument_type, parameters);
}
/// Check that we can use plain version of AggregateFunctionGroupArrayIntersectGeneric
if (argument_type->isValueUnambiguouslyRepresentedInContiguousMemoryRegion())
return new AggregateFunctionGroupArrayIntersectGeneric<true>(argument_type, parameters);
return new AggregateFunctionGroupArrayIntersectGeneric<false>(argument_type, parameters);
}
inline AggregateFunctionPtr createAggregateFunctionGroupArrayIntersectImpl(const std::string & name, const DataTypePtr & argument_type, const Array & parameters)

View File

@ -65,8 +65,7 @@ struct MovingSumData : public MovingData<T>
{
if (idx < window_size)
return this->value[idx];
else
return this->value[idx] - this->value[idx - window_size];
return this->value[idx] - this->value[idx - window_size];
}
};
@ -79,8 +78,7 @@ struct MovingAvgData : public MovingData<T>
{
if (idx < window_size)
return this->value[idx] / T(window_size);
else
return (this->value[idx] - this->value[idx - window_size]) / T(window_size);
return (this->value[idx] - this->value[idx - window_size]) / T(window_size);
}
};
@ -285,16 +283,12 @@ AggregateFunctionPtr createAggregateFunctionMoving(
{
if (isDecimal(argument_type))
return createAggregateFunctionMovingImpl<Function, std::false_type, std::true_type>(name, argument_type);
else
return createAggregateFunctionMovingImpl<Function, std::false_type, std::false_type>(name, argument_type);
}
else
{
if (isDecimal(argument_type))
return createAggregateFunctionMovingImpl<Function, std::true_type, std::true_type>(name, argument_type, max_elems);
else
return createAggregateFunctionMovingImpl<Function, std::true_type, std::false_type>(name, argument_type, max_elems);
return createAggregateFunctionMovingImpl<Function, std::false_type, std::false_type>(name, argument_type);
}
if (isDecimal(argument_type))
return createAggregateFunctionMovingImpl<Function, std::true_type, std::true_type>(name, argument_type, max_elems);
return createAggregateFunctionMovingImpl<Function, std::true_type, std::false_type>(name, argument_type, max_elems);
}
}

View File

@ -391,21 +391,20 @@ AggregateFunctionPtr createAggregateFunctionGroupArray(
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should have limit argument", name);
}
else if (parameters.size() == 1)
if (parameters.size() == 1)
{
auto type = parameters[0].getType();
if (type != Field::Types::Int64 && type != Field::Types::UInt64)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name);
if ((type == Field::Types::Int64 && parameters[0].safeGet<Int64>() < 0) ||
(type == Field::Types::UInt64 && parameters[0].safeGet<UInt64>() == 0))
if ((type == Field::Types::Int64 && parameters[0].safeGet<Int64>() < 0)
|| (type == Field::Types::UInt64 && parameters[0].safeGet<UInt64>() == 0))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name);
max_elems = parameters[0].safeGet<UInt64>();
}
else
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Function {} does not support this number of arguments", name);
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} does not support this number of arguments", name);
if (max_elems > group_array_sorted_sort_strategy_max_elements_threshold)
return createAggregateFunctionGroupArraySortedImpl<GroupArraySortedSort>(argument_types[0], parameters, max_elems);

View File

@ -133,8 +133,7 @@ public:
{
if (revision >= STATE_VERSION_1_MIN_REVISION)
return 1;
else
return 0;
return 0;
}
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version) const override

View File

@ -88,8 +88,7 @@ public:
{
if (isSmall())
return small.size();
else
return roaring_bitmap->cardinality();
return roaring_bitmap->cardinality();
}
void merge(const RoaringBitmapWithSmallSet & r1)
@ -454,8 +453,7 @@ public:
if (isSmall())
return small.find(static_cast<T>(x)) != small.end();
else
return roaring_bitmap->contains(static_cast<Value>(x));
return roaring_bitmap->contains(static_cast<Value>(x));
}
/**
@ -554,24 +552,22 @@ public:
r1.add(elem);
return answer.size();
}
else
{
UInt64 count = 0;
for (auto it = roaring_bitmap->begin(); it != roaring_bitmap->end(); ++it)
{
if (*it < range_start)
continue;
if (count < limit)
{
r1.add(*it);
++count;
}
else
break;
UInt64 count = 0;
for (auto it = roaring_bitmap->begin(); it != roaring_bitmap->end(); ++it)
{
if (*it < range_start)
continue;
if (count < limit)
{
r1.add(*it);
++count;
}
return count;
else
break;
}
return count;
}
UInt64 rb_offset_limit(UInt64 offset, UInt64 limit, RoaringBitmapWithSmallSet & r1) const /// NOLINT
@ -591,18 +587,16 @@ public:
r1.add(it->getValue());
return count;
}
else
{
UInt64 count = 0;
UInt64 offset_count = 0;
auto it = roaring_bitmap->begin();
for (;it != roaring_bitmap->end() && offset_count < offset; ++it)
++offset_count;
for (;it != roaring_bitmap->end() && count < limit; ++it, ++count)
r1.add(*it);
return count;
}
UInt64 count = 0;
UInt64 offset_count = 0;
auto it = roaring_bitmap->begin();
for (; it != roaring_bitmap->end() && offset_count < offset; ++it)
++offset_count;
for (; it != roaring_bitmap->end() && count < limit; ++it, ++count)
r1.add(*it);
return count;
}
UInt64 rb_min() const /// NOLINT
@ -620,8 +614,7 @@ public:
}
return min_val;
}
else
return roaring_bitmap->minimum();
return roaring_bitmap->minimum();
}
UInt64 rb_max() const /// NOLINT
@ -639,8 +632,7 @@ public:
}
return max_val;
}
else
return roaring_bitmap->maximum();
return roaring_bitmap->maximum();
}
/**

View File

@ -275,8 +275,7 @@ AggregateFunctionPtr createAggregateFunctionGroupConcat(
if (has_limit)
return std::make_shared<GroupConcatImpl</* has_limit= */ true>>(argument_types[0], parameters, limit, delimiter);
else
return std::make_shared<GroupConcatImpl</* has_limit= */ false>>(argument_types[0], parameters, limit, delimiter);
return std::make_shared<GroupConcatImpl</* has_limit= */ false>>(argument_types[0], parameters, limit, delimiter);
}
}

View File

@ -276,16 +276,15 @@ IAggregateFunction * createWithExtraTypes(const DataTypePtr & argument_type, TAr
{
WhichDataType which(argument_type);
if (which.idx == TypeIndex::Date) return new AggregateFunctionGroupUniqArrayDate<HasLimit>(argument_type, args...);
else if (which.idx == TypeIndex::DateTime) return new AggregateFunctionGroupUniqArrayDateTime<HasLimit>(argument_type, args...);
else if (which.idx == TypeIndex::IPv4) return new AggregateFunctionGroupUniqArrayIPv4<HasLimit>(argument_type, args...);
else
{
/// Check that we can use plain version of AggregateFunctionGroupUniqArrayGeneric
if (argument_type->isValueUnambiguouslyRepresentedInContiguousMemoryRegion())
return new AggregateFunctionGroupUniqArrayGeneric<true, HasLimit>(argument_type, args...);
else
return new AggregateFunctionGroupUniqArrayGeneric<false, HasLimit>(argument_type, args...);
}
if (which.idx == TypeIndex::DateTime)
return new AggregateFunctionGroupUniqArrayDateTime<HasLimit>(argument_type, args...);
if (which.idx == TypeIndex::IPv4)
return new AggregateFunctionGroupUniqArrayIPv4<HasLimit>(argument_type, args...);
/// Check that we can use plain version of AggregateFunctionGroupUniqArrayGeneric
if (argument_type->isValueUnambiguouslyRepresentedInContiguousMemoryRegion())
return new AggregateFunctionGroupUniqArrayGeneric<true, HasLimit>(argument_type, args...);
return new AggregateFunctionGroupUniqArrayGeneric<false, HasLimit>(argument_type, args...);
}
template <typename HasLimit, typename ... TArgs>
@ -336,8 +335,7 @@ AggregateFunctionPtr createAggregateFunctionGroupUniqArray(
if (!limit_size)
return createAggregateFunctionGroupUniqArrayImpl<std::false_type>(name, argument_types[0], parameters);
else
return createAggregateFunctionGroupUniqArrayImpl<std::true_type>(name, argument_types[0], parameters, max_elems);
return createAggregateFunctionGroupUniqArrayImpl<std::true_type>(name, argument_types[0], parameters, max_elems);
}
}

View File

@ -87,8 +87,7 @@ public:
{
if (kind_ == AggregateFunctionIntersectionsKind::Count)
return std::make_shared<DataTypeUInt64>();
else
return std::make_shared<DataTypeNumber<PointType>>();
return std::make_shared<DataTypeNumber<PointType>>();
}
/// MaxIntersectionsData::Allocator uses the arena

View File

@ -138,7 +138,9 @@ public:
{
if (other.count == 0)
return;
else if (count == 0)
/// NOLINTBEGIN(readability-else-after-return)
if (count == 0)
{
compress_threshold = other.compress_threshold;
relative_error = other.relative_error;
@ -237,6 +239,7 @@ public:
doCompress(2 * merged_relative_error * merged_count);
compressed = true;
}
/// NOLINTEND(readability-else-after-return)
}
void write(WriteBuffer & buf) const
@ -292,12 +295,10 @@ private:
Int64 max_rank = min_rank + curr_sample.delta;
if (max_rank - target_error <= rank && rank <= min_rank + target_error)
return {i, min_rank, curr_sample.value};
else
{
++i;
curr_sample = sampled[i];
min_rank += curr_sample.g;
}
++i;
curr_sample = sampled[i];
min_rank += curr_sample.g;
}
return {sampled.size() - 1, 0, sampled.back().value};
}

View File

@ -747,7 +747,7 @@ AggregateFunctionPtr createAggregateFunctionSequenceBase(
WhichDataType which(argument_types.front().get());
if (which.isDateTime())
return std::make_shared<AggregateFunction<DataTypeDateTime::FieldType, Data<DataTypeDateTime::FieldType>>>(argument_types, params, pattern);
else if (which.isDate())
if (which.isDate())
return std::make_shared<AggregateFunction<DataTypeDate::FieldType, Data<DataTypeDate::FieldType>>>(argument_types, params, pattern);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,

View File

@ -124,8 +124,7 @@ private:
{
if (count < 2)
return std::numeric_limits<Float64>::infinity();
else
return m2 / (count - 1);
return m2 / (count - 1);
}
static Float64 getStddevSamp(Float64 m2, UInt64 count)
@ -137,10 +136,9 @@ private:
{
if (count == 0)
return std::numeric_limits<Float64>::infinity();
else if (count == 1)
if (count == 1)
return 0.0;
else
return m2 / count;
return m2 / count;
}
static Float64 getStddevPop(Float64 m2, UInt64 count)
@ -363,26 +361,23 @@ private:
{
if (count < 2)
return std::numeric_limits<Float64>::infinity();
else
return co_moment / (count - 1);
return co_moment / (count - 1);
}
static Float64 getCovarPop(Float64 co_moment, UInt64 count)
{
if (count == 0)
return std::numeric_limits<Float64>::infinity();
else if (count == 1)
if (count == 1)
return 0.0;
else
return co_moment / count;
return co_moment / count;
}
static Float64 getCorr(Float64 co_moment, Float64 left_m2, Float64 right_m2, UInt64 count)
{
if (count < 2)
return std::numeric_limits<Float64>::infinity();
else
return co_moment / sqrt(left_m2 * right_m2);
return co_moment / sqrt(left_m2 * right_m2);
}
Float64 getResult(ConstAggregateDataPtr __restrict place) const

View File

@ -112,8 +112,7 @@ public:
{
if (revision >= STATE_VERSION_1_MIN_REVISION)
return 1;
else
return 0;
return 0;
}
static DataTypePtr createResultType(
@ -298,12 +297,13 @@ public:
Field value = values[col_idx];
/// Compatibility with previous versions.
if (value.getType() == Field::Types::Decimal32)
WhichDataType value_type(values_types[col_idx]);
if (value_type.isDecimal32())
{
auto source = value.safeGet<DecimalField<Decimal32>>();
value = DecimalField<Decimal128>(source.getValue(), source.getScale());
}
else if (value.getType() == Field::Types::Decimal64)
else if (value_type.isDecimal64())
{
auto source = value.safeGet<DecimalField<Decimal64>>();
value = DecimalField<Decimal128>(source.getValue(), source.getScale());
@ -545,7 +545,28 @@ public:
}
}
bool keepKey(const Field & key) const { return keys_to_keep.contains(key); }
bool keepKey(const Field & key) const
{
if (keys_to_keep.contains(key))
return true;
// Determine whether the numerical value of the key can have both types (UInt or Int),
// and use the other type with the same numerical value for keepKey verification.
if (key.getType() == Field::Types::UInt64)
{
const auto & value = key.safeGet<const UInt64 &>();
if (value <= std::numeric_limits<Int64>::max())
return keys_to_keep.contains(Field(Int64(value)));
}
else if (key.getType() == Field::Types::Int64)
{
const auto & value = key.safeGet<const Int64 &>();
if (value >= 0)
return keys_to_keep.contains(Field(UInt64(value)));
}
return false;
}
};
@ -742,8 +763,7 @@ void registerAggregateFunctionSumMap(AggregateFunctionFactory & factory)
auto [keys_type, values_types, tuple_argument] = parseArguments(name, arguments);
if (tuple_argument)
return std::make_shared<AggregateFunctionSumMap<false, true>>(keys_type, values_types, arguments, params);
else
return std::make_shared<AggregateFunctionSumMap<false, false>>(keys_type, values_types, arguments, params);
return std::make_shared<AggregateFunctionSumMap<false, false>>(keys_type, values_types, arguments, params);
});
factory.registerFunction("minMappedArrays", [](const std::string & name, const DataTypes & arguments, const Array & params, const Settings *) -> AggregateFunctionPtr
@ -751,8 +771,7 @@ void registerAggregateFunctionSumMap(AggregateFunctionFactory & factory)
auto [keys_type, values_types, tuple_argument] = parseArguments(name, arguments);
if (tuple_argument)
return std::make_shared<AggregateFunctionMinMap<true>>(keys_type, values_types, arguments, params);
else
return std::make_shared<AggregateFunctionMinMap<false>>(keys_type, values_types, arguments, params);
return std::make_shared<AggregateFunctionMinMap<false>>(keys_type, values_types, arguments, params);
});
factory.registerFunction("maxMappedArrays", [](const std::string & name, const DataTypes & arguments, const Array & params, const Settings *) -> AggregateFunctionPtr
@ -760,8 +779,7 @@ void registerAggregateFunctionSumMap(AggregateFunctionFactory & factory)
auto [keys_type, values_types, tuple_argument] = parseArguments(name, arguments);
if (tuple_argument)
return std::make_shared<AggregateFunctionMaxMap<true>>(keys_type, values_types, arguments, params);
else
return std::make_shared<AggregateFunctionMaxMap<false>>(keys_type, values_types, arguments, params);
return std::make_shared<AggregateFunctionMaxMap<false>>(keys_type, values_types, arguments, params);
});
// these functions could be renamed to *MappedArrays too, but it would
@ -771,8 +789,7 @@ void registerAggregateFunctionSumMap(AggregateFunctionFactory & factory)
auto [keys_type, values_types, tuple_argument] = parseArguments(name, arguments);
if (tuple_argument)
return std::make_shared<AggregateFunctionSumMap<true, true>>(keys_type, values_types, arguments, params);
else
return std::make_shared<AggregateFunctionSumMap<true, false>>(keys_type, values_types, arguments, params);
return std::make_shared<AggregateFunctionSumMap<true, false>>(keys_type, values_types, arguments, params);
});
factory.registerFunction("sumMapFiltered", [](const std::string & name, const DataTypes & arguments, const Array & params, const Settings *) -> AggregateFunctionPtr
@ -780,8 +797,7 @@ void registerAggregateFunctionSumMap(AggregateFunctionFactory & factory)
auto [keys_type, values_types, tuple_argument] = parseArguments(name, arguments);
if (tuple_argument)
return std::make_shared<AggregateFunctionSumMapFiltered<false, true>>(keys_type, values_types, arguments, params);
else
return std::make_shared<AggregateFunctionSumMapFiltered<false, false>>(keys_type, values_types, arguments, params);
return std::make_shared<AggregateFunctionSumMapFiltered<false, false>>(keys_type, values_types, arguments, params);
});
factory.registerFunction("sumMapFilteredWithOverflow", [](const std::string & name, const DataTypes & arguments, const Array & params, const Settings *) -> AggregateFunctionPtr
@ -789,8 +805,7 @@ void registerAggregateFunctionSumMap(AggregateFunctionFactory & factory)
auto [keys_type, values_types, tuple_argument] = parseArguments(name, arguments);
if (tuple_argument)
return std::make_shared<AggregateFunctionSumMapFiltered<true, true>>(keys_type, values_types, arguments, params);
else
return std::make_shared<AggregateFunctionSumMapFiltered<true, false>>(keys_type, values_types, arguments, params);
return std::make_shared<AggregateFunctionSumMapFiltered<true, false>>(keys_type, values_types, arguments, params);
});
}

View File

@ -96,25 +96,18 @@ public:
std::move(names)
);
}
else
{
DataTypes types
{
std::make_shared<DataTypeNumber<Float64>>(),
std::make_shared<DataTypeNumber<Float64>>(),
};
Strings names
{
"t_statistic",
"p_value",
};
DataTypes types{
std::make_shared<DataTypeNumber<Float64>>(),
std::make_shared<DataTypeNumber<Float64>>(),
};
return std::make_shared<DataTypeTuple>(
std::move(types),
std::move(names)
);
}
Strings names{
"t_statistic",
"p_value",
};
return std::make_shared<DataTypeTuple>(std::move(types), std::move(names));
}
bool allocatesMemoryInArena() const override { return false; }

View File

@ -79,8 +79,7 @@ public:
{
if (is_approx_top_k)
return is_weighted ? "approx_top_sum" : "approx_top_k";
else
return is_weighted ? "topKWeighted" : "topK";
return is_weighted ? "topKWeighted" : "topK";
}
static DataTypePtr createResultType(const DataTypes & argument_types_, bool include_counts_)
@ -106,8 +105,7 @@ public:
std::move(names)
));
}
else
return std::make_shared<DataTypeArray>(argument_types_[0]);
return std::make_shared<DataTypeArray>(argument_types_[0]);
}
bool allocatesMemoryInArena() const override { return false; }
@ -226,8 +224,7 @@ public:
{
if (is_approx_top_k)
return is_weighted ? "approx_top_sum" : "approx_top_k";
else
return is_weighted ? "topKWeighted" : "topK";
return is_weighted ? "topKWeighted" : "topK";
}
static DataTypePtr createResultType(const DataTypes & argument_types_, bool include_counts_)
@ -253,10 +250,8 @@ public:
std::move(names)
));
} else
{
return std::make_shared<DataTypeArray>(argument_types_[0]);
}
return std::make_shared<DataTypeArray>(argument_types_[0]);
}
bool allocatesMemoryInArena() const override
@ -440,8 +435,8 @@ IAggregateFunction * createWithExtraTypes(const DataTypes & argument_types, UInt
/// Check that we can use plain version of AggregateFunctionTopKGeneric
if (argument_types[0]->isValueUnambiguouslyRepresentedInContiguousMemoryRegion())
return new AggregateFunctionTopKGeneric<true, is_weighted>(threshold, reserved, include_counts, is_approx_top_k, argument_types, params);
else
return new AggregateFunctionTopKGeneric<false, is_weighted>(threshold, reserved, include_counts, is_approx_top_k, argument_types, params);
return new AggregateFunctionTopKGeneric<false, is_weighted>(
threshold, reserved, include_counts, is_approx_top_k, argument_types, params);
}

View File

@ -54,34 +54,32 @@ createAggregateFunctionUniq(const std::string & name, const DataTypes & argument
WhichDataType which(argument_type);
if (res)
return res;
else if (which.isDate())
if (which.isDate())
return std::make_shared<AggregateFunctionUniq<DataTypeDate::FieldType, Data>>(argument_types);
else if (which.isDate32())
if (which.isDate32())
return std::make_shared<AggregateFunctionUniq<DataTypeDate32::FieldType, Data>>(argument_types);
else if (which.isDateTime())
if (which.isDateTime())
return std::make_shared<AggregateFunctionUniq<DataTypeDateTime::FieldType, Data>>(argument_types);
else if (which.isStringOrFixedString())
if (which.isStringOrFixedString())
return std::make_shared<AggregateFunctionUniq<String, Data>>(argument_types);
else if (which.isUUID())
if (which.isUUID())
return std::make_shared<AggregateFunctionUniq<DataTypeUUID::FieldType, Data>>(argument_types);
else if (which.isIPv4())
if (which.isIPv4())
return std::make_shared<AggregateFunctionUniq<DataTypeIPv4::FieldType, Data>>(argument_types);
else if (which.isIPv6())
if (which.isIPv6())
return std::make_shared<AggregateFunctionUniq<DataTypeIPv6::FieldType, Data>>(argument_types);
else if (which.isTuple())
if (which.isTuple())
{
if (use_exact_hash_function)
return std::make_shared<AggregateFunctionUniqVariadic<DataForVariadic<true, true>>>(argument_types);
else
return std::make_shared<AggregateFunctionUniqVariadic<DataForVariadic<false, true>>>(argument_types);
return std::make_shared<AggregateFunctionUniqVariadic<DataForVariadic<false, true>>>(argument_types);
}
}
/// "Variadic" method also works as a fallback generic case for single argument.
if (use_exact_hash_function)
return std::make_shared<AggregateFunctionUniqVariadic<DataForVariadic<true, false>>>(argument_types);
else
return std::make_shared<AggregateFunctionUniqVariadic<DataForVariadic<false, false>>>(argument_types);
return std::make_shared<AggregateFunctionUniqVariadic<DataForVariadic<false, false>>>(argument_types);
}
template <bool is_exact, template <typename, bool> typename Data, template <bool, bool, bool> typename DataForVariadic, bool is_able_to_parallelize_merge>
@ -107,34 +105,46 @@ createAggregateFunctionUniq(const std::string & name, const DataTypes & argument
WhichDataType which(argument_type);
if (res)
return res;
else if (which.isDate())
return std::make_shared<AggregateFunctionUniq<DataTypeDate::FieldType, Data<DataTypeDate::FieldType, is_able_to_parallelize_merge>>>(argument_types);
else if (which.isDate32())
return std::make_shared<AggregateFunctionUniq<DataTypeDate32::FieldType, Data<DataTypeDate32::FieldType, is_able_to_parallelize_merge>>>(argument_types);
else if (which.isDateTime())
return std::make_shared<AggregateFunctionUniq<DataTypeDateTime::FieldType, Data<DataTypeDateTime::FieldType, is_able_to_parallelize_merge>>>(argument_types);
else if (which.isStringOrFixedString())
if (which.isDate())
return std::make_shared<
AggregateFunctionUniq<DataTypeDate::FieldType, Data<DataTypeDate::FieldType, is_able_to_parallelize_merge>>>(
argument_types);
if (which.isDate32())
return std::make_shared<
AggregateFunctionUniq<DataTypeDate32::FieldType, Data<DataTypeDate32::FieldType, is_able_to_parallelize_merge>>>(
argument_types);
if (which.isDateTime())
return std::make_shared<
AggregateFunctionUniq<DataTypeDateTime::FieldType, Data<DataTypeDateTime::FieldType, is_able_to_parallelize_merge>>>(
argument_types);
if (which.isStringOrFixedString())
return std::make_shared<AggregateFunctionUniq<String, Data<String, is_able_to_parallelize_merge>>>(argument_types);
else if (which.isUUID())
return std::make_shared<AggregateFunctionUniq<DataTypeUUID::FieldType, Data<DataTypeUUID::FieldType, is_able_to_parallelize_merge>>>(argument_types);
else if (which.isIPv4())
return std::make_shared<AggregateFunctionUniq<DataTypeIPv4::FieldType, Data<DataTypeIPv4::FieldType, is_able_to_parallelize_merge>>>(argument_types);
else if (which.isIPv6())
return std::make_shared<AggregateFunctionUniq<DataTypeIPv6::FieldType, Data<DataTypeIPv6::FieldType, is_able_to_parallelize_merge>>>(argument_types);
else if (which.isTuple())
if (which.isUUID())
return std::make_shared<
AggregateFunctionUniq<DataTypeUUID::FieldType, Data<DataTypeUUID::FieldType, is_able_to_parallelize_merge>>>(
argument_types);
if (which.isIPv4())
return std::make_shared<
AggregateFunctionUniq<DataTypeIPv4::FieldType, Data<DataTypeIPv4::FieldType, is_able_to_parallelize_merge>>>(
argument_types);
if (which.isIPv6())
return std::make_shared<
AggregateFunctionUniq<DataTypeIPv6::FieldType, Data<DataTypeIPv6::FieldType, is_able_to_parallelize_merge>>>(
argument_types);
if (which.isTuple())
{
if (use_exact_hash_function)
return std::make_shared<AggregateFunctionUniqVariadic<DataForVariadic<true, true, is_able_to_parallelize_merge>>>(argument_types);
else
return std::make_shared<AggregateFunctionUniqVariadic<DataForVariadic<false, true, is_able_to_parallelize_merge>>>(argument_types);
return std::make_shared<AggregateFunctionUniqVariadic<DataForVariadic<true, true, is_able_to_parallelize_merge>>>(
argument_types);
return std::make_shared<AggregateFunctionUniqVariadic<DataForVariadic<false, true, is_able_to_parallelize_merge>>>(
argument_types);
}
}
/// "Variadic" method also works as a fallback generic case for single argument.
if (use_exact_hash_function)
return std::make_shared<AggregateFunctionUniqVariadic<DataForVariadic<true, false, is_able_to_parallelize_merge>>>(argument_types);
else
return std::make_shared<AggregateFunctionUniqVariadic<DataForVariadic<false, false, is_able_to_parallelize_merge>>>(argument_types);
return std::make_shared<AggregateFunctionUniqVariadic<DataForVariadic<false, false, is_able_to_parallelize_merge>>>(argument_types);
}
}
@ -155,9 +165,11 @@ void registerAggregateFunctionsUniq(AggregateFunctionFactory & factory)
if (settings && (*settings)[Setting::max_threads] > 1)
return createAggregateFunctionUniq<
true, AggregateFunctionUniqExactData, AggregateFunctionUniqExactDataForVariadic, true /* is_able_to_parallelize_merge */>(name, argument_types, params, settings);
else
return createAggregateFunctionUniq<
true, AggregateFunctionUniqExactData, AggregateFunctionUniqExactDataForVariadic, false /* is_able_to_parallelize_merge */>(name, argument_types, params, settings);
return createAggregateFunctionUniq<
true,
AggregateFunctionUniqExactData,
AggregateFunctionUniqExactDataForVariadic,
false /* is_able_to_parallelize_merge */>(name, argument_types, params, settings);
};
factory.registerFunction("uniqExact", {assign_bool_param, properties});

View File

@ -235,34 +235,40 @@ AggregateFunctionPtr createAggregateFunctionWithK(const DataTypes & argument_typ
WhichDataType which(argument_type);
if (res)
return res;
else if (which.isDate())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeDate::FieldType>>(argument_types, params);
else if (which.isDate32())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeDate32::FieldType>>(argument_types, params);
else if (which.isDateTime())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeDateTime::FieldType>>(argument_types, params);
else if (which.isStringOrFixedString())
if (which.isDate())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeDate::FieldType>>(
argument_types, params);
if (which.isDate32())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeDate32::FieldType>>(
argument_types, params);
if (which.isDateTime())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeDateTime::FieldType>>(
argument_types, params);
if (which.isStringOrFixedString())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<String>>(argument_types, params);
else if (which.isUUID())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeUUID::FieldType>>(argument_types, params);
else if (which.isIPv4())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeIPv4::FieldType>>(argument_types, params);
else if (which.isIPv6())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeIPv6::FieldType>>(argument_types, params);
else if (which.isTuple())
if (which.isUUID())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeUUID::FieldType>>(
argument_types, params);
if (which.isIPv4())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeIPv4::FieldType>>(
argument_types, params);
if (which.isIPv6())
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunction<DataTypeIPv6::FieldType>>(
argument_types, params);
if (which.isTuple())
{
if (use_exact_hash_function)
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunctionVariadic<true, true>>(argument_types, params);
else
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunctionVariadic<false, true>>(argument_types, params);
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunctionVariadic<true, true>>(
argument_types, params);
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunctionVariadic<false, true>>(
argument_types, params);
}
}
/// "Variadic" method also works as a fallback generic case for a single argument.
if (use_exact_hash_function)
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunctionVariadic<true, false>>(argument_types, params);
else
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunctionVariadic<false, false>>(argument_types, params);
return std::make_shared<typename WithK<K, HashValueType>::template AggregateFunctionVariadic<false, false>>(argument_types, params);
}
template <UInt8 K>
@ -270,8 +276,7 @@ AggregateFunctionPtr createAggregateFunctionWithHashType(bool use_64_bit_hash, c
{
if (use_64_bit_hash)
return createAggregateFunctionWithK<K, UInt64>(argument_types, params);
else
return createAggregateFunctionWithK<K, UInt32>(argument_types, params);
return createAggregateFunctionWithK<K, UInt32>(argument_types, params);
}
/// Let's instantiate these templates in separate translation units,

View File

@ -324,30 +324,28 @@ AggregateFunctionPtr createAggregateFunctionUniqUpTo(const std::string & name, c
WhichDataType which(argument_type);
if (res)
return res;
else if (which.isDate())
if (which.isDate())
return std::make_shared<AggregateFunctionUniqUpTo<DataTypeDate::FieldType>>(threshold, argument_types, params);
else if (which.isDate32())
if (which.isDate32())
return std::make_shared<AggregateFunctionUniqUpTo<DataTypeDate32::FieldType>>(threshold, argument_types, params);
else if (which.isDateTime())
if (which.isDateTime())
return std::make_shared<AggregateFunctionUniqUpTo<DataTypeDateTime::FieldType>>(threshold, argument_types, params);
else if (which.isStringOrFixedString())
if (which.isStringOrFixedString())
return std::make_shared<AggregateFunctionUniqUpTo<String>>(threshold, argument_types, params);
else if (which.isUUID())
if (which.isUUID())
return std::make_shared<AggregateFunctionUniqUpTo<DataTypeUUID::FieldType>>(threshold, argument_types, params);
else if (which.isTuple())
if (which.isTuple())
{
if (use_exact_hash_function)
return std::make_shared<AggregateFunctionUniqUpToVariadic<true, true>>(argument_types, params, threshold);
else
return std::make_shared<AggregateFunctionUniqUpToVariadic<false, true>>(argument_types, params, threshold);
return std::make_shared<AggregateFunctionUniqUpToVariadic<false, true>>(argument_types, params, threshold);
}
}
/// "Variadic" method also works as a fallback generic case for single argument.
if (use_exact_hash_function)
return std::make_shared<AggregateFunctionUniqUpToVariadic<true, false>>(argument_types, params, threshold);
else
return std::make_shared<AggregateFunctionUniqUpToVariadic<false, false>>(argument_types, params, threshold);
return std::make_shared<AggregateFunctionUniqUpToVariadic<false, false>>(argument_types, params, threshold);
}
}

View File

@ -180,10 +180,9 @@ private:
{
if (first_event)
break;
else
continue;
continue;
}
else if (event_idx == 0)
if (event_idx == 0)
{
events_timestamp[0] = std::make_pair(timestamp, timestamp);
first_event = true;
@ -326,10 +325,11 @@ createAggregateFunctionWindowFunnel(const std::string & name, const DataTypes &
WhichDataType which(arguments.front().get());
if (res)
return res;
else if (which.isDate())
if (which.isDate())
return std::make_shared<AggregateFunctionWindowFunnel<DataTypeDate::FieldType, Data<DataTypeDate::FieldType>>>(arguments, params);
else if (which.isDateTime())
return std::make_shared<AggregateFunctionWindowFunnel<DataTypeDateTime::FieldType, Data<DataTypeDateTime::FieldType>>>(arguments, params);
if (which.isDateTime())
return std::make_shared<AggregateFunctionWindowFunnel<DataTypeDateTime::FieldType, Data<DataTypeDateTime::FieldType>>>(
arguments, params);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of first argument of aggregate function {}, must "

View File

@ -50,10 +50,8 @@ public:
return std::make_shared<
AggregateFunctionDistinct<
AggregateFunctionDistinctSingleGenericData<true>>>(nested_function, arguments, params);
else
return std::make_shared<
AggregateFunctionDistinct<
AggregateFunctionDistinctSingleGenericData<false>>>(nested_function, arguments, params);
return std::make_shared<AggregateFunctionDistinct<AggregateFunctionDistinctSingleGenericData<false>>>(
nested_function, arguments, params);
}
return std::make_shared<AggregateFunctionDistinct<AggregateFunctionDistinctMultipleGenericData>>(nested_function, arguments, params);

View File

@ -473,28 +473,20 @@ AggregateFunctionPtr AggregateFunctionIf::getOwnNullAdapter(
{
return std::make_shared<AggregateFunctionIfNullUnary<true, true>>(nested_function->getName(), nested_func, arguments, params);
}
else
{
if (need_to_serialize_flag)
return std::make_shared<AggregateFunctionIfNullUnary<false, true>>(nested_function->getName(), nested_func, arguments, params);
else
return std::make_shared<AggregateFunctionIfNullUnary<false, false>>(nested_function->getName(), nested_func, arguments, params);
}
if (need_to_serialize_flag)
return std::make_shared<AggregateFunctionIfNullUnary<false, true>>(nested_function->getName(), nested_func, arguments, params);
return std::make_shared<AggregateFunctionIfNullUnary<false, false>>(nested_function->getName(), nested_func, arguments, params);
}
else
if (return_type_is_nullable)
{
if (return_type_is_nullable)
{
return std::make_shared<AggregateFunctionIfNullVariadic<true, true>>(nested_function, arguments, params);
}
else
{
if (need_to_serialize_flag)
return std::make_shared<AggregateFunctionIfNullVariadic<false, true>>(nested_function, arguments, params);
else
return std::make_shared<AggregateFunctionIfNullVariadic<false, false>>(nested_function, arguments, params);
}
return std::make_shared<AggregateFunctionIfNullVariadic<true, true>>(nested_function, arguments, params);
}
if (need_to_serialize_flag)
return std::make_shared<AggregateFunctionIfNullVariadic<false, true>>(nested_function, arguments, params);
return std::make_shared<AggregateFunctionIfNullVariadic<false, false>>(nested_function, arguments, params);
}
void registerAggregateFunctionCombinatorIf(AggregateFunctionCombinatorFactory & factory)

View File

@ -450,9 +450,8 @@ public:
auto action = NullsAction::EMPTY;
return aggr_func_factory.get(nested_func_name + "MappedArrays", action, arguments, params, out_properties);
}
else
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregation '{}Map' is not implemented for mapped arrays",
nested_func_name);
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregation '{}Map' is not implemented for mapped arrays", nested_func_name);
}
}
};

View File

@ -112,8 +112,7 @@ public:
*/
if (properties.returns_default_when_only_null)
return std::make_shared<AggregateFunctionNothingUInt64>(arguments, params);
else
return std::make_shared<AggregateFunctionNothingNull>(arguments, params);
return std::make_shared<AggregateFunctionNothingNull>(arguments, params);
}
assert(nested_function);
@ -137,23 +136,18 @@ public:
{
return std::make_shared<AggregateFunctionNullUnary<true, true>>(nested_function, arguments, params);
}
else
{
if (serialize_flag)
return std::make_shared<AggregateFunctionNullUnary<false, true>>(nested_function, arguments, params);
else
return std::make_shared<AggregateFunctionNullUnary<false, false>>(nested_function, arguments, params);
}
if (serialize_flag)
return std::make_shared<AggregateFunctionNullUnary<false, true>>(nested_function, arguments, params);
return std::make_shared<AggregateFunctionNullUnary<false, false>>(nested_function, arguments, params);
}
else
if (return_type_is_nullable)
{
if (return_type_is_nullable)
{
return std::make_shared<AggregateFunctionNullVariadic<true, true>>(nested_function, arguments, params);
}
else
{
return std::make_shared<AggregateFunctionNullVariadic<false, true>>(nested_function, arguments, params);
return std::make_shared<AggregateFunctionNullVariadic<true, true>>(nested_function, arguments, params);
}
return std::make_shared<AggregateFunctionNullVariadic<false, true>>(nested_function, arguments, params);
#if 0
if (serialize_flag)
return std::make_shared<AggregateFunctionNullVariadic<false, true>>(nested_function, arguments, params);
@ -164,8 +158,6 @@ public:
return std::make_shared<AggregateFunctionNullVariadic<false, true>>(nested_function, arguments, params);
}
#endif
}
}
}
};

View File

@ -37,8 +37,7 @@ public:
{
if (kind == Kind::OrNull)
return std::make_shared<AggregateFunctionOrFill<true>>(nested_function, arguments, params);
else
return std::make_shared<AggregateFunctionOrFill<false>>(nested_function, arguments, params);
return std::make_shared<AggregateFunctionOrFill<false>>(nested_function, arguments, params);
}
};

Some files were not shown because too many files have changed in this diff