Merge branch 'master' into variant_inference

This commit is contained in:
Shaun Struwig 2024-05-15 16:23:31 +02:00 committed by GitHub
commit f8e71b8c7f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
694 changed files with 4974 additions and 2218 deletions

View File

@ -123,7 +123,21 @@ Checks: [
'-readability-uppercase-literal-suffix',
'-readability-use-anyofallof',
'-zircon-*'
'-zircon-*',
# These are new in clang-18, and we have to sort them out:
'-readability-avoid-nested-conditional-operator',
'-modernize-use-designated-initializers',
'-performance-enum-size',
'-readability-redundant-inline-specifier',
'-readability-redundant-member-init',
'-bugprone-crtp-constructor-accessibility',
'-bugprone-suspicious-stringview-data-usage',
'-bugprone-multi-level-implicit-pointer-conversion',
'-cert-err33-c',
# This is a good check, but clang-tidy crashes, see https://github.com/llvm/llvm-project/issues/91872
'-modernize-use-constraints',
]
WarningsAsErrors: '*'

View File

@ -85,4 +85,4 @@ At a minimum, the following information should be added (but add more as needed)
- [ ] <!---batch_2--> 3
- [ ] <!---batch_3--> 4
<details>
</details>

View File

@ -9,6 +9,12 @@ on: # yamllint disable-line rule:truthy
push:
branches:
- 'backport/**'
# Cancel the previous wf run in PRs.
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
RunConfig:
runs-on: [self-hosted, style-checker-aarch64]

View File

@ -1,19 +0,0 @@
name: Cancel
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
workflow_run:
workflows: ["PullRequestCI", "ReleaseBranchCI", "DocsCheck", "BackportPR"]
types:
- requested
jobs:
cancel:
runs-on: [self-hosted, style-checker]
steps:
- uses: styfle/cancel-workflow-action@0.9.1
with:
all_but_latest: true
workflow_id: ${{ github.event.workflow.id }}

View File

@ -1,11 +0,0 @@
# The CI for each commit, prints envs and content of GITHUB_EVENT_PATH
name: Debug
'on':
[push, pull_request, pull_request_review, release, workflow_dispatch, workflow_call]
jobs:
DebugInfo:
runs-on: ubuntu-latest
steps:
- uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6

.github/workflows/merge_queue.yml vendored Normal file
View File

@ -0,0 +1,96 @@
# yamllint disable rule:comments-indentation
name: MergeQueueCI
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
merge_group:
jobs:
RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Python unit tests
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
echo "Testing the main ci directory"
python3 -m unittest discover -s . -p 'test_*.py'
for dir in *_lambda/; do
echo "Testing $dir"
python3 -m unittest discover -s "$dir" -p 'test_*.py'
done
- name: PrepareRunConfig
id: runconfig
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
echo "::group::CI configuration"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
uses: ./.github/workflows/reusable_docker.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
StyleCheck:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Style check')}}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Style check
runner_type: style-checker
run_command: |
python3 style_check.py
data: ${{ needs.RunConfig.outputs.data }}
secrets:
secret_envs: |
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
FastTest:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Fast test
runner_type: builder
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
python3 fast_test_check.py
################################# Stage Final #################################
#
FinishCheck:
if: ${{ !failure() && !cancelled() }}
needs: [RunConfig, BuildDockers, StyleCheck, FastTest]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
- name: Check sync status
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 sync_pr.py --status
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 finish_check.py ${{ (contains(needs.*.result, 'failure') && github.event_name == 'merge_group') && '--pipeline-failure' || '' }}

View File

@ -10,14 +10,13 @@ env:
workflow_dispatch:
jobs:
Debug:
# The task for having a preserved ENV and event.json for later investigation
uses: ./.github/workflows/debug.yml
RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:

View File

@ -6,7 +6,6 @@ env:
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
merge_group:
pull_request:
types:
- synchronize
@ -15,6 +14,11 @@ on: # yamllint disable-line rule:truthy
branches:
- master
# Cancel the previous wf run in PRs.
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
@ -30,7 +34,6 @@ jobs:
fetch-depth: 0 # to get version
filter: tree:0
- name: Labels check
if: ${{ github.event_name != 'merge_group' }}
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 run_check.py
@ -58,7 +61,6 @@ jobs:
echo 'EOF'
} >> "$GITHUB_OUTPUT"
- name: Re-create GH statuses for skipped jobs if any
if: ${{ github.event_name != 'merge_group' }}
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
BuildDockers:
@ -83,7 +85,7 @@ jobs:
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
FastTest:
needs: [RunConfig, BuildDockers]
needs: [RunConfig, BuildDockers, StyleCheck]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}
uses: ./.github/workflows/reusable_test.yml
with:
@ -163,20 +165,16 @@ jobs:
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
- name: Check sync status
if: ${{ github.event_name == 'merge_group' }}
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 sync_pr.py --status
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 finish_check.py ${{ (contains(needs.*.result, 'failure') && github.event_name == 'merge_group') && '--pipeline-failure' || '' }}
- name: Auto merge if approved
if: ${{ github.event_name != 'merge_group' }}
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 merge_pr.py --check-approved
python3 finish_check.py
# FIXME: merge on approval does not work with MQ. Could be fixed by using default GH's automerge after some corrections in Mergeable Check status
# - name: Auto merge if approved
# if: ${{ github.event_name != 'merge_group' }}
# run: |
# cd "$GITHUB_WORKSPACE/tests/ci"
# python3 merge_pr.py --check-approved
#############################################################################################

View File

@ -1,23 +0,0 @@
name: PullRequestApprovedCI
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
pull_request_review:
types:
- submitted
jobs:
MergeOnApproval:
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Merge approved PR
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 merge_pr.py --check-approved

View File

@ -51,11 +51,9 @@ struct DecomposedFloat
/// Returns 0 for both +0. and -0.
int sign() const
{
return (exponent() == 0 && mantissa() == 0)
? 0
: (isNegative()
? -1
: 1);
if (exponent() == 0 && mantissa() == 0)
return 0;
return isNegative() ? -1 : 1;
}
uint16_t exponent() const

View File

@ -11,7 +11,7 @@ namespace detail
template <is_enum E, class F, size_t ...I>
constexpr void static_for(F && f, std::index_sequence<I...>)
{
(std::forward<F>(f)(std::integral_constant<E, magic_enum::enum_value<E>(I)>()) , ...);
(f(std::integral_constant<E, magic_enum::enum_value<E>(I)>()) , ...);
}
}

View File

@ -651,7 +651,9 @@ std::string_view JSON::getRawString() const
Pos s = ptr_begin;
if (*s != '"')
throw JSONException(std::string("JSON: expected \", got ") + *s);
while (++s != ptr_end && *s != '"');
++s;
while (s != ptr_end && *s != '"')
++s;
if (s != ptr_end)
return std::string_view(ptr_begin + 1, s - ptr_begin - 1);
throw JSONException("JSON: incorrect syntax (expected end of string, found end of JSON).");

View File

@ -74,7 +74,7 @@ public:
const char * data() const { return ptr_begin; }
const char * dataEnd() const { return ptr_end; }
enum ElementType
enum ElementType : uint8_t
{
TYPE_OBJECT,
TYPE_ARRAY,

View File

@ -27,7 +27,7 @@ namespace TypeListUtils /// In some contexts it's more handy to use functions in
constexpr Root<Args...> changeRoot(TypeList<Args...>) { return {}; }
template <typename F, typename ...Args>
constexpr void forEach(TypeList<Args...>, F && f) { (std::forward<F>(f)(TypeList<Args>{}), ...); }
constexpr void forEach(TypeList<Args...>, F && f) { (f(TypeList<Args>{}), ...); }
}
template <typename TypeListLeft, typename TypeListRight>

View File

@ -21,7 +21,7 @@ bool func_wrapper(Func && func, Arg && arg)
template <typename T, T Begin, typename Func, T... Is>
constexpr bool static_for_impl(Func && f, std::integer_sequence<T, Is...>)
{
return (func_wrapper(std::forward<Func>(f), std::integral_constant<T, Begin + Is>{}) || ...);
return (func_wrapper(f, std::integral_constant<T, Begin + Is>{}) || ...);
}
template <auto Begin, decltype(Begin) End, typename Func>

View File

@ -147,7 +147,7 @@ constexpr uint16_t maybe_negate(uint16_t x)
return ~x;
}
enum class ReturnMode
enum class ReturnMode : uint8_t
{
End,
Nullptr,

View File

@ -77,8 +77,7 @@ uint64_t getMemoryAmountOrZero()
{
uint64_t limit_v1;
if (limit_file_v1 >> limit_v1)
if (limit_v1 < memory_amount)
memory_amount = limit_v1;
memory_amount = std::min(memory_amount, limit_v1);
}
}

View File

@ -146,7 +146,7 @@ namespace impl
TUInt res;
if constexpr (sizeof(TUInt) == 1)
{
res = static_cast<UInt8>(unhexDigit(data[0])) * 0x10 + static_cast<UInt8>(unhexDigit(data[1]));
res = unhexDigit(data[0]) * 0x10 + unhexDigit(data[1]);
}
else if constexpr (sizeof(TUInt) == 2)
{
@ -176,17 +176,19 @@ namespace impl
};
/// Helper template class to convert a value of any supported type to hexadecimal representation and back.
template <typename T, typename SFINAE = void>
template <typename T>
struct HexConversion;
template <typename TUInt>
struct HexConversion<TUInt, std::enable_if_t<std::is_integral_v<TUInt>>> : public HexConversionUInt<TUInt> {};
requires(std::is_integral_v<TUInt>)
struct HexConversion<TUInt> : public HexConversionUInt<TUInt> {};
template <size_t Bits, typename Signed>
struct HexConversion<wide::integer<Bits, Signed>> : public HexConversionUInt<wide::integer<Bits, Signed>> {};
template <typename CityHashUInt128> /// Partial specialization here allows not to include <city.h> in this header.
struct HexConversion<CityHashUInt128, std::enable_if_t<std::is_same_v<CityHashUInt128, typename CityHash_v1_0_2::uint128>>>
requires(std::is_same_v<CityHashUInt128, typename CityHash_v1_0_2::uint128>)
struct HexConversion<CityHashUInt128>
{
static const constexpr size_t num_hex_digits = 32;

View File

@ -20,24 +20,26 @@ Out & dumpValue(Out &, T &&);
/// Catch-all case.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == -1, Out> & dumpImpl(Out & out, T &&) // NOLINT(cppcoreguidelines-missing-std-forward)
requires(priority == -1)
Out & dumpImpl(Out & out, T &&) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << "{...}";
}
/// An object, that could be output with operator <<.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 0, Out> & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::declval<Out &>() << std::declval<T>())> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
requires(priority == 0)
Out & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::declval<Out &>() << std::declval<T>())> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << x;
}
/// A pointer-like object.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 1
requires(priority == 1
/// Protect from the case when operator * does effectively nothing (function pointer).
&& !std::is_same_v<std::decay_t<T>, std::decay_t<decltype(*std::declval<T>())>>
, Out> & dumpImpl(Out & out, T && x, std::decay_t<decltype(*std::declval<T>())> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
&& !std::is_same_v<std::decay_t<T>, std::decay_t<decltype(*std::declval<T>())>>)
Out & dumpImpl(Out & out, T && x, std::decay_t<decltype(*std::declval<T>())> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
if (!x)
return out << "nullptr";
@ -46,7 +48,8 @@ std::enable_if_t<priority == 1
/// Container.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 2, Out> & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::begin(std::declval<T>()))> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
requires(priority == 2)
Out & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::begin(std::declval<T>()))> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
bool first = true;
out << "{";
@ -63,8 +66,8 @@ std::enable_if_t<priority == 2, Out> & dumpImpl(Out & out, T && x, std::decay_t<
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 3 && std::is_enum_v<std::decay_t<T>>, Out> &
dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
requires(priority == 3 && std::is_enum_v<std::decay_t<T>>)
Out & dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << magic_enum::enum_name(x);
}
@ -72,8 +75,8 @@ dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
/// string and const char * - output not as container or pointer.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 3 && (std::is_same_v<std::decay_t<T>, std::string> || std::is_same_v<std::decay_t<T>, const char *>), Out> &
dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
requires(priority == 3 && (std::is_same_v<std::decay_t<T>, std::string> || std::is_same_v<std::decay_t<T>, const char *>))
Out & dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << std::quoted(x);
}
@ -81,8 +84,8 @@ dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
/// UInt8 - output as number, not char.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 3 && std::is_same_v<std::decay_t<T>, unsigned char>, Out> &
dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
requires(priority == 3 && std::is_same_v<std::decay_t<T>, unsigned char>)
Out & dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << int(x);
}
@ -108,7 +111,8 @@ Out & dumpTupleImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-f
}
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 4, Out> & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::get<0>(std::declval<T>()))> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
requires(priority == 4)
Out & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::get<0>(std::declval<T>()))> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return dumpTupleImpl<0>(out, x);
}

View File

@ -250,14 +250,16 @@ ALWAYS_INLINE inline char * uitoa<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize
//===----------------------------------------------------------===//
// itoa: handle unsigned integral operands (selected by SFINAE)
template <typename U, std::enable_if_t<!std::is_signed_v<U> && std::is_integral_v<U>> * = nullptr>
template <typename U>
requires(!std::is_signed_v<U> && std::is_integral_v<U>)
ALWAYS_INLINE inline char * itoa(U u, char * p)
{
return convert::uitoa(p, u);
}
// itoa: handle signed integral operands (selected by SFINAE)
template <typename I, size_t N = sizeof(I), std::enable_if_t<std::is_signed_v<I> && std::is_integral_v<I>> * = nullptr>
template <typename I, size_t N = sizeof(I)>
requires(std::is_signed_v<I> && std::is_integral_v<I>)
ALWAYS_INLINE inline char * itoa(I i, char * p)
{
// Need "mask" to be filled with a copy of the sign bit.

View File

@ -19,8 +19,8 @@ auto map(const Collection<Params...> & collection, Mapper && mapper)
using value_type = unqualified_t<decltype(mapper(*std::begin(collection)))>;
return Collection<value_type>(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
boost::make_transform_iterator(std::begin(collection), mapper),
boost::make_transform_iterator(std::end(collection), mapper));
}
/** \brief Returns collection of specified container-type,
@ -33,8 +33,8 @@ auto map(const Collection & collection, Mapper && mapper)
using value_type = unqualified_t<decltype(mapper(*std::begin(collection)))>;
return ResultCollection<value_type>(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
boost::make_transform_iterator(std::begin(collection), mapper),
boost::make_transform_iterator(std::end(collection), mapper));
}
/** \brief Returns collection of specified type,
@ -45,8 +45,8 @@ template <typename ResultCollection, typename Collection, typename Mapper>
auto map(const Collection & collection, Mapper && mapper)
{
return ResultCollection(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
boost::make_transform_iterator(std::begin(collection), mapper),
boost::make_transform_iterator(std::end(collection), mapper));
}
}

View File

@ -23,12 +23,10 @@ namespace internal
/// For loop adaptor which is used to iterate through a half-closed interval [begin, end).
/// The parameters `begin` and `end` can have any integral or enum types.
template <typename BeginType,
typename EndType,
typename = std::enable_if_t<
(std::is_integral_v<BeginType> || std::is_enum_v<BeginType>) &&
(std::is_integral_v<EndType> || std::is_enum_v<EndType>) &&
(!std::is_enum_v<BeginType> || !std::is_enum_v<EndType> || std::is_same_v<BeginType, EndType>), void>>
template <typename BeginType, typename EndType>
requires((std::is_integral_v<BeginType> || std::is_enum_v<BeginType>) &&
(std::is_integral_v<EndType> || std::is_enum_v<EndType>) &&
(!std::is_enum_v<BeginType> || !std::is_enum_v<EndType> || std::is_same_v<BeginType, EndType>))
inline auto range(BeginType begin, EndType end)
{
if constexpr (std::is_integral_v<BeginType> && std::is_integral_v<EndType>)
@ -50,8 +48,8 @@ inline auto range(BeginType begin, EndType end)
/// For loop adaptor which is used to iterate through a half-closed interval [0, end).
/// The parameter `end` can have any integral or enum type.
/// The same as range(0, end).
template <typename Type,
typename = std::enable_if_t<std::is_integral_v<Type> || std::is_enum_v<Type>, void>>
template <typename Type>
requires(std::is_integral_v<Type> || std::is_enum_v<Type>)
inline auto range(Type end)
{
if constexpr (std::is_integral_v<Type>)

View File

@ -2,6 +2,7 @@
#include <ctime>
#include <cerrno>
#include <system_error>
#if defined(OS_DARWIN)
#include <mach/mach.h>
@ -34,7 +35,8 @@ void sleepForNanoseconds(uint64_t nanoseconds)
constexpr auto clock_type = CLOCK_MONOTONIC;
struct timespec current_time;
clock_gettime(clock_type, &current_time);
if (0 != clock_gettime(clock_type, &current_time))
throw std::system_error(std::error_code(errno, std::system_category()));
constexpr uint64_t resolution = 1'000'000'000;
struct timespec finish_time = current_time;

View File

@ -111,7 +111,8 @@ public:
constexpr explicit operator bool() const noexcept;
template <typename T, typename = std::enable_if_t<std::is_arithmetic_v<T>, T>>
template <typename T>
requires(std::is_arithmetic_v<T>)
constexpr operator T() const noexcept;
constexpr operator long double() const noexcept;
@ -208,12 +209,14 @@ constexpr integer<Bits, Signed> operator<<(const integer<Bits, Signed> & lhs, in
template <size_t Bits, typename Signed>
constexpr integer<Bits, Signed> operator>>(const integer<Bits, Signed> & lhs, int n) noexcept;
template <size_t Bits, typename Signed, typename Int, typename = std::enable_if_t<!std::is_same_v<Int, int>>>
template <size_t Bits, typename Signed, typename Int>
requires(!std::is_same_v<Int, int>)
constexpr integer<Bits, Signed> operator<<(const integer<Bits, Signed> & lhs, Int n) noexcept
{
return lhs << int(n);
}
template <size_t Bits, typename Signed, typename Int, typename = std::enable_if_t<!std::is_same_v<Int, int>>>
template <size_t Bits, typename Signed, typename Int>
requires(!std::is_same_v<Int, int>)
constexpr integer<Bits, Signed> operator>>(const integer<Bits, Signed> & lhs, Int n) noexcept
{
return lhs >> int(n);
@ -262,4 +265,3 @@ struct hash<wide::integer<Bits, Signed>>;
// NOLINTEND(*)
#include "wide_integer_impl.h"

View File

@ -1246,7 +1246,8 @@ constexpr integer<Bits, Signed>::operator bool() const noexcept
}
template <size_t Bits, typename Signed>
template <class T, class>
template <class T>
requires(std::is_arithmetic_v<T>)
constexpr integer<Bits, Signed>::operator T() const noexcept
{
static_assert(std::numeric_limits<T>::is_integer);

View File

@ -5,14 +5,14 @@ if (ENABLE_CLANG_TIDY)
find_program (CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
if (CLANG_TIDY_CACHE_PATH)
find_program (_CLANG_TIDY_PATH NAMES "clang-tidy-17" "clang-tidy-16" "clang-tidy")
find_program (_CLANG_TIDY_PATH NAMES "clang-tidy-18" "clang-tidy-17" "clang-tidy-16" "clang-tidy")
# Why do we use ';' here?
# It's CMake black magic: https://cmake.org/cmake/help/latest/prop_tgt/LANG_CLANG_TIDY.html#prop_tgt:%3CLANG%3E_CLANG_TIDY
# The CLANG_TIDY_PATH is passed to CMAKE_CXX_CLANG_TIDY, which follows CXX_CLANG_TIDY syntax.
set (CLANG_TIDY_PATH "${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_PATH}" CACHE STRING "A combined command to run clang-tidy with caching wrapper")
else ()
find_program (CLANG_TIDY_PATH NAMES "clang-tidy-17" "clang-tidy-16" "clang-tidy")
find_program (CLANG_TIDY_PATH NAMES "clang-tidy-18" "clang-tidy-17" "clang-tidy-16" "clang-tidy")
endif ()
if (CLANG_TIDY_PATH)

View File

@ -9,7 +9,7 @@ execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version OUTPUT_VARIABLE COMPILER
message (STATUS "Using compiler:\n${COMPILER_SELF_IDENTIFICATION}")
# Require minimum compiler versions
set (CLANG_MINIMUM_VERSION 16)
set (CLANG_MINIMUM_VERSION 17)
set (XCODE_MINIMUM_VERSION 12.0)
set (APPLE_CLANG_MINIMUM_VERSION 12.0.0)

View File

@ -52,7 +52,7 @@ function(absl_cc_library)
)
target_include_directories(${_NAME}
PUBLIC "${ABSL_COMMON_INCLUDE_DIRS}")
SYSTEM PUBLIC "${ABSL_COMMON_INCLUDE_DIRS}")
target_compile_options(${_NAME}
PRIVATE ${ABSL_CC_LIB_COPTS})
target_compile_definitions(${_NAME} PUBLIC ${ABSL_CC_LIB_DEFINES})
@ -61,7 +61,7 @@ function(absl_cc_library)
# Generating header-only library
add_library(${_NAME} INTERFACE)
target_include_directories(${_NAME}
INTERFACE "${ABSL_COMMON_INCLUDE_DIRS}")
SYSTEM INTERFACE "${ABSL_COMMON_INCLUDE_DIRS}")
target_link_libraries(${_NAME}
INTERFACE

contrib/yaml-cpp vendored

@ -1 +1 @@
Subproject commit 0c86adac6d117ee2b4afcedb8ade19036ca0327d
Subproject commit f91e938341273b5f9d341380ab17bcc3de5daa06

View File

@ -3,10 +3,10 @@ compilers and build settings. Correctly configured Docker daemon is single depen
Usage:
Build deb package with `clang-17` in `debug` mode:
Build deb package with `clang-18` in `debug` mode:
```
$ mkdir deb/test_output
$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-17 --debug-build
$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-18 --debug-build
$ ls -l deb/test_output
-rw-r--r-- 1 root root 3730 clickhouse-client_22.2.2+debug_all.deb
-rw-r--r-- 1 root root 84221888 clickhouse-common-static_22.2.2+debug_amd64.deb
@ -17,11 +17,11 @@ $ ls -l deb/test_output
```
Build ClickHouse binary with `clang-17` and `address` sanitizer in `relwithdebuginfo`
Build ClickHouse binary with `clang-18` and `address` sanitizer in `relwithdebuginfo`
mode:
```
$ mkdir $HOME/some_clickhouse
$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-17 --sanitizer=address
$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-18 --sanitizer=address
$ ls -l $HOME/some_clickhouse
-rwxr-xr-x 1 root root 787061952 clickhouse
lrwxrwxrwx 1 root root 10 clickhouse-benchmark -> clickhouse

View File

@ -403,19 +403,19 @@ def parse_args() -> argparse.Namespace:
parser.add_argument(
"--compiler",
choices=(
"clang-17",
"clang-17-darwin",
"clang-17-darwin-aarch64",
"clang-17-aarch64",
"clang-17-aarch64-v80compat",
"clang-17-ppc64le",
"clang-17-riscv64",
"clang-17-s390x",
"clang-17-amd64-compat",
"clang-17-amd64-musl",
"clang-17-freebsd",
"clang-18",
"clang-18-darwin",
"clang-18-darwin-aarch64",
"clang-18-aarch64",
"clang-18-aarch64-v80compat",
"clang-18-ppc64le",
"clang-18-riscv64",
"clang-18-s390x",
"clang-18-amd64-compat",
"clang-18-amd64-musl",
"clang-18-freebsd",
),
default="clang-17",
default="clang-18",
help="a compiler to use",
)
parser.add_argument(

View File

@ -17,7 +17,7 @@ stage=${stage:-}
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
echo "$script_dir"
repo_dir=ch
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-17_debug_none_unsplitted_disable_False_binary"}
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-18_debug_none_unsplitted_disable_False_binary"}
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}
function git_clone_with_retry

View File

@ -2,7 +2,7 @@
set -euo pipefail
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-17_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-18_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""}

View File

@ -2,7 +2,7 @@
set -euo pipefail
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-17_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-18_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""}

View File

@ -6,7 +6,7 @@ set -e
set -u
set -o pipefail
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-17_debug_none_unsplitted_disable_False_binary"}
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-18_debug_none_unsplitted_disable_False_binary"}
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}
function wget_with_retry

View File

@ -5,7 +5,7 @@ FROM ubuntu:22.04
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=17
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=18
RUN apt-get update \
&& apt-get install \

View File

@ -13,14 +13,14 @@ The cross-build for macOS is based on the [Build instructions](../development/bu
The following sections provide a walk-through for building ClickHouse for `x86_64` macOS. If you're targeting ARM architecture, simply substitute all occurrences of `x86_64` with `aarch64`. For example, replace `x86_64-apple-darwin` with `aarch64-apple-darwin` throughout the steps.
## Install Clang-17
## Install clang-18
Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
For example the commands for Bionic are like:
``` bash
sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-17 main" >> /etc/apt/sources.list
sudo apt-get install clang-17
sudo apt-get install clang-18
```
## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
@ -59,7 +59,7 @@ curl -L 'https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacOSX11
cd ClickHouse
mkdir build-darwin
cd build-darwin
CC=clang-17 CXX=clang++-17 cmake -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=${CCTOOLS}/bin/x86_64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake ..
CC=clang-18 CXX=clang++-18 cmake -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=${CCTOOLS}/bin/x86_64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake ..
ninja
```

View File

@ -23,7 +23,7 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
``` bash
cd ClickHouse
mkdir build-riscv64
CC=clang-17 CXX=clang++-17 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF
CC=clang-18 CXX=clang++-18 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF
ninja -C build-riscv64
```

View File

@ -109,7 +109,7 @@ The build requires the following components:
- Git (used to checkout the sources, not needed for the build)
- CMake 3.20 or newer
- Compiler: clang-17 or newer
- Compiler: clang-18 or newer
- Linker: lld-17 or newer
- Ninja
- Yasm

View File

@ -153,7 +153,7 @@ Builds ClickHouse in various configurations for use in further steps. You have t
### Report Details
- **Compiler**: `clang-17`, optionally with the name of a target platform
- **Compiler**: `clang-18`, optionally with the name of a target platform
- **Build type**: `Debug` or `RelWithDebInfo` (cmake).
- **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan).
- **Status**: `success` or `fail`
@ -177,7 +177,7 @@ Performs static analysis and code style checks using `clang-tidy`. The report is
There is a convenience `packager` script that runs the clang-tidy build in docker
```sh
mkdir build_tidy
./docker/packager/packager --output-dir=./build_tidy --package-type=binary --compiler=clang-17 --debug-build --clang-tidy
./docker/packager/packager --output-dir=./build_tidy --package-type=binary --compiler=clang-18 --debug-build --clang-tidy
```

View File

@ -121,7 +121,7 @@ While inside the `build` directory, configure your build by running CMake. Befor
export CC=clang CXX=clang++
cmake ..
If you installed clang using the automatic installation script above, also specify the version of clang installed in the first command, e.g. `export CC=clang-17 CXX=clang++-17`. The clang version will be in the script output.
If you installed clang using the automatic installation script above, also specify the version of clang installed in the first command, e.g. `export CC=clang-18 CXX=clang++-18`. The clang version will be in the script output.
The `CC` variable specifies the compiler for C (short for C Compiler), and `CXX` variable instructs which C++ compiler is to be used for building.

View File

@ -75,7 +75,7 @@ The supported formats are:
| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ |
| [ORC](#data-format-orc) | ✔ | ✔ |
| [One](#data-format-one) | ✔ | ✗ |
| [Npy](#data-format-npy) | ✔ | |
| [Npy](#data-format-npy) | ✔ | |
| [RowBinary](#rowbinary) | ✔ | ✔ |
| [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
@ -2466,23 +2466,22 @@ Result:
## Npy {#data-format-npy}
This function is designed to load a NumPy array from a .npy file into ClickHouse. The NumPy file format is a binary format used for efficiently storing arrays of numerical data. During import, ClickHouse treats top level dimension as an array of rows with single column. Supported Npy data types and their corresponding type in ClickHouse:
| Npy type | ClickHouse type |
|:--------:|:---------------:|
| b1 | UInt8 |
| i1 | Int8 |
| i2 | Int16 |
| i4 | Int32 |
| i8 | Int64 |
| u1 | UInt8 |
| u2 | UInt16 |
| u4 | UInt32 |
| u8 | UInt64 |
| f2 | Float32 |
| f4 | Float32 |
| f8 | Float64 |
| S | String |
| U | String |
This function is designed to load a NumPy array from a .npy file into ClickHouse. The NumPy file format is a binary format used for efficiently storing arrays of numerical data. During import, ClickHouse treats the top-level dimension as an array of rows with a single column. Supported Npy data types and their corresponding types in ClickHouse:
| Npy data type (`INSERT`) | ClickHouse data type | Npy data type (`SELECT`) |
|--------------------------|-----------------------------------------------------------------|--------------------------|
| `i1` | [Int8](/docs/en/sql-reference/data-types/int-uint.md) | `i1` |
| `i2` | [Int16](/docs/en/sql-reference/data-types/int-uint.md) | `i2` |
| `i4` | [Int32](/docs/en/sql-reference/data-types/int-uint.md) | `i4` |
| `i8` | [Int64](/docs/en/sql-reference/data-types/int-uint.md) | `i8` |
| `u1`, `b1` | [UInt8](/docs/en/sql-reference/data-types/int-uint.md) | `u1` |
| `u2` | [UInt16](/docs/en/sql-reference/data-types/int-uint.md) | `u2` |
| `u4` | [UInt32](/docs/en/sql-reference/data-types/int-uint.md) | `u4` |
| `u8` | [UInt64](/docs/en/sql-reference/data-types/int-uint.md) | `u8` |
| `f2`, `f4` | [Float32](/docs/en/sql-reference/data-types/float.md) | `f4` |
| `f8` | [Float64](/docs/en/sql-reference/data-types/float.md) | `f8` |
| `S`, `U` | [String](/docs/en/sql-reference/data-types/string.md) | `S` |
| | [FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | `S` |
**Example of saving an array in .npy format using Python**
@ -2509,6 +2508,14 @@ Result:
└───────────────┘
```
**Selecting Data**
You can select data from a ClickHouse table and save it to a file in the Npy format with the following command:
```bash
$ clickhouse-client --query="SELECT {column} FROM {some_table} FORMAT Npy" > {filename.npy}
```
## LineAsString {#lineasstring}
In this format, every line of input data is interpreted as a single string value. This format can only be parsed for table with a single field of type [String](/docs/en/sql-reference/data-types/string.md). The remaining columns must be set to [DEFAULT](/docs/en/sql-reference/statements/create/table.md/#default) or [MATERIALIZED](/docs/en/sql-reference/statements/create/table.md/#materialized), or omitted.

View File

@ -7,6 +7,7 @@ sidebar_label: Map(K, V)
# Map(K, V)
`Map(K, V)` data type stores `key:value` pairs.
The Map datatype is implemented as `Array(Tuple(key T1, value T2))`, which means that the order of keys in each map does not change, i.e., this data type maintains insertion order.
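A minimal illustrative query, not part of this changeset, showing that keys come back in insertion order (the `map` constructor and `mapKeys` function are used here only for demonstration):
```sql
-- Keys are returned in the order they were inserted, not sorted.
SELECT map('b', 1, 'a', 2) AS m, mapKeys(m) AS keys;
-- Expected (per the description above): m = {'b':1,'a':2}, keys = ['b','a']
```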
**Parameters**

View File

@ -234,3 +234,34 @@ SELECT least(toDateTime32(now() + toIntervalDay(1)), toDateTime64(now(), 3))
:::note
The type returned is a DateTime64, as the DateTime32 must be promoted to 64 bits for the comparison.
:::
## clamp
Constrains the return value between A and B.
**Syntax**
``` sql
clamp(value, min, max)
```
**Arguments**
- `value`: Input value.
- `min`: Lower bound.
- `max`: Upper bound.
**Returned value**
If the value is less than the minimum, returns the minimum; if it is greater than the maximum, returns the maximum; otherwise, returns the value itself.
Examples:
```sql
SELECT clamp(1, 2, 3) result, toTypeName(result) type;
```
```response
┌─result─┬─type────┐
│      2 │ Float64 │
└────────┴─────────┘
```
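A second, illustrative query (not part of this changeset) covering the upper bound; assuming the same type promotion as in the example above, the result type is again Float64:
```sql
SELECT clamp(10, 2, 3) AS result, toTypeName(result) AS type;
-- Expected: result = 3 (10 is clamped down to the upper bound), type = Float64
```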

View File

@ -2558,13 +2558,27 @@ Like function `YYYYMMDDhhmmssToDate()` but produces a [DateTime64](../../sql-ref
Accepts an additional, optional `precision` parameter after the `timezone` parameter.
## addYears, addQuarters, addMonths, addWeeks, addDays, addHours, addMinutes, addSeconds, addMilliseconds, addMicroseconds, addNanoseconds
## addYears
These functions add units of the interval specified by the function name to a date, a date with time or a string-encoded date / date with time. A date or date with time is returned.
Adds a specified number of years to a date, a date with time or a string-encoded date / date with time.
Example:
**Syntax**
``` sql
```sql
addYears(date, num)
```
**Parameters**
- `date`: Date / date with time to add specified number of years to. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of years to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` years. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
```sql
WITH
toDate('2024-01-01') AS date,
toDateTime('2024-01-01 00:00:00') AS date_time,
@ -2575,12 +2589,456 @@ SELECT
addYears(date_time_string, 1) AS add_years_with_date_time_string
```
``` text
```response
┌─add_years_with_date─┬─add_years_with_date_time─┬─add_years_with_date_time_string─┐
│ 2025-01-01 │ 2025-01-01 00:00:00 │ 2025-01-01 00:00:00.000 │
└─────────────────────┴──────────────────────────┴─────────────────────────────────┘
```
## addQuarters
Adds a specified number of quarters to a date, a date with time or a string-encoded date / date with time.
**Syntax**
```sql
addQuarters(date, num)
```
**Parameters**
- `date`: Date / date with time to add specified number of quarters to. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of quarters to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` quarters. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
```sql
WITH
toDate('2024-01-01') AS date,
toDateTime('2024-01-01 00:00:00') AS date_time,
'2024-01-01 00:00:00' AS date_time_string
SELECT
addQuarters(date, 1) AS add_quarters_with_date,
addQuarters(date_time, 1) AS add_quarters_with_date_time,
addQuarters(date_time_string, 1) AS add_quarters_with_date_time_string
```
```response
┌─add_quarters_with_date─┬─add_quarters_with_date_time─┬─add_quarters_with_date_time_string─┐
│ 2024-04-01 │ 2024-04-01 00:00:00 │ 2024-04-01 00:00:00.000 │
└────────────────────────┴─────────────────────────────┴────────────────────────────────────┘
```
## addMonths
Adds a specified number of months to a date, a date with time or a string-encoded date / date with time.
**Syntax**
```sql
addMonths(date, num)
```
**Parameters**
- `date`: Date / date with time to add specified number of months to. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of months to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` months. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
```sql
WITH
toDate('2024-01-01') AS date,
toDateTime('2024-01-01 00:00:00') AS date_time,
'2024-01-01 00:00:00' AS date_time_string
SELECT
addMonths(date, 6) AS add_months_with_date,
addMonths(date_time, 6) AS add_months_with_date_time,
addMonths(date_time_string, 6) AS add_months_with_date_time_string
```
```response
┌─add_months_with_date─┬─add_months_with_date_time─┬─add_months_with_date_time_string─┐
│ 2024-07-01 │ 2024-07-01 00:00:00 │ 2024-07-01 00:00:00.000 │
└──────────────────────┴───────────────────────────┴──────────────────────────────────┘
```
## addWeeks
Adds a specified number of weeks to a date, a date with time or a string-encoded date / date with time.
**Syntax**
```sql
addWeeks(date, num)
```
**Parameters**
- `date`: Date / date with time to add specified number of weeks to. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of weeks to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` weeks. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
```sql
WITH
toDate('2024-01-01') AS date,
toDateTime('2024-01-01 00:00:00') AS date_time,
'2024-01-01 00:00:00' AS date_time_string
SELECT
addWeeks(date, 5) AS add_weeks_with_date,
addWeeks(date_time, 5) AS add_weeks_with_date_time,
addWeeks(date_time_string, 5) AS add_weeks_with_date_time_string
```
```response
┌─add_weeks_with_date─┬─add_weeks_with_date_time─┬─add_weeks_with_date_time_string─┐
│ 2024-02-05 │ 2024-02-05 00:00:00 │ 2024-02-05 00:00:00.000 │
└─────────────────────┴──────────────────────────┴─────────────────────────────────┘
```
## addDays
Adds a specified number of days to a date, a date with time or a string-encoded date / date with time.
**Syntax**
```sql
addDays(date, num)
```
**Parameters**
- `date`: Date / date with time to add specified number of days to. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of days to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` days. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
```sql
WITH
toDate('2024-01-01') AS date,
toDateTime('2024-01-01 00:00:00') AS date_time,
'2024-01-01 00:00:00' AS date_time_string
SELECT
addDays(date, 5) AS add_days_with_date,
addDays(date_time, 5) AS add_days_with_date_time,
addDays(date_time_string, 5) AS add_days_with_date_time_string
```
```response
┌─add_days_with_date─┬─add_days_with_date_time─┬─add_days_with_date_time_string─┐
│ 2024-01-06 │ 2024-01-06 00:00:00 │ 2024-01-06 00:00:00.000 │
└────────────────────┴─────────────────────────┴────────────────────────────────┘
```
## addHours
Adds a specified number of hours to a date, a date with time or a string-encoded date / date with time.
**Syntax**
```sql
addHours(date, num)
```
**Parameters**
- `date`: Date / date with time to add specified number of hours to. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of hours to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` hours. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
```sql
WITH
toDate('2024-01-01') AS date,
toDateTime('2024-01-01 00:00:00') AS date_time,
'2024-01-01 00:00:00' AS date_time_string
SELECT
addHours(date, 12) AS add_hours_with_date,
addHours(date_time, 12) AS add_hours_with_date_time,
addHours(date_time_string, 12) AS add_hours_with_date_time_string
```
```response
┌─add_hours_with_date─┬─add_hours_with_date_time─┬─add_hours_with_date_time_string─┐
│ 2024-01-01 12:00:00 │ 2024-01-01 12:00:00 │ 2024-01-01 12:00:00.000 │
└─────────────────────┴──────────────────────────┴─────────────────────────────────┘
```
## addMinutes
Adds a specified number of minutes to a date, a date with time or a string-encoded date / date with time.
**Syntax**
```sql
addMinutes(date, num)
```
**Parameters**
- `date`: Date / date with time to add specified number of minutes to. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of minutes to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` minutes. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
```sql
WITH
toDate('2024-01-01') AS date,
toDateTime('2024-01-01 00:00:00') AS date_time,
'2024-01-01 00:00:00' AS date_time_string
SELECT
addMinutes(date, 20) AS add_minutes_with_date,
addMinutes(date_time, 20) AS add_minutes_with_date_time,
addMinutes(date_time_string, 20) AS add_minutes_with_date_time_string
```
```response
┌─add_minutes_with_date─┬─add_minutes_with_date_time─┬─add_minutes_with_date_time_string─┐
│ 2024-01-01 00:20:00 │ 2024-01-01 00:20:00 │ 2024-01-01 00:20:00.000 │
└───────────────────────┴────────────────────────────┴───────────────────────────────────┘
```
## addSeconds
Adds a specified number of seconds to a date, a date with time or a string-encoded date / date with time.
**Syntax**
```sql
addSeconds(date, num)
```
**Parameters**
- `date`: Date / date with time to add specified number of seconds to. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of seconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` seconds. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
```sql
WITH
toDate('2024-01-01') AS date,
toDateTime('2024-01-01 00:00:00') AS date_time,
'2024-01-01 00:00:00' AS date_time_string
SELECT
addSeconds(date, 30) AS add_seconds_with_date,
addSeconds(date_time, 30) AS add_seconds_with_date_time,
addSeconds(date_time_string, 30) AS add_seconds_with_date_time_string
```
```response
┌─add_seconds_with_date─┬─add_seconds_with_date_time─┬─add_seconds_with_date_time_string─┐
│ 2024-01-01 00:00:30 │ 2024-01-01 00:00:30 │ 2024-01-01 00:00:30.000 │
└───────────────────────┴────────────────────────────┴───────────────────────────────────┘
```
## addMilliseconds
Adds a specified number of milliseconds to a date with time or a string-encoded date with time.
**Syntax**
```sql
addMilliseconds(date_time, num)
```
**Parameters**
- `date_time`: Date with time to add specified number of milliseconds to. [DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of milliseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` plus `num` milliseconds. [DateTime64](../data-types/datetime64.md).
**Example**
```sql
WITH
toDateTime('2024-01-01 00:00:00') AS date_time,
'2024-01-01 00:00:00' AS date_time_string
SELECT
addMilliseconds(date_time, 1000) AS add_milliseconds_with_date_time,
addMilliseconds(date_time_string, 1000) AS add_milliseconds_with_date_time_string
```
```response
┌─add_milliseconds_with_date_time─┬─add_milliseconds_with_date_time_string─┐
│ 2024-01-01 00:00:01.000 │ 2024-01-01 00:00:01.000 │
└─────────────────────────────────┴────────────────────────────────────────┘
```
## addMicroseconds
Adds a specified number of microseconds to a date with time or a string-encoded date with time.
**Syntax**
```sql
addMicroseconds(date_time, num)
```
**Parameters**
- `date_time`: Date with time to add specified number of microseconds to. [DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of microseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` plus `num` microseconds. [DateTime64](../data-types/datetime64.md).
**Example**
```sql
WITH
toDateTime('2024-01-01 00:00:00') AS date_time,
'2024-01-01 00:00:00' AS date_time_string
SELECT
addMicroseconds(date_time, 1000000) AS add_microseconds_with_date_time,
addMicroseconds(date_time_string, 1000000) AS add_microseconds_with_date_time_string
```
```response
┌─add_microseconds_with_date_time─┬─add_microseconds_with_date_time_string─┐
│ 2024-01-01 00:00:01.000000 │ 2024-01-01 00:00:01.000000 │
└─────────────────────────────────┴────────────────────────────────────────┘
```
## addNanoseconds
Adds a specified number of nanoseconds to a date with time or a string-encoded date with time.
**Syntax**
```sql
addNanoseconds(date_time, num)
```
**Parameters**
- `date_time`: Date with time to add specified number of nanoseconds to. [DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of nanoseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` plus `num` nanoseconds. [DateTime64](../data-types/datetime64.md).
**Example**
```sql
WITH
toDateTime('2024-01-01 00:00:00') AS date_time,
'2024-01-01 00:00:00' AS date_time_string
SELECT
addNanoseconds(date_time, 1000) AS add_nanoseconds_with_date_time,
addNanoseconds(date_time_string, 1000) AS add_nanoseconds_with_date_time_string
```
```response
┌─add_nanoseconds_with_date_time─┬─add_nanoseconds_with_date_time_string─┐
│ 2024-01-01 00:00:00.000001000 │ 2024-01-01 00:00:00.000001000 │
└────────────────────────────────┴───────────────────────────────────────┘
```
## addInterval
Adds an interval to another interval or tuple of intervals.
**Syntax**
```sql
addInterval(interval_1, interval_2)
```
**Parameters**
- `interval_1`: First interval or tuple of intervals. [interval](../data-types/special-data-types/interval.md), [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).
- `interval_2`: Second interval to be added. [interval](../data-types/special-data-types/interval.md).
**Returned value**
- Returns a tuple of intervals. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).
:::note
Intervals of the same type will be combined into a single interval. For instance if `toIntervalDay(1)` and `toIntervalDay(2)` are passed then the result will be `(3)` rather than `(1,2)`.
:::
**Example**
Query:
```sql
SELECT addInterval(INTERVAL 1 DAY, INTERVAL 1 MONTH);
SELECT addInterval((INTERVAL 1 DAY, INTERVAL 1 YEAR), INTERVAL 1 MONTH);
SELECT addInterval(INTERVAL 2 DAY, INTERVAL 1 DAY);
```
Result:
```response
┌─addInterval(toIntervalDay(1), toIntervalMonth(1))─┐
│ (1,1) │
└───────────────────────────────────────────────────┘
┌─addInterval((toIntervalDay(1), toIntervalYear(1)), toIntervalMonth(1))─┐
│ (1,1,1) │
└────────────────────────────────────────────────────────────────────────┘
┌─addInterval(toIntervalDay(2), toIntervalDay(1))─┐
│ (3) │
└─────────────────────────────────────────────────┘
```
## addTupleOfIntervals
Consecutively adds a tuple of intervals to a Date or a DateTime.
**Syntax**
```sql
addTupleOfIntervals(date, intervals)
```
**Parameters**
- `date`: Date or date with time to add `intervals` to. [date](../data-types/date.md)/[date32](../data-types/date32.md)/[datetime](../data-types/datetime.md)/[datetime64](../data-types/datetime64.md).
- `intervals`: Tuple of intervals to add to `date`. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).
**Returned value**
- Returns `date` with added `intervals`. [date](../data-types/date.md)/[date32](../data-types/date32.md)/[datetime](../data-types/datetime.md)/[datetime64](../data-types/datetime64.md).
**Example**
Query:
```sql
WITH toDate('2018-01-01') AS date
SELECT addTupleOfIntervals(date, (INTERVAL 1 DAY, INTERVAL 1 MONTH, INTERVAL 1 YEAR))
```
Result:
```response
┌─addTupleOfIntervals(date, (toIntervalDay(1), toIntervalMonth(1), toIntervalYear(1)))─┐
│ 2019-02-02 │
└──────────────────────────────────────────────────────────────────────────────────────┘
```
## subtractYears
Subtracts a specified number of years from a date, a date with time or a string-encoded date / date with time.
@ -2893,7 +3351,7 @@ subtractMilliseconds(date_time, num)
- `num`: Number of milliseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` minus `num` milliseconds. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
- Returns `date_time` minus `num` milliseconds. [DateTime64](../data-types/datetime64.md).
**Example**
@ -2928,7 +3386,7 @@ subtractMicroseconds(date_time, num)
- `num`: Number of microseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` minus `num` microseconds. [DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
- Returns `date_time` minus `num` microseconds. [DateTime64](../data-types/datetime64.md).
**Example**
@ -2963,7 +3421,7 @@ subtractNanoseconds(date_time, num)
- `num`: Number of nanoseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` minus `num` nanoseconds. [DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
- Returns `date_time` minus `num` nanoseconds. [DateTime64](../data-types/datetime64.md).
**Example**
@ -3001,7 +3459,7 @@ subtractInterval(interval_1, interval_2)
- Returns a tuple of intervals. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).
:::note
If the types of the first interval (or the interval in the tuple) and the second interval are the same they will be merged into one interval.
Intervals of the same type will be combined into a single interval. For instance if `toIntervalDay(2)` and `toIntervalDay(1)` are passed then the result will be `(1)` rather than `(2,1)`
:::
**Example**

View File

@ -12,6 +12,8 @@ Returns whether the argument is [NULL](../../sql-reference/syntax.md#null).
See also operator [`IS NULL`](../operators/index.md#is_null).
**Syntax**
``` sql
isNull(x)
```
@ -52,6 +54,45 @@ Result:
└───┘
```
## isNullable
Returns `1` if a column is [Nullable](../data-types/nullable.md) (i.e. allows `NULL` values), `0` otherwise.
**Syntax**
``` sql
isNullable(x)
```
**Arguments**
- `x` — column.
**Returned value**
- `1` if `x` allows `NULL` values. [UInt8](../data-types/int-uint.md).
- `0` if `x` does not allow `NULL` values. [UInt8](../data-types/int-uint.md).
**Example**
Query:
``` sql
CREATE TABLE tab (ordinary_col UInt32, nullable_col Nullable(UInt32)) ENGINE = Log;
INSERT INTO tab (ordinary_col, nullable_col) VALUES (1,1), (2, 2), (3,3);
SELECT isNullable(ordinary_col), isNullable(nullable_col) FROM tab;
```
Result:
``` text
┌───isNullable(ordinary_col)──┬───isNullable(nullable_col)──┐
1. │ 0 │ 1 │
2. │ 0 │ 1 │
3. │ 0 │ 1 │
└─────────────────────────────┴─────────────────────────────┘
```
## isNotNull
Returns whether the argument is not [NULL](../../sql-reference/syntax.md#null-literal).
@ -96,6 +137,36 @@ Result:
└───┘
```
## isNotDistinctFrom
Performs a null-safe comparison. It is used to compare JOIN keys that contain `NULL` values in the `JOIN ON` section.
Unlike the usual `equals` behavior, where comparing two `NULL` values returns `NULL`, this function considers two `NULL` values identical and returns `true`.
:::note
This function is an internal function used by the implementation of JOIN ON. Please do not use it manually in queries.
:::
**Syntax**
``` sql
isNotDistinctFrom(x, y)
```
**Arguments**
- `x` — first JOIN key.
- `y` — second JOIN key.
**Returned value**
- `true` when `x` and `y` are both `NULL`.
- `false` otherwise.
**Example**
For a complete example see: [NULL values in JOIN keys](../../sql-reference/statements/select/join#null-values-in-join-keys).
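A minimal sketch of the intended usage pattern in `JOIN ON` (the tables `t1`, `t2` and the column `id` are placeholders invented for this illustration, not taken from the linked example):
```sql
-- Rows whose `id` is NULL on both sides are matched,
-- which a plain `t1.id = t2.id` condition would not do.
SELECT * FROM t1 JOIN t2 ON isNotDistinctFrom(t1.id, t2.id);
```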
## isZeroOrNull
Returns whether the argument is 0 (zero) or [NULL](../../sql-reference/syntax.md#null-literal).
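A minimal sketch of typical usage (the table `t_zn` and its sample rows are invented for illustration):
```sql
CREATE TABLE t_zn (x UInt8, y Nullable(UInt8)) ENGINE = Log;
INSERT INTO t_zn VALUES (1, NULL), (2, 0), (3, 3);
-- Keeps the rows where y is NULL or 0, i.e. x = 1 and x = 2.
SELECT x FROM t_zn WHERE isZeroOrNull(y);
```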

View File

@ -3301,3 +3301,31 @@ The setting is not enabled by default for security reasons, because some headers
HTTP headers are case sensitive for this function.
If the function is used in the context of a distributed query, it returns a non-empty result only on the initiator node.
## showCertificate
Shows information about the current server's Secure Sockets Layer (SSL) certificate if it has been configured. See [Configuring SSL-TLS](https://clickhouse.com/docs/en/guides/sre/configuring-ssl) for more information on how to configure ClickHouse to use OpenSSL certificates to validate connections.
**Syntax**
```sql
showCertificate()
```
**Returned value**
- Map of key-value pairs relating to the configured SSL certificate. [Map](../../sql-reference/data-types/map.md)([String](../../sql-reference/data-types/string.md), [String](../../sql-reference/data-types/string.md)).
**Example**
Query:
```sql
SELECT showCertificate() FORMAT LineAsString;
```
Result:
```response
{'version':'1','serial_number':'2D9071D64530052D48308473922C7ADAFA85D6C5','signature_algo':'sha256WithRSAEncryption','issuer':'/CN=marsnet.local CA','not_before':'May 7 17:01:21 2024 GMT','not_after':'May 7 17:01:21 2025 GMT','subject':'/CN=chnode1','pkey_algo':'rsaEncryption'}
```

View File

@ -151,6 +151,14 @@ Result:
Query with `INNER` type of a join and conditions with `OR` and `AND`:
:::note
By default, non-equal conditions are supported as long as they use columns from the same table.
For example, `t1.a = t2.key AND t1.b > 0 AND t2.b > t2.c` is supported, because `t1.b > 0` uses columns only from `t1` and `t2.b > t2.c` uses columns only from `t2`.
However, you can try experimental support for conditions like `t1.a = t2.key AND t1.b > t2.key`; see the section below for more details.
:::
``` sql
SELECT a, b, val FROM t1 INNER JOIN t2 ON t1.a = t2.key OR t1.b = t2.key AND t2.val > 3;
```
@ -165,7 +173,7 @@ Result:
└───┴────┴─────┘
```
## [experimental] Join with inequality conditions
## [experimental] Join with inequality conditions for columns from different tables
:::note
This feature is experimental. To use it, set `allow_experimental_join_condition` to 1 in your configuration files or by using the `SET` command:
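For example, using the `SET` form named above (a minimal sketch; the configuration-file form follows the usual server settings layout):
```sql
SET allow_experimental_join_condition = 1;
```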

View File

@ -1,5 +1,5 @@
---
slug: /en/operations/utilities/backupview
slug: /ru/operations/utilities/backupview
title: clickhouse_backupview
---

View File

@ -0,0 +1,311 @@
---
slug: /ru/sql-reference/functions/null-functions
sidebar_position: 63
sidebar_label: "Functions for working with Nullable arguments"
---
# Functions for working with Nullable arguments {#funktsii-dlia-raboty-s-nullable-argumentami}
## isNull {#isnull}
Checks whether the argument is [NULL](../../sql-reference/syntax.md#null-literal).
``` sql
isNull(x)
```
Alias: `ISNULL`.
**Arguments**
- `x` — A value of a non-compound data type.
**Returned value**
- `1` if `x` is `NULL`.
- `0` if `x` is not `NULL`.
**Example**
Input table
``` text
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 2 │ 3 │
└───┴──────┘
```
Query
``` sql
SELECT x FROM t_null WHERE isNull(y);
```
``` text
┌─x─┐
│ 1 │
└───┘
```
## isNotNull {#isnotnull}
Checks whether the argument is not [NULL](../../sql-reference/syntax.md#null-literal).
``` sql
isNotNull(x)
```
**Arguments**
- `x` — A value of a non-compound data type.
**Returned value**
- `0` if `x` is `NULL`.
- `1` if `x` is not `NULL`.
**Example**
Input table
``` text
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 2 │ 3 │
└───┴──────┘
```
Query
``` sql
SELECT x FROM t_null WHERE isNotNull(y);
```
``` text
┌─x─┐
│ 2 │
└───┘
```
## coalesce {#coalesce}
Checks the passed arguments from left to right for `NULL` and returns the first non-`NULL` argument.
``` sql
coalesce(x,...)
```
**Arguments**
- Any number of parameters of a non-compound type. All parameters must be compatible by data type.
**Returned values**
- The first non-`NULL` argument.
- `NULL`, if all arguments are `NULL`.
**Example**
Consider an address book that may list several ways to contact a client.
``` text
┌─name─────┬─mail─┬─phone─────┬──icq─┐
│ client 1 │ ᴺᵁᴸᴸ │ 123-45-67 │ 123 │
│ client 2 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │
└──────────┴──────┴───────────┴──────┘
```
The `mail` and `phone` fields are of type String, while the `icq` field is `UInt32`, so it needs to be converted to `String`.
Get the first available way to contact the client from the address book:
``` sql
SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook;
```
``` text
┌─name─────┬─coalesce(mail, phone, CAST(icq, 'Nullable(String)'))─┐
│ client 1 │ 123-45-67 │
│ client 2 │ ᴺᵁᴸᴸ │
└──────────┴──────────────────────────────────────────────────────┘
```
## ifNull {#ifnull}
Returns an alternative value if the main argument is `NULL`.
``` sql
ifNull(x,alt)
```
**Arguments**
- `x` — The value to check for `NULL`.
- `alt` — The value that the function returns if `x` is `NULL`.
**Returned values**
- The value of `x`, if `x` is not `NULL`.
- The value of `alt`, if `x` is `NULL`.
**Example**
``` sql
SELECT ifNull('a', 'b');
```
``` text
┌─ifNull('a', 'b')─┐
│ a │
└──────────────────┘
```
``` sql
SELECT ifNull(NULL, 'b');
```
``` text
┌─ifNull(NULL, 'b')─┐
│ b │
└───────────────────┘
```
## nullIf {#nullif}
Returns `NULL` if the arguments are equal.
``` sql
nullIf(x, y)
```
**Arguments**
`x`, `y` — The values to compare. They must be of compatible types, otherwise ClickHouse throws an exception.
**Returned values**
- `NULL`, if the arguments are equal.
- The value of `x`, if the arguments are not equal.
**Example**
``` sql
SELECT nullIf(1, 1);
```
``` text
┌─nullIf(1, 1)─┐
│ ᴺᵁᴸᴸ │
└──────────────┘
```
``` sql
SELECT nullIf(1, 2);
```
``` text
┌─nullIf(1, 2)─┐
│ 1 │
└──────────────┘
```
## assumeNotNull {#assumenotnull}
Converts a value of [Nullable](../../sql-reference/functions/functions-for-nulls.md) type to a non-`Nullable` value, if the value is not `NULL`.
``` sql
assumeNotNull(x)
```
**Arguments**
- `x` — The original value.
**Returned values**
- The original value with a non-`Nullable` type, if it is not `NULL`.
- An unspecified, implementation-dependent result, if the original value is `NULL`.
**Example**
Consider the table `t_null`.
``` sql
SHOW CREATE TABLE t_null;
```
``` text
┌─statement─────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_null ( x Int8, y Nullable(Int8)) ENGINE = TinyLog │
└───────────────────────────────────────────────────────────────────────────┘
```
``` text
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 2 │ 3 │
└───┴──────┘
```
Apply the `assumeNotNull` function to the `y` column.
``` sql
SELECT assumeNotNull(y) FROM t_null;
```
``` text
┌─assumeNotNull(y)─┐
│ 0 │
│ 3 │
└──────────────────┘
```
``` sql
SELECT toTypeName(assumeNotNull(y)) FROM t_null;
```
``` text
┌─toTypeName(assumeNotNull(y))─┐
│ Int8 │
│ Int8 │
└──────────────────────────────┘
```
## toNullable {#tonullable}
Converts the argument type to `Nullable`.
``` sql
toNullable(x)
```
**Arguments**
- `x` — A value of any non-compound type.
**Returned value**
- The input value, with `Nullable` type.
**Example**
``` sql
SELECT toTypeName(10);
```
``` text
┌─toTypeName(10)─┐
│ UInt8 │
└────────────────┘
```
``` sql
SELECT toTypeName(toNullable(10));
```
``` text
┌─toTypeName(toNullable(10))─┐
│ Nullable(UInt8) │
└────────────────────────────┘
```

View File

@ -0,0 +1,254 @@
---
slug: /zh/sql-reference/functions/null-functions
---
# Functions for handling Nullable values {#nullablechu-li-han-shu}
## isNull {#isnull}
Checks whether the argument is [NULL](../../sql-reference/syntax.md#null-literal).
isNull(x)
**Arguments**
- `x` — A value of a non-compound data type.
**Returned value**
- `1` if `x` is `NULL`.
- `0` if `x` is not `NULL`.
**Example**
Consider a table with the following contents:
```response
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 2 │ 3 │
└───┴──────┘
```
Query it:
```sql
SELECT x FROM t_null WHERE isNull(y)
```
```response
┌─x─┐
│ 1 │
└───┘
```
## isNotNull {#isnotnull}
Checks whether the argument is not [NULL](../../sql-reference/syntax.md#null-literal).
isNotNull(x)
**Arguments:**
- `x` — A value of a non-compound data type.
**Returned value**
- `0` if `x` is `NULL`.
- `1` if `x` is not `NULL`.
**Example**
Consider a table with the following contents:
```response
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 2 │ 3 │
└───┴──────┘
```
Query it:
```sql
SELECT x FROM t_null WHERE isNotNull(y)
```
```response
┌─x─┐
│ 2 │
└───┘
```
## coalesce {#coalesce}
Checks the passed arguments from left to right for `NULL` and returns the first non-`NULL` argument.
coalesce(x,...)
**Arguments:**
- Any number of parameters of a non-compound type. All parameters must be compatible by data type.
**Returned values**
- The first non-`NULL` argument.
- `NULL`, if all arguments are `NULL`.
**Example**
Consider a contact list that may record several ways to contact a client.
```response
┌─name─────┬─mail─┬─phone─────┬──icq─┐
│ client 1 │ ᴺᵁᴸᴸ │ 123-45-67 │ 123 │
│ client 2 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │
└──────────┴──────┴───────────┴──────┘
```
The `mail` and `phone` fields are of type String, but the `icq` field is `UInt32`, so it needs to be converted to `String`.
Get the first available way to contact the client from the contact list:
```sql
SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook
```
```response
┌─name─────┬─coalesce(mail, phone, CAST(icq, 'Nullable(String)'))─┐
│ client 1 │ 123-45-67 │
│ client 2 │ ᴺᵁᴸᴸ │
└──────────┴──────────────────────────────────────────────────────┘
```
## ifNull {#ifnull}
Returns the value of the second argument if the first argument is `NULL`.
ifNull(x,alt)
**Arguments:**
- `x` — The value to check for `NULL`.
- `alt` — The value that the function returns if `x` is `NULL`.
**Returned values**
- The value of `x`, if `x` is not `NULL`.
- The value of `alt`, if `x` is `NULL`.
**Example**
SELECT ifNull('a', 'b')
┌─ifNull('a', 'b')─┐
│ a │
└──────────────────┘
SELECT ifNull(NULL, 'b')
┌─ifNull(NULL, 'b')─┐
│ b │
└───────────────────┘
## nullIf {#nullif}
Returns `NULL` if the arguments are equal.
nullIf(x, y)
**Arguments:**
`x`, `y` — The values to compare. They must be of compatible types, otherwise an exception is thrown.
**Returned values**
- `NULL`, if the arguments are equal.
- The value of `x`, if the arguments are not equal.
**Example**
SELECT nullIf(1, 1)
┌─nullIf(1, 1)─┐
│ ᴺᵁᴸᴸ │
└──────────────┘
SELECT nullIf(1, 2)
┌─nullIf(1, 2)─┐
│ 1 │
└──────────────┘
## assumeNotNull {#assumenotnull}
Converts a value of [Nullable](../../sql-reference/functions/functions-for-nulls.md) type to a non-`Nullable` value.
assumeNotNull(x)
**Arguments:**
- `x` — The original value.
**Returned values**
- The original value with a non-`Nullable` type, if `x` is not `NULL`.
- An arbitrary value, if `x` is `NULL`.
**Example**
Consider the following `t_null` table.
SHOW CREATE TABLE t_null
┌─statement─────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_null ( x Int8, y Nullable(Int8)) ENGINE = TinyLog │
└───────────────────────────────────────────────────────────────────────────┘
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 2 │ 3 │
└───┴──────┘
Pass the `y` column as the argument of the `assumeNotNull` function.
SELECT assumeNotNull(y) FROM t_null
┌─assumeNotNull(y)─┐
│ 0 │
│ 3 │
└──────────────────┘
SELECT toTypeName(assumeNotNull(y)) FROM t_null
┌─toTypeName(assumeNotNull(y))─┐
│ Int8 │
│ Int8 │
└──────────────────────────────┘
## toNullable {#tonullable}
Converts the argument type to `Nullable`.
toNullable(x)
**Arguments:**
- `x` — A value of any non-compound type.
**Returned value**
- The input value, but with `Nullable` type.
**Example**
SELECT toTypeName(10)
┌─toTypeName(10)─┐
│ UInt8 │
└────────────────┘
SELECT toTypeName(toNullable(10))
┌─toTypeName(toNullable(10))─┐
│ Nullable(UInt8) │
└────────────────────────────┘

View File

@ -233,7 +233,7 @@ struct Commit
};
enum class FileChangeType
enum class FileChangeType : uint8_t
{
Add,
Delete,
@ -291,7 +291,7 @@ struct FileChange
};
enum class LineType
enum class LineType : uint8_t
{
Empty,
Comment,

View File

@ -323,7 +323,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
{
fmt::print("Symlink {} already exists but it points to {}. Will replace the old symlink to {}.\n",
main_bin_path.string(), points_to.string(), binary_self_canonical_path.string());
fs::remove(main_bin_path);
(void)fs::remove(main_bin_path);
}
}
}
@ -489,7 +489,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
{
fmt::print("Symlink {} already exists but it points to {}. Will replace the old symlink to {}.\n",
symlink_path.string(), points_to.string(), main_bin_path.string());
fs::remove(symlink_path);
(void)fs::remove(symlink_path);
}
}
}
@ -1006,7 +1006,7 @@ namespace
else
{
fmt::print("{} file exists but damaged, ignoring.\n", pid_file.string());
fs::remove(pid_file);
(void)fs::remove(pid_file);
}
}
else
@ -1014,7 +1014,7 @@ namespace
/// Create a directory for pid file.
/// It's created by "install" but we also support cases when ClickHouse is already installed different way.
fs::path pid_path = pid_file;
pid_path.remove_filename();
pid_path = pid_path.remove_filename();
fs::create_directories(pid_path);
/// All users are allowed to read pid file (for clickhouse status command).
fs::permissions(pid_path, fs::perms::owner_all | fs::perms::group_read | fs::perms::others_read, fs::perm_options::replace);
@ -1098,7 +1098,7 @@ namespace
else
{
fmt::print("{} file exists but damaged, ignoring.\n", pid_file.string());
fs::remove(pid_file);
(void)fs::remove(pid_file);
}
}
catch (const Exception & e)

View File

@ -86,7 +86,10 @@ std::vector<String> KeeperClient::getCompletions(const String & prefix) const
void KeeperClient::askConfirmation(const String & prompt, std::function<void()> && callback)
{
if (!ask_confirmation)
return callback();
{
callback();
return;
}
std::cout << prompt << " Continue?\n";
waiting_confirmation = true;

View File

@ -284,7 +284,6 @@ void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequ
else if (method == "extDict_loadIds")
{
LOG_DEBUG(log, "Getting dictionary ids for dictionary with id: {}", dictionary_id);
String ids_string;
std::vector<uint64_t> ids = parseIdsFromBinary(request.getStream());
auto library_handler = ExternalDictionaryLibraryHandlerFactory::instance().get(dictionary_id);

View File

@ -14,7 +14,7 @@ namespace ErrorCodes
SharedLibrary::SharedLibrary(std::string_view path, int flags)
{
handle = dlopen(path.data(), flags);
handle = dlopen(path.data(), flags); // NOLINT
if (!handle)
throw Exception(ErrorCodes::CANNOT_DLOPEN, "Cannot dlopen: ({})", dlerror()); // NOLINT(concurrency-mt-unsafe) // MT-Safe on Linux, see man dlerror
@ -34,7 +34,7 @@ void * SharedLibrary::getImpl(std::string_view name, bool no_throw)
{
dlerror(); // NOLINT(concurrency-mt-unsafe) // MT-Safe on Linux, see man dlerror
auto * res = dlsym(handle, name.data());
auto * res = dlsym(handle, name.data()); // NOLINT
if (char * error = dlerror()) // NOLINT(concurrency-mt-unsafe) // MT-Safe on Linux, see man dlerror
{

View File

@ -119,7 +119,7 @@ std::pair<std::string_view, std::string_view> clickhouse_short_names[] =
};
enum class InstructionFail
enum class InstructionFail : uint8_t
{
NONE = 0,
SSE3 = 1,

View File

@ -674,8 +674,7 @@ private:
if (pos + length > end)
length = end - pos;
if (length > sizeof(CodePoint))
length = sizeof(CodePoint);
length = std::min(length, sizeof(CodePoint));
CodePoint res = 0;
memcpy(&res, pos, length);
@ -883,9 +882,7 @@ public:
throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error in markov model");
size_t offset_from_begin_of_string = pos - data;
size_t determinator_sliding_window_size = params.determinator_sliding_window_size;
if (determinator_sliding_window_size > determinator_size)
determinator_sliding_window_size = determinator_size;
size_t determinator_sliding_window_size = std::min(params.determinator_sliding_window_size, determinator_size);
size_t determinator_sliding_window_overflow = offset_from_begin_of_string + determinator_sliding_window_size > determinator_size
? offset_from_begin_of_string + determinator_sliding_window_size - determinator_size : 0;

View File

@ -119,8 +119,7 @@ void ODBCSource::insertValue(
time_t time = 0;
const DataTypeDateTime & datetime_type = assert_cast<const DataTypeDateTime &>(*data_type);
readDateTimeText(time, in, datetime_type.getTimeZone());
if (time < 0)
time = 0;
time = std::max<time_t>(time, 0);
column.insert(static_cast<UInt32>(time));
break;
}

View File

@ -37,7 +37,7 @@ std::string getIdentifierQuote(nanodbc::ConnectionHolderPtr connection_holder)
IdentifierQuotingStyle getQuotingStyle(nanodbc::ConnectionHolderPtr connection)
{
auto identifier_quote = getIdentifierQuote(connection);
if (identifier_quote.length() == 0)
if (identifier_quote.empty())
return IdentifierQuotingStyle::None;
else if (identifier_quote[0] == '`')
return IdentifierQuotingStyle::Backticks;

View File

@ -111,13 +111,11 @@ void processTableFiles(const fs::path & data_path, fs::path dst_path, bool test_
std::shared_ptr<WriteBuffer> directory_meta;
if (test_mode)
{
auto files_root = dst_path / prefix;
directory_meta = std::make_shared<WriteBufferFromHTTP>(HTTPConnectionGroupType::HTTP, Poco::URI(dst_path / directory_prefix / ".index"), Poco::Net::HTTPRequest::HTTP_PUT);
}
else
{
dst_path = fs::canonical(dst_path);
auto files_root = dst_path / prefix;
fs::create_directories(dst_path / directory_prefix);
directory_meta = std::make_shared<WriteBufferFromFile>(dst_path / directory_prefix / ".index");
}

View File

@ -93,8 +93,6 @@ namespace
break;
}
size_t id_endpos = line.find('\t');
String id_as_string = line.substr(0, id_endpos);
UUID id = parse<UUID>(line);
line.clear();

View File

@ -8,7 +8,7 @@
namespace DB
{
class AccessControl;
enum class AccessEntityType;
enum class AccessEntityType : uint8_t;
struct IAccessEntity;
using AccessEntityPtr = std::shared_ptr<const IAccessEntity>;
class AccessRightsElements;

View File

@ -233,7 +233,7 @@ namespace
/**
* Levels:
* 1. GLOBAL
* 1. GLOBAL
* 2. DATABASE_LEVEL 2. GLOBAL_WITH_PARAMETER (parameter example: named collection)
* 3. TABLE_LEVEL
* 4. COLUMN_LEVEL
@ -241,11 +241,12 @@ namespace
enum Level
{
GLOBAL_LEVEL,
DATABASE_LEVEL,
GLOBAL_LEVEL = 0,
DATABASE_LEVEL = 1,
GLOBAL_WITH_PARAMETER = DATABASE_LEVEL,
TABLE_LEVEL,
COLUMN_LEVEL,
TABLE_LEVEL = 2,
COLUMN_LEVEL = 3,
MAX = COLUMN_LEVEL,
};
AccessFlags getAllGrantableFlags(Level level)
@ -520,7 +521,7 @@ public:
private:
AccessFlags getAllGrantableFlags() const { return ::DB::getAllGrantableFlags(level); }
AccessFlags getChildAllGrantableFlags() const { return ::DB::getAllGrantableFlags(static_cast<Level>(level + 1)); }
AccessFlags getChildAllGrantableFlags() const { return ::DB::getAllGrantableFlags(static_cast<Level>(level == Level::MAX ? level : (level + 1))); }
Node * tryGetChild(std::string_view name) const
{

View File

@ -118,13 +118,16 @@ void AuthenticationData::setPassword(const String & password_)
switch (type)
{
case AuthenticationType::PLAINTEXT_PASSWORD:
return setPasswordHashBinary(Util::stringToDigest(password_));
setPasswordHashBinary(Util::stringToDigest(password_));
return;
case AuthenticationType::SHA256_PASSWORD:
return setPasswordHashBinary(Util::encodeSHA256(password_));
setPasswordHashBinary(Util::encodeSHA256(password_));
return;
case AuthenticationType::DOUBLE_SHA1_PASSWORD:
return setPasswordHashBinary(Util::encodeDoubleSHA1(password_));
setPasswordHashBinary(Util::encodeDoubleSHA1(password_));
return;
case AuthenticationType::BCRYPT_PASSWORD:
case AuthenticationType::NO_PASSWORD:
@ -146,7 +149,7 @@ void AuthenticationData::setPasswordBcrypt(const String & password_, int workfac
if (type != AuthenticationType::BCRYPT_PASSWORD)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot specify bcrypt password for authentication type {}", toString(type));
return setPasswordHashBinary(Util::encodeBcrypt(password_, workfactor_));
setPasswordHashBinary(Util::encodeBcrypt(password_, workfactor_));
}
String AuthenticationData::getPassword() const

View File

@ -7,7 +7,7 @@ namespace DB
{
/// Represents the type of an access entity (see the IAccessEntity class).
enum class AccessEntityType
enum class AccessEntityType : uint8_t
{
USER,
ROLE,

View File

@ -115,15 +115,15 @@ namespace
{
UNKNOWN = -2,
GROUP = -1,
GLOBAL,
DATABASE,
TABLE,
GLOBAL = 0,
DATABASE = 1,
TABLE = 2,
VIEW = TABLE,
COLUMN,
DICTIONARY,
NAMED_COLLECTION,
USER_NAME,
TABLE_ENGINE,
COLUMN = 3,
DICTIONARY = 4,
NAMED_COLLECTION = 5,
USER_NAME = 6,
TABLE_ENGINE = 7,
};
struct Node;

View File

@ -245,7 +245,7 @@ bool AccessRightsElements::sameOptions() const
void AccessRightsElements::eraseNonGrantable()
{
boost::range::remove_erase_if(*this, [](AccessRightsElement & element)
std::erase_if(*this, [](AccessRightsElement & element)
{
element.eraseNonGrantable();
return element.empty();

View File

@ -7,7 +7,7 @@ namespace DB
{
/// Represents an access type which can be granted on databases, tables, columns, etc.
enum class AccessType
enum class AccessType : uint8_t
{
/// Macro M should be defined as M(name, aliases, node_type, parent_group_name)
/// where name is identifier with underscores (instead of spaces);

View File

@ -308,7 +308,7 @@ void AllowedClientHosts::removeAddress(const IPAddress & address)
if (address.isLoopback())
local_host = false;
else
boost::range::remove_erase(addresses, address);
std::erase(addresses, address);
}
void AllowedClientHosts::addSubnet(const IPSubnet & subnet)
@ -328,7 +328,7 @@ void AllowedClientHosts::removeSubnet(const IPSubnet & subnet)
else if (subnet.isMaskAllBitsOne())
removeAddress(subnet.getPrefix());
else
boost::range::remove_erase(subnets, subnet);
std::erase(subnets, subnet);
}
void AllowedClientHosts::addName(const String & name)
@ -344,7 +344,7 @@ void AllowedClientHosts::removeName(const String & name)
if (boost::iequals(name, "localhost"))
local_host = false;
else
boost::range::remove_erase(names, name);
std::erase(names, name);
}
void AllowedClientHosts::addNameRegexp(const String & name_regexp)
@ -364,7 +364,7 @@ void AllowedClientHosts::removeNameRegexp(const String & name_regexp)
else if (name_regexp == ".*")
any_host = false;
else
boost::range::remove_erase(name_regexps, name_regexp);
std::erase(name_regexps, name_regexp);
}
void AllowedClientHosts::addLikePattern(const String & pattern)
@ -384,7 +384,7 @@ void AllowedClientHosts::removeLikePattern(const String & pattern)
else if ((pattern == "%") || (pattern == "0.0.0.0/0") || (pattern == "::/0"))
any_host = false;
else
boost::range::remove_erase(like_patterns, pattern);
std::erase(like_patterns, pattern);
}
void AllowedClientHosts::addLocalHost()

View File

@ -6,7 +6,7 @@
namespace DB
{
enum class AuthenticationType
enum class AuthenticationType : uint8_t
{
/// User doesn't have to enter password.
NO_PASSWORD,

View File

@ -9,7 +9,7 @@ namespace DB
using QuotaValue = UInt64;
/// Kinds of resource what we wish to quota.
enum class QuotaType
enum class QuotaType : uint8_t
{
QUERIES, /// Number of queries.
QUERY_SELECTS, /// Number of select queries.
@ -45,7 +45,7 @@ struct QuotaTypeInfo
/// Key to share quota consumption.
/// Users with the same key share the same amount of resource.
enum class QuotaKeyType
enum class QuotaKeyType : uint8_t
{
NONE, /// All users share the same quota.
USER_NAME, /// Connections with the same user name share the same quota.

View File

@ -25,7 +25,7 @@ struct RowPolicyName
/// Types of the filters of row policies.
/// Currently only RowPolicyFilterType::SELECT is supported.
enum class RowPolicyFilterType
enum class RowPolicyFilterType : uint8_t
{
/// Filter is a SQL conditional expression used to figure out which rows should be visible
/// for user or available for modification. If the expression returns NULL or false for some rows

View File

@ -71,7 +71,7 @@ namespace
SCOPE_EXIT(
{
if (!succeeded)
std::filesystem::remove(tmp_file_path);
(void)std::filesystem::remove(tmp_file_path);
});
/// Write the file.
@ -302,7 +302,7 @@ void DiskAccessStorage::writeLists()
}
/// The list files was successfully written, we don't need the 'need_rebuild_lists.mark' file any longer.
std::filesystem::remove(getNeedRebuildListsMarkFilePath(directory_path));
(void)std::filesystem::remove(getNeedRebuildListsMarkFilePath(directory_path));
types_of_lists_to_write.clear();
}
@ -419,7 +419,7 @@ void DiskAccessStorage::removeAllExceptInMemory(const boost::container::flat_set
const auto & id = it->first;
++it; /// We must go to the next element in the map `entries_by_id` here because otherwise removeNoLock() can invalidate our iterator.
if (!ids_to_keep.contains(id))
removeNoLock(id, /* throw_if_not_exists */ true, /* write_on_disk= */ false);
(void)removeNoLock(id, /* throw_if_not_exists */ true, /* write_on_disk= */ false);
}
}
@ -549,7 +549,7 @@ bool DiskAccessStorage::insertNoLock(const UUID & id, const AccessEntityPtr & ne
if (name_collision && (id_by_name != id))
{
assert(replace_if_exists);
removeNoLock(id_by_name, /* throw_if_not_exists= */ false, write_on_disk);
removeNoLock(id_by_name, /* throw_if_not_exists= */ false, write_on_disk); // NOLINT
}
if (id_collision)
@ -574,7 +574,7 @@ bool DiskAccessStorage::insertNoLock(const UUID & id, const AccessEntityPtr & ne
return true;
}
removeNoLock(id, /* throw_if_not_exists= */ false, write_on_disk);
removeNoLock(id, /* throw_if_not_exists= */ false, write_on_disk); // NOLINT
}
/// Do insertion.

View File

@ -161,9 +161,9 @@ void GrantedRoles::makeUnion(const GrantedRoles & other)
void GrantedRoles::makeIntersection(const GrantedRoles & other)
{
boost::range::remove_erase_if(roles, [&other](const UUID & id) { return other.roles.find(id) == other.roles.end(); });
boost::range::remove_erase_if(roles, [&other](const UUID & id) { return other.roles.find(id) == other.roles.end(); }); // NOLINT
boost::range::remove_erase_if(roles_with_admin_option, [&other](const UUID & id)
boost::range::remove_erase_if(roles_with_admin_option, [&other](const UUID & id) // NOLINT
{
return other.roles_with_admin_option.find(id) == other.roles_with_admin_option.end();
});

View File

@ -583,7 +583,7 @@ void IAccessStorage::backup(BackupEntriesCollector & backup_entries_collector, c
throwBackupNotAllowed();
auto entities = readAllWithIDs(type);
boost::range::remove_erase_if(entities, [](const std::pair<UUID, AccessEntityPtr> & x) { return !x.second->isBackupAllowed(); });
std::erase_if(entities, [](const std::pair<UUID, AccessEntityPtr> & x) { return !x.second->isBackupAllowed(); });
if (entities.empty())
return;

View File

@ -24,7 +24,7 @@ namespace DB
struct User;
class Credentials;
class ExternalAuthenticators;
enum class AuthenticationType;
enum class AuthenticationType : uint8_t;
class BackupEntriesCollector;
class RestorerFromBackup;

View File

@ -76,7 +76,7 @@ void LDAPAccessStorage::setConfiguration(const Poco::Util::AbstractConfiguration
config.keys(prefix, all_keys);
for (const auto & key : all_keys)
{
if (key == "role_mapping" || key.find("role_mapping[") == 0)
if (key == "role_mapping" || key.starts_with("role_mapping["))
parseLDAPRoleSearchParams(role_search_params_cfg.emplace_back(), config, prefix_str + key);
}
}
@ -94,7 +94,7 @@ void LDAPAccessStorage::setConfiguration(const Poco::Util::AbstractConfiguration
role_change_subscription = access_control.subscribeForChanges<Role>(
[this] (const UUID & id, const AccessEntityPtr & entity)
{
return this->processRoleChange(id, entity);
this->processRoleChange(id, entity);
}
);
}
@ -200,7 +200,7 @@ void LDAPAccessStorage::applyRoleChangeNoLock(bool grant, const UUID & role_id,
void LDAPAccessStorage::assignRolesNoLock(User & user, const LDAPClient::SearchResultsList & external_roles) const
{
const auto external_roles_hash = boost::hash<LDAPClient::SearchResultsList>{}(external_roles);
return assignRolesNoLock(user, external_roles, external_roles_hash);
assignRolesNoLock(user, external_roles, external_roles_hash);
}

View File

@ -26,7 +26,7 @@ class LDAPClient
public:
struct SearchParams
{
enum class Scope
enum class Scope : uint8_t
{
BASE,
ONE_LEVEL,
@ -57,20 +57,20 @@ public:
struct Params
{
enum class ProtocolVersion
enum class ProtocolVersion : uint8_t
{
V2,
V3
};
enum class TLSEnable
enum class TLSEnable : uint8_t
{
NO,
YES_STARTTLS,
YES
};
enum class TLSProtocolVersion
enum class TLSProtocolVersion : uint8_t
{
SSL2,
SSL3,
@ -79,7 +79,7 @@ public:
TLS1_2
};
enum class TLSRequireCert
enum class TLSRequireCert : uint8_t
{
NEVER,
ALLOW,
@ -87,7 +87,7 @@ public:
DEMAND
};
enum class SASLMechanism
enum class SASLMechanism : uint8_t
{
UNKNOWN,
SIMPLE

View File

@ -106,7 +106,7 @@ bool MemoryAccessStorage::insertNoLock(const UUID & id, const AccessEntityPtr &
if (name_collision && (id_by_name != id))
{
assert(replace_if_exists);
removeNoLock(id_by_name, /* throw_if_not_exists= */ true);
removeNoLock(id_by_name, /* throw_if_not_exists= */ true); // NOLINT
}
if (id_collision)
@ -128,7 +128,7 @@ bool MemoryAccessStorage::insertNoLock(const UUID & id, const AccessEntityPtr &
}
return true;
}
removeNoLock(id, /* throw_if_not_exists= */ true);
removeNoLock(id, /* throw_if_not_exists= */ true); // NOLINT
}
/// Do insertion.
@ -238,7 +238,7 @@ void MemoryAccessStorage::removeAllExceptNoLock(const boost::container::flat_set
const auto & id = it->first;
++it; /// We must go to the next element in the map `entries_by_id` here because otherwise removeNoLock() can invalidate our iterator.
if (!ids_to_keep.contains(id))
removeNoLock(id, /* throw_if_not_exists */ true);
removeNoLock(id, /* throw_if_not_exists */ true); // NOLINT
}
}

View File

@ -238,7 +238,7 @@ void MultipleAccessStorage::moveAccessEntities(const std::vector<UUID> & ids, co
try
{
source_storage->remove(ids);
source_storage->remove(ids); // NOLINT
need_rollback = true;
destination_storage->insert(to_move, ids);
}

View File

@ -616,7 +616,7 @@ void ReplicatedAccessStorage::setEntityNoLock(const UUID & id, const AccessEntit
void ReplicatedAccessStorage::removeEntityNoLock(const UUID & id)
{
LOG_DEBUG(getLogger(), "Removing entity with id {}", toString(id));
memory_storage.remove(id, /* throw_if_not_exists= */ false);
memory_storage.remove(id, /* throw_if_not_exists= */ false); // NOLINT
}
@ -654,7 +654,7 @@ void ReplicatedAccessStorage::backup(BackupEntriesCollector & backup_entries_col
throwBackupNotAllowed();
auto entities = readAllWithIDs(type);
boost::range::remove_erase_if(entities, [](const std::pair<UUID, AccessEntityPtr> & x) { return !x.second->isBackupAllowed(); });
std::erase_if(entities, [](const std::pair<UUID, AccessEntityPtr> & x) { return !x.second->isBackupAllowed(); });
if (entities.empty())
return;

View File

@ -190,11 +190,11 @@ void SettingsConstraints::check(const Settings & current_settings, const Setting
void SettingsConstraints::check(const Settings & current_settings, SettingsChanges & changes, SettingSource source) const
{
boost::range::remove_erase_if(
std::erase_if(
changes,
[&](SettingChange & change) -> bool
{
return !checkImpl(current_settings, const_cast<SettingChange &>(change), THROW_ON_VIOLATION, source);
return !checkImpl(current_settings, change, THROW_ON_VIOLATION, source);
});
}
@ -211,7 +211,7 @@ void SettingsConstraints::check(const MergeTreeSettings & current_settings, cons
void SettingsConstraints::clamp(const Settings & current_settings, SettingsChanges & changes, SettingSource source) const
{
boost::range::remove_erase_if(
std::erase_if(
changes,
[&](SettingChange & change) -> bool
{

View File

@ -269,9 +269,9 @@ struct AggregateFunctionFlameGraphData
using Entries = HashMap<UInt64, Pair>;
AggregateFunctionFlameGraphTree tree;
Entries entries;
Entry * free_list = nullptr;
AggregateFunctionFlameGraphTree tree;
Entry * alloc(Arena * arena)
{

View File

@ -43,7 +43,7 @@ namespace ErrorCodes
namespace
{
enum class Sampler
enum class Sampler : uint8_t
{
NONE,
RNG,
@ -735,14 +735,14 @@ IAggregateFunction * createWithNumericOrTimeType(const IDataType & argument_type
template <typename Trait, typename ... TArgs>
inline AggregateFunctionPtr createAggregateFunctionGroupArrayImpl(const DataTypePtr & argument_type, const Array & parameters, TArgs ... args)
{
if (auto res = createWithNumericOrTimeType<GroupArrayNumericImpl, Trait>(*argument_type, argument_type, parameters, std::forward<TArgs>(args)...))
if (auto res = createWithNumericOrTimeType<GroupArrayNumericImpl, Trait>(*argument_type, argument_type, parameters, args...))
return AggregateFunctionPtr(res);
WhichDataType which(argument_type);
if (which.idx == TypeIndex::String)
return std::make_shared<GroupArrayGeneralImpl<GroupArrayNodeString, Trait>>(argument_type, parameters, std::forward<TArgs>(args)...);
return std::make_shared<GroupArrayGeneralImpl<GroupArrayNodeString, Trait>>(argument_type, parameters, args...);
return std::make_shared<GroupArrayGeneralImpl<GroupArrayNodeGeneral, Trait>>(argument_type, parameters, std::forward<TArgs>(args)...);
return std::make_shared<GroupArrayGeneralImpl<GroupArrayNodeGeneral, Trait>>(argument_type, parameters, args...);
}
size_t getMaxArraySize()
@ -753,13 +753,21 @@ size_t getMaxArraySize()
return 0xFFFFFF;
}
bool hasLimitArraySize()
{
if (auto context = Context::getGlobalContextInstance())
return context->getServerSettings().aggregate_function_group_array_has_limit_size;
return false;
}
template <bool Tlast>
AggregateFunctionPtr createAggregateFunctionGroupArray(
const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
{
assertUnary(name, argument_types);
bool limit_size = false;
bool limit_size = hasLimitArraySize();
UInt64 max_elems = getMaxArraySize();
if (parameters.empty())

View File

@ -47,7 +47,7 @@ namespace ErrorCodes
namespace
{
enum class GroupArraySortedStrategy
enum class GroupArraySortedStrategy : uint8_t
{
heap,
sort
@ -374,10 +374,10 @@ AggregateFunctionPtr createWithNumericOrTimeType(const IDataType & argument_type
template <template <typename> class AggregateFunctionTemplate, typename ... TArgs>
inline AggregateFunctionPtr createAggregateFunctionGroupArraySortedImpl(const DataTypePtr & argument_type, const Array & parameters, TArgs ... args)
{
if (auto res = createWithNumericOrTimeType<AggregateFunctionTemplate>(*argument_type, argument_type, parameters, std::forward<TArgs>(args)...))
if (auto res = createWithNumericOrTimeType<AggregateFunctionTemplate>(*argument_type, argument_type, parameters, args...))
return AggregateFunctionPtr(res);
return std::make_shared<AggregateFunctionTemplate<Field>>(argument_type, parameters, std::forward<TArgs>(args)...);
return std::make_shared<AggregateFunctionTemplate<Field>>(argument_type, parameters, args...);
}
AggregateFunctionPtr createAggregateFunctionGroupArray(

View File

@ -275,16 +275,16 @@ template <typename HasLimit, typename ... TArgs>
IAggregateFunction * createWithExtraTypes(const DataTypePtr & argument_type, TArgs && ... args)
{
WhichDataType which(argument_type);
if (which.idx == TypeIndex::Date) return new AggregateFunctionGroupUniqArrayDate<HasLimit>(argument_type, std::forward<TArgs>(args)...);
else if (which.idx == TypeIndex::DateTime) return new AggregateFunctionGroupUniqArrayDateTime<HasLimit>(argument_type, std::forward<TArgs>(args)...);
else if (which.idx == TypeIndex::IPv4) return new AggregateFunctionGroupUniqArrayIPv4<HasLimit>(argument_type, std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Date) return new AggregateFunctionGroupUniqArrayDate<HasLimit>(argument_type, args...);
else if (which.idx == TypeIndex::DateTime) return new AggregateFunctionGroupUniqArrayDateTime<HasLimit>(argument_type, args...);
else if (which.idx == TypeIndex::IPv4) return new AggregateFunctionGroupUniqArrayIPv4<HasLimit>(argument_type, args...);
else
{
/// Check that we can use plain version of AggregateFunctionGroupUniqArrayGeneric
if (argument_type->isValueUnambiguouslyRepresentedInContiguousMemoryRegion())
return new AggregateFunctionGroupUniqArrayGeneric<true, HasLimit>(argument_type, std::forward<TArgs>(args)...);
return new AggregateFunctionGroupUniqArrayGeneric<true, HasLimit>(argument_type, args...);
else
return new AggregateFunctionGroupUniqArrayGeneric<false, HasLimit>(argument_type, std::forward<TArgs>(args)...);
return new AggregateFunctionGroupUniqArrayGeneric<false, HasLimit>(argument_type, args...);
}
}
@ -292,10 +292,10 @@ template <typename HasLimit, typename ... TArgs>
inline AggregateFunctionPtr createAggregateFunctionGroupUniqArrayImpl(const std::string & name, const DataTypePtr & argument_type, TArgs ... args)
{
AggregateFunctionPtr res(createWithNumericType<AggregateFunctionGroupUniqArray, HasLimit, const DataTypePtr &, TArgs...>(*argument_type, argument_type, std::forward<TArgs>(args)...));
AggregateFunctionPtr res(createWithNumericType<AggregateFunctionGroupUniqArray, HasLimit, const DataTypePtr &>(*argument_type, argument_type, args...));
if (!res)
res = AggregateFunctionPtr(createWithExtraTypes<HasLimit>(argument_type, std::forward<TArgs>(args)...));
res = AggregateFunctionPtr(createWithExtraTypes<HasLimit>(argument_type, args...));
if (!res)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}",

View File

@ -31,7 +31,7 @@ namespace
struct KolmogorovSmirnov : public StatisticalSample<Float64, Float64>
{
enum class Alternative
enum class Alternative : uint8_t
{
TwoSided,
Less,

View File

@ -120,8 +120,7 @@ struct LargestTriangleThreeBucketsData : public StatisticalSample<Float64, Float
// the end index of next bucket
size_t end_index = 1 + static_cast<int>(floor(single_bucket_size * (i + 2)));
// current bucket is the last bucket
if (end_index > this->x.size())
end_index = this->x.size();
end_index = std::min(end_index, this->x.size());
// Compute the average point in the next bucket
Float64 avg_x = 0;

View File

@ -39,7 +39,7 @@ struct MannWhitneyData : public StatisticalSample<Float64, Float64>
*the probability of X being greater than Y is equal to the probability of Y being greater than X".
*Or "the distribution F of first sample equals to the distribution G of second sample".
*Then alternative for this hypothesis (H1) is "two-sided"(F != G), "less"(F < G), "greater" (F > G). */
enum class Alternative
enum class Alternative : uint8_t
{
TwoSided,
Less,

View File

@ -48,7 +48,7 @@ struct MaxIntersectionsData
Array value;
};
enum class AggregateFunctionIntersectionsKind
enum class AggregateFunctionIntersectionsKind : uint8_t
{
Count,
Position

View File

@ -184,7 +184,7 @@ public:
}
private:
enum class PatternActionType
enum class PatternActionType : uint8_t
{
SpecificEvent,
AnyEvent,
@ -577,7 +577,7 @@ protected:
}
private:
enum class DFATransition : char
enum class DFATransition : uint8_t
{
/// .-------.
/// | |

View File

@ -41,13 +41,13 @@ namespace ErrorCodes
namespace
{
enum class SequenceDirection
enum class SequenceDirection : uint8_t
{
Forward,
Backward,
};
enum SequenceBase
enum SequenceBase : uint8_t
{
Head,
Tail,

View File

@ -104,7 +104,7 @@ struct AggregateFunctionVarianceData
Float64 m2 = 0.0;
};
enum class VarKind
enum class VarKind : uint8_t
{
varSampStable,
stddevSampStable,
@ -343,7 +343,7 @@ struct CovarianceData : public BaseCovarianceData<compute_marginal_moments>
Float64 co_moment = 0.0;
};
enum class CovarKind
enum class CovarKind : uint8_t
{
covarSampStable,
covarPopStable,

View File

@ -35,7 +35,7 @@ namespace DB
struct Settings;
enum class StatisticsFunctionKind
enum class StatisticsFunctionKind : uint8_t
{
varPop, varSamp,
stddevPop, stddevSamp,

View File

@ -20,7 +20,7 @@ namespace ErrorCodes
namespace
{
enum class StatisticsMatrixFunctionKind
enum class StatisticsMatrixFunctionKind : uint8_t
{
covarPopMatrix,
covarSampMatrix,

View File

@ -7,7 +7,7 @@ namespace DB
namespace
{
enum class Kind
enum class Kind : uint8_t
{
OrNull,
OrDefault

View File

@ -200,7 +200,7 @@ public:
throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid flag for zero count");
}
readBinary(zero_count, buf);
count = static_cast<Float64>(negative_store->count + zero_count + store->count);
count = negative_store->count + zero_count + store->count;
}
/// NOLINTEND(readability-static-accessed-through-instance)

View File

@ -14,11 +14,11 @@ static IAggregateFunction * createWithNumericType(const IDataType & argument_typ
{
WhichDataType which(argument_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<TYPE>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<TYPE>(args...);
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<Int8>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<Int16>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<Int8>(args...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<Int16>(args...);
return nullptr;
}
@ -27,11 +27,11 @@ static IAggregateFunction * createWithNumericType(const IDataType & argument_typ
{
WhichDataType which(argument_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<Data<TYPE>>(std::forward<TArgs>(args)...); /// NOLINT
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<Data<TYPE>>(args...); /// NOLINT
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<Data<Int8>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<Data<Int16>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<Data<Int8>>(args...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<Data<Int16>>(args...);
return nullptr;
}
@ -40,11 +40,11 @@ static IAggregateFunction * createWithNumericType(const IDataType & argument_typ
{
WhichDataType which(argument_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<TYPE, bool_param>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<TYPE, bool_param>(args...);
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<Int8, bool_param>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<Int16, bool_param>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<Int8, bool_param>(args...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<Int16, bool_param>(args...);
return nullptr;
}
@ -53,11 +53,11 @@ static IAggregateFunction * createWithNumericType(const IDataType & argument_typ
{
WhichDataType which(argument_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<TYPE, Data>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<TYPE, Data>(args...);
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<Int8, Data>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<Int16, Data>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<Int8, Data>(args...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<Int16, Data>(args...);
return nullptr;
}
@ -66,11 +66,11 @@ static IAggregateFunction * createWithNumericType(const IDataType & argument_typ
{
WhichDataType which(argument_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<TYPE, Data<TYPE>>(std::forward<TArgs>(args)...); /// NOLINT
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<TYPE, Data<TYPE>>(args...); /// NOLINT
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<Int8, Data<Int8>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<Int16, Data<Int16>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<Int8, Data<Int8>>(args...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<Int16, Data<Int16>>(args...);
return nullptr;
}
@ -79,11 +79,11 @@ static IAggregateFunction * createWithNumericType(const IDataType & argument_typ
{
WhichDataType which(argument_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<TYPE, Data<TYPE, bool_param>>(std::forward<TArgs>(args)...); /// NOLINT
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<TYPE, Data<TYPE, bool_param>>(args...); /// NOLINT
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<Int8, Data<Int8, bool_param>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<Int16, Data<Int16, bool_param>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<Int8, Data<Int8, bool_param>>(args...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<Int16, Data<Int16, bool_param>>(args...);
return nullptr;
}
@ -91,12 +91,12 @@ template <template <typename, typename> class AggregateFunctionTemplate, templat
static IAggregateFunction * createWithUnsignedIntegerType(const IDataType & argument_type, TArgs && ... args)
{
WhichDataType which(argument_type);
if (which.idx == TypeIndex::UInt8) return new AggregateFunctionTemplate<UInt8, Data<UInt8>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::UInt16) return new AggregateFunctionTemplate<UInt16, Data<UInt16>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::UInt32) return new AggregateFunctionTemplate<UInt32, Data<UInt32>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::UInt64) return new AggregateFunctionTemplate<UInt64, Data<UInt64>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::UInt128) return new AggregateFunctionTemplate<UInt128, Data<UInt128>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::UInt256) return new AggregateFunctionTemplate<UInt256, Data<UInt256>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::UInt8) return new AggregateFunctionTemplate<UInt8, Data<UInt8>>(args...);
if (which.idx == TypeIndex::UInt16) return new AggregateFunctionTemplate<UInt16, Data<UInt16>>(args...);
if (which.idx == TypeIndex::UInt32) return new AggregateFunctionTemplate<UInt32, Data<UInt32>>(args...);
if (which.idx == TypeIndex::UInt64) return new AggregateFunctionTemplate<UInt64, Data<UInt64>>(args...);
if (which.idx == TypeIndex::UInt128) return new AggregateFunctionTemplate<UInt128, Data<UInt128>>(args...);
if (which.idx == TypeIndex::UInt256) return new AggregateFunctionTemplate<UInt256, Data<UInt256>>(args...);
return nullptr;
}
@ -104,22 +104,22 @@ template <template <typename, typename> class AggregateFunctionTemplate, templat
static IAggregateFunction * createWithSignedIntegerType(const IDataType & argument_type, TArgs && ... args)
{
WhichDataType which(argument_type);
if (which.idx == TypeIndex::Int8) return new AggregateFunctionTemplate<Int8, Data<Int8>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Int16) return new AggregateFunctionTemplate<Int16, Data<Int16>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Int32) return new AggregateFunctionTemplate<Int32, Data<Int32>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Int64) return new AggregateFunctionTemplate<Int64, Data<Int64>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Int128) return new AggregateFunctionTemplate<Int128, Data<Int128>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Int256) return new AggregateFunctionTemplate<Int256, Data<Int256>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Int8) return new AggregateFunctionTemplate<Int8, Data<Int8>>(args...);
if (which.idx == TypeIndex::Int16) return new AggregateFunctionTemplate<Int16, Data<Int16>>(args...);
if (which.idx == TypeIndex::Int32) return new AggregateFunctionTemplate<Int32, Data<Int32>>(args...);
if (which.idx == TypeIndex::Int64) return new AggregateFunctionTemplate<Int64, Data<Int64>>(args...);
if (which.idx == TypeIndex::Int128) return new AggregateFunctionTemplate<Int128, Data<Int128>>(args...);
if (which.idx == TypeIndex::Int256) return new AggregateFunctionTemplate<Int256, Data<Int256>>(args...);
return nullptr;
}
template <template <typename, typename> class AggregateFunctionTemplate, template <typename> class Data, typename... TArgs>
static IAggregateFunction * createWithIntegerType(const IDataType & argument_type, TArgs && ... args)
{
IAggregateFunction * f = createWithUnsignedIntegerType<AggregateFunctionTemplate, Data>(argument_type, std::forward<TArgs>(args)...);
IAggregateFunction * f = createWithUnsignedIntegerType<AggregateFunctionTemplate, Data>(argument_type, args...);
if (f)
return f;
return createWithSignedIntegerType<AggregateFunctionTemplate, Data>(argument_type, std::forward<TArgs>(args)...);
return createWithSignedIntegerType<AggregateFunctionTemplate, Data>(argument_type, args...);
}
template <template <typename, typename> class AggregateFunctionTemplate, template <typename> class Data, typename... TArgs>
@ -128,14 +128,14 @@ static IAggregateFunction * createWithBasicNumberOrDateOrDateTime(const IDataTyp
WhichDataType which(argument_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) \
return new AggregateFunctionTemplate<TYPE, Data<TYPE>>(std::forward<TArgs>(args)...); /// NOLINT
return new AggregateFunctionTemplate<TYPE, Data<TYPE>>(args...); /// NOLINT
FOR_BASIC_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Date)
return new AggregateFunctionTemplate<UInt16, Data<UInt16>>(std::forward<TArgs>(args)...);
return new AggregateFunctionTemplate<UInt16, Data<UInt16>>(args...);
if (which.idx == TypeIndex::DateTime)
return new AggregateFunctionTemplate<UInt32, Data<UInt32>>(std::forward<TArgs>(args)...);
return new AggregateFunctionTemplate<UInt32, Data<UInt32>>(args...);
return nullptr;
}
@ -143,17 +143,17 @@ static IAggregateFunction * createWithBasicNumberOrDateOrDateTime(const IDataTyp
template <template <typename> class AggregateFunctionTemplate, typename... TArgs>
static IAggregateFunction * createWithNumericBasedType(const IDataType & argument_type, TArgs && ... args)
{
IAggregateFunction * f = createWithNumericType<AggregateFunctionTemplate>(argument_type, std::forward<TArgs>(args)...);
IAggregateFunction * f = createWithNumericType<AggregateFunctionTemplate>(argument_type, args...);
if (f)
return f;
/// expects that DataTypeDate based on UInt16, DataTypeDateTime based on UInt32
WhichDataType which(argument_type);
if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate<UInt16>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate<UInt32>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::UUID) return new AggregateFunctionTemplate<UUID>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::IPv4) return new AggregateFunctionTemplate<IPv4>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::IPv6) return new AggregateFunctionTemplate<IPv6>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate<UInt16>(args...);
if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate<UInt32>(args...);
if (which.idx == TypeIndex::UUID) return new AggregateFunctionTemplate<UUID>(args...);
if (which.idx == TypeIndex::IPv4) return new AggregateFunctionTemplate<IPv4>(args...);
if (which.idx == TypeIndex::IPv6) return new AggregateFunctionTemplate<IPv6>(args...);
return nullptr;
}
@ -161,12 +161,12 @@ template <template <typename> class AggregateFunctionTemplate, typename... TArgs
static IAggregateFunction * createWithDecimalType(const IDataType & argument_type, TArgs && ... args)
{
WhichDataType which(argument_type);
if (which.idx == TypeIndex::Decimal32) return new AggregateFunctionTemplate<Decimal32>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Decimal64) return new AggregateFunctionTemplate<Decimal64>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Decimal128) return new AggregateFunctionTemplate<Decimal128>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Decimal256) return new AggregateFunctionTemplate<Decimal256>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Decimal32) return new AggregateFunctionTemplate<Decimal32>(args...);
if (which.idx == TypeIndex::Decimal64) return new AggregateFunctionTemplate<Decimal64>(args...);
if (which.idx == TypeIndex::Decimal128) return new AggregateFunctionTemplate<Decimal128>(args...);
if (which.idx == TypeIndex::Decimal256) return new AggregateFunctionTemplate<Decimal256>(args...);
if constexpr (AggregateFunctionTemplate<DateTime64>::DateTime64Supported)
if (which.idx == TypeIndex::DateTime64) return new AggregateFunctionTemplate<DateTime64>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::DateTime64) return new AggregateFunctionTemplate<DateTime64>(args...);
return nullptr;
}
@ -174,12 +174,12 @@ template <template <typename, typename> class AggregateFunctionTemplate, typenam
static IAggregateFunction * createWithDecimalType(const IDataType & argument_type, TArgs && ... args)
{
WhichDataType which(argument_type);
if (which.idx == TypeIndex::Decimal32) return new AggregateFunctionTemplate<Decimal32, Data>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Decimal64) return new AggregateFunctionTemplate<Decimal64, Data>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Decimal128) return new AggregateFunctionTemplate<Decimal128, Data>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Decimal256) return new AggregateFunctionTemplate<Decimal256, Data>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Decimal32) return new AggregateFunctionTemplate<Decimal32, Data>(args...);
if (which.idx == TypeIndex::Decimal64) return new AggregateFunctionTemplate<Decimal64, Data>(args...);
if (which.idx == TypeIndex::Decimal128) return new AggregateFunctionTemplate<Decimal128, Data>(args...);
if (which.idx == TypeIndex::Decimal256) return new AggregateFunctionTemplate<Decimal256, Data>(args...);
if constexpr (AggregateFunctionTemplate<DateTime64, Data>::DateTime64Supported)
if (which.idx == TypeIndex::DateTime64) return new AggregateFunctionTemplate<DateTime64, Data>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::DateTime64) return new AggregateFunctionTemplate<DateTime64, Data>(args...);
return nullptr;
}
@ -190,11 +190,11 @@ static IAggregateFunction * createWithTwoNumericTypesSecond(const IDataType & se
{
WhichDataType which(second_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<FirstType, TYPE>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<FirstType, TYPE>(args...);
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<FirstType, Int8>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<FirstType, Int16>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<FirstType, Int8>(args...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<FirstType, Int16>(args...);
return nullptr;
}
@ -204,13 +204,13 @@ static IAggregateFunction * createWithTwoNumericTypes(const IDataType & first_ty
WhichDataType which(first_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) \
return createWithTwoNumericTypesSecond<TYPE, AggregateFunctionTemplate>(second_type, std::forward<TArgs>(args)...);
return createWithTwoNumericTypesSecond<TYPE, AggregateFunctionTemplate>(second_type, args...);
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8)
return createWithTwoNumericTypesSecond<Int8, AggregateFunctionTemplate>(second_type, std::forward<TArgs>(args)...);
return createWithTwoNumericTypesSecond<Int8, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::Enum16)
return createWithTwoNumericTypesSecond<Int16, AggregateFunctionTemplate>(second_type, std::forward<TArgs>(args)...);
return createWithTwoNumericTypesSecond<Int16, AggregateFunctionTemplate>(second_type, args...);
return nullptr;
}
@ -219,7 +219,7 @@ static IAggregateFunction * createWithTwoBasicNumericTypesSecond(const IDataType
{
WhichDataType which(second_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<FirstType, TYPE>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<FirstType, TYPE>(args...);
FOR_BASIC_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
return nullptr;
@ -231,7 +231,7 @@ static IAggregateFunction * createWithTwoBasicNumericTypes(const IDataType & fir
WhichDataType which(first_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) \
return createWithTwoBasicNumericTypesSecond<TYPE, AggregateFunctionTemplate>(second_type, std::forward<TArgs>(args)...);
return createWithTwoBasicNumericTypesSecond<TYPE, AggregateFunctionTemplate>(second_type, args...);
FOR_BASIC_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
return nullptr;
@ -242,15 +242,15 @@ static IAggregateFunction * createWithTwoNumericOrDateTypesSecond(const IDataTyp
{
WhichDataType which(second_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<FirstType, TYPE>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<FirstType, TYPE>(args...);
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<FirstType, Int8>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<FirstType, Int16>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<FirstType, Int8>(args...);
if (which.idx == TypeIndex::Enum16) return new AggregateFunctionTemplate<FirstType, Int16>(args...);
/// expects that DataTypeDate is based on UInt16 and DataTypeDateTime on UInt32
if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate<FirstType, UInt16>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate<FirstType, UInt32>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate<FirstType, UInt16>(args...);
if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate<FirstType, UInt32>(args...);
return nullptr;
}
@ -261,19 +261,19 @@ static IAggregateFunction * createWithTwoNumericOrDateTypes(const IDataType & fi
WhichDataType which(first_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) \
return createWithTwoNumericOrDateTypesSecond<TYPE, AggregateFunctionTemplate>(second_type, std::forward<TArgs>(args)...);
return createWithTwoNumericOrDateTypesSecond<TYPE, AggregateFunctionTemplate>(second_type, args...);
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8)
return createWithTwoNumericOrDateTypesSecond<Int8, AggregateFunctionTemplate>(second_type, std::forward<TArgs>(args)...);
return createWithTwoNumericOrDateTypesSecond<Int8, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::Enum16)
return createWithTwoNumericOrDateTypesSecond<Int16, AggregateFunctionTemplate>(second_type, std::forward<TArgs>(args)...);
return createWithTwoNumericOrDateTypesSecond<Int16, AggregateFunctionTemplate>(second_type, args...);
/// expects that DataTypeDate is based on UInt16 and DataTypeDateTime on UInt32
if (which.idx == TypeIndex::Date)
return createWithTwoNumericOrDateTypesSecond<UInt16, AggregateFunctionTemplate>(second_type, std::forward<TArgs>(args)...);
return createWithTwoNumericOrDateTypesSecond<UInt16, AggregateFunctionTemplate>(second_type, args...);
if (which.idx == TypeIndex::DateTime)
return createWithTwoNumericOrDateTypesSecond<UInt32, AggregateFunctionTemplate>(second_type, std::forward<TArgs>(args)...);
return createWithTwoNumericOrDateTypesSecond<UInt32, AggregateFunctionTemplate>(second_type, args...);
return nullptr;
}
@ -281,8 +281,8 @@ template <template <typename> class AggregateFunctionTemplate, typename... TArgs
static IAggregateFunction * createWithStringType(const IDataType & argument_type, TArgs && ... args)
{
WhichDataType which(argument_type);
if (which.idx == TypeIndex::String) return new AggregateFunctionTemplate<String>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::FixedString) return new AggregateFunctionTemplate<String>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::String) return new AggregateFunctionTemplate<String>(args...);
if (which.idx == TypeIndex::FixedString) return new AggregateFunctionTemplate<String>(args...);
return nullptr;
}
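
The recurring edit in this file replaces std::forward<TArgs>(args)... with plain args..., which changes how constructor arguments are handed over: every pack member is now passed as an lvalue (so movable arguments are copied rather than moved), whereas forwarding preserved the caller's value category. The diff does not record the motivation; one plausible reading is that the same pack is named in many branches and macro expansions, where forwarding is easy to misread as a repeated move. The stand-alone snippet below, using hypothetical names (Holder, make_copying, make_forwarding), illustrates only that semantic difference:

#include <iostream>
#include <string>
#include <utility>

struct Holder
{
    explicit Holder(std::string s) : s_(std::move(s)) {}
    std::string s_;
};

// args... passes every pack member as an lvalue: the caller's string is copied.
template <typename... TArgs>
Holder * make_copying(TArgs &&... args) { return new Holder(args...); }

// std::forward preserves the value category: an rvalue argument is moved from.
template <typename... TArgs>
Holder * make_forwarding(TArgs &&... args) { return new Holder(std::forward<TArgs>(args)...); }

int main()
{
    std::string s = "payload";

    Holder * a = make_copying(std::move(s));    // s still holds "payload"
    Holder * b = make_forwarding(std::move(s)); // s is left valid but unspecified (typically empty)

    std::cout << a->s_ << ' ' << b->s_ << '\n'; // prints: payload payload
    delete a;
    delete b;
    return 0;
}

Within these factories the difference is usually harmless, since exactly one branch constructs an object per call, but it does mean the arguments are copied rather than moved into the chosen instantiation.
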

View File

@ -234,8 +234,7 @@ public:
BetterFloat qr = (sum + l_count + r->count * 0.5) / count;
BetterFloat err2 = qr * (1 - qr);
if (err > err2)
err = err2;
err = std::min(err, err2);
BetterFloat k = count_epsilon_4 * err;
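
The change in this hunk is a readability rewrite: the two-statement form `if (err > err2) err = err2;` becomes `err = std::min(err, err2);`, which yields the same value. Newer clang-tidy readability checks suggest exactly this transformation, though the diff does not say whether that motivated it. A trivial illustration:

#include <algorithm>
#include <cassert>

int main()
{
    double err = 0.25;
    const double err2 = 0.16;

    // Old form: assign only when the current value is larger.
    double a = err;
    if (a > err2)
        a = err2;

    // New form: one expression, same result.
    double b = std::min(err, err2);

    assert(a == b); // both end up as err2 here
    return 0;
}
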

Some files were not shown because too many files have changed in this diff.