Merge remote-tracking branch 'origin/master' into fix_catboost_test

Alexander Sapin 2024-05-15 12:44:44 +02:00
commit 2a6962c78c
1170 changed files with 13652 additions and 4339 deletions


@ -123,7 +123,21 @@ Checks: [
'-readability-uppercase-literal-suffix',
'-readability-use-anyofallof',
'-zircon-*'
'-zircon-*',
# These are new in clang-18, and we have to sort them out:
'-readability-avoid-nested-conditional-operator',
'-modernize-use-designated-initializers',
'-performance-enum-size',
'-readability-redundant-inline-specifier',
'-readability-redundant-member-init',
'-bugprone-crtp-constructor-accessibility',
'-bugprone-suspicious-stringview-data-usage',
'-bugprone-multi-level-implicit-pointer-conversion',
'-cert-err33-c',
# This is a good check, but clang-tidy crashes, see https://github.com/llvm/llvm-project/issues/91872
'-modernize-use-constraints',
]
WarningsAsErrors: '*'
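For illustration, a hypothetical snippet of the pattern the newly disabled `readability-avoid-nested-conditional-operator` check would flag (not taken from the codebase):

```cpp
// Hypothetical example: one conditional operator nested inside another,
// which readability-avoid-nested-conditional-operator reports.
int clamp01(int v)
{
    return v < 0 ? 0 : (v > 1 ? 1 : v); // nested ternary
}
```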


@ -85,4 +85,4 @@ At a minimum, the following information should be added (but add more as needed)
- [ ] <!---batch_2--> 3
- [ ] <!---batch_3--> 4
<details>
</details>


@ -9,6 +9,12 @@ on: # yamllint disable-line rule:truthy
push:
branches:
- 'backport/**'
  # Cancel the previous workflow run in PRs.
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
RunConfig:
runs-on: [self-hosted, style-checker-aarch64]


@ -1,19 +0,0 @@
name: Cancel
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
workflow_run:
workflows: ["PullRequestCI", "ReleaseBranchCI", "DocsCheck", "BackportPR"]
types:
- requested
jobs:
cancel:
runs-on: [self-hosted, style-checker]
steps:
- uses: styfle/cancel-workflow-action@0.9.1
with:
all_but_latest: true
workflow_id: ${{ github.event.workflow.id }}


@ -1,11 +0,0 @@
# The CI for each commit, prints envs and content of GITHUB_EVENT_PATH
name: Debug
'on':
[push, pull_request, pull_request_review, release, workflow_dispatch, workflow_call]
jobs:
DebugInfo:
runs-on: ubuntu-latest
steps:
- uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6

.github/workflows/merge_queue.yml

@ -0,0 +1,96 @@
# yamllint disable rule:comments-indentation
name: MergeQueueCI
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
merge_group:
jobs:
RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Python unit tests
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
echo "Testing the main ci directory"
python3 -m unittest discover -s . -p 'test_*.py'
for dir in *_lambda/; do
echo "Testing $dir"
python3 -m unittest discover -s "$dir" -p 'test_*.py'
done
- name: PrepareRunConfig
id: runconfig
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
echo "::group::CI configuration"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
uses: ./.github/workflows/reusable_docker.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
StyleCheck:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Style check')}}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Style check
runner_type: style-checker
run_command: |
python3 style_check.py
data: ${{ needs.RunConfig.outputs.data }}
secrets:
secret_envs: |
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
FastTest:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Fast test
runner_type: builder
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
python3 fast_test_check.py
################################# Stage Final #################################
#
FinishCheck:
if: ${{ !failure() && !cancelled() }}
needs: [RunConfig, BuildDockers, StyleCheck, FastTest]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
- name: Check sync status
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 sync_pr.py --status
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 finish_check.py ${{ (contains(needs.*.result, 'failure') && github.event_name == 'merge_group') && '--pipeline-failure' || '' }}


@ -10,14 +10,13 @@ env:
workflow_dispatch:
jobs:
Debug:
# The task for having a preserved ENV and event.json for later investigation
uses: ./.github/workflows/debug.yml
RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:


@ -6,7 +6,6 @@ env:
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
merge_group:
pull_request:
types:
- synchronize
@ -15,6 +14,11 @@ on: # yamllint disable-line rule:truthy
branches:
- master
  # Cancel the previous workflow run in PRs.
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
@ -30,7 +34,6 @@ jobs:
fetch-depth: 0 # to get version
filter: tree:0
- name: Labels check
if: ${{ github.event_name != 'merge_group' }}
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 run_check.py
@ -58,7 +61,6 @@ jobs:
echo 'EOF'
} >> "$GITHUB_OUTPUT"
- name: Re-create GH statuses for skipped jobs if any
if: ${{ github.event_name != 'merge_group' }}
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
BuildDockers:
@ -83,7 +85,7 @@ jobs:
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
FastTest:
needs: [RunConfig, BuildDockers]
needs: [RunConfig, BuildDockers, StyleCheck]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}
uses: ./.github/workflows/reusable_test.yml
with:
@ -163,20 +165,16 @@ jobs:
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
- name: Check sync status
if: ${{ github.event_name == 'merge_group' }}
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 sync_pr.py --status
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 finish_check.py ${{ (contains(needs.*.result, 'failure') && github.event_name == 'merge_group') && '--pipeline-failure' || '' }}
- name: Auto merge if approved
if: ${{ github.event_name != 'merge_group' }}
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 merge_pr.py --check-approved
python3 finish_check.py
  # FIXME: merge on approval does not work with MQ. Could be fixed by using the default GH automerge after some corrections in the Mergeable Check status
# - name: Auto merge if approved
# if: ${{ github.event_name != 'merge_group' }}
# run: |
# cd "$GITHUB_WORKSPACE/tests/ci"
# python3 merge_pr.py --check-approved
#############################################################################################


@ -1,23 +0,0 @@
name: PullRequestApprovedCI
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
pull_request_review:
types:
- submitted
jobs:
MergeOnApproval:
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Merge approved PR
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 merge_pr.py --check-approved

.gitmodules

@ -6,7 +6,7 @@
url = https://github.com/facebook/zstd
[submodule "contrib/lz4"]
path = contrib/lz4
url = https://github.com/ClickHouse/lz4
url = https://github.com/lz4/lz4
[submodule "contrib/librdkafka"]
path = contrib/librdkafka
url = https://github.com/ClickHouse/librdkafka


@ -135,23 +135,21 @@ endif ()
include (cmake/check_flags.cmake)
include (cmake/add_warning.cmake)
if (COMPILER_CLANG)
# generate ranges for fast "addr2line" search
if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
# NOTE: clang has a bug: it does not emit .debug_aranges
# with ThinLTO, so a custom ld.lld wrapper is shipped in docker images.
set(COMPILER_FLAGS "${COMPILER_FLAGS} -gdwarf-aranges")
endif ()
# See https://blog.llvm.org/posts/2021-04-05-constructor-homing-for-debug-info/
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Xclang -fuse-ctor-homing")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Xclang -fuse-ctor-homing")
endif()
no_warning(enum-constexpr-conversion) # breaks Protobuf in clang-16
# generate ranges for fast "addr2line" search
if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
# NOTE: clang has a bug: it does not emit .debug_aranges
# with ThinLTO, so a custom ld.lld wrapper is shipped in docker images.
set(COMPILER_FLAGS "${COMPILER_FLAGS} -gdwarf-aranges")
endif ()
# See https://blog.llvm.org/posts/2021-04-05-constructor-homing-for-debug-info/
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Xclang -fuse-ctor-homing")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Xclang -fuse-ctor-homing")
endif()
no_warning(enum-constexpr-conversion) # breaks Protobuf in clang-16
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)
option(ENABLE_BENCHMARKS "Build all benchmark programs in 'benchmarks' subdirectories" OFF)
@ -284,16 +282,12 @@ endif ()
option (ENABLE_BUILD_PROFILING "Enable profiling of build time" OFF)
if (ENABLE_BUILD_PROFILING)
if (COMPILER_CLANG)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ftime-trace")
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ftime-trace")
if (LINKER_NAME MATCHES "lld")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--time-trace")
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,--time-trace")
endif ()
else ()
message (${RECONFIGURE_MESSAGE_LEVEL} "Build profiling is only available with CLang")
endif ()
if (LINKER_NAME MATCHES "lld")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--time-trace")
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,--time-trace")
endif ()
endif ()
set (CMAKE_CXX_STANDARD 23)
@ -304,22 +298,20 @@ set (CMAKE_C_STANDARD 11)
set (CMAKE_C_EXTENSIONS ON) # required by most contribs written in C
set (CMAKE_C_STANDARD_REQUIRED ON)
if (COMPILER_CLANG)
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
# See https://reviews.llvm.org/D112921
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
# See https://reviews.llvm.org/D112921
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
# falign-functions=32 prevents random performance regressions when the code changes, thus providing more stable
# benchmarks.
set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
# falign-functions=32 prevents random performance regressions when the code changes, thus providing more stable
# benchmarks.
set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
if (ARCH_AMD64)
# align branches within a 32-Byte boundary to avoid the potential performance loss when the code layout changes,
# which makes benchmark results more stable.
set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
endif()
endif ()
if (ARCH_AMD64)
# align branches within a 32-Byte boundary to avoid the potential performance loss when the code layout changes,
# which makes benchmark results more stable.
set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
endif()
# Disable floating-point expression contraction in order to get consistent floating point calculation results across platforms
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffp-contract=off")
@ -348,39 +340,34 @@ set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} $
set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
if (COMPILER_CLANG)
if (OS_DARWIN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-U,_inside_main")
endif()
if (OS_DARWIN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-U,_inside_main")
endif()
# Display absolute paths in error messages. Otherwise KDevelop fails to navigate to the correct file and opens a new file instead.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths")
# Display absolute paths in error messages. Otherwise KDevelop fails to navigate to the correct file and opens a new file instead.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths")
if (NOT ENABLE_TESTS AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND OS_LINUX)
# https://clang.llvm.org/docs/ThinLTO.html
# Applies to clang and linux only.
# Disabled when building with tests or sanitizers.
option(ENABLE_THINLTO "Clang-specific link time optimization" ON)
endif()
if (NOT ENABLE_TESTS AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND OS_LINUX)
# https://clang.llvm.org/docs/ThinLTO.html
# Applies to clang and linux only.
# Disabled when building with tests or sanitizers.
option(ENABLE_THINLTO "Clang-specific link time optimization" ON)
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstrict-vtable-pointers")
# We cannot afford to use LTO when compiling unit tests, and it's not enough
# to only supply -fno-lto at the final linking stage. So we disable it
# completely.
if (ENABLE_THINLTO AND NOT ENABLE_TESTS AND NOT SANITIZE)
# Link time optimization
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
elseif (ENABLE_THINLTO)
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot enable ThinLTO")
endif ()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstrict-vtable-pointers")
# We cannot afford to use LTO when compiling unit tests, and it's not enough
# to only supply -fno-lto at the final linking stage. So we disable it
# completely.
if (ENABLE_THINLTO AND NOT ENABLE_TESTS AND NOT SANITIZE)
# Link time optimization
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
elseif (ENABLE_THINLTO)
message (${RECONFIGURE_MESSAGE_LEVEL} "ThinLTO is only available with Clang")
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot enable ThinLTO")
endif ()
# Turns on all external libs like s3, kafka, ODBC, ...


@ -34,19 +34,25 @@ curl https://clickhouse.com/ | sh
Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.
* [v24.3 Community Call](https://clickhouse.com/company/events/v24-3-community-release-call) - Mar 26
* [v24.4 Community Call](https://clickhouse.com/company/events/v24-4-community-release-call) - Apr 30
* [v24.5 Community Call](https://clickhouse.com/company/events/v24-5-community-release-call) - May 30
## Upcoming Events
Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.
* [ClickHouse Meetup in Dubai](https://www.meetup.com/clickhouse-dubai-meetup-group/events/299629189/) - May 28
* [ClickHouse Happy Hour @ Tom's Watch Bar - Los Angeles](https://www.meetup.com/clickhouse-los-angeles-user-group/events/300740584/) - May 22
* [ClickHouse & Confluent Meetup in Dubai](https://www.meetup.com/clickhouse-dubai-meetup-group/events/299629189/) - May 28
* [ClickHouse Meetup in Stockholm](https://www.meetup.com/clickhouse-stockholm-user-group/events/299752651/) - Jun 3
* [ClickHouse Meetup @ Cloudflare - San Francisco](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/300523061/) - Jun 4
* [ClickHouse (クリックハウス) Meetup Tokyo](https://www.meetup.com/clickhouse-tokyo-user-group/events/300798053/) - Jun 5
* [ClickHouse Meetup in Amsterdam](https://www.meetup.com/clickhouse-netherlands-user-group/events/300781068/) - Jun 27
* [ClickHouse Meetup in Paris](https://www.meetup.com/clickhouse-france-user-group/events/300783448/) - Jul 9
* [ClickHouse Meetup @ Ramp - New York City](https://www.meetup.com/clickhouse-new-york-user-group/events/300595845/) - Jul 9
* [ClickHouse Meetup @ Klaviyo - Boston](https://www.meetup.com/clickhouse-boston-user-group/events/300907870) - Jul 11
## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
* **Recording available**: [**v24.2 Release Call**](https://www.youtube.com/watch?v=iN2y-TK8f3A) All the features of 24.2, one convenient video! Watch it now!
* **Recording available**: [**v24.4 Release Call**](https://www.youtube.com/watch?v=dtUqgcfOGmE) All the features of 24.4, one convenient video! Watch it now!
## Interested in joining ClickHouse and making it your full-time job?


@ -51,11 +51,9 @@ struct DecomposedFloat
/// Returns 0 for both +0. and -0.
int sign() const
{
return (exponent() == 0 && mantissa() == 0)
? 0
: (isNegative()
? -1
: 1);
if (exponent() == 0 && mantissa() == 0)
return 0;
return isNegative() ? -1 : 1;
}
uint16_t exponent() const
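A standalone sanity sketch of the contract documented above (`0` for both `+0.` and `-0.`). `float_sign` is a hypothetical helper using `std::signbit` for illustration only, not part of `DecomposedFloat`:

```cpp
#include <cassert>
#include <cmath>

// Mirrors the documented contract for ordinary values: zero of either sign
// maps to 0, otherwise the sign bit decides.
int float_sign(double x)
{
    if (x == 0.0)                     // true for both +0.0 and -0.0
        return 0;
    return std::signbit(x) ? -1 : 1;
}

int main()
{
    assert(float_sign(+0.0) == 0);
    assert(float_sign(-0.0) == 0);
    assert(float_sign(-3.5) == -1);
    assert(float_sign(2.0) == 1);
}
```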


@ -11,7 +11,7 @@ namespace detail
template <is_enum E, class F, size_t ...I>
constexpr void static_for(F && f, std::index_sequence<I...>)
{
(std::forward<F>(f)(std::integral_constant<E, magic_enum::enum_value<E>(I)>()) , ...);
(f(std::integral_constant<E, magic_enum::enum_value<E>(I)>()) , ...);
}
}
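A plausible reading of dropping `std::forward` here: the comma fold calls `f` once per enum value, and forwarding an rvalue `F` would move from it on every iteration. A minimal sketch of the same shape, with a hypothetical `Color` enum standing in for `magic_enum`:

```cpp
#include <cstddef>
#include <type_traits>
#include <utility>

enum class Color { Red, Green, Blue };

// The comma fold invokes `f` for every index, so `f` is called as an lvalue
// rather than being forwarded N times.
template <class F, std::size_t... I>
constexpr void for_each_color(F && f, std::index_sequence<I...>)
{
    (f(std::integral_constant<Color, static_cast<Color>(I)>{}), ...);
}

// Usage:
//   for_each_color([](auto c) { /* decltype(c)::value is a Color */ },
//                  std::make_index_sequence<3>{});
```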


@ -651,7 +651,9 @@ std::string_view JSON::getRawString() const
Pos s = ptr_begin;
if (*s != '"')
throw JSONException(std::string("JSON: expected \", got ") + *s);
while (++s != ptr_end && *s != '"');
++s;
while (s != ptr_end && *s != '"')
++s;
if (s != ptr_end)
return std::string_view(ptr_begin + 1, s - ptr_begin - 1);
throw JSONException("JSON: incorrect syntax (expected end of string, found end of JSON).");
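A standalone sketch of the scan above (`find_closing_quote` is a hypothetical helper): the explicit loop body is behavior-equivalent to the removed null-statement `while (++s != ptr_end && *s != '"');`, just harder to misread:

```cpp
// Advance past the opening quote, then walk to the closing quote.
// Returns `end` when the string is unterminated.
const char * find_closing_quote(const char * s, const char * end)
{
    ++s;                          // skip the opening '"'
    while (s != end && *s != '"')
        ++s;
    return s;
}
```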


@ -74,7 +74,7 @@ public:
const char * data() const { return ptr_begin; }
const char * dataEnd() const { return ptr_end; }
enum ElementType
enum ElementType : uint8_t
{
TYPE_OBJECT,
TYPE_ARRAY,
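Pinning a one-byte underlying type is what the new `performance-enum-size` check encourages, since it shrinks structs that embed the enum. A minimal sketch of the effect:

```cpp
#include <cstdint>

enum class Unsized { A, B };               // scoped enums default to int
enum class Sized : std::uint8_t { A, B };  // underlying type pinned to one byte

static_assert(sizeof(Unsized) == sizeof(int));
static_assert(sizeof(Sized) == 1);
```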


@ -27,7 +27,7 @@ namespace TypeListUtils /// In some contexts it's more handy to use functions in
constexpr Root<Args...> changeRoot(TypeList<Args...>) { return {}; }
template <typename F, typename ...Args>
constexpr void forEach(TypeList<Args...>, F && f) { (std::forward<F>(f)(TypeList<Args>{}), ...); }
constexpr void forEach(TypeList<Args...>, F && f) { (f(TypeList<Args>{}), ...); }
}
template <typename TypeListLeft, typename TypeListRight>


@ -21,7 +21,7 @@ bool func_wrapper(Func && func, Arg && arg)
template <typename T, T Begin, typename Func, T... Is>
constexpr bool static_for_impl(Func && f, std::integer_sequence<T, Is...>)
{
return (func_wrapper(std::forward<Func>(f), std::integral_constant<T, Begin + Is>{}) || ...);
return (func_wrapper(f, std::integral_constant<T, Begin + Is>{}) || ...);
}
template <auto Begin, decltype(Begin) End, typename Func>
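A compile-time sketch of how the `||` fold above behaves: evaluation short-circuits at the first handler that returns true, so later indices are never visited:

```cpp
#include <type_traits>
#include <utility>

// Short-circuiting fold: stops invoking the handler once it returns true.
template <typename T, T Begin, typename Func, T... Is>
constexpr bool static_for_demo(Func && f, std::integer_sequence<T, Is...>)
{
    return (f(std::integral_constant<T, Begin + Is>{}) || ...);
}

// Fires at index 2; indices 3 and 4 are never evaluated.
static_assert(static_for_demo<int, 0>(
    [](auto ic) { return ic.value == 2; },
    std::make_integer_sequence<int, 5>{}));
```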


@ -147,7 +147,7 @@ constexpr uint16_t maybe_negate(uint16_t x)
return ~x;
}
enum class ReturnMode
enum class ReturnMode : uint8_t
{
End,
Nullptr,


@ -77,8 +77,7 @@ uint64_t getMemoryAmountOrZero()
{
uint64_t limit_v1;
if (limit_file_v1 >> limit_v1)
if (limit_v1 < memory_amount)
memory_amount = limit_v1;
memory_amount = std::min(memory_amount, limit_v1);
}
}


@ -146,7 +146,7 @@ namespace impl
TUInt res;
if constexpr (sizeof(TUInt) == 1)
{
res = static_cast<UInt8>(unhexDigit(data[0])) * 0x10 + static_cast<UInt8>(unhexDigit(data[1]));
res = unhexDigit(data[0]) * 0x10 + unhexDigit(data[1]);
}
else if constexpr (sizeof(TUInt) == 2)
{
@ -176,17 +176,19 @@ namespace impl
};
/// Helper template class to convert a value of any supported type to hexadecimal representation and back.
template <typename T, typename SFINAE = void>
template <typename T>
struct HexConversion;
template <typename TUInt>
struct HexConversion<TUInt, std::enable_if_t<std::is_integral_v<TUInt>>> : public HexConversionUInt<TUInt> {};
requires(std::is_integral_v<TUInt>)
struct HexConversion<TUInt> : public HexConversionUInt<TUInt> {};
template <size_t Bits, typename Signed>
struct HexConversion<wide::integer<Bits, Signed>> : public HexConversionUInt<wide::integer<Bits, Signed>> {};
template <typename CityHashUInt128> /// Partial specialization here allows not to include <city.h> in this header.
struct HexConversion<CityHashUInt128, std::enable_if_t<std::is_same_v<CityHashUInt128, typename CityHash_v1_0_2::uint128>>>
requires(std::is_same_v<CityHashUInt128, typename CityHash_v1_0_2::uint128>)
struct HexConversion<CityHashUInt128>
{
static const constexpr size_t num_hex_digits = 32;


@ -20,24 +20,26 @@ Out & dumpValue(Out &, T &&);
/// Catch-all case.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == -1, Out> & dumpImpl(Out & out, T &&) // NOLINT(cppcoreguidelines-missing-std-forward)
requires(priority == -1)
Out & dumpImpl(Out & out, T &&) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << "{...}";
}
/// An object, that could be output with operator <<.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 0, Out> & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::declval<Out &>() << std::declval<T>())> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
requires(priority == 0)
Out & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::declval<Out &>() << std::declval<T>())> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << x;
}
/// A pointer-like object.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 1
requires(priority == 1
/// Protect from the case when operator * does effectively nothing (function pointer).
&& !std::is_same_v<std::decay_t<T>, std::decay_t<decltype(*std::declval<T>())>>
, Out> & dumpImpl(Out & out, T && x, std::decay_t<decltype(*std::declval<T>())> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
&& !std::is_same_v<std::decay_t<T>, std::decay_t<decltype(*std::declval<T>())>>)
Out & dumpImpl(Out & out, T && x, std::decay_t<decltype(*std::declval<T>())> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
if (!x)
return out << "nullptr";
@ -46,7 +48,8 @@ std::enable_if_t<priority == 1
/// Container.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 2, Out> & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::begin(std::declval<T>()))> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
requires(priority == 2)
Out & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::begin(std::declval<T>()))> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
bool first = true;
out << "{";
@ -63,8 +66,8 @@ std::enable_if_t<priority == 2, Out> & dumpImpl(Out & out, T && x, std::decay_t<
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 3 && std::is_enum_v<std::decay_t<T>>, Out> &
dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
requires(priority == 3 && std::is_enum_v<std::decay_t<T>>)
Out & dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << magic_enum::enum_name(x);
}
@ -72,8 +75,8 @@ dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
/// string and const char * - output not as container or pointer.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 3 && (std::is_same_v<std::decay_t<T>, std::string> || std::is_same_v<std::decay_t<T>, const char *>), Out> &
dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
requires(priority == 3 && (std::is_same_v<std::decay_t<T>, std::string> || std::is_same_v<std::decay_t<T>, const char *>))
Out & dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << std::quoted(x);
}
@ -81,8 +84,8 @@ dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
/// UInt8 - output as number, not char.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 3 && std::is_same_v<std::decay_t<T>, unsigned char>, Out> &
dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
requires(priority == 3 && std::is_same_v<std::decay_t<T>, unsigned char>)
Out & dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << int(x);
}
@ -108,7 +111,8 @@ Out & dumpTupleImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-f
}
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 4, Out> & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::get<0>(std::declval<T>()))> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
requires(priority == 4)
Out & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::get<0>(std::declval<T>()))> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return dumpTupleImpl<0>(out, x);
}
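A loose, self-contained sketch of the priority-dispatch idiom this header relies on (not its exact machinery): overloads are ranked by an integer priority, constrained with `requires`, and probed from the highest priority down until one is viable:

```cpp
#include <iostream>
#include <type_traits>
#include <utility>

template <int priority, typename T>
requires(priority == 0)
void describe(T &&) { std::cout << "fallback\n"; }

template <int priority, typename T>
requires(priority == 1 && std::is_integral_v<std::decay_t<T>>)
void describe(T &&) { std::cout << "integral\n"; }

// Try the given priority; if no overload is viable, fall through to the next.
template <int priority, typename T>
void probe(T && x)
{
    if constexpr (requires { describe<priority>(std::forward<T>(x)); })
        describe<priority>(std::forward<T>(x));
    else
        probe<priority - 1>(std::forward<T>(x));
}

int main()
{
    probe<1>(42);    // prints "integral"
    probe<1>("hi");  // prints "fallback"
}
```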


@ -250,14 +250,16 @@ ALWAYS_INLINE inline char * uitoa<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize
//===----------------------------------------------------------===//
// itoa: handle unsigned integral operands (selected by SFINAE)
template <typename U, std::enable_if_t<!std::is_signed_v<U> && std::is_integral_v<U>> * = nullptr>
template <typename U>
requires(!std::is_signed_v<U> && std::is_integral_v<U>)
ALWAYS_INLINE inline char * itoa(U u, char * p)
{
return convert::uitoa(p, u);
}
// itoa: handle signed integral operands (selected by SFINAE)
template <typename I, size_t N = sizeof(I), std::enable_if_t<std::is_signed_v<I> && std::is_integral_v<I>> * = nullptr>
template <typename I, size_t N = sizeof(I)>
requires(std::is_signed_v<I> && std::is_integral_v<I>)
ALWAYS_INLINE inline char * itoa(I i, char * p)
{
// Need "mask" to be filled with a copy of the sign bit.


@ -19,8 +19,8 @@ auto map(const Collection<Params...> & collection, Mapper && mapper)
using value_type = unqualified_t<decltype(mapper(*std::begin(collection)))>;
return Collection<value_type>(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
boost::make_transform_iterator(std::begin(collection), mapper),
boost::make_transform_iterator(std::end(collection), mapper));
}
/** \brief Returns collection of specified container-type,
@ -33,8 +33,8 @@ auto map(const Collection & collection, Mapper && mapper)
using value_type = unqualified_t<decltype(mapper(*std::begin(collection)))>;
return ResultCollection<value_type>(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
boost::make_transform_iterator(std::begin(collection), mapper),
boost::make_transform_iterator(std::end(collection), mapper));
}
/** \brief Returns collection of specified type,
@ -45,8 +45,8 @@ template <typename ResultCollection, typename Collection, typename Mapper>
auto map(const Collection & collection, Mapper && mapper)
{
return ResultCollection(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
boost::make_transform_iterator(std::begin(collection), mapper),
boost::make_transform_iterator(std::end(collection), mapper));
}
}


@ -23,12 +23,10 @@ namespace internal
/// For loop adaptor which is used to iterate through a half-closed interval [begin, end).
/// The parameters `begin` and `end` can have any integral or enum types.
template <typename BeginType,
typename EndType,
typename = std::enable_if_t<
(std::is_integral_v<BeginType> || std::is_enum_v<BeginType>) &&
(std::is_integral_v<EndType> || std::is_enum_v<EndType>) &&
(!std::is_enum_v<BeginType> || !std::is_enum_v<EndType> || std::is_same_v<BeginType, EndType>), void>>
template <typename BeginType, typename EndType>
requires((std::is_integral_v<BeginType> || std::is_enum_v<BeginType>) &&
(std::is_integral_v<EndType> || std::is_enum_v<EndType>) &&
(!std::is_enum_v<BeginType> || !std::is_enum_v<EndType> || std::is_same_v<BeginType, EndType>))
inline auto range(BeginType begin, EndType end)
{
if constexpr (std::is_integral_v<BeginType> && std::is_integral_v<EndType>)
@ -50,8 +48,8 @@ inline auto range(BeginType begin, EndType end)
/// For loop adaptor which is used to iterate through a half-closed interval [0, end).
/// The parameter `end` can have any integral or enum type.
/// The same as range(0, end).
template <typename Type,
typename = std::enable_if_t<std::is_integral_v<Type> || std::is_enum_v<Type>, void>>
template <typename Type>
requires(std::is_integral_v<Type> || std::is_enum_v<Type>)
inline auto range(Type end)
{
if constexpr (std::is_integral_v<Type>)


@ -2,6 +2,7 @@
#include <ctime>
#include <cerrno>
#include <system_error>
#if defined(OS_DARWIN)
#include <mach/mach.h>
@ -34,7 +35,8 @@ void sleepForNanoseconds(uint64_t nanoseconds)
constexpr auto clock_type = CLOCK_MONOTONIC;
struct timespec current_time;
clock_gettime(clock_type, &current_time);
if (0 != clock_gettime(clock_type, &current_time))
throw std::system_error(std::error_code(errno, std::system_category()));
constexpr uint64_t resolution = 1'000'000'000;
struct timespec finish_time = current_time;


@ -111,7 +111,8 @@ public:
constexpr explicit operator bool() const noexcept;
template <typename T, typename = std::enable_if_t<std::is_arithmetic_v<T>, T>>
template <typename T>
requires(std::is_arithmetic_v<T>)
constexpr operator T() const noexcept;
constexpr operator long double() const noexcept;
@ -208,12 +209,14 @@ constexpr integer<Bits, Signed> operator<<(const integer<Bits, Signed> & lhs, in
template <size_t Bits, typename Signed>
constexpr integer<Bits, Signed> operator>>(const integer<Bits, Signed> & lhs, int n) noexcept;
template <size_t Bits, typename Signed, typename Int, typename = std::enable_if_t<!std::is_same_v<Int, int>>>
template <size_t Bits, typename Signed, typename Int>
requires(!std::is_same_v<Int, int>)
constexpr integer<Bits, Signed> operator<<(const integer<Bits, Signed> & lhs, Int n) noexcept
{
return lhs << int(n);
}
template <size_t Bits, typename Signed, typename Int, typename = std::enable_if_t<!std::is_same_v<Int, int>>>
template <size_t Bits, typename Signed, typename Int>
requires(!std::is_same_v<Int, int>)
constexpr integer<Bits, Signed> operator>>(const integer<Bits, Signed> & lhs, Int n) noexcept
{
return lhs >> int(n);
@ -262,4 +265,3 @@ struct hash<wide::integer<Bits, Signed>>;
// NOLINTEND(*)
#include "wide_integer_impl.h"


@ -1246,7 +1246,8 @@ constexpr integer<Bits, Signed>::operator bool() const noexcept
}
template <size_t Bits, typename Signed>
template <class T, class>
template <class T>
requires(std::is_arithmetic_v<T>)
constexpr integer<Bits, Signed>::operator T() const noexcept
{
static_assert(std::numeric_limits<T>::is_integer);
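The same SFINAE-to-`requires` migration applied to a conversion operator, sketched with a hypothetical `Number` type: the implicit conversion participates in overload resolution only for arithmetic targets, without a dummy `enable_if` template parameter:

```cpp
#include <type_traits>

struct Number
{
    long long v = 0;

    template <typename T>
    requires(std::is_arithmetic_v<T>)
    constexpr operator T() const noexcept { return static_cast<T>(v); }
};

static_assert(static_cast<int>(Number{42}) == 42);
static_assert(static_cast<double>(Number{7}) == 7.0);
```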


@ -5,14 +5,14 @@ if (ENABLE_CLANG_TIDY)
find_program (CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
if (CLANG_TIDY_CACHE_PATH)
find_program (_CLANG_TIDY_PATH NAMES "clang-tidy-17" "clang-tidy-16" "clang-tidy")
find_program (_CLANG_TIDY_PATH NAMES "clang-tidy-18" "clang-tidy-17" "clang-tidy-16" "clang-tidy")
# Why do we use ';' here?
# It's CMake black magic: https://cmake.org/cmake/help/latest/prop_tgt/LANG_CLANG_TIDY.html#prop_tgt:%3CLANG%3E_CLANG_TIDY
# The CLANG_TIDY_PATH is passed to CMAKE_CXX_CLANG_TIDY, which follows CXX_CLANG_TIDY syntax.
set (CLANG_TIDY_PATH "${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_PATH}" CACHE STRING "A combined command to run clang-tidy with caching wrapper")
else ()
find_program (CLANG_TIDY_PATH NAMES "clang-tidy-17" "clang-tidy-16" "clang-tidy")
find_program (CLANG_TIDY_PATH NAMES "clang-tidy-18" "clang-tidy-17" "clang-tidy-16" "clang-tidy")
endif ()
if (CLANG_TIDY_PATH)


@ -5,21 +5,24 @@ set (DEFAULT_LIBS "-nodefaultlibs")
# We need builtins from Clang's RT even without libcxx - for ubsan+int128.
# See https://bugs.llvm.org/show_bug.cgi?id=16404
if (COMPILER_CLANG)
execute_process (COMMAND ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process (COMMAND ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
if (NOT EXISTS "${BUILTINS_LIBRARY}")
set (BUILTINS_LIBRARY "-lgcc")
endif ()
# Apparently, in clang-19, the UBSan support library for C++ was moved out into ubsan_standalone_cxx.a, so we have to include both.
if (SANITIZE STREQUAL undefined)
string(REPLACE "builtins.a" "ubsan_standalone_cxx.a" EXTRA_BUILTINS_LIBRARY "${BUILTINS_LIBRARY}")
endif ()
if (NOT EXISTS "${BUILTINS_LIBRARY}")
set (BUILTINS_LIBRARY "-lgcc")
endif ()
if (OS_ANDROID)
# pthread and rt are included in libc
set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -ldl")
set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${EXTRA_BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -ldl")
elseif (USE_MUSL)
set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -static -lc")
set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${EXTRA_BUILTINS_LIBRARY} ${COVERAGE_OPTION} -static -lc")
else ()
set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread -ldl")
set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${EXTRA_BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread -ldl")
endif ()
message(STATUS "Default libraries: ${DEFAULT_LIBS}")


@ -26,9 +26,7 @@ if (SANITIZE)
elseif (SANITIZE STREQUAL "thread")
set (TSAN_FLAGS "-fsanitize=thread")
if (COMPILER_CLANG)
set (TSAN_FLAGS "${TSAN_FLAGS} -fsanitize-ignorelist=${PROJECT_SOURCE_DIR}/tests/tsan_ignorelist.txt")
endif()
set (TSAN_FLAGS "${TSAN_FLAGS} -fsanitize-ignorelist=${PROJECT_SOURCE_DIR}/tests/tsan_ignorelist.txt")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${TSAN_FLAGS}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${TSAN_FLAGS}")
@ -44,9 +42,7 @@ if (SANITIZE)
# that's why we often receive reports about UIO. The simplest way to avoid this is just set this flag here.
set(UBSAN_FLAGS "${UBSAN_FLAGS} -fno-sanitize=unsigned-integer-overflow")
endif()
if (COMPILER_CLANG)
set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-ignorelist=${PROJECT_SOURCE_DIR}/tests/ubsan_ignorelist.txt")
endif()
set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-ignorelist=${PROJECT_SOURCE_DIR}/tests/ubsan_ignorelist.txt")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${UBSAN_FLAGS}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${UBSAN_FLAGS}")


@ -1,10 +1,6 @@
# Compiler
if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
set (COMPILER_CLANG 1) # Safe to treat AppleClang as a regular Clang, in general.
elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
set (COMPILER_CLANG 1)
else ()
if (NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang")
message (FATAL_ERROR "Compiler ${CMAKE_CXX_COMPILER_ID} is not supported")
endif ()
@ -13,34 +9,30 @@ execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version OUTPUT_VARIABLE COMPILER
message (STATUS "Using compiler:\n${COMPILER_SELF_IDENTIFICATION}")
# Require minimum compiler versions
set (CLANG_MINIMUM_VERSION 16)
set (CLANG_MINIMUM_VERSION 17)
set (XCODE_MINIMUM_VERSION 12.0)
set (APPLE_CLANG_MINIMUM_VERSION 12.0.0)
if (COMPILER_CLANG)
if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
# (Experimental!) Specify "-DALLOW_APPLECLANG=ON" when running CMake configuration step, if you want to experiment with using it.
if (NOT ALLOW_APPLECLANG AND NOT DEFINED ENV{ALLOW_APPLECLANG})
message (FATAL_ERROR "Compilation with AppleClang is unsupported. Please use vanilla Clang, e.g. from Homebrew.")
endif ()
if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
# (Experimental!) Specify "-DALLOW_APPLECLANG=ON" when running CMake configuration step, if you want to experiment with using it.
if (NOT ALLOW_APPLECLANG AND NOT DEFINED ENV{ALLOW_APPLECLANG})
message (FATAL_ERROR "Compilation with AppleClang is unsupported. Please use vanilla Clang, e.g. from Homebrew.")
endif ()
# For a mapping between XCode / AppleClang / vanilla Clang versions, see https://en.wikipedia.org/wiki/Xcode
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${APPLE_CLANG_MINIMUM_VERSION})
message (FATAL_ERROR "Compilation with AppleClang version ${CMAKE_CXX_COMPILER_VERSION} is unsupported, the minimum required version is ${APPLE_CLANG_MINIMUM_VERSION} (Xcode ${XCODE_MINIMUM_VERSION}).")
endif ()
else ()
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${CLANG_MINIMUM_VERSION})
message (FATAL_ERROR "Compilation with Clang version ${CMAKE_CXX_COMPILER_VERSION} is unsupported, the minimum required version is ${CLANG_MINIMUM_VERSION}.")
endif ()
# For a mapping between XCode / AppleClang / vanilla Clang versions, see https://en.wikipedia.org/wiki/Xcode
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${APPLE_CLANG_MINIMUM_VERSION})
message (FATAL_ERROR "Compilation with AppleClang version ${CMAKE_CXX_COMPILER_VERSION} is unsupported, the minimum required version is ${APPLE_CLANG_MINIMUM_VERSION} (Xcode ${XCODE_MINIMUM_VERSION}).")
endif ()
else ()
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${CLANG_MINIMUM_VERSION})
message (FATAL_ERROR "Compilation with Clang version ${CMAKE_CXX_COMPILER_VERSION} is unsupported, the minimum required version is ${CLANG_MINIMUM_VERSION}.")
endif ()
endif ()
# Linker
string (REGEX MATCHALL "[0-9]+" COMPILER_VERSION_LIST ${CMAKE_CXX_COMPILER_VERSION})
list (GET COMPILER_VERSION_LIST 0 COMPILER_VERSION_MAJOR)
# Example values: `lld-10`
# Linker
option (LINKER_NAME "Linker name or full path")
if (LINKER_NAME MATCHES "gold")
@ -48,19 +40,15 @@ if (LINKER_NAME MATCHES "gold")
endif ()
if (NOT LINKER_NAME)
if (COMPILER_CLANG)
if (OS_LINUX AND NOT ARCH_S390X)
find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "ld.lld")
elseif (OS_DARWIN)
find_program (LLD_PATH NAMES "ld")
endif ()
if (OS_LINUX AND NOT ARCH_S390X)
find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "ld.lld")
elseif (OS_DARWIN)
find_program (LLD_PATH NAMES "ld")
endif ()
if (LLD_PATH)
if (OS_LINUX OR OS_DARWIN)
if (COMPILER_CLANG)
# Clang driver simply allows full linker path.
set (LINKER_NAME ${LLD_PATH})
endif ()
# Clang driver simply allows full linker path.
set (LINKER_NAME ${LLD_PATH})
endif ()
endif()
endif()
@ -82,47 +70,28 @@ else ()
endif ()
# Archiver
if (COMPILER_CLANG)
find_program (LLVM_AR_PATH NAMES "llvm-ar-${COMPILER_VERSION_MAJOR}" "llvm-ar")
endif ()
find_program (LLVM_AR_PATH NAMES "llvm-ar-${COMPILER_VERSION_MAJOR}" "llvm-ar")
if (LLVM_AR_PATH)
set (CMAKE_AR "${LLVM_AR_PATH}")
endif ()
message(STATUS "Using archiver: ${CMAKE_AR}")
# Ranlib
if (COMPILER_CLANG)
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib-${COMPILER_VERSION_MAJOR}" "llvm-ranlib")
endif ()
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib-${COMPILER_VERSION_MAJOR}" "llvm-ranlib")
if (LLVM_RANLIB_PATH)
set (CMAKE_RANLIB "${LLVM_RANLIB_PATH}")
endif ()
message(STATUS "Using ranlib: ${CMAKE_RANLIB}")
# Install Name Tool
if (COMPILER_CLANG)
find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool-${COMPILER_VERSION_MAJOR}" "llvm-install-name-tool")
endif ()
find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool-${COMPILER_VERSION_MAJOR}" "llvm-install-name-tool")
if (LLVM_INSTALL_NAME_TOOL_PATH)
set (CMAKE_INSTALL_NAME_TOOL "${LLVM_INSTALL_NAME_TOOL_PATH}")
endif ()
message(STATUS "Using install-name-tool: ${CMAKE_INSTALL_NAME_TOOL}")
# Objcopy
if (COMPILER_CLANG)
find_program (OBJCOPY_PATH NAMES "llvm-objcopy-${COMPILER_VERSION_MAJOR}" "llvm-objcopy" "objcopy")
endif ()
find_program (OBJCOPY_PATH NAMES "llvm-objcopy-${COMPILER_VERSION_MAJOR}" "llvm-objcopy" "objcopy")
if (OBJCOPY_PATH)
message (STATUS "Using objcopy: ${OBJCOPY_PATH}")
else ()
@ -130,11 +99,7 @@ else ()
endif ()
# Strip
if (COMPILER_CLANG)
find_program (STRIP_PATH NAMES "llvm-strip-${COMPILER_VERSION_MAJOR}" "llvm-strip" "strip")
endif ()
find_program (STRIP_PATH NAMES "llvm-strip-${COMPILER_VERSION_MAJOR}" "llvm-strip" "strip")
if (STRIP_PATH)
message (STATUS "Using strip: ${STRIP_PATH}")
else ()


@ -15,37 +15,35 @@ if ((NOT CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") AND (NOT SANITIZE) AND (NOT CMAKE
add_warning(frame-larger-than=65536)
endif ()
if (COMPILER_CLANG)
# Add some warnings that are not available even with -Wall -Wextra -Wpedantic.
# We want to get everything out of the compiler for code quality.
add_warning(everything)
add_warning(pedantic)
no_warning(zero-length-array)
no_warning(c++98-compat-pedantic)
no_warning(c++98-compat)
no_warning(c++20-compat) # Use constinit in C++20 without warnings
no_warning(sign-conversion)
no_warning(implicit-int-conversion)
no_warning(implicit-int-float-conversion)
no_warning(ctad-maybe-unsupported) # clang 9+, linux-only
no_warning(disabled-macro-expansion)
no_warning(documentation-unknown-command)
no_warning(double-promotion)
no_warning(exit-time-destructors)
no_warning(float-equal)
no_warning(global-constructors)
no_warning(missing-prototypes)
no_warning(missing-variable-declarations)
no_warning(padded)
no_warning(switch-enum)
no_warning(undefined-func-template)
no_warning(unused-template)
no_warning(vla)
no_warning(weak-template-vtables)
no_warning(weak-vtables)
no_warning(thread-safety-negative) # experimental flag, too many false positives
no_warning(enum-constexpr-conversion) # breaks magic-enum library in clang-16
no_warning(unsafe-buffer-usage) # too aggressive
no_warning(switch-default) # conflicts with "defaults in a switch covering all enum values"
# TODO Enable conversion, sign-conversion, double-promotion warnings.
endif ()
# Add some warnings that are not available even with -Wall -Wextra -Wpedantic.
# We want to get everything out of the compiler for code quality.
add_warning(everything)
add_warning(pedantic)
no_warning(zero-length-array)
no_warning(c++98-compat-pedantic)
no_warning(c++98-compat)
no_warning(c++20-compat) # Use constinit in C++20 without warnings
no_warning(sign-conversion)
no_warning(implicit-int-conversion)
no_warning(implicit-int-float-conversion)
no_warning(ctad-maybe-unsupported) # clang 9+, linux-only
no_warning(disabled-macro-expansion)
no_warning(documentation-unknown-command)
no_warning(double-promotion)
no_warning(exit-time-destructors)
no_warning(float-equal)
no_warning(global-constructors)
no_warning(missing-prototypes)
no_warning(missing-variable-declarations)
no_warning(padded)
no_warning(switch-enum)
no_warning(undefined-func-template)
no_warning(unused-template)
no_warning(vla)
no_warning(weak-template-vtables)
no_warning(weak-vtables)
no_warning(thread-safety-negative) # experimental flag, too many false positives
no_warning(enum-constexpr-conversion) # breaks magic-enum library in clang-16
no_warning(unsafe-buffer-usage) # too aggressive
no_warning(switch-default) # conflicts with "defaults in a switch covering all enum values"
# TODO Enable conversion, sign-conversion, double-promotion warnings.


@ -52,7 +52,7 @@ function(absl_cc_library)
)
target_include_directories(${_NAME}
PUBLIC "${ABSL_COMMON_INCLUDE_DIRS}")
SYSTEM PUBLIC "${ABSL_COMMON_INCLUDE_DIRS}")
target_compile_options(${_NAME}
PRIVATE ${ABSL_CC_LIB_COPTS})
target_compile_definitions(${_NAME} PUBLIC ${ABSL_CC_LIB_DEFINES})
@ -61,7 +61,7 @@ function(absl_cc_library)
# Generating header-only library
add_library(${_NAME} INTERFACE)
target_include_directories(${_NAME}
INTERFACE "${ABSL_COMMON_INCLUDE_DIRS}")
SYSTEM INTERFACE "${ABSL_COMMON_INCLUDE_DIRS}")
target_link_libraries(${_NAME}
INTERFACE


@ -81,9 +81,7 @@ set (CAPNPC_SRCS
add_library(_capnpc ${CAPNPC_SRCS})
target_link_libraries(_capnpc PUBLIC _capnp)
if (COMPILER_CLANG)
set (CAPNP_PRIVATE_CXX_FLAGS -fno-char8_t)
endif ()
set (CAPNP_PRIVATE_CXX_FLAGS -fno-char8_t)
target_compile_options(_kj PRIVATE ${CAPNP_PRIVATE_CXX_FLAGS})
target_compile_options(_capnp PRIVATE ${CAPNP_PRIVATE_CXX_FLAGS})

contrib/lz4

@ -1 +1 @@
Subproject commit ce45a9dbdb059511a3e9576b19db3e7f1a4f172e
Subproject commit 145f3804ca5ef5482cda0f2a4f6a2d04ba57f965


@ -91,12 +91,10 @@ set(LIB_SOVERSION ${VERSION_MAJOR})
enable_language(ASM)
if(COMPILER_CLANG)
add_definitions(-Wno-unused-command-line-argument)
# Note that s390x build uses mold linker
if(NOT ARCH_S390X)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=lld") # only relevant for -DENABLE_OPENSSL_DYNAMIC=1
endif()
add_definitions(-Wno-unused-command-line-argument)
# Note that s390x build uses mold linker
if(NOT ARCH_S390X)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=lld") # only relevant for -DENABLE_OPENSSL_DYNAMIC=1
endif()
if(ARCH_AMD64)


@ -1,4 +1,4 @@
if (NOT OS_FREEBSD AND NOT (OS_DARWIN AND COMPILER_CLANG))
if (NOT OS_FREEBSD AND NOT OS_DARWIN)
option (ENABLE_SENTRY "Enable Sentry" ${ENABLE_LIBRARIES})
else()
option (ENABLE_SENTRY "Enable Sentry" OFF)

contrib/yaml-cpp

@ -1 +1 @@
Subproject commit 0c86adac6d117ee2b4afcedb8ade19036ca0327d
Subproject commit f91e938341273b5f9d341380ab17bcc3de5daa06


@ -3,10 +3,10 @@ compilers and build settings. Correctly configured Docker daemon is the single dependency.
Usage:
Build deb package with `clang-17` in `debug` mode:
Build deb package with `clang-18` in `debug` mode:
```
$ mkdir deb/test_output
$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-17 --debug-build
$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-18 --debug-build
$ ls -l deb/test_output
-rw-r--r-- 1 root root 3730 clickhouse-client_22.2.2+debug_all.deb
-rw-r--r-- 1 root root 84221888 clickhouse-common-static_22.2.2+debug_amd64.deb
@ -17,11 +17,11 @@ $ ls -l deb/test_output
```
Build ClickHouse binary with `clang-17` and `address` sanitizer in `relwithdebuginfo`
Build ClickHouse binary with `clang-18` and `address` sanitizer in `relwithdebuginfo`
mode:
```
$ mkdir $HOME/some_clickhouse
$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-17 --sanitizer=address
$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-18 --sanitizer=address
$ ls -l $HOME/some_clickhouse
-rwxr-xr-x 1 root root 787061952 clickhouse
lrwxrwxrwx 1 root root 10 clickhouse-benchmark -> clickhouse


@ -403,19 +403,19 @@ def parse_args() -> argparse.Namespace:
parser.add_argument(
"--compiler",
choices=(
"clang-17",
"clang-17-darwin",
"clang-17-darwin-aarch64",
"clang-17-aarch64",
"clang-17-aarch64-v80compat",
"clang-17-ppc64le",
"clang-17-riscv64",
"clang-17-s390x",
"clang-17-amd64-compat",
"clang-17-amd64-musl",
"clang-17-freebsd",
"clang-18",
"clang-18-darwin",
"clang-18-darwin-aarch64",
"clang-18-aarch64",
"clang-18-aarch64-v80compat",
"clang-18-ppc64le",
"clang-18-riscv64",
"clang-18-s390x",
"clang-18-amd64-compat",
"clang-18-amd64-musl",
"clang-18-freebsd",
),
default="clang-17",
default="clang-18",
help="a compiler to use",
)
parser.add_argument(


@ -1,11 +1,14 @@
FROM ubuntu:20.04
# see https://github.com/moby/moby/issues/4032#issuecomment-192327844
# It could be removed after we move to version 23.04+
ARG DEBIAN_FRONTEND=noninteractive
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
# We shouldn't use `apt upgrade`, so as not to change the upstream image; it's updated biweekly.
# The user and group are precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case the entrypoint
# can't do chown, and the owners of mounted volumes should be configured externally.
@ -16,13 +19,11 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
&& groupadd -r clickhouse --gid=101 \
&& useradd -r -g clickhouse --uid=101 --home-dir=/var/lib/clickhouse --shell=/bin/bash clickhouse \
&& apt-get update \
&& apt-get upgrade -yq \
&& apt-get install --yes --no-install-recommends \
ca-certificates \
locales \
tzdata \
wget \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
ARG REPO_CHANNEL="stable"
@ -30,6 +31,9 @@ ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https
ARG VERSION="24.4.1.2088"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
#docker-official-library:off
# The part between `docker-official-library` tags is related to our builds
# set non-empty deb_location_url url to create a docker image
# from debs created by CI build, for example:
# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://..." -t ...
@ -80,19 +84,22 @@ RUN if [ -n "${single_binary_location_url}" ]; then \
&& rm -rf /tmp/* ; \
fi
# The rest is the same in the official docker and in our build system
#docker-official-library:on
# A fallback to installation from ClickHouse repository
RUN if ! clickhouse local -q "SELECT ''" > /dev/null 2>&1; then \
apt-get update \
&& apt-get install --yes --no-install-recommends \
apt-transport-https \
ca-certificates \
dirmngr \
gnupg2 \
&& mkdir -p /etc/apt/sources.list.d \
&& GNUPGHOME=$(mktemp -d) \
&& GNUPGHOME="$GNUPGHOME" gpg --no-default-keyring \
&& GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \
--keyring /usr/share/keyrings/clickhouse-keyring.gpg \
--keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 8919F6BD2B48D754 \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \
&& rm -rf "$GNUPGHOME" \
&& chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \
&& echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \
@ -127,7 +134,6 @@ RUN mkdir /docker-entrypoint-initdb.d
COPY docker_related_config.xml /etc/clickhouse-server/config.d/
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
EXPOSE 9000 8123 9009
VOLUME /var/lib/clickhouse


@ -4,33 +4,34 @@
ClickHouse is an open-source column-oriented DBMS (columnar database management system) for online analytical processing (OLAP) that allows users to generate analytical reports using SQL queries in real-time.
ClickHouse works 100-1000x faster than traditional database management systems, and processes hundreds of millions to over a billion rows and tens of gigabytes of data per server per second. With a widespread user base around the globe, the technology has received praise for its reliability, ease of use, and fault tolerance.
For more information and documentation see https://clickhouse.com/.
## Versions
- The `latest` tag points to the latest release of the latest stable branch.
- Branch tags like `22.2` point to the latest release of the corresponding branch.
- Full version tags like `22.2.3.5` point to the corresponding release.
- The tag `head` is built from the latest commit to the default branch.
- Each tag has an optional `-alpine` suffix to reflect that it's built on top of `alpine`.
### Compatibility
- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples of unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A).
## How to use this image
### start server instance
```bash
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
By default, ClickHouse will be accessible only via the Docker network. See the [networking section below](#networking).
By default, the server instance started above runs as the `default` user without a password.
### connect to it from a native client
@ -66,9 +67,7 @@ docker run -d -p 18123:8123 -p19000:9000 --name some-clickhouse-server --ulimit
echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @-
```
`22.6.3.35`
or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
@ -77,16 +76,14 @@ docker run -d --network=host --name some-clickhouse-server --ulimit nofile=26214
echo 'SELECT version()' | curl 'http://localhost:8123/' --data-binary @-
```
`22.6.3.35`
### Volumes
Typically you may want to mount the following folders inside your container to achieve persistence:
- `/var/lib/clickhouse/` - main folder where ClickHouse stores the data
- `/var/log/clickhouse-server/` - logs
```bash
docker run -d \
@ -97,9 +94,9 @@ docker run -d \
You may also want to mount:
* `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustments
* `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustments
* `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below).
- `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustments
- `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustments
- `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below).
### Linux capabilities
@ -150,7 +147,7 @@ docker run --rm -e CLICKHOUSE_DB=my_database -e CLICKHOUSE_USER=username -e CLIC
## How to extend this image
To perform additional initialization in an image derived from this one, add one or more `*.sql`, `*.sql.gz`, or `*.sh` scripts under `/docker-entrypoint-initdb.d`. After the entrypoint calls `initdb`, it will run any `*.sql` files, run any executable `*.sh` scripts, and source any non-executable `*.sh` scripts found in that directory to do further initialization before starting the service.
Also, you can provide environment variables `CLICKHOUSE_USER` & `CLICKHOUSE_PASSWORD` that will be used for clickhouse-client during initialization.
For example, to add an additional user and database, add the following to `/docker-entrypoint-initdb.d/init-db.sh`:
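The script body itself is not included in this diff hunk. As a rough sketch of the same idea, a plain `*.sql` init script dropped into the same directory is executed by the entrypoint on first start; the file, database, and table names below are hypothetical:

```sql
-- hypothetical /docker-entrypoint-initdb.d/init-db.sql, run once when the
-- container initializes its data directory
CREATE DATABASE IF NOT EXISTS docs_db;

CREATE TABLE IF NOT EXISTS docs_db.events
(
    ts DateTime,
    message String
)
ENGINE = MergeTree
ORDER BY ts;
```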

View File

@ -127,9 +127,6 @@ function setup_logs_replication
echo 'Create all configured system logs'
clickhouse-client --query "SYSTEM FLUSH LOGS"
# It doesn't make sense to try creating tables if SYNC fails
echo "SYSTEM SYNC DATABASE REPLICA default" | clickhouse-client "${CONNECTION_ARGS[@]}" || return 0
debug_or_sanitizer_build=$(clickhouse-client -q "WITH ((SELECT value FROM system.build_options WHERE name='BUILD_TYPE') AS build, (SELECT value FROM system.build_options WHERE name='CXX_FLAGS') as flags) SELECT build='Debug' OR flags LIKE '%fsanitize%'")
echo "Build is debug or sanitizer: $debug_or_sanitizer_build"
@ -143,7 +140,7 @@ function setup_logs_replication
time DateTime COMMENT 'The time of test run',
test_name String COMMENT 'The name of the test',
coverage Array(UInt64) COMMENT 'An array of addresses of the code (a subset of addresses instrumented for coverage) that were encountered during the test run'
) ENGINE = Null COMMENT 'Contains information about per-test coverage from the CI, but used only for exporting to the CI cluster'
) ENGINE = MergeTree ORDER BY test_name COMMENT 'Contains information about per-test coverage from the CI, but used only for exporting to the CI cluster'
"
# For each system log table:

View File

@ -31,6 +31,11 @@
<allow_experimental_analyzer>
<readonly/>
</allow_experimental_analyzer>
<!-- This feature is broken, deprecated and will be removed. We don't want more reports about it -->
<allow_experimental_object_type>
<readonly/>
</allow_experimental_object_type>
</constraints>
</default>
</profiles>

View File

@ -17,7 +17,7 @@ stage=${stage:-}
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
echo "$script_dir"
repo_dir=ch
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-17_debug_none_unsplitted_disable_False_binary"}
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-18_debug_none_unsplitted_disable_False_binary"}
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}
function git_clone_with_retry

View File

@ -101,7 +101,8 @@ RUN python3 -m pip install --no-cache-dir \
retry==0.9.2 \
bs4==0.0.2 \
lxml==5.1.0 \
urllib3==2.0.7
urllib3==2.0.7 \
jwcrypto==1.5.6
# bs4, lxml are for cloud tests, do not delete
# Hudi supports only spark 3.3.*, not 3.4

View File

@ -2,7 +2,7 @@
set -euo pipefail
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-17_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-18_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""}

View File

@ -2,7 +2,7 @@
set -euo pipefail
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-17_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-18_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""}

View File

@ -6,7 +6,7 @@ set -e
set -u
set -o pipefail
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-17_debug_none_unsplitted_disable_False_binary"}
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-18_debug_none_unsplitted_disable_False_binary"}
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}
function wget_with_retry

View File

@ -89,8 +89,8 @@ ENV MINIO_ROOT_USER="clickhouse"
ENV MINIO_ROOT_PASSWORD="clickhouse"
ENV EXPORT_S3_STORAGE_POLICIES=1
RUN npm install -g azurite \
&& npm install -g tslib
RUN npm install -g azurite@3.30.0 \
&& npm install -g tslib && npm install -g node
COPY run.sh /
COPY setup_minio.sh /

View File

@ -78,7 +78,8 @@ function configure()
randomize_config_boolean_value use_compression zookeeper
fi
randomize_config_boolean_value allow_experimental_block_number_column block_number
randomize_config_boolean_value enable_block_number_column block_number
randomize_config_boolean_value enable_block_offset_column block_number
# for clickhouse-server (via service)
echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
@ -119,13 +120,41 @@ EOL
local max_users_mem
max_users_mem=$((total_mem*30/100)) # 30%
# Similar to docker/test/fuzzer/query-fuzzer-tweaks-users.xml
echo "Setting max_memory_usage_for_user=$max_users_mem and max_memory_usage for queries to 10G"
cat > /etc/clickhouse-server/users.d/max_memory_usage_for_user.xml <<EOL
cat > /etc/clickhouse-server/users.d/stress_test_tweaks-users.xml <<EOL
<clickhouse>
<profiles>
<default>
<max_execution_time>60</max_execution_time>
<max_memory_usage>10G</max_memory_usage>
<max_memory_usage_for_user>${max_users_mem}</max_memory_usage_for_user>
<table_function_remote_max_addresses>200</table_function_remote_max_addresses>
<constraints>
<max_execution_time>
<max>60</max>
</max_execution_time>
<max_memory_usage>
<max>10G</max>
</max_memory_usage>
<table_function_remote_max_addresses>
<max>200</max>
</table_function_remote_max_addresses>
<!-- Don't waste cycles testing the old interpreter. Spend time in the new analyzer instead -->
<allow_experimental_analyzer>
<readonly/>
</allow_experimental_analyzer>
<!-- This feature is broken, deprecated and will be removed. We don't want more reports about it -->
<allow_experimental_object_type>
<readonly/>
</allow_experimental_object_type>
</constraints>
</default>
</profiles>
</clickhouse>

View File

@ -5,7 +5,7 @@ FROM ubuntu:22.04
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=17
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=18
RUN apt-get update \
&& apt-get install \

View File

@ -0,0 +1,14 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v23.8.14.6-lts (967e51c1d6b) FIXME as compared to v23.8.13.25-lts (37e034f903e)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Set server name for SSL handshake in MongoDB engine [#63122](https://github.com/ClickHouse/ClickHouse/pull/63122) ([Alexander Gololobov](https://github.com/davenger)).
* Use user specified db instead of "config" for MongoDB wire protocol version check [#63126](https://github.com/ClickHouse/ClickHouse/pull/63126) ([Alexander Gololobov](https://github.com/davenger)).

View File

@ -13,14 +13,14 @@ The cross-build for macOS is based on the [Build instructions](../development/bu
The following sections provide a walk-through for building ClickHouse for `x86_64` macOS. If youre targeting ARM architecture, simply substitute all occurrences of `x86_64` with `aarch64`. For example, replace `x86_64-apple-darwin` with `aarch64-apple-darwin` throughout the steps.
## Install Clang-17
## Install clang-18
Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
For example, the commands for Bionic are:
``` bash
sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-17 main" >> /etc/apt/sources.list
sudo apt-get install clang-17
sudo apt-get install clang-18
```
## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
@ -59,7 +59,7 @@ curl -L 'https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacOSX11
cd ClickHouse
mkdir build-darwin
cd build-darwin
CC=clang-17 CXX=clang++-17 cmake -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=${CCTOOLS}/bin/x86_64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake ..
CC=clang-18 CXX=clang++-18 cmake -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=${CCTOOLS}/bin/x86_64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake ..
ninja
```

View File

@ -23,7 +23,7 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
``` bash
cd ClickHouse
mkdir build-riscv64
CC=clang-17 CXX=clang++-17 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF
CC=clang-18 CXX=clang++-18 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF
ninja -C build-riscv64
```

View File

@ -5,22 +5,13 @@ title: How to Build, Run and Debug ClickHouse on Linux for s390x (zLinux)
sidebar_label: Build on Linux for s390x (zLinux)
---
As of writing (2023/3/10) building for s390x considered to be experimental. Not all features can be enabled, has broken features and is currently under active development.
At the time of writing (May 2024), support for the s390x platform is considered experimental, i.e. some features are disabled or broken on s390x.
## Building ClickHouse for s390x
## Building
s390x has two OpenSSL-related build options.
- By default, the s390x build will dynamically link to OpenSSL libraries. It will build OpenSSL shared objects, so it's not necessary to install OpenSSL beforehand. (This option is recommended in all cases.)
- Another option is to build OpenSSL in-tree. In this case two build flags need to be supplied to cmake
```bash
-DENABLE_OPENSSL_DYNAMIC=0
```
:::note
s390x builds are temporarily disabled in CI.
:::
s390x has two OpenSSL-related build options:
- By default, OpenSSL is built on s390x as a shared library. This is different from all other platforms, where OpenSSL is built as a static library.
- To build OpenSSL as a static library regardless, pass `-DENABLE_OPENSSL_DYNAMIC=0` to CMake.
These instructions assume that the host machine is x86_64 and has all the tooling required to build natively based on the [build instructions](../development/build.md). It also assumes that the host is Ubuntu 22.04 but the following instructions should also work on Ubuntu 20.04.
@ -31,11 +22,16 @@ apt-get install binutils-s390x-linux-gnu libc6-dev-s390x-cross gcc-s390x-linux-g
```
If you wish to cross compile rust code install the rust cross compile target for s390x:
```bash
rustup target add s390x-unknown-linux-gnu
```
The s390x build uses the mold linker. Download it from https://github.com/rui314/mold/releases/download/v2.0.0/mold-2.0.0-x86_64-linux.tar.gz and place it in your `$PATH`.
To build for s390x:
```bash
cmake -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-s390x.cmake ..
ninja

View File

@ -109,7 +109,7 @@ The build requires the following components:
- Git (used to checkout the sources, not needed for the build)
- CMake 3.20 or newer
- Compiler: clang-17 or newer
- Compiler: clang-18 or newer
- Linker: lld-17 or newer
- Ninja
- Yasm

View File

@ -153,7 +153,7 @@ Builds ClickHouse in various configurations for use in further steps. You have t
### Report Details
- **Compiler**: `clang-17`, optionally with the name of a target platform
- **Compiler**: `clang-18`, optionally with the name of a target platform
- **Build type**: `Debug` or `RelWithDebInfo` (cmake).
- **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan).
- **Status**: `success` or `fail`
@ -177,7 +177,7 @@ Performs static analysis and code style checks using `clang-tidy`. The report is
There is a convenience `packager` script that runs the clang-tidy build in docker
```sh
mkdir build_tidy
./docker/packager/packager --output-dir=./build_tidy --package-type=binary --compiler=clang-17 --debug-build --clang-tidy
./docker/packager/packager --output-dir=./build_tidy --package-type=binary --compiler=clang-18 --debug-build --clang-tidy
```

View File

@ -121,7 +121,7 @@ While inside the `build` directory, configure your build by running CMake. Befor
export CC=clang CXX=clang++
cmake ..
If you installed clang using the automatic installation script above, also specify the version of clang installed in the first command, e.g. `export CC=clang-17 CXX=clang++-17`. The clang version will be in the script output.
If you installed clang using the automatic installation script above, also specify the version of clang installed in the first command, e.g. `export CC=clang-18 CXX=clang++-18`. The clang version will be in the script output.
The `CC` variable specifies the compiler for C (short for C Compiler), and `CXX` variable instructs which C++ compiler is to be used for building.

View File

@ -51,6 +51,9 @@ ENGINE = MaterializedMySQL('host:port', ['database' | database], 'user', 'passwo
### allows_query_when_mysql_lost
`allows_query_when_mysql_lost` — Allows querying a materialized table when the MySQL connection is lost. Default: `0` (`false`).
### allow_startup_database_without_connection_to_mysql
`allow_startup_database_without_connection_to_mysql` — Allows creating and attaching the database without an available connection to MySQL. Default: `0` (`false`).
### materialized_mysql_tables_list
`materialized_mysql_tables_list` — A comma-separated list of MySQL database tables which will be replicated by the MaterializedMySQL database engine. Default value: empty list — all tables will be replicated.
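For illustration, a sketch of how these settings can be passed at database creation time (host, credentials, and the table list are placeholders, and the engine may additionally require `SET allow_experimental_database_materialized_mysql = 1`):

```sql
CREATE DATABASE mysql_replica
ENGINE = MaterializedMySQL('mysql-host:3306', 'mydb', 'user', 'password')
SETTINGS
    allows_query_when_mysql_lost = 1,
    allow_startup_database_without_connection_to_mysql = 1,
    materialized_mysql_tables_list = 'orders,customers';
```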

View File

@ -17,6 +17,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
...
) ENGINE = EmbeddedRocksDB([ttl, rocksdb_dir, read_only]) PRIMARY KEY(primary_key_name)
[ SETTINGS name=value, ... ]
```
Engine parameters:
@ -29,6 +30,11 @@ Engine parameters:
- columns other than the primary key will be serialized in binary as `rocksdb` value in corresponding order.
- queries with key `equals` or `in` filtering will be optimized to multi keys lookup from `rocksdb`.
Engine settings:
- `optimize_for_bulk_insert` - Table is optimized for bulk insertions (the insert pipeline will create SST files and import them into the rocksdb database instead of writing to memtables); default value: `1`.
- `bulk_insert_block_size` - Minimum size of SST files (in terms of rows) created by bulk insertion; default value: `1048449`.
Example:
``` sql

View File

@ -73,6 +73,7 @@ Optional parameters:
- `rabbitmq_queue_consume` - Use user-defined queues and do not make any RabbitMQ setup: declaring exchanges, queues, bindings. Default: `false`.
- `rabbitmq_username` - RabbitMQ username.
- `rabbitmq_password` - RabbitMQ password.
- `reject_unhandled_messages` - Reject messages (send RabbitMQ negative acknowledgement) in case of errors. This setting is automatically enabled if there is a `x-dead-letter-exchange` defined in `rabbitmq_queue_settings_list`.
- `rabbitmq_commit_on_select` - Commit messages when select query is made. Default: `false`.
- `rabbitmq_max_rows_per_message` — The maximum number of rows written in one RabbitMQ message for row-based formats. Default : `1`.
- `rabbitmq_empty_queue_backoff_start` — A start backoff point to reschedule read if the rabbitmq queue is empty.

View File

@ -22,9 +22,8 @@ ORDER BY Distance(vectors, Point)
LIMIT N
```
`vectors` contains N-dimensional values of type [Array](../../../sql-reference/data-types/array.md) or
[Tuple](../../../sql-reference/data-types/tuple.md), for example embeddings. Function `Distance` computes the distance between two vectors.
Often, the Euclidean (L2) distance is chosen as distance function but [other
`vectors` contains N-dimensional values of type [Array(Float32)](../../../sql-reference/data-types/array.md), for example embeddings.
Function `Distance` computes the distance between two vectors. Often, the Euclidean (L2) distance is chosen as distance function but [other
distance functions](/docs/en/sql-reference/functions/distance-functions.md) are also possible. `Point` is the reference point, e.g. `(0.17,
0.33, ...)`, and `N` limits the number of search results.
@ -47,7 +46,7 @@ of the search space (using clustering, search trees, etc.) which allows to compu
# Creating and Using ANN Indexes {#creating_using_ann_indexes}
Syntax to create an ANN index over an [Array](../../../sql-reference/data-types/array.md) column:
Syntax to create an ANN index over an [Array(Float32)](../../../sql-reference/data-types/array.md) column:
```sql
CREATE TABLE table_with_ann_index
@ -60,19 +59,6 @@ ENGINE = MergeTree
ORDER BY id;
```
Syntax to create an ANN index over a [Tuple](../../../sql-reference/data-types/tuple.md) column:
```sql
CREATE TABLE table_with_ann_index
(
`id` Int64,
`vectors` Tuple(Float32[, Float32[, ...]]),
INDEX [ann_index_name] vectors TYPE [ann_index_type]([ann_index_parameters]) [GRANULARITY [N]]
)
ENGINE = MergeTree
ORDER BY id;
```
ANN indexes are built during column insertion and merge. As a result, `INSERT` and `OPTIMIZE` statements will be slower than for ordinary tables. ANN indexes are ideally used only with immutable or rarely changed data, i.e. when there are far more read requests than write requests.
@ -164,7 +150,7 @@ linear surfaces (lines in 2D, planes in 3D etc.).
</iframe>
</div>
Syntax to create an Annoy index over an [Array](../../../sql-reference/data-types/array.md) column:
Syntax to create an Annoy index over an [Array(Float32)](../../../sql-reference/data-types/array.md) column:
```sql
CREATE TABLE table_with_annoy_index
@ -177,19 +163,6 @@ ENGINE = MergeTree
ORDER BY id;
```
Syntax to create an ANN index over a [Tuple](../../../sql-reference/data-types/tuple.md) column:
```sql
CREATE TABLE table_with_annoy_index
(
id Int64,
vectors Tuple(Float32[, Float32[, ...]]),
INDEX [ann_index_name] vectors TYPE annoy([Distance[, NumTrees]]) [GRANULARITY N]
)
ENGINE = MergeTree
ORDER BY id;
```
Annoy currently supports two distance functions:
- `L2Distance`, also called Euclidean distance, is the length of a line segment between two points in Euclidean space
([Wikipedia](https://en.wikipedia.org/wiki/Euclidean_distance)).
@ -203,10 +176,9 @@ Parameter `NumTrees` is the number of trees which the algorithm creates (default
more accurate search results but slower index creation / query times (approximately linearly) as well as larger index sizes.
:::note
Indexes over columns of type `Array` will generally work faster than indexes on `Tuple` columns. All arrays must have same length. To avoid
errors, you can use a [CONSTRAINT](/docs/en/sql-reference/statements/create/table.md#constraints), for example, `CONSTRAINT
constraint_name_1 CHECK length(vectors) = 256`. Also, empty `Arrays` and unspecified `Array` values in INSERT statements (i.e. default
values) are not supported.
All arrays must have same length. To avoid errors, you can use a
[CONSTRAINT](/docs/en/sql-reference/statements/create/table.md#constraints), for example, `CONSTRAINT constraint_name_1 CHECK
length(vectors) = 256`. Also, empty `Arrays` and unspecified `Array` values in INSERT statements (i.e. default values) are not supported.
:::
The creation of Annoy indexes (whenever a new part is build, e.g. at the end of a merge) is a relatively slow process. You can increase
@ -264,19 +236,6 @@ ENGINE = MergeTree
ORDER BY id;
```
Syntax to create an ANN index over a [Tuple](../../../sql-reference/data-types/tuple.md) column:
```sql
CREATE TABLE table_with_usearch_index
(
id Int64,
vectors Tuple(Float32[, Float32[, ...]]),
INDEX [ann_index_name] vectors TYPE usearch([Distance[, ScalarKind]]) [GRANULARITY N]
)
ENGINE = MergeTree
ORDER BY id;
```
USearch currently supports two distance functions:
- `L2Distance`, also called Euclidean distance, is the length of a line segment between two points in Euclidean space
([Wikipedia](https://en.wikipedia.org/wiki/Euclidean_distance)).

View File

@ -2,7 +2,7 @@
slug: /en/engines/table-engines/mergetree-family/invertedindexes
sidebar_label: Full-text Indexes
description: Quickly find search terms in text.
keywords: [full-text search, text search, inverted, index, indices]
keywords: [full-text search, text search, index, indices]
---
# Full-text Search using Full-text Indexes [experimental]
@ -53,6 +53,10 @@ ENGINE = MergeTree
ORDER BY key
```
:::note
In earlier versions of ClickHouse, the corresponding index type name was `inverted`.
:::
where `N` specifies the tokenizer:
- `full_text(0)` (or shorter: `full_text()`) sets the tokenizer to "tokens", i.e. split strings along spaces,

View File

@ -494,7 +494,7 @@ Syntax: `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, ran
#### Special-purpose
- Experimental indexes to support approximate nearest neighbor (ANN) search. See [here](annindexes.md) for details.
- An experimental inverted index to support full-text search. See [here](invertedindexes.md) for details.
- An experimental full-text index to support full-text search. See [here](invertedindexes.md) for details.
### Functions Support {#functions-support}
@ -502,31 +502,31 @@ Conditions in the `WHERE` clause contains calls of the functions that operate wi
Indexes of type `set` can be utilized by all functions. The other index types are supported as follows:
| Function (operator) / Index | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | inverted |
|------------------------------------------------------------------------------------------------------------|-------------|--------|------------|------------|--------------|----------|
| [equals (=, ==)](/docs/en/sql-reference/functions/comparison-functions.md/#equals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notEquals(!=, &lt;&gt;)](/docs/en/sql-reference/functions/comparison-functions.md/#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [like](/docs/en/sql-reference/functions/string-search-functions.md/#like) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
| [notLike](/docs/en/sql-reference/functions/string-search-functions.md/#notlike) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
| [match](/docs/en/sql-reference/functions/string-search-functions.md/#match) | ✗ | ✗ | ✔ | ✔ | ✗ | ✔ |
| [startsWith](/docs/en/sql-reference/functions/string-functions.md/#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
| [endsWith](/docs/en/sql-reference/functions/string-functions.md/#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | ✔ |
| [multiSearchAny](/docs/en/sql-reference/functions/string-search-functions.md/#multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | ✔ |
| [in](/docs/en/sql-reference/functions/in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notIn](/docs/en/sql-reference/functions/in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [less (<)](/docs/en/sql-reference/functions/comparison-functions.md/#less) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [greater (>)](/docs/en/sql-reference/functions/comparison-functions.md/#greater) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [lessOrEquals (<=)](/docs/en/sql-reference/functions/comparison-functions.md/#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [greaterOrEquals (>=)](/docs/en/sql-reference/functions/comparison-functions.md/#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [empty](/docs/en/sql-reference/functions/array-functions/#empty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [notEmpty](/docs/en/sql-reference/functions/array-functions/#notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [has](/docs/en/sql-reference/functions/array-functions/#has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ |
| [hasAny](/docs/en/sql-reference/functions/array-functions/#hasany) | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ |
| [hasAll](/docs/en/sql-reference/functions/array-functions/#hasall) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |
| hasTokenOrNull | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |
| hasTokenCaseInsensitive (*) | ✗ | ✗ | ✗ | ✔ | ✗ | ✗ |
| hasTokenCaseInsensitiveOrNull (*) | ✗ | ✗ | ✗ | ✔ | ✗ | ✗ |
| Function (operator) / Index | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | full_text |
|------------------------------------------------------------------------------------------------------------|-------------|--------|------------|------------|--------------|-----------|
| [equals (=, ==)](/docs/en/sql-reference/functions/comparison-functions.md/#equals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notEquals(!=, &lt;&gt;)](/docs/en/sql-reference/functions/comparison-functions.md/#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [like](/docs/en/sql-reference/functions/string-search-functions.md/#like) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
| [notLike](/docs/en/sql-reference/functions/string-search-functions.md/#notlike) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
| [match](/docs/en/sql-reference/functions/string-search-functions.md/#match) | ✗ | ✗ | ✔ | ✔ | ✗ | ✔ |
| [startsWith](/docs/en/sql-reference/functions/string-functions.md/#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
| [endsWith](/docs/en/sql-reference/functions/string-functions.md/#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | ✔ |
| [multiSearchAny](/docs/en/sql-reference/functions/string-search-functions.md/#multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | ✔ |
| [in](/docs/en/sql-reference/functions/in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notIn](/docs/en/sql-reference/functions/in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [less (<)](/docs/en/sql-reference/functions/comparison-functions.md/#less) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [greater (>)](/docs/en/sql-reference/functions/comparison-functions.md/#greater) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [lessOrEquals (<=)](/docs/en/sql-reference/functions/comparison-functions.md/#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [greaterOrEquals (>=)](/docs/en/sql-reference/functions/comparison-functions.md/#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [empty](/docs/en/sql-reference/functions/array-functions/#empty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [notEmpty](/docs/en/sql-reference/functions/array-functions/#notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [has](/docs/en/sql-reference/functions/array-functions/#has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ |
| [hasAny](/docs/en/sql-reference/functions/array-functions/#hasany) | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ |
| [hasAll](/docs/en/sql-reference/functions/array-functions/#hasall) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |
| hasTokenOrNull | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |
| hasTokenCaseInsensitive (*) | ✗ | ✗ | ✗ | ✔ | ✗ | ✗ |
| hasTokenCaseInsensitiveOrNull (*) | ✗ | ✗ | ✗ | ✔ | ✗ | ✗ |
Functions with a constant argument that is less than ngram size cant be used by `ngrambf_v1` for query optimization.

View File

@ -10,7 +10,7 @@ sidebar_label: Data Replication
In ClickHouse Cloud replication is managed for you. Please create your tables without adding arguments. For example, in the text below you would replace:
```sql
ENGINE = ReplicatedReplacingMergeTree(
ENGINE = ReplicatedMergeTree(
'/clickhouse/tables/{shard}/table_name',
'{replica}',
ver
@ -20,7 +20,7 @@ ENGINE = ReplicatedReplacingMergeTree(
with:
```sql
ENGINE = ReplicatedReplacingMergeTree
ENGINE = ReplicatedMergeTree
```
:::
@ -140,11 +140,11 @@ The system monitors data synchronicity on replicas and is able to recover after
:::note
In ClickHouse Cloud replication is managed for you. Please create your tables without adding arguments. For example, in the text below you would replace:
```
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', ver)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', ver)
```
with:
```
ENGINE = ReplicatedReplacingMergeTree
ENGINE = ReplicatedMergeTree
```
:::
@ -177,7 +177,7 @@ CREATE TABLE table_name
CounterID UInt32,
UserID UInt32,
ver UInt16
) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', ver)
) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', ver)
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID))
SAMPLE BY intHash32(UserID);

View File

@ -11,7 +11,7 @@ import CodeBlock from '@theme/CodeBlock';
# Install ClickHouse
You have three options for getting up and running with ClickHouse:
You have four options for getting up and running with ClickHouse:
- **[ClickHouse Cloud](https://clickhouse.com/cloud/):** The official ClickHouse as a service, - built by, maintained and supported by the creators of ClickHouse
- **[Quick Install](#quick-install):** an easy-to-download binary for testing and developing with ClickHouse
@ -32,37 +32,49 @@ On Linux, macOS and FreeBSD:
1. If you are just getting started and want to see what ClickHouse can do, the simplest way to download ClickHouse locally is to run the
following command. It downloads a single binary for your operating system that can be used to run the ClickHouse server,
clickhouse-client, clickhouse-local, ClickHouse Keeper, and other tools:
`clickhouse-client`, `clickhouse-local`, ClickHouse Keeper, and other tools:
```bash
curl https://clickhouse.com/ | sh
```
```bash
curl https://clickhouse.com/ | sh
```
1. Run the following command to start the ClickHouse server:
2. Run the following command to start [clickhouse-local](../operations/utilities/clickhouse-local.md):
```bash
./clickhouse
```
`clickhouse-local` allows you to process local and remote files using ClickHouse's powerful SQL, without any need for configuration. Table data is stored in a temporary location, meaning that previously created tables are no longer available after a restart of `clickhouse-local`.
As an alternative, you can start the ClickHouse server with this command ...
```bash
./clickhouse server
```
The first time you run this script, the necessary files and folders are created in the current directory, then the server starts.
... and open a new terminal to connect to the server with `clickhouse-client`:
1. Open a new terminal and use the **./clickhouse client** to connect to your service:
```bash
./clickhouse client
```
```bash
./clickhouse client
```
```response
./clickhouse client
ClickHouse client version 24.5.1.117 (official build).
Connecting to localhost:9000 as user default.
Connected to ClickHouse server version 24.5.1.
```response
./clickhouse client
ClickHouse client version 23.2.1.1501 (official build).
Connecting to localhost:9000 as user default.
Connected to ClickHouse server version 23.2.1.
local-host :)
```
local-host :)
```
You are ready to start sending DDL and SQL commands to ClickHouse!
Table data is stored in the current directory and is still available after a restart of the ClickHouse server. If necessary, you can pass
`-C config.xml` as an additional command line argument to `./clickhouse server` and provide further configuration in a configuration
file. All available configuration settings are documented [here](../operations/settings/settings.md) and in an [example configuration file
template](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml).
You are ready to start sending SQL commands to ClickHouse!
:::tip
The [Quick Start](/docs/en/quick-start.mdx) walks through the steps for creating tables and inserting data.
@ -377,14 +389,14 @@ build.
### macOS-only: Install with Homebrew
To install ClickHouse using [homebrew](https://brew.sh/), see [here](https://formulae.brew.sh/cask/clickhouse).
To install ClickHouse on macOS using [homebrew](https://brew.sh/), please see the ClickHouse [community homebrew formula](https://formulae.brew.sh/cask/clickhouse).
## Launch {#launch}
To start the server as a daemon, run:
``` bash
$ sudo clickhouse start
$ clickhouse start
```
There are also other ways to run ClickHouse:

View File

@ -75,7 +75,7 @@ The supported formats are:
| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ |
| [ORC](#data-format-orc) | ✔ | ✔ |
| [One](#data-format-one) | ✔ | ✗ |
| [Npy](#data-format-npy) | ✔ | |
| [Npy](#data-format-npy) | ✔ | |
| [RowBinary](#rowbinary) | ✔ | ✔ |
| [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
@ -91,6 +91,7 @@ The supported formats are:
| [MySQLDump](#mysqldump) | ✔ | ✗ |
| [DWARF](#dwarf) | ✔ | ✗ |
| [Markdown](#markdown) | ✗ | ✔ |
| [Form](#form) | ✔ | ✗ |
You can control some format processing parameters with the ClickHouse settings. For more information read the [Settings](/docs/en/operations/settings/settings-formats.md) section.
@ -206,7 +207,7 @@ SELECT * FROM nestedt FORMAT TSV
Differs from `TabSeparated` format in that the rows are written without escaping.
When parsing with this format, tabs or linefeeds are not allowed in each field.
This format is also available under the name `TSVRaw`.
This format is also available under the names `TSVRaw`, `Raw`.
## TabSeparatedWithNames {#tabseparatedwithnames}
@ -241,14 +242,14 @@ This format is also available under the name `TSVWithNamesAndTypes`.
Differs from `TabSeparatedWithNames` format in that the rows are written without escaping.
When parsing with this format, tabs or linefeeds are not allowed in each field.
This format is also available under the name `TSVRawWithNames`.
This format is also available under the names `TSVRawWithNames`, `RawWithNames`.
## TabSeparatedRawWithNamesAndTypes {#tabseparatedrawwithnamesandtypes}
Differs from `TabSeparatedWithNamesAndTypes` format in that the rows are written without escaping.
When parsing with this format, tabs or linefeeds are not allowed in each field.
This format is also available under the name `TSVRawWithNamesAndNames`.
This format is also available under the names `TSVRawWithNamesAndNames`, `RawWithNamesAndNames`.
## Template {#format-template}
@ -2465,23 +2466,22 @@ Result:
## Npy {#data-format-npy}
This function is designed to load a NumPy array from a .npy file into ClickHouse. The NumPy file format is a binary format used for efficiently storing arrays of numerical data. During import, ClickHouse treats top level dimension as an array of rows with single column. Supported Npy data types and their corresponding type in ClickHouse:
| Npy type | ClickHouse type |
|:--------:|:---------------:|
| b1 | UInt8 |
| i1 | Int8 |
| i2 | Int16 |
| i4 | Int32 |
| i8 | Int64 |
| u1 | UInt8 |
| u2 | UInt16 |
| u4 | UInt32 |
| u8 | UInt64 |
| f2 | Float32 |
| f4 | Float32 |
| f8 | Float64 |
| S | String |
| U | String |
This function is designed to load a NumPy array from a .npy file into ClickHouse. The NumPy file format is a binary format used for efficiently storing arrays of numerical data. During import, ClickHouse treats top level dimension as an array of rows with single column. Supported Npy data types and their corresponding type in ClickHouse:
| Npy data type (`INSERT`) | ClickHouse data type | Npy data type (`SELECT`) |
|--------------------------|-----------------------------------------------------------------|--------------------------|
| `i1` | [Int8](/docs/en/sql-reference/data-types/int-uint.md) | `i1` |
| `i2` | [Int16](/docs/en/sql-reference/data-types/int-uint.md) | `i2` |
| `i4` | [Int32](/docs/en/sql-reference/data-types/int-uint.md) | `i4` |
| `i8` | [Int64](/docs/en/sql-reference/data-types/int-uint.md) | `i8` |
| `u1`, `b1` | [UInt8](/docs/en/sql-reference/data-types/int-uint.md) | `u1` |
| `u2` | [UInt16](/docs/en/sql-reference/data-types/int-uint.md) | `u2` |
| `u4` | [UInt32](/docs/en/sql-reference/data-types/int-uint.md) | `u4` |
| `u8` | [UInt64](/docs/en/sql-reference/data-types/int-uint.md) | `u8` |
| `f2`, `f4` | [Float32](/docs/en/sql-reference/data-types/float.md) | `f4` |
| `f8` | [Float64](/docs/en/sql-reference/data-types/float.md) | `f8` |
| `S`, `U` | [String](/docs/en/sql-reference/data-types/string.md) | `S` |
| | [FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | `S` |
**Example of saving an array in .npy format using Python**
@ -2508,6 +2508,14 @@ Result:
└───────────────┘
```
**Selecting Data**
You can select data from a ClickHouse table and save it into a file in the Npy format using the following command:
```bash
$ clickhouse-client --query="SELECT {column} FROM {some_table} FORMAT Npy" > {filename.npy}
```
## LineAsString {#lineasstring}
In this format, every line of input data is interpreted as a single string value. This format can only be parsed for table with a single field of type [String](/docs/en/sql-reference/data-types/string.md). The remaining columns must be set to [DEFAULT](/docs/en/sql-reference/statements/create/table.md/#default) or [MATERIALIZED](/docs/en/sql-reference/statements/create/table.md/#materialized), or omitted.
@ -2843,3 +2851,31 @@ FORMAT Markdown
```
Markdown table will be generated automatically and can be used on markdown-enabled platforms, like GitHub. This format is used only for output.
## Form {#form}
The Form format can be used to read or write a single record in the application/x-www-form-urlencoded format, in which data is formatted as `key1=value1&key2=value2`.
Examples:
Given a file `data.tmp` placed in the `user_files` path with some URL encoded data:
```text
t_page=116&c.e=ls7xfkpm&c.tti.m=raf&rt.start=navigation&rt.bmr=390%2C11%2C10
```
```sql
SELECT * FROM file(data.tmp, Form) FORMAT vertical;
```
Result:
```text
Row 1:
──────
t_page: 116
c.e: ls7xfkpm
c.tti.m: raf
rt.start: navigation
rt.bmr: 390,11,10
```

View File

@ -210,6 +210,18 @@ Features:
- Pre-built metrics dashboards.
- Multiple users/projects via YAML config.
### clickhouse-monitoring {#clickhouse-monitoring}
[clickhouse-monitoring](https://github.com/duyet/clickhouse-monitoring) is a simple Next.js dashboard that relies on `system.*` tables to help monitor and provide an overview of your ClickHouse cluster.
Features:
- Query monitor: current queries, query history, query resources (memory, parts read, file_open, ...), most expensive queries, most used tables or columns, etc.
- Cluster monitor: total memory/CPU usage, distributed queue, global settings, mergetree settings, metrics, etc.
- Tables and parts information: size, row count, compression, part size, etc., at the column level detail.
- Useful tools: Zookeeper data exploration, query EXPLAIN, kill queries, etc.
- Visualization metric charts: queries and resource usage, number of merges/mutation, merge performance, query performance, etc.
## Commercial {#commercial}
### DataGrip {#datagrip}

View File

@ -2860,7 +2860,7 @@ table functions, and dictionaries.
User wishing to see secrets must also have
[`format_display_secrets_in_show_and_select` format setting](../settings/formats#format_display_secrets_in_show_and_select)
turned on and a
[`displaySecretsInShowAndSelect`](../../sql-reference/statements/grant#grant-display-secrets) privilege.
[`displaySecretsInShowAndSelect`](../../sql-reference/statements/grant#display-secrets) privilege.
Possible values:

View File

@ -15,7 +15,7 @@ table functions, and dictionaries.
User wishing to see secrets must also have
[`display_secrets_in_show_and_select` server setting](../server-configuration-parameters/settings#display_secrets_in_show_and_select)
turned on and a
[`displaySecretsInShowAndSelect`](../../sql-reference/statements/grant#grant-display-secrets) privilege.
[`displaySecretsInShowAndSelect`](../../sql-reference/statements/grant#display-secrets) privilege.
Possible values:

View File

@ -1925,7 +1925,9 @@ Default value: `16`.
### wait_for_async_insert {#wait-for-async-insert}
Enables or disables waiting for processing of asynchronous insertion. If enabled, server will return `OK` only after the data is inserted. Otherwise, it will return `OK` even if the data wasn't inserted.
Enables or disables waiting for processing of asynchronous insertion. If enabled, the server will return `OK` only after the data is inserted. Otherwise, it will return `OK` as soon as it has received the data, but the insert might still fail to parse or apply later (you can check this in `system.asynchronous_insert_log`).
If you want to use asynchronous inserts, you also need to enable [`async_insert`](#async-insert).
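As a quick sketch (the table name is a placeholder), both settings can also be supplied per query:

```sql
-- fire and forget: OK is returned as soon as the data is buffered
INSERT INTO t SETTINGS async_insert = 1, wait_for_async_insert = 0 VALUES (1);

-- safer: OK is returned only after the buffered data has actually been inserted
INSERT INTO t SETTINGS async_insert = 1, wait_for_async_insert = 1 VALUES (2);
```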
Possible values:

View File

@ -371,6 +371,8 @@ is equal to
</s3_plain_rewritable>
```
Starting from `24.5` it is possible to configure any object storage disk (`s3`, `azure`, `local`) using the `plain_rewritable` metadata type.
### Using Azure Blob Storage {#azure-blob-storage}
`MergeTree` family table engines can store data to [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/) using a disk with type `azure_blob_storage`.

View File

@ -22,4 +22,4 @@ Columns:
- `0` — The row describes a partial revoke.
- `1` — The row describes a grant.
- `grant_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Permission is granted `WITH GRANT OPTION`, see [GRANT](../../sql-reference/statements/grant.md#grant-privigele-syntax).
- `grant_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Permission is granted `WITH GRANT OPTION`, see [GRANT](../../sql-reference/statements/grant.md#granting-privilege-syntax).

View File

@ -14,7 +14,7 @@ The `system.part_log` table contains the following columns:
- `event_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Type of the event that occurred with the data part. Can have one of the following values:
- `NewPart` — Inserting of a new data part.
- `MergeParts` — Merging of data parts.
- `DownloadParts` — Downloading a data part.
- `DownloadPart` — Downloading a data part.
- `RemovePart` — Removing or detaching a data part using [DETACH PARTITION](../../sql-reference/statements/alter/partition.md#alter_detach-partition).
- `MutatePart` — Mutating of a data part.
- `MovePart` — Moving the data part from the one disk to another one.

View File

@ -7,15 +7,50 @@ sidebar_position: 6
Selects the first encountered value of a column.
By default, it ignores NULL values and returns the first NOT NULL value found in the column. As [`first_value`](../../../sql-reference/aggregate-functions/reference/first_value.md) if supports `RESPECT NULLS`, in which case it will select the first value passed, independently on whether it's NULL or not.
**Syntax**
```sql
any(column)
```
Aliases: `any_value`, [`first_value`](../reference/first_value.md).
**Parameters**
- `column`: The column name.
**Returned value**
By default, it ignores NULL values and returns the first NOT NULL value found in the column. Like [`first_value`](../../../sql-reference/aggregate-functions/reference/first_value.md) it supports `RESPECT NULLS`, in which case it will select the first value passed, independently of whether it's NULL or not.
:::note
The return type of the function is the same as the input, except for LowCardinality which is discarded. This means that given no rows as input it will return the default value of that type (0 for integers, or Null for a Nullable() column). You might use the `-OrNull` [combinator](../../../sql-reference/aggregate-functions/combinators.md) to modify this behaviour.
:::
:::warning
The query can be executed in any order and even in a different order each time, so the result of this function is indeterminate.
To get a determinate result, you can use the min or max function instead of any.
To get a determinate result, you can use the [`min`](../reference/min.md) or [`max`](../reference/max.md) function instead of `any`.
:::
In some cases, you can rely on the order of execution. This applies to cases when SELECT comes from a subquery that uses ORDER BY.
**Implementation details**
In some cases, you can rely on the order of execution. This applies to cases when `SELECT` comes from a subquery that uses `ORDER BY`.
When a `SELECT` query has the `GROUP BY` clause or at least one aggregate function, ClickHouse (in contrast to MySQL) requires that all expressions in the `SELECT`, `HAVING`, and `ORDER BY` clauses be calculated from keys or from aggregate functions. In other words, each column selected from the table must be used either in keys or inside aggregate functions. To get behavior like in MySQL, you can put the other columns in the `any` aggregate function.
- Alias: `any_value`, `first_value`.
**Example**
Query:
```sql
CREATE TABLE any_nulls (city Nullable(String)) ENGINE=Log;
INSERT INTO any_nulls (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);
SELECT any(city) FROM any_nulls;
```
```response
┌─any(city)─┐
│ Amsterdam │
└───────────┘
```

View File

@ -0,0 +1,44 @@
---
slug: /en/sql-reference/aggregate-functions/reference/any_respect_nulls
sidebar_position: 103
---
# any_respect_nulls
Selects the first encountered value of a column, regardless of whether it is a `NULL` value or not.
Aliases: `any_value_respect_nulls`, `first_value_respect_nulls`.
**Syntax**
```sql
any_respect_nulls(column)
```
**Parameters**
- `column`: The column name.
**Returned value**
- The first value encountered, regardless of whether it is a `NULL` value or not.
**Example**
Query:
```sql
CREATE TABLE any_nulls (city Nullable(String)) ENGINE=Log;
INSERT INTO any_nulls (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);
SELECT any(city), any_respect_nulls(city) FROM any_nulls;
```
```response
┌─any(city)─┬─any_respect_nulls(city)─┐
│ Amsterdam │ ᴺᵁᴸᴸ │
└───────────┴─────────────────────────┘
```
**See Also**
- [any](../reference/any.md)

View File

@ -5,5 +5,35 @@ sidebar_position: 104
# anyLast
Selects the last value encountered.
The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.
Selects the last value encountered. The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.
**Syntax**
```sql
anyLast(column)
```
**Parameters**
- `column`: The column name.
**Returned value**
- The last value encountered.
**Example**
Query:
```sql
CREATE TABLE any_last_nulls (city Nullable(String)) ENGINE=Log;
INSERT INTO any_last_nulls (city) VALUES ('Amsterdam'),(NULL),('New York'),('Tokyo'),('Valencia'),(NULL);
SELECT anyLast(city) FROM any_last_nulls;
```
```response
┌─anyLast(city)─┐
│ Valencia │
└───────────────┘
```

View File

@ -0,0 +1,39 @@
---
slug: /en/sql-reference/aggregate-functions/reference/anylast_respect_nulls
sidebar_position: 104
---
# anyLast_respect_nulls
Selects the last value encountered, regardless of whether it is `NULL` or not.
**Syntax**
```sql
anyLast_respect_nulls(column)
```
**Parameters**
- `column`: The column name.
**Returned value**
- The last value encountered, regardless of whether it is `NULL` or not.
**Example**
Query:
```sql
CREATE TABLE any_last_nulls (city Nullable(String)) ENGINE=Log;
INSERT INTO any_last_nulls (city) VALUES ('Amsterdam'),(NULL),('New York'),('Tokyo'),('Valencia'),(NULL);
SELECT anyLast(city), anyLast_respect_nulls(city) FROM any_last_nulls;
```
```response
┌─anyLast(city)─┬─anyLast_respect_nulls(city)─┐
│ Valencia │ ᴺᵁᴸᴸ │
└───────────────┴─────────────────────────────┘
```

View File

@ -37,8 +37,10 @@ Standard aggregate functions:
ClickHouse-specific aggregate functions:
- [any](/docs/en/sql-reference/aggregate-functions/reference/any_respect_nulls.md)
- [anyHeavy](/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md)
- [anyLast](/docs/en/sql-reference/aggregate-functions/reference/anylast.md)
- [anyLast](/docs/en/sql-reference/aggregate-functions/reference/anylast_respect_nulls.md)
- [boundingRatio](/docs/en/sql-reference/aggregate-functions/reference/boundrat.md)
- [first_value](/docs/en/sql-reference/aggregate-functions/reference/first_value.md)
- [last_value](/docs/en/sql-reference/aggregate-functions/reference/last_value.md)
@ -97,6 +99,7 @@ ClickHouse-specific aggregate functions:
- [quantileBFloat16Weighted](/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md#quantilebfloat16weighted)
- [quantileDD](/docs/en/sql-reference/aggregate-functions/reference/quantileddsketch.md#quantileddsketch)
- [simpleLinearRegression](/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md)
- [singleValueOrNull](/docs/en/sql-reference/aggregate-functions/reference/singlevalueornull.md)
- [stochasticLinearRegression](/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md)
- [stochasticLogisticRegression](/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md)
- [categoricalInformationValue](/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md)

View File

@ -0,0 +1,57 @@
---
slug: /en/sql-reference/aggregate-functions/reference/singlevalueornull
sidebar_position: 220
---
# singleValueOrNull
The aggregate function `singleValueOrNull` is used to implement subquery operators, such as `x = ALL (SELECT ...)`. It checks if there is only one unique non-NULL value in the data.
If there is only one unique value, it returns it. If there are zero or at least two distinct values, it returns NULL.
**Syntax**
``` sql
singleValueOrNull(x)
```
**Parameters**
- `x` — Column of any [data type](../../data-types/index.md).
**Returned values**
- The unique value, if there is only one unique non-NULL value in `x`.
- `NULL`, if there are zero or at least two distinct values.
**Examples**
Query:
``` sql
CREATE TABLE test (x UInt8 NULL) ENGINE=Log;
INSERT INTO test (x) VALUES (NULL), (NULL), (5), (NULL), (NULL);
SELECT singleValueOrNull(x) FROM test;
```
Result:
```response
┌─singleValueOrNull(x)─┐
│ 5 │
└──────────────────────┘
```
Query:
```sql
INSERT INTO test (x) VALUES (10);
SELECT singleValueOrNull(x) FROM test;
```
Result:
```response
┌─singleValueOrNull(x)─┐
│ ᴺᵁᴸᴸ │
└──────────────────────┘
```

View File

@ -7,10 +7,11 @@ sidebar_label: Map(K, V)
# Map(K, V)
`Map(K, V)` data type stores `key:value` pairs.
The Map datatype is implemented as `Array(Tuple(key T1, value T2))`, which means that the order of keys in each map does not change, i.e., this data type maintains insertion order.
**Parameters**
- `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), [FixedString](../../sql-reference/data-types/fixedstring.md), [UUID](../../sql-reference/data-types/uuid.md), [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), [Date32](../../sql-reference/data-types/date32.md), [Enum](../../sql-reference/data-types/enum.md).
- `key` — The key part of the pair. Arbitrary type, except [Nullable](../../sql-reference/data-types/nullable.md) and [LowCardinality](../../sql-reference/data-types/lowcardinality.md) nested with [Nullable](../../sql-reference/data-types/nullable.md) types.
- `value` — The value part of the pair. Arbitrary type, including [Map](../../sql-reference/data-types/map.md) and [Array](../../sql-reference/data-types/array.md).
To get the value from a column `a` of type `Map('key', 'value')`, use the `a['key']` syntax. This lookup currently works with linear complexity.
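A minimal sketch of the bracket lookup (the table name is illustrative):

```sql
CREATE TABLE map_demo (m Map(String, UInt64)) ENGINE = Memory;
INSERT INTO map_demo VALUES (map('key1', 1, 'key2', 10));

-- scans the underlying key array linearly to find 'key2'
SELECT m['key2'] FROM map_demo;   -- returns 10
```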

View File

@ -23,8 +23,8 @@ The default UUID is all-zero. It is used, for example, when a new record is inse
00000000-0000-0000-0000-000000000000
```
Due to historical reasons, UUIDs are sorted by their second half (which is unintuitive).
UUIDs should therefore not be used in an primary key (or sorting key) of a table, or as partition key.
Due to historical reasons, UUIDs are sorted by their second half.
UUIDs should therefore not be used directly in a primary key, sorting key, or partition key of a table.
Example:
@ -43,18 +43,7 @@ Result:
│ 3fda7c49-282e-421a-85ab-c5684ef1d350 │
│ 16ab55a7-45f6-44a8-873c-7a0b44346b3e │
│ e3776711-6359-4f22-878d-bf290d052c85 │
│ 1be30226-57b2-4739-88ec-5e3d490090f2 │
│ f65853a9-4375-4f0e-8b96-906ff622ed3c │
│ d5a0c7a6-79c6-4107-8bb8-df85915edcb7 │
│ 258e6068-17d1-4a1a-8be3-ed2ceb21815c │
│ 04b0f6a9-1f7b-4a42-8bfc-62f37b8a32b8 │
│ 9924f0d9-9c16-43a9-8f08-0944ab495aed │
│ 6720dc14-4eab-4e3e-8f0c-10c4ae8d2673 │
│ 5ddadb52-0452-4f5d-9030-c3f969af93a4 │
│ [...] │
│ 2dde30e6-59a1-48f8-b260-eb37921185b6 │
│ d5402a1b-77b3-4897-b288-29edf5c3ed12 │
│ 01843939-3ba7-4fea-b2aa-45f9a6f1e057 │
│ 9eceda2f-6946-40e3-b725-16f2709ca41a │
│ 03644f74-47ba-4020-b865-be5fd4c8c7ff │
│ ce3bc93d-ab19-4c74-b8cc-737cb9212099 │
@ -63,6 +52,34 @@ Result:
└──────────────────────────────────────┘
```
As a workaround, the UUID can be converted to a type with an intuitive sort order.
Example using conversion to UInt128:
``` sql
CREATE TABLE tab (uuid UUID) ENGINE = Memory;
INSERT INTO tab SELECT generateUUIDv4() FROM numbers(50);
SELECT * FROM tab ORDER BY toUInt128(uuid);
```
Result:
```response
┌─uuid─────────────────────────────────┐
│ 018b81cd-aca1-4e9c-9e56-a84a074dc1a8 │
│ 02380033-c96a-438e-913f-a2c67e341def │
│ 057cf435-7044-456a-893b-9183a4475cea │
│ 0a3c1d4c-f57d-44cc-8567-60cb0c46f76e │
│ 0c15bf1c-8633-4414-a084-7017eead9e41 │
│ [...] │
│ f808cf05-ea57-4e81-8add-29a195bde63d │
│ f859fb5d-764b-4a33-81e6-9e4239dae083 │
│ fb1b7e37-ab7b-421a-910b-80e60e2bf9eb │
│ fc3174ff-517b-49b5-bfe2-9b369a5c506d │
│ fece9bf6-3832-449a-b058-cd1d70a02c8b │
└──────────────────────────────────────┘
```
## Generating UUIDs
ClickHouse provides the [generateUUIDv4](../../sql-reference/functions/uuid-functions.md) function to generate random UUID version 4 values.

View File

@ -234,3 +234,34 @@ SELECT least(toDateTime32(now() + toIntervalDay(1)), toDateTime64(now(), 3))
:::note
The type returned is a DateTime64 as the DateTime32 must be promoted to 64 bit for the comparison.
:::
## clamp
Constrains the return value between `min` and `max`.
**Syntax**
``` sql
clamp(value, min, max)
```
**Arguments**
- `value`: Input value.
- `min`: The lower bound.
- `max`: The upper bound.
**Returned values**
If the value is less than the minimum, the minimum is returned; if it is greater than the maximum, the maximum is returned; otherwise, the value itself is returned.
Examples:
```sql
SELECT clamp(1, 2, 3) result, toTypeName(result) type;
```
```response
┌─result─┬─type────┐
│ 2 │ Float64 │
└────────┴─────────┘
```

File diff suppressed because it is too large

View File

@ -12,6 +12,8 @@ Returns whether the argument is [NULL](../../sql-reference/syntax.md#null).
See also operator [`IS NULL`](../operators/index.md#is_null).
**Syntax**
``` sql
isNull(x)
```
@ -52,6 +54,45 @@ Result:
└───┘
```
## isNullable
Returns `1` if a column is [Nullable](../data-types/nullable.md) (i.e. allows `NULL` values), `0` otherwise.
**Syntax**
``` sql
isNullable(x)
```
**Arguments**
- `x` — column.
**Returned value**
- `1` if `x` allows `NULL` values. [UInt8](../data-types/int-uint.md).
- `0` if `x` does not allow `NULL` values. [UInt8](../data-types/int-uint.md).
**Example**
Query:
``` sql
CREATE TABLE tab (ordinary_col UInt32, nullable_col Nullable(UInt32)) ENGINE = Log;
INSERT INTO tab (ordinary_col, nullable_col) VALUES (1,1), (2, 2), (3,3);
SELECT isNullable(ordinary_col), isNullable(nullable_col) FROM tab;
```
Result:
``` text
   ┌─isNullable(ordinary_col)─┬─isNullable(nullable_col)─┐
1. │                        0 │                        1 │
2. │                        0 │                        1 │
3. │                        0 │                        1 │
   └──────────────────────────┴──────────────────────────┘
```
## isNotNull
Returns whether the argument is not [NULL](../../sql-reference/syntax.md#null-literal).
@ -96,6 +137,36 @@ Result:
└───┘
```
## isNotDistinctFrom
Performs a null-safe comparison. Used to compare `JOIN` keys that contain `NULL` values in the `JOIN ON` section.
This function will consider two `NULL` values as identical and will return `true`, which is distinct from the usual
equals behavior where comparing two `NULL` values would return `NULL`.
:::note
This function is an internal function used by the implementation of JOIN ON. Please do not use it manually in queries.
:::
**Syntax**
``` sql
isNotDistinctFrom(x, y)
```
**Arguments**
- `x` — first JOIN key.
- `y` — second JOIN key.
**Returned value**
- `true` when `x` and `y` are equal, including when both are `NULL`.
- `false` otherwise.
**Example**
For a complete example see: [NULL values in JOIN keys](../../sql-reference/statements/select/join#null-values-in-join-keys).
## isZeroOrNull
Returns whether the argument is 0 (zero) or [NULL](../../sql-reference/syntax.md#null-literal).

View File

@ -792,6 +792,39 @@ Result:
│ -1 │
└──────────┘
```
## sigmoid
Returns the value of the [sigmoid function](https://en.wikipedia.org/wiki/Sigmoid_function).
**Syntax**
```sql
sigmoid(x)
```
**Parameters**
- `x` — input value. Values from the interval: `-∞ < x < +∞`. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
**Returned value**
- Corresponding value along the sigmoid curve between 0 and 1. [Float64](../../sql-reference/data-types/float.md).
**Example**
Query:
``` sql
SELECT round(sigmoid(x), 5) FROM (SELECT arrayJoin([-1, 0, 1]) AS x);
```
Result:
```result
0.26894
0.5
0.73106
```
## degrees

View File

@ -556,7 +556,7 @@ hasColumnInTable(\[hostname\[, username\[, password\]\],\]
**Parameters**
- `database` : name of the database. [String literal](../syntax#syntax-string-literal)
- `table` : name of the table. [String literal](../syntax#syntax-string-literal)
- `column` : name of the column. [String literal](../syntax#syntax-string-literal)
- `hostname` : remote server name to perform the check on. [String literal](../syntax#syntax-string-literal)
- `username` : username for remote server. [String literal](../syntax#syntax-string-literal)
@ -565,7 +565,7 @@ hasColumnInTable(\[hostname\[, username\[, password\]\],\]
**Returned value**
- `1` if the given column exists.
- `0`, otherwise.
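For illustration, a minimal local check against a system table (any existing table would do):
``` sql
SELECT hasColumnInTable('system', 'one', 'dummy');
```
This returns `1`, since the `system.one` table always contains the `dummy` column.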
**Implementation details**
@ -1024,6 +1024,7 @@ The result of the function depends on the affected data blocks and the order of
:::note
Only returns neighbor inside the currently processed data block.
Because of this error-prone behavior the function is DEPRECATED; please use proper window functions instead.
:::
The order of rows during calculation of `neighbor()` can differ from the order of rows returned to the user.
@ -1134,6 +1135,7 @@ Returns 0 for the first row, and for subsequent rows the difference to the previ
:::note
Only returns differences inside the currently processed data block.
Because of this error-prone behavior the function is DEPRECATED; please use proper window functions instead.
:::
The result of the function depends on the affected data blocks and the order of data in the block.
@ -1207,6 +1209,10 @@ WHERE diff != 1
## runningDifferenceStartingWithFirstValue
:::note
This function is DEPRECATED (see the note for `runningDifference`).
:::
Same as [runningDifference](./other-functions.md#other_functions-runningdifference), but returns the value of the first row itself on the first row instead of `0`.
## runningConcurrency
@ -1930,6 +1936,7 @@ Accumulates the states of an aggregate function for each row of a data block.
:::note
The state is reset for each new block of data.
Because of this error-prone behavior the function is DEPRECATED; please use proper window functions instead.
:::
**Syntax**
@ -2444,7 +2451,7 @@ Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-refere
## defaultRoles
Returns the roles which are enabled by default for the current user when he logs in. Initially these are all roles granted to the current user (see [GRANT](../../sql-reference/statements/grant.md#grant-select)), but that can be changed with the [SET DEFAULT ROLE](../../sql-reference/statements/set-role.md#set-default-role-statement) statement.
Returns the roles which are enabled by default for the current user when he logs in. Initially these are all roles granted to the current user (see [GRANT](../../sql-reference/statements/grant.md#select)), but that can be changed with the [SET DEFAULT ROLE](../../sql-reference/statements/set-role.md#set-default-role-statement) statement.
**Syntax**
@ -3294,3 +3301,31 @@ The setting is not enabled by default for security reasons, because some headers
HTTP headers are case sensitive for this function.
If the function is used in the context of a distributed query, it returns non-empty result only on the initiator node.
## showCertificate
Shows information about the current server's Secure Sockets Layer (SSL) certificate if it has been configured. See [Configuring SSL-TLS](https://clickhouse.com/docs/en/guides/sre/configuring-ssl) for more information on how to configure ClickHouse to use OpenSSL certificates to validate connections.
**Syntax**
```sql
showCertificate()
```
**Returned value**
- Map of key-value pairs relating to the configured SSL certificate. [Map](../../sql-reference/data-types/map.md)([String](../../sql-reference/data-types/string.md), [String](../../sql-reference/data-types/string.md)).
**Example**
Query:
```sql
SELECT showCertificate() FORMAT LineAsString;
```
Result:
```response
{'version':'1','serial_number':'2D9071D64530052D48308473922C7ADAFA85D6C5','signature_algo':'sha256WithRSAEncryption','issuer':'/CN=marsnet.local CA','not_before':'May 7 17:01:21 2024 GMT','not_after':'May 7 17:01:21 2025 GMT','subject':'/CN=chnode1','pkey_algo':'rsaEncryption'}
```

View File

@ -1058,7 +1058,7 @@ convertCharset(s, from, to)
## base58Encode
Encodes a String using [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) in the "Bitcoin" alphabet.
Encodes a String using [Base58](https://datatracker.ietf.org/doc/html/draft-msporny-base58) in the "Bitcoin" alphabet.
**Syntax**
@ -1092,7 +1092,7 @@ Result:
## base58Decode
Accepts a String and decodes it using [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme using "Bitcoin" alphabet.
Accepts a String and decodes it using the [Base58](https://datatracker.ietf.org/doc/html/draft-msporny-base58) encoding scheme with the "Bitcoin" alphabet.
**Syntax**

View File

@ -16,7 +16,7 @@ map(key1, value1[, key2, value2, ...])
**Arguments**
- `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), [FixedString](../../sql-reference/data-types/fixedstring.md), [UUID](../../sql-reference/data-types/uuid.md), [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), [Date32](../../sql-reference/data-types/date32.md), [Enum](../../sql-reference/data-types/enum.md).
- `key` — The key part of the pair. Arbitrary type, except [Nullable](../../sql-reference/data-types/nullable.md) and [LowCardinality](../../sql-reference/data-types/lowcardinality.md) nested with [Nullable](../../sql-reference/data-types/nullable.md).
- `value` — The value part of the pair. Arbitrary type, including [Map](../../sql-reference/data-types/map.md) and [Array](../../sql-reference/data-types/array.md).
**Returned value**

View File

@ -20,11 +20,11 @@ ALTER USER [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1]
[SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY | WRITABLE] | PROFILE 'profile_name'] [,...]
```
To use `ALTER USER` you must have the [ALTER USER](../../../sql-reference/statements/grant.md#grant-access-management) privilege.
To use `ALTER USER` you must have the [ALTER USER](../../../sql-reference/statements/grant.md#access-management) privilege.
## GRANTEES Clause
Specifies users or roles which are allowed to receive [privileges](../../../sql-reference/statements/grant.md#grant-privileges) from this user on the condition this user has also all required access granted with [GRANT OPTION](../../../sql-reference/statements/grant.md#grant-privigele-syntax). Options of the `GRANTEES` clause:
Specifies users or roles which are allowed to receive [privileges](../../../sql-reference/statements/grant.md#privileges) from this user, provided this user also has all the required access granted with [GRANT OPTION](../../../sql-reference/statements/grant.md#granting-privilege-syntax). Options of the `GRANTEES` clause:
- `user` — Specifies a user this user can grant privileges to.
- `role` — Specifies a role this user can grant privileges to.
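For example, a minimal sketch with illustrative user names:
``` sql
ALTER USER john GRANTEES jack;
```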

View File

@ -177,7 +177,7 @@ Examples:
## GRANTEES Clause
Specifies users or roles which are allowed to receive [privileges](../../../sql-reference/statements/grant.md#grant-privileges) from this user on the condition this user has also all required access granted with [GRANT OPTION](../../../sql-reference/statements/grant.md#grant-privigele-syntax). Options of the `GRANTEES` clause:
Specifies users or roles which are allowed to receive [privileges](../../../sql-reference/statements/grant.md#privileges) from this user, provided this user also has all the required access granted with [GRANT OPTION](../../../sql-reference/statements/grant.md#granting-privilege-syntax). Options of the `GRANTEES` clause:
- `user` — Specifies a user this user can grant privileges to.
- `role` — Specifies a role this user can grant privileges to.

View File

@ -6,7 +6,7 @@ sidebar_label: GRANT
# GRANT Statement
- Grants [privileges](#grant-privileges) to ClickHouse user accounts or roles.
- Grants [privileges](#privileges) to ClickHouse user accounts or roles.
- Assigns roles to user accounts or to the other roles.
To revoke privileges, use the [REVOKE](../../sql-reference/statements/revoke.md) statement. You can also list granted privileges with the [SHOW GRANTS](../../sql-reference/statements/show.md#show-grants) statement.
@ -82,9 +82,9 @@ Privileges have a hierarchical structure. A set of permitted queries depends on
Hierarchy of privileges:
- [SELECT](#grant-select)
- [INSERT](#grant-insert)
- [ALTER](#grant-alter)
- [SELECT](#select)
- [INSERT](#insert)
- [ALTER](#alter)
- `ALTER TABLE`
- `ALTER UPDATE`
- `ALTER DELETE`
@ -115,7 +115,7 @@ Hierarchy of privileges:
- `ALTER VIEW REFRESH`
- `ALTER VIEW MODIFY QUERY`
- `ALTER VIEW MODIFY SQL SECURITY`
- [CREATE](#grant-create)
- [CREATE](#create)
- `CREATE DATABASE`
- `CREATE TABLE`
- `CREATE ARBITRARY TEMPORARY TABLE`
@ -123,21 +123,21 @@ Hierarchy of privileges:
- `CREATE VIEW`
- `CREATE DICTIONARY`
- `CREATE FUNCTION`
- [DROP](#grant-drop)
- [DROP](#drop)
- `DROP DATABASE`
- `DROP TABLE`
- `DROP VIEW`
- `DROP DICTIONARY`
- `DROP FUNCTION`
- [TRUNCATE](#grant-truncate)
- [OPTIMIZE](#grant-optimize)
- [SHOW](#grant-show)
- [TRUNCATE](#truncate)
- [OPTIMIZE](#optimize)
- [SHOW](#show)
- `SHOW DATABASES`
- `SHOW TABLES`
- `SHOW COLUMNS`
- `SHOW DICTIONARIES`
- [KILL QUERY](#grant-kill-query)
- [ACCESS MANAGEMENT](#grant-access-management)
- [KILL QUERY](#kill-query)
- [ACCESS MANAGEMENT](#access-management)
- `CREATE USER`
- `ALTER USER`
- `DROP USER`
@ -160,7 +160,7 @@ Hierarchy of privileges:
- `SHOW_QUOTAS`
- `SHOW_SETTINGS_PROFILES`
- `ROLE ADMIN`
- [SYSTEM](#grant-system)
- [SYSTEM](#system)
- `SYSTEM SHUTDOWN`
- `SYSTEM DROP CACHE`
- `SYSTEM DROP DNS CACHE`
@ -186,12 +186,12 @@ Hierarchy of privileges:
- `SYSTEM FLUSH DISTRIBUTED`
- `SYSTEM FLUSH LOGS`
- `CLUSTER` (see also `access_control_improvements.on_cluster_queries_require_cluster_grant` configuration directive)
- [INTROSPECTION](#grant-introspection)
- [INTROSPECTION](#introspection)
- `addressToLine`
- `addressToLineWithInlines`
- `addressToSymbol`
- `demangle`
- [SOURCES](#grant-sources)
- [SOURCES](#sources)
- `FILE`
- `URL`
- `REMOTE`
@ -200,16 +200,16 @@ Hierarchy of privileges:
- `JDBC`
- `HDFS`
- `S3`
- [dictGet](#grant-dictget)
- [displaySecretsInShowAndSelect](#grant-display-secrets)
- [NAMED COLLECTION ADMIN](#grant-named-collection-admin)
- [dictGet](#dictget)
- [displaySecretsInShowAndSelect](#display-secrets)
- [NAMED COLLECTION ADMIN](#named-collection-admin)
- `CREATE NAMED COLLECTION`
- `DROP NAMED COLLECTION`
- `ALTER NAMED COLLECTION`
- `SHOW NAMED COLLECTIONS`
- `SHOW NAMED COLLECTIONS SECRETS`
- `NAMED COLLECTION`
- [TABLE ENGINE](#grant-table-engine)
- [TABLE ENGINE](#table-engine)
Examples of how this hierarchy is treated:
@ -238,11 +238,11 @@ Examples of disallowed syntax:
- `GRANT CREATE USER(x) ON db.table TO user`
- `GRANT CREATE USER ON db.* TO user`
The special privilege [ALL](#grant-all) grants all the privileges to a user account or a role.
The special privilege [ALL](#all) grants all the privileges to a user account or a role.
By default, a user account or a role has no privileges.
If a user or a role has no privileges, it is displayed as [NONE](#grant-none) privilege.
If a user or a role has no privileges, it is displayed as [NONE](#none) privilege.
Some queries require a set of privileges by their implementation. For example, to execute the [RENAME](../../sql-reference/statements/rename.md) query you need the following privileges: `SELECT`, `CREATE TABLE`, `INSERT` and `DROP TABLE`.
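For instance, granting exactly that set on one database could look like the following sketch (the database and user names are illustrative):
``` sql
GRANT SELECT, CREATE TABLE, INSERT, DROP TABLE ON db.* TO john;
```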
@ -326,8 +326,8 @@ Examples of how this hierarchy is treated:
**Notes**
- The `MODIFY SETTING` privilege allows modifying table engine settings. It does not affect settings or server configuration parameters.
- The `ATTACH` operation needs the [CREATE](#grant-create) privilege.
- The `DETACH` operation needs the [DROP](#grant-drop) privilege.
- The `ATTACH` operation needs the [CREATE](#create) privilege.
- The `DETACH` operation needs the [DROP](#drop) privilege.
- To stop mutation by the [KILL MUTATION](../../sql-reference/statements/kill.md#kill-mutation) query, you need to have a privilege to start this mutation. For example, if you want to stop the `ALTER UPDATE` query, you need the `ALTER UPDATE`, `ALTER TABLE`, or `ALTER` privilege.
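As an illustration, granting just a slice of the `ALTER` hierarchy might look like this sketch (the table and user names are illustrative):
``` sql
GRANT ALTER UPDATE, ALTER DELETE ON db.table TO john;
```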
### CREATE
@ -344,7 +344,7 @@ Allows executing [CREATE](../../sql-reference/statements/create/index.md) and [A
**Notes**
- To delete the created table, a user needs [DROP](#grant-drop).
- To delete the created table, a user needs [DROP](#drop).
### DROP
@ -498,7 +498,7 @@ Privilege level: `DICTIONARY`.
- `GRANT dictGet ON mydictionary TO john`
### displaySecretsInShowAndSelect {#grant-display-secrets}
### displaySecretsInShowAndSelect {#display-secrets}
Allows a user to view secrets in `SHOW` and `SELECT` queries if both
[`display_secrets_in_show_and_select` server setting](../../operations/server-configuration-parameters/settings#display_secrets_in_show_and_select)

View File

@ -151,6 +151,14 @@ Result:
Query with `INNER` type of a join and conditions with `OR` and `AND`:
:::note
By default, non-equal conditions are supported as long as they use columns from the same table.
For example, `t1.a = t2.key AND t1.b > 0 AND t2.b > t2.c` is supported, because `t1.b > 0` uses columns only from `t1` and `t2.b > t2.c` uses columns only from `t2`.
However, you can try experimental support for conditions like `t1.a = t2.key AND t1.b > t2.key`; see the section below for more details.
:::
``` sql
SELECT a, b, val FROM t1 INNER JOIN t2 ON t1.a = t2.key OR t1.b = t2.key AND t2.val > 3;
```
@ -165,7 +173,7 @@ Result:
└───┴────┴─────┘
```
## [experimental] Join with inequality conditions
## [experimental] Join with inequality conditions for columns from different tables
:::note
This feature is experimental. To use it, set `allow_experimental_join_condition` to 1 in your configuration files or by using the `SET` command:
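``` sql
SET allow_experimental_join_condition = 1;
```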

View File

@ -5,21 +5,21 @@ sidebar_label: WITH
# WITH Clause
ClickHouse supports Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)) and substitutes the code defined in the `WITH` clause in all places of use for the rest of `SELECT` query. Named subqueries can be included to the current and child query context in places where table objects are allowed. Recursion is prevented by hiding the current level CTEs from the WITH expression.
Please note that CTEs do not guarantee the same results in all places they are referenced, because the query is re-executed at each point of use.
An example of such behavior is shown below.
``` sql
with cte_numbers as
(
select
num
from generateRandom('num UInt64', NULL)
limit 1000000
)
select
count()
from cte_numbers
where num in (select num from cte_numbers)
```
@ -87,3 +87,226 @@ LIMIT 10;
WITH test1 AS (SELECT i + 1, j + 1 FROM test1)
SELECT * FROM test1;
```
## Recursive Queries
The optional `RECURSIVE` modifier allows a `WITH` query to refer to its own output.
**Example:** Sum integers from 1 through 100
```sql
WITH RECURSIVE test_table AS (
SELECT 1 AS number
UNION ALL
SELECT number + 1 FROM test_table WHERE number < 100
)
SELECT sum(number) FROM test_table;
```
``` text
┌─sum(number)─┐
│ 5050 │
└─────────────┘
```
The general form of a recursive `WITH` query is always a non-recursive term, then `UNION ALL`, then a recursive term, where only the recursive term can contain a reference to the query's own output. A recursive CTE query is executed as follows:
1. Evaluate the non-recursive term. Place the result of the non-recursive term query in a temporary working table.
2. As long as the working table is not empty, repeat these steps:
   1. Evaluate the recursive term, substituting the current contents of the working table for the recursive self-reference. Place the result of the recursive term query in a temporary intermediate table.
   2. Replace the contents of the working table with the contents of the intermediate table, then empty the intermediate table.
Recursive queries are typically used to work with hierarchical or tree-structured data. For example, we can write a query that performs tree traversal:
**Example:** Tree traversal
First, let's create a tree table:
```sql
DROP TABLE IF EXISTS tree;
CREATE TABLE tree
(
id UInt64,
parent_id Nullable(UInt64),
data String
) ENGINE = MergeTree ORDER BY id;
INSERT INTO tree VALUES (0, NULL, 'ROOT'), (1, 0, 'Child_1'), (2, 0, 'Child_2'), (3, 1, 'Child_1_1');
```
We can traverse that tree with the following query:
```sql
WITH RECURSIVE search_tree AS (
SELECT id, parent_id, data
FROM tree t
WHERE t.id = 0
UNION ALL
SELECT t.id, t.parent_id, t.data
FROM tree t, search_tree st
WHERE t.parent_id = st.id
)
SELECT * FROM search_tree;
```
```text
┌─id─┬─parent_id─┬─data──────┐
│ 0 │ ᴺᵁᴸᴸ │ ROOT │
│ 1 │ 0 │ Child_1 │
│ 2 │ 0 │ Child_2 │
│ 3 │ 1 │ Child_1_1 │
└────┴───────────┴───────────┘
```
### Search order
To create a depth-first order, we compute for each result row an array of rows that we have already visited:
**Example:** Tree traversal depth-first order
```sql
WITH RECURSIVE search_tree AS (
SELECT id, parent_id, data, [t.id] AS path
FROM tree t
WHERE t.id = 0
UNION ALL
SELECT t.id, t.parent_id, t.data, arrayConcat(path, [t.id])
FROM tree t, search_tree st
WHERE t.parent_id = st.id
)
SELECT * FROM search_tree ORDER BY path;
```
```text
┌─id─┬─parent_id─┬─data──────┬─path────┐
│ 0 │ ᴺᵁᴸᴸ │ ROOT │ [0] │
│ 1 │ 0 │ Child_1 │ [0,1] │
│ 3 │ 1 │ Child_1_1 │ [0,1,3] │
│ 2 │ 0 │ Child_2 │ [0,2] │
└────┴───────────┴───────────┴─────────┘
```
To create a breadth-first order, the standard approach is to add a column that tracks the depth of the search:
**Example:** Tree traversal breadth-first order
```sql
WITH RECURSIVE search_tree AS (
SELECT id, parent_id, data, [t.id] AS path, toUInt64(0) AS depth
FROM tree t
WHERE t.id = 0
UNION ALL
SELECT t.id, t.parent_id, t.data, arrayConcat(path, [t.id]), depth + 1
FROM tree t, search_tree st
WHERE t.parent_id = st.id
)
SELECT * FROM search_tree ORDER BY depth;
```
```text
┌─id─┬─parent_id─┬─data──────┬─path────┬─depth─┐
│  0 │      ᴺᵁᴸᴸ │ ROOT      │ [0]     │     0 │
│  1 │         0 │ Child_1   │ [0,1]   │     1 │
│  2 │         0 │ Child_2   │ [0,2]   │     1 │
│  3 │         1 │ Child_1_1 │ [0,1,3] │     2 │
└────┴───────────┴───────────┴─────────┴───────┘
```
### Cycle detection
First, let's create a graph table:
```sql
DROP TABLE IF EXISTS graph;
CREATE TABLE graph
(
from UInt64,
to UInt64,
label String
) ENGINE = MergeTree ORDER BY (from, to);
INSERT INTO graph VALUES (1, 2, '1 -> 2'), (1, 3, '1 -> 3'), (2, 3, '2 -> 3'), (1, 4, '1 -> 4'), (4, 5, '4 -> 5');
```
We can traverse that graph with the following query:
**Example:** Graph traversal without cycle detection
```sql
WITH RECURSIVE search_graph AS (
SELECT from, to, label FROM graph g
UNION ALL
SELECT g.from, g.to, g.label
FROM graph g, search_graph sg
WHERE g.from = sg.to
)
SELECT DISTINCT * FROM search_graph ORDER BY from;
```
```text
┌─from─┬─to─┬─label──┐
│ 1 │ 4 │ 1 -> 4 │
│ 1 │ 2 │ 1 -> 2 │
│ 1 │ 3 │ 1 -> 3 │
│ 2 │ 3 │ 2 -> 3 │
│ 4 │ 5 │ 4 -> 5 │
└──────┴────┴────────┘
```
But if we add a cycle to that graph, the previous query will fail with a `Maximum recursive CTE evaluation depth` error:
```sql
INSERT INTO graph VALUES (5, 1, '5 -> 1');
WITH RECURSIVE search_graph AS (
SELECT from, to, label FROM graph g
UNION ALL
SELECT g.from, g.to, g.label
FROM graph g, search_graph sg
WHERE g.from = sg.to
)
SELECT DISTINCT * FROM search_graph ORDER BY from;
```
```text
Code: 306. DB::Exception: Received from localhost:9000. DB::Exception: Maximum recursive CTE evaluation depth (1000) exceeded, during evaluation of search_graph AS (SELECT from, to, label FROM graph AS g UNION ALL SELECT g.from, g.to, g.label FROM graph AS g, search_graph AS sg WHERE g.from = sg.to). Consider raising max_recursive_cte_evaluation_depth setting.: While executing RecursiveCTESource. (TOO_DEEP_RECURSION)
```
The standard method for handling cycles is to compute an array of the already visited nodes:
**Example:** Graph traversal with cycle detection
```sql
WITH RECURSIVE search_graph AS (
SELECT from, to, label, false AS is_cycle, [tuple(g.from, g.to)] AS path FROM graph g
UNION ALL
SELECT g.from, g.to, g.label, has(path, tuple(g.from, g.to)), arrayConcat(sg.path, [tuple(g.from, g.to)])
FROM graph g, search_graph sg
WHERE g.from = sg.to AND NOT is_cycle
)
SELECT * FROM search_graph WHERE is_cycle ORDER BY from;
```
```text
┌─from─┬─to─┬─label──┬─is_cycle─┬─path──────────────────────┐
│ 1 │ 4 │ 1 -> 4 │ true │ [(1,4),(4,5),(5,1),(1,4)] │
│ 4 │ 5 │ 4 -> 5 │ true │ [(4,5),(5,1),(1,4),(4,5)] │
│ 5 │ 1 │ 5 -> 1 │ true │ [(5,1),(1,4),(4,5),(5,1)] │
└──────┴────┴────────┴──────────┴───────────────────────────┘
```
### Infinite queries
It is also possible to use infinite recursive CTE queries if `LIMIT` is used in the outer query:
**Example:** Infinite recursive CTE query
```sql
WITH RECURSIVE test_table AS (
SELECT 1 AS number
UNION ALL
SELECT number + 1 FROM test_table
)
SELECT sum(number) FROM (SELECT number FROM test_table LIMIT 100);
```
```text
┌─sum(number)─┐
│ 5050 │
└─────────────┘
```

View File

@ -11,7 +11,7 @@ N.B. `SHOW CREATE (TABLE|DATABASE|USER)` hides secrets unless
is turned on,
[`format_display_secrets_in_show_and_select` format setting](../../operations/settings/formats#format_display_secrets_in_show_and_select)
is turned on and user has
[`displaySecretsInShowAndSelect`](grant.md#grant-display-secrets) privilege.
[`displaySecretsInShowAndSelect`](grant.md#display-secrets) privilege.
## SHOW CREATE TABLE | DICTIONARY | VIEW | DATABASE
@ -466,7 +466,7 @@ SHOW [CURRENT] QUOTA
```
## SHOW ACCESS
Shows all [users](../../guides/sre/user-management/index.md#user-account-management), [roles](../../guides/sre/user-management/index.md#role-management), [profiles](../../guides/sre/user-management/index.md#settings-profiles-management), etc. and all their [grants](../../sql-reference/statements/grant.md#grant-privileges).
Shows all [users](../../guides/sre/user-management/index.md#user-account-management), [roles](../../guides/sre/user-management/index.md#role-management), [profiles](../../guides/sre/user-management/index.md#settings-profiles-management), etc. and all their [grants](../../sql-reference/statements/grant.md#privileges).
**Syntax**

View File

@ -25,7 +25,7 @@ If the `alter_sync` is set to `2` and some replicas are not active for more than
## TRUNCATE ALL TABLES
``` sql
TRUNCATE ALL TABLES [IF EXISTS] db [ON CLUSTER cluster]
TRUNCATE ALL TABLES FROM [IF EXISTS] db [ON CLUSTER cluster]
```
Removes all data from all tables in a database.
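A minimal usage sketch (the database name is illustrative):
``` sql
TRUNCATE ALL TABLES FROM IF EXISTS my_database;
```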

View File

@ -1,5 +1,5 @@
---
slug: /en/operations/utilities/backupview
slug: /ru/operations/utilities/backupview
title: clickhouse_backupview
---

View File

@ -0,0 +1,311 @@
---
slug: /ru/sql-reference/functions/null-functions
sidebar_position: 63
sidebar_label: "Functions for Working with Nullable Arguments"
---
# Functions for Working with Nullable Arguments {#funktsii-dlia-raboty-s-nullable-argumentami}
## isNull {#isnull}
Checks whether the argument is [NULL](../../sql-reference/syntax.md#null-literal).
``` sql
isNull(x)
```
Alias: `ISNULL`.
**Arguments**
- `x` — a value of a non-compound data type.
**Returned value**
- `1` if `x` is `NULL`.
- `0` if `x` is not `NULL`.
**Example**
Input table
``` text
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 2 │    3 │
└───┴──────┘
```
Query
``` sql
SELECT x FROM t_null WHERE isNull(y);
```
``` text
┌─x─┐
│ 1 │
└───┘
```
## isNotNull {#isnotnull}
Checks whether the argument is not [NULL](../../sql-reference/syntax.md#null-literal).
``` sql
isNotNull(x)
```
**Arguments**
- `x` — a value of a non-compound data type.
**Returned value**
- `0` if `x` is `NULL`.
- `1` if `x` is not `NULL`.
**Example**
Input table
``` text
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 2 │    3 │
└───┴──────┘
```
Query
``` sql
SELECT x FROM t_null WHERE isNotNull(y);
```
``` text
┌─x─┐
│ 2 │
└───┘
```
## coalesce {#coalesce}
Checks the arguments from left to right for `NULL` and returns the first non-`NULL` argument.
``` sql
coalesce(x,...)
```
**Arguments**
- Any number of parameters of a non-compound type. All parameters must be compatible by data type.
**Returned values**
- The first non-`NULL` argument.
- `NULL`, if all arguments are `NULL`.
**Example**
Consider an address book that may list several ways to contact a customer.
``` text
┌─name─────┬─mail─┬─phone─────┬──icq─┐
│ client 1 │ ᴺᵁᴸᴸ │ 123-45-67 │  123 │
│ client 2 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ      │ ᴺᵁᴸᴸ │
└──────────┴──────┴───────────┴──────┘
```
The `mail` and `phone` fields are of type String, while the `icq` field is `UInt32`, so it needs to be converted to `String`.
Let's get the first available way to contact the customer from the address book:
``` sql
SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook;
```
``` text
┌─name─────┬─coalesce(mail, phone, CAST(icq, 'Nullable(String)'))─┐
│ client 1 │ 123-45-67                                            │
│ client 2 │ ᴺᵁᴸᴸ                                                 │
└──────────┴──────────────────────────────────────────────────────┘
```
## ifNull {#ifnull}
Returns an alternative value if the main argument is `NULL`.
``` sql
ifNull(x,alt)
```
**Arguments**
- `x` — the value to check for `NULL`.
- `alt` — the value the function returns if `x` is `NULL`.
**Returned values**
- The value `x`, if `x` is not `NULL`.
- The value `alt`, if `x` is `NULL`.
**Example**
``` sql
SELECT ifNull('a', 'b');
```
``` text
┌─ifNull('a', 'b')─┐
│ a                │
└──────────────────┘
```
``` sql
SELECT ifNull(NULL, 'b');
```
``` text
┌─ifNull(NULL, 'b')─┐
│ b                 │
└───────────────────┘
```
## nullIf {#nullif}
Returns `NULL` if the arguments are equal.
``` sql
nullIf(x, y)
```
**Arguments**
`x`, `y` — the values to compare. They must be of compatible types, otherwise ClickHouse throws an exception.
**Returned values**
- `NULL`, if the arguments are equal.
- The value `x`, if the arguments are not equal.
**Example**
``` sql
SELECT nullIf(1, 1);
```
``` text
┌─nullIf(1, 1)─┐
│         ᴺᵁᴸᴸ │
└──────────────┘
```
``` sql
SELECT nullIf(1, 2);
```
``` text
┌─nullIf(1, 2)─┐
│            1 │
└──────────────┘
```
## assumeNotNull {#assumenotnull}
Converts a value of the [Nullable](../../sql-reference/functions/functions-for-nulls.md) type to a non-`Nullable` value, if the value is not `NULL`.
``` sql
assumeNotNull(x)
```
**Arguments**
- `x` — the original value.
**Returned values**
- The original value with a non-`Nullable` type, if it is not `NULL`.
- An unspecified, implementation-dependent result, if the original value is `NULL`.
**Example**
Consider the `t_null` table.
``` sql
SHOW CREATE TABLE t_null;
```
``` text
┌─statement─────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_null ( x Int8,  y Nullable(Int8)) ENGINE = TinyLog │
└───────────────────────────────────────────────────────────────────────────┘
```
``` text
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 2 │    3 │
└───┴──────┘
```
Apply the `assumeNotNull` function to the `y` column.
``` sql
SELECT assumeNotNull(y) FROM t_null;
```
``` text
┌─assumeNotNull(y)─┐
│                0 │
│                3 │
└──────────────────┘
```
``` sql
SELECT toTypeName(assumeNotNull(y)) FROM t_null;
```
``` text
┌─toTypeName(assumeNotNull(y))─┐
│ Int8                         │
│ Int8                         │
└──────────────────────────────┘
```
## toNullable {#tonullable}
Converts the argument type to `Nullable`.
``` sql
toNullable(x)
```
**Arguments**
- `x` — a value of any non-compound type.
**Returned value**
- The input value, but of a `Nullable` type.
**Example**
``` sql
SELECT toTypeName(10);
```
``` text
┌─toTypeName(10)─┐
│ UInt8          │
└────────────────┘
```
``` sql
SELECT toTypeName(toNullable(10));
```
``` text
┌─toTypeName(toNullable(10))─┐
│ Nullable(UInt8)            │
└────────────────────────────┘
```
Some files were not shown because too many files have changed in this diff.