Merge branch 'master' into ci-fuzzer-enable

Yakov Olkhovskiy 2024-07-14 05:31:56 +00:00
commit 698b5ce7fa
744 changed files with 24179 additions and 10832 deletions

View File

@ -7,3 +7,4 @@ self-hosted-runner:
- stress-tester
- style-checker
- style-checker-aarch64
- release-maker

View File

@ -6,8 +6,8 @@ concurrency:
'on':
workflow_dispatch:
inputs:
sha:
description: 'The SHA hash of the commit from which to create the release'
ref:
description: 'Git reference (branch or commit sha) from which to create the release'
required: true
type: string
type:
@ -15,15 +15,152 @@ concurrency:
required: true
type: choice
options:
- new
- patch
- new
dry-run:
description: 'Dry run'
required: false
default: true
type: boolean
jobs:
Release:
runs-on: [self-hosted, style-checker-aarch64]
CreateRelease:
env:
GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
runs-on: [self-hosted, release-maker]
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Set envs
# https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
run: |
cat >> "$GITHUB_ENV" << 'EOF'
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
RELEASE_INFO_FILE=${{ runner.temp }}/release_info.json
EOF
- name: Check out repository code
uses: ClickHouse/checkout@v1
- name: Print greeting
with:
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
fetch-depth: 0
- name: Prepare Release Info
run: |
python3 ./tests/ci/release.py --commit ${{ inputs.sha }} --type ${{ inputs.type }} --dry-run
python3 ./tests/ci/create_release.py --prepare-release-info \
--ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
--outfile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
echo "::group::Release Info"
python3 -m json.tool "$RELEASE_INFO_FILE"
echo "::endgroup::"
release_tag=$(jq -r '.release_tag' "$RELEASE_INFO_FILE")
commit_sha=$(jq -r '.commit_sha' "$RELEASE_INFO_FILE")
echo "Release Tag: $release_tag"
echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
- name: Download All Release Artifacts
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/create_release.py --infile "$RELEASE_INFO_FILE" --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Push Git Tag for the Release
run: |
python3 ./tests/ci/create_release.py --push-release-tag --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Push New Release Branch
if: ${{ inputs.type == 'new' }}
run: |
python3 ./tests/ci/create_release.py --push-new-release-branch --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Bump CH Version and Update Contributors' List
run: |
python3 ./tests/ci/create_release.py --create-bump-version-pr --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Checkout master
run: |
git checkout master
- name: Bump Docker versions, Changelog, Security
if: ${{ inputs.type == 'patch' }}
run: |
[ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1
echo "List versions"
./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
echo "Update docker version"
./utils/list-versions/update-docker-version.sh
echo "Generate ChangeLog"
export CI=1
docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
--volume=".:/ClickHouse" clickhouse/style-test \
/ClickHouse/tests/ci/changelog.py -v --debug-helpers \
--gh-user-or-token="$GH_TOKEN" --jobs=5 \
--output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
echo "Generate Security"
python3 ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
- name: Generate ChangeLog
if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
uses: peter-evans/create-pull-request@v6
with:
author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
branch: auto/${{ env.RELEASE_TAG }}
assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher
delete-branch: true
title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
labels: do not test
body: |
Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
### Changelog category (leave one):
- Not for changelog (changelog entry is not required)
- name: Reset changes if Dry-run
if: ${{ inputs.dry-run }}
run: |
git reset --hard HEAD
- name: Checkout back to GITHUB_REF
run: |
git checkout "$GITHUB_REF_NAME"
- name: Create GH Release
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/create_release.py --create-gh-release \
--infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Export TGZ Packages
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/artifactory.py --export-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Test TGZ Packages
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/artifactory.py --test-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Export RPM Packages
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/artifactory.py --export-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Test RPM Packages
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/artifactory.py --test-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Export Debian Packages
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/artifactory.py --export-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Test Debian Packages
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/artifactory.py --test-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Docker clickhouse/clickhouse-server building
if: ${{ inputs.type == 'patch' }}
run: |
cd "./tests/ci"
export CHECK_NAME="Docker server image"
python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
- name: Docker clickhouse/clickhouse-keeper building
if: ${{ inputs.type == 'patch' }}
run: |
cd "./tests/ci"
export CHECK_NAME="Docker keeper image"
python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
- name: Post Slack Message
if: always()
run: |
echo Slack Message

View File

@ -172,7 +172,7 @@ jobs:
################################# Stage Final #################################
#
FinishCheck:
if: ${{ !cancelled() }}
if: ${{ !failure() && !cancelled() }}
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
runs-on: [self-hosted, style-checker-aarch64]
steps:

View File

@ -34,14 +34,12 @@ curl https://clickhouse.com/ | sh
Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.
* [v24.6 Community Call](https://clickhouse.com/company/events/v24-6-community-release-call) - Jul 2
* [v24.7 Community Call](https://clickhouse.com/company/events/v24-7-community-release-call) - Jul 30
## Upcoming Events
Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.
* [AWS Summit in DC](https://clickhouse.com/company/events/2024-06-aws-summit-dc) - Jun 26
* [ClickHouse Meetup in Amsterdam](https://www.meetup.com/clickhouse-netherlands-user-group/events/300781068/) - Jun 27
* [ClickHouse Meetup in Paris](https://www.meetup.com/clickhouse-france-user-group/events/300783448/) - Jul 9
* [ClickHouse Cloud - Live Update Call](https://clickhouse.com/company/events/202407-cloud-update-live) - Jul 9
* [ClickHouse Meetup @ Ramp - New York City](https://www.meetup.com/clickhouse-new-york-user-group/events/300595845/) - Jul 9

View File

@ -1,5 +1,6 @@
#pragma once
#include <cstdlib>
#include <memory>
#include <string>

View File

@ -108,6 +108,14 @@ struct make_unsigned // NOLINT(readability-identifier-naming)
using type = std::make_unsigned_t<T>;
};
template <> struct make_unsigned<Int8> { using type = UInt8; };
template <> struct make_unsigned<UInt8> { using type = UInt8; };
template <> struct make_unsigned<Int16> { using type = UInt16; };
template <> struct make_unsigned<UInt16> { using type = UInt16; };
template <> struct make_unsigned<Int32> { using type = UInt32; };
template <> struct make_unsigned<UInt32> { using type = UInt32; };
template <> struct make_unsigned<Int64> { using type = UInt64; };
template <> struct make_unsigned<UInt64> { using type = UInt64; };
template <> struct make_unsigned<Int128> { using type = UInt128; };
template <> struct make_unsigned<UInt128> { using type = UInt128; };
template <> struct make_unsigned<Int256> { using type = UInt256; };
@ -121,6 +129,14 @@ struct make_signed // NOLINT(readability-identifier-naming)
using type = std::make_signed_t<T>;
};
template <> struct make_signed<Int8> { using type = Int8; };
template <> struct make_signed<UInt8> { using type = Int8; };
template <> struct make_signed<Int16> { using type = Int16; };
template <> struct make_signed<UInt16> { using type = Int16; };
template <> struct make_signed<Int32> { using type = Int32; };
template <> struct make_signed<UInt32> { using type = Int32; };
template <> struct make_signed<Int64> { using type = Int64; };
template <> struct make_signed<UInt64> { using type = Int64; };
template <> struct make_signed<Int128> { using type = Int128; };
template <> struct make_signed<UInt128> { using type = Int128; };
template <> struct make_signed<Int256> { using type = Int256; };
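With identity specializations for the already-unsigned (and, in make_signed, already-signed) types present, the traits resolve uniformly for the extended integer types instead of falling through to std::make_unsigned_t / std::make_signed_t, which are not guaranteed to handle the wide types. A minimal, self-contained sketch of the same pattern, using GCC/Clang __int128 as a stand-in for the ClickHouse aliases (the "...Like" names are illustrative, not the real base/types.h definitions):

#include <type_traits>

// Stand-ins for the ClickHouse wide-integer aliases (assumption: real code uses base/types.h).
using Int128Like = __int128;
using UInt128Like = unsigned __int128;

template <typename T>
struct make_unsigned { using type = std::make_unsigned_t<T>; };

// Signed -> unsigned mapping for a wide type the standard trait may not accept.
template <> struct make_unsigned<Int128Like> { using type = UInt128Like; };
// Identity specialization, mirroring the ones above, so already-unsigned
// types pass through the trait unchanged.
template <> struct make_unsigned<UInt128Like> { using type = UInt128Like; };

static_assert(std::is_same_v<make_unsigned<Int128Like>::type, UInt128Like>);
static_assert(std::is_same_v<make_unsigned<UInt128Like>::type, UInt128Like>);

int main() { return 0; }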

View File

@ -0,0 +1,9 @@
#pragma once
#include <memory>
template <typename T>
bool isSharedPtrUnique(const std::shared_ptr<T> & ptr)
{
return ptr.use_count() == 1;
}
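This helper stands in for std::shared_ptr::unique(), which was deprecated in C++17 and removed in C++20. A small self-contained usage sketch (the helper is repeated so the snippet compiles on its own):

#include <cassert>
#include <memory>

template <typename T>
bool isSharedPtrUnique(const std::shared_ptr<T> & ptr)
{
    return ptr.use_count() == 1;
}

int main()
{
    auto a = std::make_shared<int>(42);
    assert(isSharedPtrUnique(a));   // single owner
    auto b = a;                     // a second owner appears
    assert(!isSharedPtrUnique(a));
    b.reset();                      // back to a single owner
    assert(isSharedPtrUnique(a));
    return 0;
}

As with the old unique(), the answer is only meaningful when no other thread can concurrently add or drop owners of the same control block.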

View File

@ -232,7 +232,7 @@ void Foundation_API format(
const Any & value10);
void Foundation_API format(std::string & result, const std::string & fmt, const std::vector<Any> & values);
void Foundation_API formatVector(std::string & result, const std::string & fmt, const std::vector<Any> & values);
/// Supports a variable number of arguments and is used by
/// all other variants of format().
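Since the std::vector<Any> overload is renamed to formatVector() (see the Format.cpp hunks below, where each fixed-arity format() overload now delegates to it), callers that pass a vector of Any values must use the new name. A hedged usage sketch, assuming the vendored Poco Foundation headers are on the include path:

#include <iostream>
#include <string>
#include <vector>
#include "Poco/Any.h"
#include "Poco/Format.h"

int main()
{
    // Fixed-arity overload: the call site is unchanged; it now forwards to formatVector() internally.
    std::string one;
    Poco::format(one, "answer: %d", Poco::Any(42));
    std::cout << one << '\n';

    // Vector overload: renamed from format() to formatVector().
    std::string many;
    std::vector<Poco::Any> args;
    args.push_back(Poco::Any(std::string("jobs")));
    args.push_back(Poco::Any(5));
    Poco::formatVector(many, "%s=%d", args);
    std::cout << many << '\n';
    return 0;
}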

View File

@ -21,6 +21,8 @@
#include "Poco/AtomicCounter.h"
#include "Poco/Foundation.h"
#include <atomic>
namespace Poco
{

View File

@ -51,8 +51,8 @@ namespace
}
if (width != 0) str.width(width);
}
void parsePrec(std::ostream& str, std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
{
if (itFmt != endFmt && *itFmt == '.')
@ -67,7 +67,7 @@ namespace
if (prec >= 0) str.precision(prec);
}
}
char parseMod(std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
{
char mod = 0;
@ -77,13 +77,13 @@ namespace
{
case 'l':
case 'h':
case 'L':
case 'L':
case '?': mod = *itFmt++; break;
}
}
return mod;
}
std::size_t parseIndex(std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
{
int index = 0;
@ -110,8 +110,8 @@ namespace
case 'f': str << std::fixed; break;
}
}
void writeAnyInt(std::ostream& str, const Any& any)
{
if (any.type() == typeid(char))
@ -201,7 +201,7 @@ namespace
str << RefAnyCast<std::string>(*itVal++);
break;
case 'z':
str << AnyCast<std::size_t>(*itVal++);
str << AnyCast<std::size_t>(*itVal++);
break;
case 'I':
case 'D':
@ -303,7 +303,7 @@ void format(std::string& result, const std::string& fmt, const Any& value)
{
std::vector<Any> args;
args.push_back(value);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -312,7 +312,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
std::vector<Any> args;
args.push_back(value1);
args.push_back(value2);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -322,7 +322,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value1);
args.push_back(value2);
args.push_back(value3);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -333,7 +333,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value2);
args.push_back(value3);
args.push_back(value4);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -345,7 +345,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value3);
args.push_back(value4);
args.push_back(value5);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -358,7 +358,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value4);
args.push_back(value5);
args.push_back(value6);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -372,7 +372,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value5);
args.push_back(value6);
args.push_back(value7);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -387,7 +387,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value6);
args.push_back(value7);
args.push_back(value8);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -403,7 +403,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value7);
args.push_back(value8);
args.push_back(value9);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -420,16 +420,16 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value8);
args.push_back(value9);
args.push_back(value10);
format(result, fmt, args);
formatVector(result, fmt, args);
}
void format(std::string& result, const std::string& fmt, const std::vector<Any>& values)
void formatVector(std::string& result, const std::string& fmt, const std::vector<Any>& values)
{
std::string::const_iterator itFmt = fmt.begin();
std::string::const_iterator endFmt = fmt.end();
std::vector<Any>::const_iterator itVal = values.begin();
std::vector<Any>::const_iterator endVal = values.end();
std::vector<Any>::const_iterator endVal = values.end();
while (itFmt != endFmt)
{
switch (*itFmt)

View File

@ -57,7 +57,7 @@ std::string ObjectId::toString(const std::string& fmt) const
for (int i = 0; i < 12; ++i)
{
s += format(fmt, (unsigned int) _id[i]);
s += Poco::format(fmt, (unsigned int) _id[i]);
}
return s;
}

View File

@ -43,9 +43,9 @@ namespace Poco {
namespace MongoDB {
static const std::string keyCursor {"cursor"};
static const std::string keyFirstBatch {"firstBatch"};
static const std::string keyNextBatch {"nextBatch"};
[[ maybe_unused ]] static const std::string keyCursor {"cursor"};
[[ maybe_unused ]] static const std::string keyFirstBatch {"firstBatch"};
[[ maybe_unused ]] static const std::string keyNextBatch {"nextBatch"};
static Poco::Int64 cursorIdFromResponse(const MongoDB::Document& doc);
@ -131,7 +131,7 @@ OpMsgMessage& OpMsgCursor::next(Connection& connection)
connection.readResponse(_response);
}
else
#endif
#endif
{
_response.clear();
_query.setCursor(_cursorID, _batchSize);

View File

@ -79,7 +79,7 @@ namespace Net
/// Returns the value of the first name-value pair with the given name.
/// If no value with the given name has been found, the defaultValue is returned.
const std::vector<std::reference_wrapper<const std::string>> getAll(const std::string & name) const;
std::vector<std::string> getAll(const std::string & name) const;
/// Returns all values of all name-value pairs with the given name.
///
/// Returns an empty vector if there are no name-value pairs with the given name.
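With the signature change, getAll() returns owned std::string copies instead of reference wrappers into the collection, so the returned values remain valid even if the collection is later modified or destroyed. A short usage sketch, assuming the vendored Poco Net headers:

#include <iostream>
#include <string>
#include <vector>
#include "Poco/Net/NameValueCollection.h"

int main()
{
    Poco::Net::NameValueCollection headers;
    headers.add("X-ClickHouse-Progress", "1");
    headers.add("X-ClickHouse-Progress", "2");

    // getAll() now returns std::vector<std::string> holding copies of every value
    // stored under the given name.
    std::vector<std::string> values = headers.getAll("X-ClickHouse-Progress");
    for (const auto & v : values)
        std::cout << v << '\n';
    return 0;
}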

View File

@ -17,9 +17,9 @@
#include "Poco/NumberFormatter.h"
#include "Poco/NumberParser.h"
#include "Poco/String.h"
#include <charconv>
#include <format>
using Poco::NumberFormatter;
using Poco::NumberParser;
using Poco::icompare;
@ -75,7 +75,7 @@ void HTTPMessage::setContentLength(std::streamsize length)
erase(CONTENT_LENGTH);
}
std::streamsize HTTPMessage::getContentLength() const
{
const std::string& contentLength = get(CONTENT_LENGTH, EMPTY);
@ -98,7 +98,7 @@ void HTTPMessage::setContentLength64(Poco::Int64 length)
erase(CONTENT_LENGTH);
}
Poco::Int64 HTTPMessage::getContentLength64() const
{
const std::string& contentLength = get(CONTENT_LENGTH, EMPTY);
@ -133,13 +133,13 @@ void HTTPMessage::setChunkedTransferEncoding(bool flag)
setTransferEncoding(IDENTITY_TRANSFER_ENCODING);
}
bool HTTPMessage::getChunkedTransferEncoding() const
{
return icompare(getTransferEncoding(), CHUNKED_TRANSFER_ENCODING) == 0;
}
void HTTPMessage::setContentType(const std::string& mediaType)
{
if (mediaType.empty())
@ -154,7 +154,7 @@ void HTTPMessage::setContentType(const MediaType& mediaType)
setContentType(mediaType.toString());
}
const std::string& HTTPMessage::getContentType() const
{
return get(CONTENT_TYPE, UNKNOWN_CONTENT_TYPE);

View File

@ -102,9 +102,9 @@ const std::string& NameValueCollection::get(const std::string& name, const std::
return defaultValue;
}
const std::vector<std::reference_wrapper<const std::string>> NameValueCollection::getAll(const std::string& name) const
std::vector<std::string> NameValueCollection::getAll(const std::string& name) const
{
std::vector<std::reference_wrapper<const std::string>> values;
std::vector<std::string> values;
for (ConstIterator it = _map.find(name); it != _map.end(); it++)
if (it->first == name)
values.push_back(it->second);

View File

@ -42,9 +42,19 @@ endif ()
# But use 2 parallel jobs, since:
# - this is what llvm does
# - and I've verified that lld-11 does not use all available CPU time (at peak) while linking one binary
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLEL_LINK_JOBS GREATER 2)
message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
set (PARALLEL_LINK_JOBS 2)
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO)
if (ARCH_AARCH64)
# aarch64 builds often start to fail with OOMs (reason not yet clear); for now, limit the concurrency
message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 1.")
set (PARALLEL_LINK_JOBS 1)
if (LINKER_NAME MATCHES "lld")
math(EXPR LTO_JOBS ${NUMBER_OF_LOGICAL_CORES}/4)
set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -Wl,--thinlto-jobs=${LTO_JOBS}")
endif()
elseif (PARALLEL_LINK_JOBS GREATER 2)
message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
set (PARALLEL_LINK_JOBS 2)
endif ()
endif()
message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB RAM, 'OFF' means the native core count).")

View File

@ -84,5 +84,5 @@ if (CMAKE_CROSSCOMPILING)
message (FATAL_ERROR "Trying to cross-compile to unsupported system: ${CMAKE_SYSTEM_NAME}!")
endif ()
message (STATUS "Cross-compiling for target: ${CMAKE_CXX_COMPILE_TARGET}")
message (STATUS "Cross-compiling for target: ${CMAKE_CXX_COMPILER_TARGET}")
endif ()

contrib/avro vendored

@ -1 +1 @@
Subproject commit d43acc84d3d455b016f847d6666fbc3cd27f16a9
Subproject commit 545e7002683cbc2198164d93088ac8e4955b4628

View File

@ -125,7 +125,7 @@ configure_file("${AWS_SDK_CORE_DIR}/include/aws/core/SDKConfig.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)
aws_get_version(AWS_CRT_CPP_VERSION_MAJOR AWS_CRT_CPP_VERSION_MINOR AWS_CRT_CPP_VERSION_PATCH FULL_VERSION GIT_HASH)
configure_file("${AWS_CRT_DIR}/include/aws/crt/Config.h.in" "${AWS_CRT_DIR}/include/aws/crt/Config.h" @ONLY)
configure_file("${AWS_CRT_DIR}/include/aws/crt/Config.h.in" "${CMAKE_CURRENT_BINARY_DIR}/include/aws/crt/Config.h" @ONLY)
list(APPEND AWS_SOURCES ${AWS_SDK_CORE_SRC} ${AWS_SDK_CORE_NET_SRC} ${AWS_SDK_CORE_PLATFORM_SRC})

contrib/azure vendored

@ -1 +1 @@
Subproject commit 92c94d7f37a43cc8fc4d466884a95f610c0593bf
Subproject commit ea3e19a7be08519134c643177d56c7484dfec884

contrib/grpc vendored

@ -1 +1 @@
Subproject commit 77b2737a709d43d8c6895e3f03ca62b00bd9201c
Subproject commit f5b7fdc2dff09ada06dbf6c75df298fb40f898df

View File

@ -179,12 +179,19 @@ endif ()
target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)
# jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
# The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracking.
# jemalloc provides support for two unwind flavors:
# - JEMALLOC_PROF_LIBUNWIND - unw_backtrace() - gnu libunwind (compatible with llvm libunwind)
# - JEMALLOC_PROF_LIBGCC - _Unwind_Backtrace() - the original HP libunwind and the one coming with gcc / g++ / libstdc++.
#
# ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)
# But with JEMALLOC_PROF_LIBGCC it also calls _Unwind_Backtrace() during
# bootstrapping of jemalloc, which may lead to a deadlock if dlsym does
# allocations somewhere (as glibc does prior to 2.34, see [1]).
#
# [1]: https://sourceware.org/git/?p=glibc.git;a=commit;h=fada9018199c21c469ff0e731ef75c6020074ac9
#
# And since ClickHouse's libunwind already supports unw_backtrace(), we can safely
# switch to it to avoid this deadlock.
target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBUNWIND=1)
target_link_libraries (_jemalloc PRIVATE unwind)
# for RTLD_NEXT

View File

@ -54,7 +54,6 @@ set(SRCS
"${LIBPQ_SOURCE_DIR}/port/pgstrcasecmp.c"
"${LIBPQ_SOURCE_DIR}/port/thread.c"
"${LIBPQ_SOURCE_DIR}/port/path.c"
"${LIBPQ_SOURCE_DIR}/port/explicit_bzero.c"
)
add_library(_libpq ${SRCS})

contrib/orc vendored

@ -1 +1 @@
Subproject commit 947cebaf9432d708253ac08dc3012daa6b4ede6f
Subproject commit bcc025c09828c556f54cfbdf83a66b9acae7d17f

contrib/pocketfft vendored

@ -1 +1 @@
Subproject commit 9efd4da52cf8d28d14531d14e43ad9d913807546
Subproject commit f4c1aa8aa9ce79ad39e80f2c9c41b92ead90fda3

contrib/rocksdb vendored

@ -1 +1 @@
Subproject commit 3a0b80ca9d6eebb38fad7ea3f41dfc9db4f6a984
Subproject commit be366233921293bd07a84dc4ea6991858665f202

View File

@ -1,24 +1,17 @@
option (ENABLE_ROCKSDB "Enable rocksdb library" ${ENABLE_LIBRARIES})
option (ENABLE_ROCKSDB "Enable RocksDB" ${ENABLE_LIBRARIES})
if (NOT ENABLE_ROCKSDB)
message (STATUS "Not using rocksdb")
message (STATUS "Not using RocksDB")
return()
endif()
## this file is extracted from `contrib/rocksdb/CMakeLists.txt`
set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")
list(APPEND CMAKE_MODULE_PATH "${ROCKSDB_SOURCE_DIR}/cmake/modules/")
set(PORTABLE ON)
## always disable jemalloc for rocksdb by default
## because it introduces non-standard jemalloc APIs
# Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs
option(WITH_JEMALLOC "build with JeMalloc" OFF)
set(USE_SNAPPY OFF)
if (TARGET ch_contrib::snappy)
set(USE_SNAPPY ON)
endif()
option(WITH_SNAPPY "build with SNAPPY" ${USE_SNAPPY})
## lz4, zlib, zstd is enabled in ClickHouse by default
option(WITH_LIBURING "build with liburing" OFF) # TODO could try to enable this conditionally, depending on ClickHouse's ENABLE_LIBURING
# ClickHouse cannot be compiled without snappy, lz4, zlib, zstd
option(WITH_SNAPPY "build with SNAPPY" ON)
option(WITH_LZ4 "build with lz4" ON)
option(WITH_ZLIB "build with zlib" ON)
option(WITH_ZSTD "build with zstd" ON)
@ -26,78 +19,46 @@ option(WITH_ZSTD "build with zstd" ON)
# third-party/folly is only validated to work on Linux and Windows for now.
# So only turn it on there by default.
if(CMAKE_SYSTEM_NAME MATCHES "Linux|Windows")
if(MSVC AND MSVC_VERSION LESS 1910)
# Folly does not compile with MSVC older than VS2017
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
else()
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
endif()
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
else()
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
endif()
if( NOT DEFINED CMAKE_CXX_STANDARD )
set(CMAKE_CXX_STANDARD 11)
if(WITH_SNAPPY)
add_definitions(-DSNAPPY)
list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
endif()
if(MSVC)
option(WITH_XPRESS "build with windows built in compression" OFF)
include("${ROCKSDB_SOURCE_DIR}/thirdparty.inc")
else()
if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT CMAKE_SYSTEM_NAME MATCHES "kFreeBSD")
# FreeBSD has jemalloc as default malloc
# but it does not have all the jemalloc files in include/...
set(WITH_JEMALLOC ON)
else()
if(WITH_JEMALLOC AND TARGET ch_contrib::jemalloc)
add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE)
list(APPEND THIRDPARTY_LIBS ch_contrib::jemalloc)
endif()
endif()
if(WITH_SNAPPY)
add_definitions(-DSNAPPY)
list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
endif()
if(WITH_ZLIB)
add_definitions(-DZLIB)
list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
endif()
if(WITH_LZ4)
add_definitions(-DLZ4)
list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
endif()
if(WITH_ZSTD)
add_definitions(-DZSTD)
list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
endif()
if(WITH_ZLIB)
add_definitions(-DZLIB)
list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
endif()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
if(POWER9)
set(HAS_POWER9 1)
set(HAS_ALTIVEC 1)
else()
set(HAS_POWER8 1)
set(HAS_ALTIVEC 1)
endif(POWER9)
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
if(WITH_LZ4)
add_definitions(-DLZ4)
list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
endif()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
set(HAS_ARMV8_CRC 1)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
if(WITH_ZSTD)
add_definitions(-DZSTD)
list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
endif()
option(PORTABLE "build a portable binary" ON)
if(ENABLE_AVX2 AND ENABLE_PCLMULQDQ)
if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
add_definitions(-DHAVE_SSE42)
add_definitions(-DHAVE_PCLMUL)
endif()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64")
set (HAS_ARMV8_CRC 1)
# the original build descriptions set specific flags for ARM. These flags are already subsumed by ClickHouse's general
# ARM flags, see cmake/cpu_features.cmake
# set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
endif()
set (HAVE_THREAD_LOCAL 1)
if(HAVE_THREAD_LOCAL)
add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
@ -107,8 +68,6 @@ if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
add_definitions(-DOS_MACOSX)
elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
add_definitions(-DOS_LINUX)
elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS")
add_definitions(-DOS_SOLARIS)
elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
add_definitions(-DOS_FREEBSD)
elseif(CMAKE_SYSTEM_NAME MATCHES "Android")
@ -123,12 +82,10 @@ endif()
if (OS_LINUX)
add_definitions(-DROCKSDB_SCHED_GETCPU_PRESENT)
add_definitions(-DROCKSDB_AUXV_SYSAUXV_PRESENT)
add_definitions(-DROCKSDB_AUXV_GETAUXVAL_PRESENT)
elseif (OS_FREEBSD)
add_definitions(-DROCKSDB_AUXV_SYSAUXV_PRESENT)
endif()
set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")
include_directories(${ROCKSDB_SOURCE_DIR})
include_directories("${ROCKSDB_SOURCE_DIR}/include")
@ -136,11 +93,11 @@ if(WITH_FOLLY_DISTRIBUTED_MUTEX)
include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly")
endif()
# Main library source code
set(SOURCES
${ROCKSDB_SOURCE_DIR}/cache/cache.cc
${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
${ROCKSDB_SOURCE_DIR}/cache/cache_key.cc
${ROCKSDB_SOURCE_DIR}/cache/cache_reservation_manager.cc
${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
@ -156,6 +113,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc
${ROCKSDB_SOURCE_DIR}/db/builder.cc
${ROCKSDB_SOURCE_DIR}/db/c.cc
${ROCKSDB_SOURCE_DIR}/db/column_family.cc
@ -229,6 +187,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
${ROCKSDB_SOURCE_DIR}/env/unique_id_gen.cc
${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc
${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc
${ROCKSDB_SOURCE_DIR}/file/file_util.cc
@ -247,6 +206,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc
${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc
${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc
${ROCKSDB_SOURCE_DIR}/memory/memory_allocator.cc
${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc
${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc
${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc
@ -322,6 +282,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/table/table_factory.cc
${ROCKSDB_SOURCE_DIR}/table/table_properties.cc
${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc
${ROCKSDB_SOURCE_DIR}/table/unique_id.cc
${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc
${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc
${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc
@ -333,9 +294,12 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc
${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc
${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_handler.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_result.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record.cc
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
${ROCKSDB_SOURCE_DIR}/util/coding.cc
${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
${ROCKSDB_SOURCE_DIR}/util/comparator.cc
@ -347,6 +311,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
${ROCKSDB_SOURCE_DIR}/util/random.cc
${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
${ROCKSDB_SOURCE_DIR}/util/regex.cc
${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
${ROCKSDB_SOURCE_DIR}/util/slice.cc
${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
@ -362,18 +327,23 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc
${ROCKSDB_SOURCE_DIR}/utilities/cache_dump_load.cc
${ROCKSDB_SOURCE_DIR}/utilities/cache_dump_load_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc
${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters.cc
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_secondary_cache.cc
${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc
${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc
@ -393,6 +363,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
${ROCKSDB_SOURCE_DIR}/utilities/trace/replayer_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc
@ -411,6 +382,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/wal_filter.cc
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/concurrent_tree.cc
@ -425,7 +397,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/dbt.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
rocksdb_build_version.cc)
build_version.cc) # generated by hand
if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
set_source_files_properties(
@ -462,5 +434,6 @@ endif()
add_library(_rocksdb ${SOURCES})
add_library(ch_contrib::rocksdb ALIAS _rocksdb)
target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
# SYSTEM is required to overcome some issues
target_include_directories(_rocksdb SYSTEM BEFORE INTERFACE "${ROCKSDB_SOURCE_DIR}/include")

docker/reqgenerator.py Normal file
View File

@ -0,0 +1,47 @@
#!/usr/bin/env python3
# To run this script you must install docker and the pipdeptree python package
#
import subprocess
import os
import sys
def build_docker_deps(image_name, imagedir):
cmd = f"""docker run --entrypoint "/bin/bash" {image_name} -c "pip install pipdeptree 2>/dev/null 1>/dev/null && pipdeptree --freeze --warn silence | sed 's/ \+//g' | sort | uniq" > {imagedir}/requirements.txt"""
subprocess.check_call(cmd, shell=True)
def check_docker_file_install_with_pip(filepath):
image_name = None
with open(filepath, "r") as f:
for line in f:
if "docker build" in line:
arr = line.split(" ")
if len(arr) > 4:
image_name = arr[4]
if "pip3 install" in line or "pip install" in line:
return image_name, True
return image_name, False
def process_affected_images(images_dir):
for root, _dirs, files in os.walk(images_dir):
for f in files:
if f == "Dockerfile":
docker_file_path = os.path.join(root, f)
print("Checking image on path", docker_file_path)
image_name, has_pip = check_docker_file_install_with_pip(
docker_file_path
)
if has_pip:
print("Find pip in", image_name)
try:
build_docker_deps(image_name, root)
except Exception as ex:
print(ex)
else:
print("Pip not found in", docker_file_path)
process_affected_images(sys.argv[1])

View File

@ -19,10 +19,7 @@ RUN apt-get update \
odbcinst \
psmisc \
python3 \
python3-lxml \
python3-pip \
python3-requests \
python3-termcolor \
unixodbc \
pv \
jq \
@ -31,7 +28,8 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
RUN pip3 install numpy==1.26.3 scipy==1.12.0 pandas==1.5.3 Jinja2==3.1.3
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt
# This symlink is required by gcc to find the lld linker
RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
@ -39,6 +37,10 @@ RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
# https://salsa.debian.org/pkg-llvm-team/llvm-toolchain/-/commit/992e52c0b156a5ba9c6a8a54f8c4857ddd3d371d
RUN sed -i '/_IMPORT_CHECK_FILES_FOR_\(mlir-\|llvm-bolt\|merge-fdata\|MLIR\)/ {s|^|#|}' /usr/lib/llvm-${LLVM_VERSION}/lib/cmake/llvm/LLVMExports-*.cmake
# LLVM changes paths for compiler-rt libraries. For some reason clang-18.1.8 cannot pick up the libraries from the default install path.
# It's a very dirty workaround; it would be better to build the compiler and LLVM ourselves and use them. Details: https://github.com/llvm/llvm-project/issues/95792
RUN test ! -d /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu || ln -s /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu /usr/lib/llvm-18/lib/clang/18/lib/x86_64-unknown-linux-gnu
ARG CCACHE_VERSION=4.6.1
RUN mkdir /tmp/ccache \
&& cd /tmp/ccache \

View File

@ -0,0 +1,41 @@
Jinja2==3.1.3
MarkupSafe==2.1.5
PyJWT==2.3.0
PyYAML==6.0.1
Pygments==2.11.2
SecretStorage==3.3.1
blinker==1.4
certifi==2020.6.20
chardet==4.0.0
cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
httplib2==0.20.2
idna==3.3
importlib-metadata==4.6.4
jeepney==0.7.1
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
lxml==4.8.0
more-itertools==8.10.0
numpy==1.26.3
oauthlib==3.2.0
packaging==24.1
pandas==1.5.3
pip==24.1.1
pipdeptree==2.23.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
python-dateutil==2.9.0.post0
pytz==2024.1
requests==2.32.3
scipy==1.12.0
setuptools==59.6.0
six==1.16.0
termcolor==1.1.0
urllib3==1.26.5
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0

View File

@ -84,6 +84,8 @@ function start_server
echo "ClickHouse server pid '$server_pid' started and responded"
}
export -f start_server
function clone_root
{
[ "$UID" -eq 0 ] && git config --global --add safe.directory "$FASTTEST_SOURCE"
@ -254,6 +256,19 @@ function configure
rm -f "$FASTTEST_DATA/config.d/secure_ports.xml"
}
function timeout_with_logging() {
local exit_code=0
timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
if [[ "${exit_code}" -eq "124" ]]
then
echo "The command 'timeout ${*}' has been killed by timeout"
fi
return $exit_code
}
function run_tests
{
clickhouse-server --version
@ -269,6 +284,11 @@ function run_tests
NPROC=1
fi
export CLICKHOUSE_CONFIG_DIR=$FASTTEST_DATA
export CLICKHOUSE_CONFIG="$FASTTEST_DATA/config.xml"
export CLICKHOUSE_USER_FILES="$FASTTEST_DATA/user_files"
export CLICKHOUSE_SCHEMA_FILES="$FASTTEST_DATA/format_schemas"
local test_opts=(
--hung-check
--fast-tests-only
@ -292,6 +312,8 @@ function run_tests
clickhouse stop --pid-path "$FASTTEST_DATA"
}
export -f run_tests
case "$stage" in
"")
ls -la
@ -315,7 +337,7 @@ case "$stage" in
configure 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/install_log.txt"
;&
"run_tests")
run_tests
timeout_with_logging 35m bash -c run_tests ||:
/process_functional_tests_result.py --in-results-dir "$FASTTEST_OUTPUT/" \
--out-results-file "$FASTTEST_OUTPUT/test_results.tsv" \
--out-status-file "$FASTTEST_OUTPUT/check_status.tsv" || echo -e "failure\tCannot parse results" > "$FASTTEST_OUTPUT/check_status.tsv"

View File

@ -31,7 +31,8 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
RUN pip3 install Jinja2
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt
COPY * /

View File

@ -0,0 +1,27 @@
blinker==1.4
cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
httplib2==0.20.2
importlib-metadata==4.6.4
jeepney==0.7.1
Jinja2==3.1.4
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
MarkupSafe==2.1.5
more-itertools==8.10.0
oauthlib==3.2.0
packaging==24.1
pip==24.1.1
pipdeptree==2.23.0
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0

View File

@ -33,7 +33,8 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
RUN pip3 install pycurl
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r requirements.txt && rm -rf /root/.cache/pip
# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH

View File

@ -0,0 +1,26 @@
blinker==1.4
cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
httplib2==0.20.2
importlib-metadata==4.6.4
jeepney==0.7.1
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
more-itertools==8.10.0
oauthlib==3.2.0
packaging==24.1
pip==24.1.1
pipdeptree==2.23.0
pycurl==7.45.3
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0

View File

@ -2,4 +2,5 @@
# Helper docker container to run python bottle apps
FROM python:3
RUN python -m pip install bottle
COPY requirements.txt /
RUN python -m pip install --no-cache-dir -r requirements.txt

View File

@ -0,0 +1,6 @@
bottle==0.12.25
packaging==24.1
pip==23.2.1
pipdeptree==2.23.0
setuptools==69.0.3
wheel==0.42.0

View File

@ -26,7 +26,6 @@ RUN apt-get update \
libicu-dev \
bsdutils \
curl \
python3-pika \
liblua5.1-dev \
luajit \
libssl-dev \
@ -61,49 +60,8 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
# kazoo 2.10.0 is broken
# https://s3.amazonaws.com/clickhouse-test-reports/59337/524625a1d2f4cc608a3f1059e3df2c30f353a649/integration_tests__asan__analyzer__[5_6].html
RUN python3 -m pip install --no-cache-dir \
PyMySQL==1.1.0 \
asyncio==3.4.3 \
avro==1.10.2 \
azure-storage-blob==12.19.0 \
boto3==1.34.24 \
cassandra-driver==3.29.0 \
confluent-kafka==2.3.0 \
delta-spark==2.3.0 \
dict2xml==1.7.4 \
dicttoxml==1.7.16 \
docker==6.1.3 \
docker-compose==1.29.2 \
grpcio==1.60.0 \
grpcio-tools==1.60.0 \
kafka-python==2.0.2 \
lz4==4.3.3 \
minio==7.2.3 \
nats-py==2.6.0 \
protobuf==4.25.2 \
kazoo==2.9.0 \
psycopg2-binary==2.9.6 \
pyhdfs==0.3.1 \
pymongo==3.11.0 \
pyspark==3.3.2 \
pytest==7.4.4 \
pytest-order==1.0.0 \
pytest-random==0.2 \
pytest-repeat==0.9.3 \
pytest-timeout==2.2.0 \
pytest-xdist==3.5.0 \
pytest-reportlog==0.4.0 \
pytz==2023.3.post1 \
pyyaml==5.3.1 \
redis==5.0.1 \
requests-kerberos==0.14.0 \
tzlocal==2.1 \
retry==0.9.2 \
bs4==0.0.2 \
lxml==5.1.0 \
urllib3==2.0.7 \
jwcrypto==1.5.6
# bs4, lxml are for cloud tests, do not delete
COPY requirements.txt /
RUN python3 -m pip install --no-cache-dir -r requirements.txt
# Hudi supports only spark 3.3.*, not 3.4
RUN curl -fsSL -O https://archive.apache.org/dist/spark/spark-3.3.2/spark-3.3.2-bin-hadoop3.tgz \

View File

@ -0,0 +1,113 @@
PyHDFS==0.3.1
PyJWT==2.3.0
PyMySQL==1.1.0
PyNaCl==1.5.0
PyYAML==5.3.1
SecretStorage==3.3.1
argon2-cffi-bindings==21.2.0
argon2-cffi==23.1.0
async-timeout==4.0.3
asyncio==3.4.3
attrs==23.2.0
avro==1.10.2
azure-core==1.30.1
azure-storage-blob==12.19.0
bcrypt==4.1.3
beautifulsoup4==4.12.3
blinker==1.4
boto3==1.34.24
botocore==1.34.101
bs4==0.0.2
cassandra-driver==3.29.0
certifi==2024.2.2
cffi==1.16.0
charset-normalizer==3.3.2
click==8.1.7
confluent-kafka==2.3.0
cryptography==3.4.8
dbus-python==1.2.18
decorator==5.1.1
delta-spark==2.3.0
dict2xml==1.7.4
dicttoxml==1.7.16
distro-info==1.1+ubuntu0.2
distro==1.7.0
docker-compose==1.29.2
docker==6.1.3
dockerpty==0.4.1
docopt==0.6.2
exceptiongroup==1.2.1
execnet==2.1.1
geomet==0.2.1.post1
grpcio-tools==1.60.0
grpcio==1.60.0
gssapi==1.8.3
httplib2==0.20.2
idna==3.7
importlib-metadata==4.6.4
iniconfig==2.0.0
isodate==0.6.1
jeepney==0.7.1
jmespath==1.0.1
jsonschema==3.2.0
jwcrypto==1.5.6
kafka-python==2.0.2
kazoo==2.9.0
keyring==23.5.0
krb5==0.5.1
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
lxml==5.1.0
lz4==4.3.3
minio==7.2.3
more-itertools==8.10.0
nats-py==2.6.0
oauthlib==3.2.0
packaging==24.0
paramiko==3.4.0
pika==1.2.0
pip==24.1.1
pipdeptree==2.23.0
pluggy==1.5.0
protobuf==4.25.2
psycopg2-binary==2.9.6
py4j==0.10.9.5
py==1.11.0
pycparser==2.22
pycryptodome==3.20.0
pymongo==3.11.0
pyparsing==2.4.7
pyrsistent==0.20.0
pyspark==3.3.2
pyspnego==0.10.2
pytest-order==1.0.0
pytest-random==0.2
pytest-repeat==0.9.3
pytest-reportlog==0.4.0
pytest-timeout==2.2.0
pytest-xdist==3.5.0
pytest==7.4.4
python-apt==2.4.0+ubuntu3
python-dateutil==2.9.0.post0
python-dotenv==0.21.1
pytz==2023.3.post1
redis==5.0.1
requests-kerberos==0.14.0
requests==2.31.0
retry==0.9.2
s3transfer==0.10.1
setuptools==59.6.0
simplejson==3.19.2
six==1.16.0
soupsieve==2.5
texttable==1.7.0
tomli==2.0.1
typing_extensions==4.11.0
tzlocal==2.1
unattended-upgrades==0.1
urllib3==2.0.7
wadllib==1.3.6
websocket-client==0.59.0
wheel==0.37.1
zipp==1.0.0

View File

@ -1,3 +1,4 @@
# docker build -t clickhouse/libfuzzer .
ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG
@ -29,7 +30,8 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
RUN pip3 install Jinja2
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt
COPY * /

View File

@ -0,0 +1,27 @@
blinker==1.4
cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
httplib2==0.20.2
importlib-metadata==4.6.4
jeepney==0.7.1
Jinja2==3.1.4
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
MarkupSafe==2.1.5
more-itertools==8.10.0
oauthlib==3.2.0
packaging==24.1
pip==24.1.1
pipdeptree==2.23.0
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0

View File

@ -28,19 +28,19 @@ def run_fuzzer(fuzzer: str):
parser.read(path)
if parser.has_section("asan"):
os.environ[
"ASAN_OPTIONS"
] = f"{os.environ['ASAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['asan'].items())}"
os.environ["ASAN_OPTIONS"] = (
f"{os.environ['ASAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['asan'].items())}"
)
if parser.has_section("msan"):
os.environ[
"MSAN_OPTIONS"
] = f"{os.environ['MSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['msan'].items())}"
os.environ["MSAN_OPTIONS"] = (
f"{os.environ['MSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['msan'].items())}"
)
if parser.has_section("ubsan"):
os.environ[
"UBSAN_OPTIONS"
] = f"{os.environ['UBSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['ubsan'].items())}"
os.environ["UBSAN_OPTIONS"] = (
f"{os.environ['UBSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['ubsan'].items())}"
)
if parser.has_section("libfuzzer"):
custom_libfuzzer_options = " ".join(

View File

@ -23,7 +23,6 @@ RUN apt-get update \
python3 \
python3-dev \
python3-pip \
python3-setuptools \
rsync \
tree \
tzdata \
@ -33,12 +32,14 @@ RUN apt-get update \
cargo \
ripgrep \
zstd \
&& pip3 --no-cache-dir install 'clickhouse-driver==0.2.1' scipy \
&& apt-get purge --yes python3-dev g++ \
&& apt-get autoremove --yes \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
COPY requirements.txt /
RUN pip3 --no-cache-dir install -r requirements.txt
COPY run.sh /
CMD ["bash", "/run.sh"]

View File

@ -0,0 +1,32 @@
blinker==1.4
clickhouse-driver==0.2.7
cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
httplib2==0.20.2
importlib-metadata==4.6.4
jeepney==0.7.1
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
more-itertools==8.10.0
numpy==1.26.3
oauthlib==3.2.0
packaging==24.1
pip==24.1.1
pipdeptree==2.23.0
Pygments==2.11.2
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
pytz==2023.4
PyYAML==6.0.1
scipy==1.12.0
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0
tzlocal==2.1
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0

View File

@ -18,11 +18,8 @@ RUN apt-get update --yes \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
RUN pip3 install \
numpy \
pyodbc \
deepdiff \
sqlglot
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz"

View File

@ -0,0 +1,30 @@
blinker==1.4
cryptography==3.4.8
dbus-python==1.2.18
deepdiff==7.0.1
distro==1.7.0
httplib2==0.20.2
importlib-metadata==4.6.4
jeepney==0.7.1
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
more-itertools==8.10.0
numpy==1.26.4
oauthlib==3.2.0
ordered-set==4.1.0
packaging==24.1
pip==24.1.1
pipdeptree==2.23.0
PyJWT==2.3.0
pyodbc==5.1.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0
sqlglot==23.16.0
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0

View File

@ -14,9 +14,8 @@ RUN apt-get update --yes \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
RUN pip3 install \
pyyaml \
clickhouse-driver
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt
ARG sqltest_repo="https://github.com/elliotchance/sqltest/"

View File

@ -0,0 +1,29 @@
blinker==1.4
clickhouse-driver==0.2.7
cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
httplib2==0.20.2
importlib-metadata==4.6.4
jeepney==0.7.1
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
more-itertools==8.10.0
oauthlib==3.2.0
packaging==24.1
pip==24.1.1
pipdeptree==2.23.0
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
pytz==2024.1
PyYAML==6.0.1
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0
tzlocal==5.2
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0

View File

@ -6,7 +6,6 @@ FROM clickhouse/stateless-test:$FROM_TAG
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get install --yes --no-install-recommends \
python3-requests \
nodejs \
npm \
&& apt-get clean \

View File

@ -16,11 +16,17 @@ dpkg -i package_folder/clickhouse-client_*.deb
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
# shellcheck disable=SC1091
source /utils.lib
# install test configs
/usr/share/clickhouse-test/config/install.sh
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --silent --inMemoryPersistence &
./setup_minio.sh stateful
./mc admin trace clickminio > /test_output/rubbish.log &
MC_ADMIN_PID=$!
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
@ -251,6 +257,8 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
sudo clickhouse stop --pid-path /var/run/clickhouse-server2 ||:
fi
# Kill minio admin client to stop collecting logs
kill $MC_ADMIN_PID
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst ||:
@ -272,3 +280,5 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
fi
collect_core_dumps

View File

@ -25,10 +25,7 @@ RUN apt-get update -y \
openssl \
postgresql-client \
python3 \
python3-lxml \
python3-pip \
python3-requests \
python3-termcolor \
qemu-user-static \
sqlite3 \
sudo \
@ -51,7 +48,8 @@ RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PR
&& unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d /usr/local \
&& rm protoc-${PROTOC_VERSION}-linux-x86_64.zip
RUN pip3 install numpy==1.26.3 scipy==1.12.0 pandas==1.5.3 Jinja2==3.1.3 pyarrow==15.0.0
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt
RUN mkdir -p /tmp/clickhouse-odbc-tmp \
&& cd /tmp/clickhouse-odbc-tmp \
@ -88,6 +86,7 @@ RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoo
ENV MINIO_ROOT_USER="clickhouse"
ENV MINIO_ROOT_PASSWORD="clickhouse"
ENV EXPORT_S3_STORAGE_POLICIES=1
ENV CLICKHOUSE_GRPC_CLIENT="/usr/share/clickhouse-utils/grpc-client/clickhouse-grpc-client.py"
RUN npm install -g azurite@3.30.0 \
&& npm install -g tslib && npm install -g node

View File

@ -0,0 +1,53 @@
awscli==1.22.34
blinker==1.4
botocore==1.23.34
certifi==2020.6.20
chardet==4.0.0
colorama==0.4.4
cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
docutils==0.17.1
grpcio==1.47.0
gyp==0.1
httplib2==0.20.2
idna==3.3
importlib-metadata==4.6.4
jeepney==0.7.1
Jinja2==3.1.3
jmespath==0.10.0
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
lxml==4.8.0
MarkupSafe==2.1.5
more-itertools==8.10.0
numpy==1.26.3
oauthlib==3.2.0
packaging==24.1
pandas==1.5.3
pip==24.1.1
pipdeptree==2.23.0
protobuf==4.25.3
pyarrow==15.0.0
pyasn1==0.4.8
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
python-dateutil==2.8.1
pytz==2024.1
PyYAML==6.0.1
requests==2.32.3
roman==3.3
rsa==4.8
s3transfer==0.5.0
scipy==1.12.0
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0
termcolor==1.1.0
urllib3==1.26.5
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0

View File

@ -6,19 +6,30 @@ source /setup_export_logs.sh
# fail on errors, verbose and export all env variables
set -e -x -a
MAX_RUN_TIME=${MAX_RUN_TIME:-7200}
MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 7200 : MAX_RUN_TIME))
USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}
RUN_SEQUENTIAL_TESTS_IN_PARALLEL=1
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] || [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
fi
# Choose random timezone for this test run.
#
# NOTE: clickhouse-test will randomize session_timezone by itself as well
# (it will choose between default server timezone and something specific).
TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
echo "Choosen random timezone $TZ"
echo "Chosen random timezone $TZ"
ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
dpkg -i package_folder/clickhouse-common-static_*.deb
dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
# Accept failure in the next two commands until 24.4 is released (for compatibility and Bugfix validation run)
dpkg -i package_folder/clickhouse-odbc-bridge_*.deb || true
dpkg -i package_folder/clickhouse-library-bridge_*.deb || true
dpkg -i package_folder/clickhouse-odbc-bridge_*.deb
dpkg -i package_folder/clickhouse-library-bridge_*.deb
dpkg -i package_folder/clickhouse-server_*.deb
dpkg -i package_folder/clickhouse-client_*.deb
@ -43,6 +54,9 @@ source /utils.lib
/usr/share/clickhouse-test/config/install.sh
./setup_minio.sh stateless
./mc admin trace clickminio > /test_output/rubbish.log &
MC_ADMIN_PID=$!
./setup_hdfs_minicluster.sh
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
@ -55,12 +69,6 @@ if [[ -n "$BUGFIX_VALIDATE_CHECK" ]] && [[ "$BUGFIX_VALIDATE_CHECK" -eq 1 ]]; th
rm /etc/clickhouse-server/users.d/s3_cache_new.xml
rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
#todo: remove these after 24.3 released.
sudo sed -i "s|<object_storage_type>azure<|<object_storage_type>azure_blob_storage<|" /etc/clickhouse-server/config.d/azure_storage_conf.xml
#todo: remove these after 24.3 released.
sudo sed -i "s|<object_storage_type>local<|<object_storage_type>local_blob_storage<|" /etc/clickhouse-server/config.d/storage_conf.xml
function remove_keeper_config()
{
sudo sed -i "/<$1>$2<\/$1>/d" /etc/clickhouse-server/config.d/keeper_port.xml
@ -93,10 +101,57 @@ if [ "$NUM_TRIES" -gt "1" ]; then
mkdir -p /var/run/clickhouse-server
fi
# Run a CH instance to execute sequential tests on it in parallel with all other tests.
if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
mkdir -p /var/run/clickhouse-server3 /etc/clickhouse-server3 /var/lib/clickhouse3
cp -r -L /etc/clickhouse-server/* /etc/clickhouse-server3/
sudo chown clickhouse:clickhouse /var/run/clickhouse-server3 /var/lib/clickhouse3 /etc/clickhouse-server3/
sudo chown -R clickhouse:clickhouse /etc/clickhouse-server3/*
function replace(){
sudo find /etc/clickhouse-server3/ -type f -name '*.xml' -exec sed -i "$1" {} \;
}
replace "s|<port>9000</port>|<port>19000</port>|g"
replace "s|<port>9440</port>|<port>19440</port>|g"
replace "s|<port>9988</port>|<port>19988</port>|g"
replace "s|<port>9234</port>|<port>19234</port>|g"
replace "s|<port>9181</port>|<port>19181</port>|g"
replace "s|<https_port>8443</https_port>|<https_port>18443</https_port>|g"
replace "s|<tcp_port>9000</tcp_port>|<tcp_port>19000</tcp_port>|g"
replace "s|<tcp_port>9181</tcp_port>|<tcp_port>19181</tcp_port>|g"
replace "s|<tcp_port_secure>9440</tcp_port_secure>|<tcp_port_secure>19440</tcp_port_secure>|g"
replace "s|<tcp_with_proxy_port>9010</tcp_with_proxy_port>|<tcp_with_proxy_port>19010</tcp_with_proxy_port>|g"
replace "s|<mysql_port>9004</mysql_port>|<mysql_port>19004</mysql_port>|g"
replace "s|<postgresql_port>9005</postgresql_port>|<postgresql_port>19005</postgresql_port>|g"
replace "s|<interserver_http_port>9009</interserver_http_port>|<interserver_http_port>19009</interserver_http_port>|g"
replace "s|8123|18123|g"
replace "s|/var/lib/clickhouse/|/var/lib/clickhouse3/|g"
replace "s|/etc/clickhouse-server/|/etc/clickhouse-server3/|g"
# distributed cache
replace "s|<tcp_port>10001</tcp_port>|<tcp_port>10003</tcp_port>|g"
replace "s|<tcp_port>10002</tcp_port>|<tcp_port>10004</tcp_port>|g"
sudo -E -u clickhouse /usr/bin/clickhouse server --daemon --config /etc/clickhouse-server3/config.xml \
--pid-file /var/run/clickhouse-server3/clickhouse-server.pid \
-- --path /var/lib/clickhouse3/ --logger.stderr /var/log/clickhouse-server/stderr3.log \
--logger.log /var/log/clickhouse-server/clickhouse-server3.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server3.err.log \
--tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
--prometheus.port 19988 --keeper_server.raft_configuration.server.port 19234 --keeper_server.tcp_port 19181 \
--mysql_port 19004 --postgresql_port 19005
for _ in {1..100}
do
clickhouse-client --port 19000 --query "SELECT 1" && break
sleep 1
done
fi
# simplest way to forward env variables to server
sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon --pid-file /var/run/clickhouse-server/clickhouse-server.pid
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" /etc/clickhouse-server1/config.d/filesystem_caches_path.xml
sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_2/</filesystem_caches_path>|" /etc/clickhouse-server2/config.d/filesystem_caches_path.xml
@ -133,7 +188,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
fi
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
sudo cat /etc/clickhouse-server1/config.d/filesystem_caches_path.xml \
| sed "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" \
> /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp
@ -213,15 +268,15 @@ function run_tests()
ADDITIONAL_OPTIONS+=('--no-random-merge-tree-settings')
fi
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--shared-catalog')
fi
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--replicated-database')
# Too many tests fail for DatabaseReplicated in parallel.
ADDITIONAL_OPTIONS+=('--jobs')
ADDITIONAL_OPTIONS+=('2')
ADDITIONAL_OPTIONS+=('3')
elif [[ 1 == $(clickhouse-client --query "SELECT value LIKE '%SANITIZE_COVERAGE%' FROM system.build_options WHERE name = 'CXX_FLAGS'") ]]; then
# Coverage on a per-test basis could only be collected sequentially.
# Do not set the --jobs parameter.
@ -229,7 +284,11 @@ function run_tests()
else
# All other configurations are OK.
ADDITIONAL_OPTIONS+=('--jobs')
ADDITIONAL_OPTIONS+=('8')
ADDITIONAL_OPTIONS+=('5')
fi
if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--run-sequential-tests-in-parallel')
fi
if [[ -n "$RUN_BY_HASH_NUM" ]] && [[ -n "$RUN_BY_HASH_TOTAL" ]]; then
@ -253,7 +312,7 @@ function run_tests()
try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"
set +e
timeout -s TERM --preserve-status 120m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
timeout -k 60m -s TERM --preserve-status 140m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
--no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
| ts '%Y-%m-%d %H:%M:%S' \
| tee -a test_output/test_result.txt
@ -262,14 +321,17 @@ function run_tests()
export -f run_tests
# This should be enough to setup job and collect artifacts
TIMEOUT=$((MAX_RUN_TIME - 700))
if [ "$NUM_TRIES" -gt "1" ]; then
# We don't run tests with Ordinary database in PRs, only in master.
# So run new/changed tests with Ordinary at least once in flaky check.
timeout_with_logging "$MAX_RUN_TIME" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \
timeout_with_logging "$TIMEOUT" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \
| sed 's/All tests have finished//' | sed 's/No tests were run//' ||:
fi
timeout_with_logging "$MAX_RUN_TIME" bash -c run_tests ||:
timeout_with_logging "$TIMEOUT" bash -c run_tests ||:
echo "Files in current directory"
ls -la ./
@ -290,7 +352,7 @@ do
err=$(clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.gz' format TSVWithNamesAndTypes")
echo "$err"
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
echo "$err"
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
@ -299,7 +361,7 @@ do
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
fi
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
echo "$err"
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
@ -310,19 +372,33 @@ done
# Why do we read data with clickhouse-local?
# Because it's the simplest way to read it when server has crashed.
sudo clickhouse stop ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
sudo clickhouse stop --pid-path /var/run/clickhouse-server3 ||:
fi
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
sudo clickhouse stop --pid-path /var/run/clickhouse-server2 ||:
fi
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
fi
# Kill minio admin client to stop collecting logs
kill $MC_ADMIN_PID
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:
rg -A50 -Fa "============" /var/log/clickhouse-server/stderr.log ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &
if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
rg -Fa "<Fatal>" /var/log/clickhouse-server3/clickhouse-server.log ||:
rg -A50 -Fa "============" /var/log/clickhouse-server3/stderr.log ||:
zstd --threads=0 < /var/log/clickhouse-server3/clickhouse-server.log > /test_output/clickhouse-server3.log.zst &
fi
data_path_config="--path=/var/lib/clickhouse/"
if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
# We need s3 storage configuration (but it's more likely that clickhouse-local will fail for some reason)
@ -342,12 +418,17 @@ if [ $failed_to_save_logs -ne 0 ]; then
for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log
do
clickhouse-local "$data_path_config" --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
clickhouse-local --path /var/lib/clickhouse3/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.3.tsv.zst ||:
fi
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
clickhouse-local --path /var/lib/clickhouse2/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst ||:
fi
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
fi
done
@ -383,7 +464,14 @@ rm -rf /var/lib/clickhouse/data/system/*/
tar -chf /test_output/store.tar /var/lib/clickhouse/store ||:
tar -chf /test_output/metadata.tar /var/lib/clickhouse/metadata/*.sql ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
rm -rf /var/lib/clickhouse3/data/system/*/
tar -chf /test_output/store.tar /var/lib/clickhouse3/store ||:
tar -chf /test_output/metadata.tar /var/lib/clickhouse3/metadata/*.sql ||:
fi
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log ||:
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server2.log ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst ||:
@ -394,9 +482,11 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination ||:
fi
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst ||:
mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
fi
collect_core_dumps

View File

@ -1,8 +1,5 @@
#!/bin/bash
# core.COMM.PID-TID
sysctl kernel.core_pattern='core.%e.%p-%P'
OK="\tOK\t\\N\t"
FAIL="\tFAIL\t\\N\t"
@ -315,12 +312,4 @@ function collect_query_and_trace_logs()
done
}
function collect_core_dumps()
{
find . -type f -maxdepth 1 -name 'core.*' | while read -r core; do
zstd --threads=0 "$core"
mv "$core.zst" /test_output/
done
}
# vi: ft=bash

View File

@ -1,5 +1,10 @@
#!/bin/bash
# core.COMM.PID-TID
sysctl kernel.core_pattern='core.%e.%p-%P'
# ASAN doesn't work with suid_dumpable=2
sysctl fs.suid_dumpable=1
function run_with_retry()
{
if [[ $- =~ e ]]; then
@ -38,7 +43,7 @@ function fn_exists() {
function timeout_with_logging() {
local exit_code=0
timeout "${@}" || exit_code="${?}"
timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
if [[ "${exit_code}" -eq "124" ]]
then
@ -48,4 +53,12 @@ function timeout_with_logging() {
return $exit_code
}
function collect_core_dumps()
{
find . -type f -maxdepth 1 -name 'core.*' | while read -r core; do
zstd --threads=0 "$core"
mv "$core.zst" /test_output/
done
}
# vi: ft=bash

View File

@ -21,6 +21,9 @@ source /attach_gdb.lib
# shellcheck source=../stateless/stress_tests.lib
source /stress_tests.lib
# shellcheck disable=SC1091
source /utils.lib
install_packages package_folder
# Thread Fuzzer allows to check more permutations of possible thread scheduling

View File

@ -23,22 +23,8 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
# python-magic is the same version as in Ubuntu 22.04
RUN pip3 install \
PyGithub \
black==23.12.0 \
boto3 \
codespell==2.2.1 \
mypy==1.8.0 \
pylint==3.1.0 \
python-magic==0.4.24 \
flake8==4.0.1 \
requests \
thefuzz \
tqdm==4.66.4 \
types-requests \
unidiff \
jwt \
&& rm -rf /root/.cache/pip
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r requirements.txt
RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
ENV LC_ALL en_US.UTF-8

View File

@ -0,0 +1,58 @@
aiohttp==3.9.5
aiosignal==1.3.1
astroid==3.1.0
async-timeout==4.0.3
attrs==23.2.0
black==24.4.2
boto3==1.34.131
botocore==1.34.131
certifi==2024.6.2
cffi==1.16.0
charset-normalizer==3.3.2
click==8.1.7
codespell==2.2.1
cryptography==42.0.8
Deprecated==1.2.14
dill==0.3.8
flake8==4.0.1
frozenlist==1.4.1
idna==3.7
isort==5.13.2
jmespath==1.0.1
jwt==1.3.1
mccabe==0.6.1
multidict==6.0.5
mypy==1.8.0
mypy-extensions==1.0.0
packaging==24.1
pathspec==0.9.0
pip==24.1.1
pipdeptree==2.23.0
platformdirs==4.2.2
pycodestyle==2.8.0
pycparser==2.22
pyflakes==2.4.0
PyGithub==2.3.0
PyJWT==2.8.0
pylint==3.1.0
PyNaCl==1.5.0
python-dateutil==2.9.0.post0
python-magic==0.4.24
PyYAML==6.0.1
rapidfuzz==3.9.3
requests==2.32.3
s3transfer==0.10.1
setuptools==59.6.0
six==1.16.0
thefuzz==0.22.1
tomli==2.0.1
tomlkit==0.12.5
tqdm==4.66.4
types-requests==2.32.0.20240622
typing_extensions==4.12.2
unidiff==0.7.5
urllib3==2.2.2
wheel==0.37.1
wrapt==1.16.0
yamllint==1.26.3
yarl==1.9.4

View File

@ -11,6 +11,7 @@ TIMEOUT_SIGN = "[ Timeout! "
UNKNOWN_SIGN = "[ UNKNOWN "
SKIPPED_SIGN = "[ SKIPPED "
HUNG_SIGN = "Found hung queries in processlist"
SERVER_DIED_SIGN = "Server died, terminating all processes"
DATABASE_SIGN = "Database: "
SUCCESS_FINISH_SIGNS = ["All tests have finished", "No tests were run"]
@ -25,6 +26,7 @@ def process_test_log(log_path, broken_tests):
failed = 0
success = 0
hung = False
server_died = False
retries = False
success_finish = False
test_results = []
@ -41,6 +43,8 @@ def process_test_log(log_path, broken_tests):
if HUNG_SIGN in line:
hung = True
break
if SERVER_DIED_SIGN in line:
server_died = True
if RETRIES_SIGN in line:
retries = True
if any(
@ -123,6 +127,7 @@ def process_test_log(log_path, broken_tests):
failed,
success,
hung,
server_died,
success_finish,
retries,
test_results,
@ -150,6 +155,7 @@ def process_result(result_path, broken_tests):
failed,
success,
hung,
server_died,
success_finish,
retries,
test_results,
@ -165,6 +171,10 @@ def process_result(result_path, broken_tests):
description = "Some queries hung, "
state = "failure"
test_results.append(("Some queries hung", "FAIL", "0", ""))
elif server_died:
description = "Server died, "
state = "failure"
test_results.append(("Server died", "FAIL", "0", ""))
elif not success_finish:
description = "Tests are not finished, "
state = "failure"
@ -218,5 +228,20 @@ if __name__ == "__main__":
state, description, test_results = process_result(args.in_results_dir, broken_tests)
logging.info("Result parsed")
status = (state, description)
def test_result_comparator(item):
# sort by status then by check name
order = {
"FAIL": 0,
"Timeout": 1,
"NOT_FAILED": 2,
"BROKEN": 3,
"OK": 4,
"SKIPPED": 5,
}
return order.get(item[1], 10), str(item[0]), item[1]
test_results.sort(key=test_result_comparator)
write_results(args.out_results_file, args.out_status_file, test_results, status)
logging.info("Result written")

View File

@ -75,7 +75,7 @@ SETTINGS
Possible values:
- unordered — With unordered mode, the set of all already processed files is tracked with persistent nodes in ZooKeeper.
- ordered — With ordered mode, only the max name of the successfully consumed file, and the names of files that will be retried after unsuccessful loading attempt are being stored in ZooKeeper.
- ordered — With ordered mode, the files are processed in lexicographic order. It means that if a file named 'BBB' was processed at some point and later a file named 'AA' is added to the bucket, it will be ignored. Only the max name (in the lexicographic sense) of the successfully consumed file, and the names of files that will be retried after an unsuccessful loading attempt, are stored in ZooKeeper.
Default value: `ordered` in versions before 24.6. Starting with 24.6 there is no default value and the setting must be specified manually. For tables created on earlier versions the default value remains `ordered` for compatibility.
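A minimal sketch of a table definition that specifies the mode explicitly (the table name, column, bucket path and format below are illustrative placeholders, not taken from this document):
```sql
-- Sketch: starting with 24.6 the mode setting has to be given explicitly.
-- The path, format and column are illustrative placeholders.
CREATE TABLE s3queue_example (data String)
ENGINE = S3Queue('https://example-bucket.s3.amazonaws.com/data/*', 'CSVWithNames')
SETTINGS mode = 'ordered';
```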

View File

@ -1535,6 +1535,10 @@ the columns from input data will be mapped to the columns from the table by thei
Otherwise, the first row will be skipped.
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_types_use_header) is set to 1,
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
If setting [output_format_binary_encode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#output_format_binary_encode_types_in_binary_format) is set to 1,
the types in header will be written using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in RowBinaryWithNamesAndTypes output format.
If setting [input_format_binary_encode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#input_format_binary_encode_types_in_binary_format) is set to 1,
the types in header will be read using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in RowBinaryWithNamesAndTypes input format.
:::
## RowBinaryWithDefaults {#rowbinarywithdefaults}

View File

@ -2,15 +2,11 @@
slug: /en/operations/opentelemetry
sidebar_position: 62
sidebar_label: Tracing ClickHouse with OpenTelemetry
title: "[experimental] Tracing ClickHouse with OpenTelemetry"
title: "Tracing ClickHouse with OpenTelemetry"
---
[OpenTelemetry](https://opentelemetry.io/) is an open standard for collecting traces and metrics from the distributed application. ClickHouse has some support for OpenTelemetry.
:::note
This is an experimental feature that will change in backwards-incompatible ways in future releases.
:::
## Supplying Trace Context to ClickHouse
ClickHouse accepts trace context HTTP headers, as described by the [W3C recommendation](https://www.w3.org/TR/trace-context/). It also accepts trace context over a native protocol that is used for communication between ClickHouse servers or between the client and server. For manual testing, trace context headers conforming to the Trace Context recommendation can be supplied to `clickhouse-client` using `--opentelemetry-traceparent` and `--opentelemetry-tracestate` flags.

View File

@ -974,6 +974,13 @@ Default value: false
- [exclude_deleted_rows_for_part_size_in_merge](#exclude_deleted_rows_for_part_size_in_merge) setting
## use_compact_variant_discriminators_serialization {#use_compact_variant_discriminators_serialization}
Enables compact mode for binary serialization of discriminators in Variant data type.
This mode makes it possible to use significantly less memory for storing discriminators in parts when there is mostly one variant or a lot of NULL values.
Default value: true
## merge_workload
Used to regulate how resources are utilized and shared between merges and other workloads. Specified value is used as `workload` setting value for background merges of this table. If not specified (empty string), then server setting `merge_workload` is used instead.

View File

@ -1951,6 +1951,18 @@ The maximum allowed size for String in RowBinary format. It prevents allocating
Default value: `1GiB`.
### output_format_binary_encode_types_in_binary_format {#output_format_binary_encode_types_in_binary_format}
Write data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in RowBinaryWithNamesAndTypes output format.
Disabled by default.
### input_format_binary_decode_types_in_binary_format {#input_format_binary_decode_types_in_binary_format}
Read data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in RowBinaryWithNamesAndTypes input format.
Disabled by default.
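As an illustrative sketch (the table name `my_table` is a placeholder, not taken from this document), the header types of `RowBinaryWithNamesAndTypes` can be requested in binary encoding like this:
```sql
-- Sketch: emit binary-encoded types in the RowBinaryWithNamesAndTypes header.
-- "my_table" is a placeholder table name.
SELECT *
FROM my_table
SETTINGS output_format_binary_encode_types_in_binary_format = 1
FORMAT RowBinaryWithNamesAndTypes
```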
## Native format settings {#native-format-settings}
### input_format_native_allow_types_conversion {#input_format_native_allow_types_conversion}
@ -1958,3 +1970,15 @@ Default value: `1GiB`.
Allow types conversion in Native input format between columns from input data and requested columns.
Enabled by default.
### output_format_native_encode_types_in_binary_format {#output_format_native_encode_types_in_binary_format}
Write data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in Native output format.
Disabled by default.
### input_format_native_decode_types_in_binary_format {#input_format_native_decode_types_in_binary_format}
Read data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in Native input format.
Disabled by default.

View File

@ -1170,6 +1170,10 @@ Data in the VALUES clause of INSERT queries is processed by a separate stream pa
Default value: 262144 (= 256 KiB).
:::note
`max_query_size` cannot be set within an SQL query (e.g., `SELECT now() SETTINGS max_query_size=10000`) because ClickHouse needs to allocate a buffer to parse the query, and this buffer size is determined by the `max_query_size` setting, which must be configured before the query is executed.
:::
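As a sketch (the value below is arbitrary), the setting can instead be applied at the session level, before the query that needs the larger buffer is parsed:
```sql
-- Sketch: enlarge the parser buffer for the whole session; a per-query SETTINGS
-- clause would only be read after the buffer has already been allocated.
SET max_query_size = 1048576; -- 1 MiB, illustrative value only
SELECT now();
```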
## max_parser_depth {#max_parser_depth}
Limits maximum recursion depth in the recursive descent parser. Allows controlling the stack size.
@ -1354,12 +1358,25 @@ Connection pool size for PostgreSQL table engine and database engine.
Default value: 16
## postgresql_connection_attempt_timeout {#postgresql-connection-attempt-timeout}
Connection timeout in seconds for a single attempt to connect to the PostgreSQL endpoint.
The value is passed as a `connect_timeout` parameter of the connection URL.
Default value: `2`.
## postgresql_connection_pool_wait_timeout {#postgresql-connection-pool-wait-timeout}
Connection pool push/pop timeout on empty pool for PostgreSQL table engine and database engine. By default it will block on empty pool.
Default value: 5000
## postgresql_connection_pool_retries {#postgresql-connection-pool-retries}
The maximum number of retries to establish a connection with the PostgreSQL endpoint.
Default value: `2`.
## postgresql_connection_pool_auto_close_connection {#postgresql-connection-pool-auto-close-connection}
Close connection before returning connection to the pool.

View File

@ -0,0 +1,30 @@
---
slug: /en/operations/startup-scripts
sidebar_label: Startup Scripts
---
# Startup Scripts
ClickHouse can run arbitrary SQL queries from the server configuration during startup. This can be useful for migrations or automatic schema creation.
```xml
<clickhouse>
<startup_scripts>
<scripts>
<query>CREATE ROLE OR REPLACE test_role</query>
</scripts>
<scripts>
<query>CREATE TABLE TestTable (id UInt64) ENGINE=TinyLog</query>
<condition>SELECT 1;</condition>
</scripts>
</startup_scripts>
</clickhouse>
```
ClickHouse executes all queries from `startup_scripts` sequentially in the specified order. If any of the queries fails, the execution of the following queries is not interrupted.
You can specify a conditional query in the config. In that case, the corresponding query executes only when the condition query returns the value `1` or `true`.
:::note
If the condition query returns any value other than `1` or `true`, the result will be interpreted as `false`, and the corresponding query won't be executed.
:::

View File

@ -357,7 +357,7 @@ Number of currently running inserts to Kafka
Number of alive connections
### KeeperOutstandingRequets
### KeeperOutstandingRequests
Number of outstanding requests

View File

@ -4,35 +4,56 @@ sidebar_position: 59
sidebar_label: clickhouse-disks
---
# clickhouse-disks
# Clickhouse-disks
A utility providing filesystem-like operations for ClickHouse disks.
A utility providing filesystem-like operations for ClickHouse disks. It can work in both interactive and non-interactive modes.
Program-wide options:
## Program-wide options
* `--config-file, -C` -- path to ClickHouse config, defaults to `/etc/clickhouse-server/config.xml`.
* `--save-logs` -- Log progress of invoked commands to `/var/log/clickhouse-server/clickhouse-disks.log`.
* `--log-level` -- What [type](../server-configuration-parameters/settings#server_configuration_parameters-logger) of events to log, defaults to `none`.
* `--disk` -- what disk to use for `mkdir, move, read, write, remove` commands. Defaults to `default`.
* `--query, -q` -- a single query that can be executed without launching interactive mode
* `--help, -h` -- print all options and commands with their descriptions
## Default Disks
After the launch two disks are initialized. The first one is the disk `local`, which imitates the local file system from which the clickhouse-disks utility was launched. The second one is the disk `default`, which is mounted to the local filesystem in the directory that can be found in the config as the parameter `clickhouse/path` (default value is `/var/lib/clickhouse`).
## Clickhouse-disks state
For each disk that was added, the utility stores the current directory (as in a usual filesystem). The user can change the current directory and switch between disks.
The state is reflected in the prompt "`disk_name`:`path_name`"
## Commands
* `copy [--disk-from d1] [--disk-to d2] <FROM_PATH> <TO_PATH>`.
Recursively copy data from `FROM_PATH` at disk `d1` (defaults to `disk` value if not provided)
to `TO_PATH` at disk `d2` (defaults to `disk` value if not provided).
* `move <FROM_PATH> <TO_PATH>`.
Move file or directory from `FROM_PATH` to `TO_PATH`.
* `remove <PATH>`.
Remove `PATH` recursively.
* `link <FROM_PATH> <TO_PATH>`.
Create a hardlink from `FROM_PATH` to `TO_PATH`.
* `list [--recursive] <PATH>...`
List files at `PATH`s. Non-recursive by default.
* `list-disks`.
In this documentation, all mandatory positional arguments are referred to as `<parameter>`, and named arguments as `[--parameter value]`. All positional parameters can also be passed as named parameters with the corresponding name.
* `cd (change-dir, change_dir) [--disk disk] <path>`
Change directory to path `path` on disk `disk` (default value is the current disk). No disk switching happens.
* `copy (cp) [--disk-from disk_1] [--disk-to disk_2] <path-from> <path-to>`.
Recursively copy data from `path-from` at disk `disk_1` (defaults to the current disk; the `disk` parameter in non-interactive mode)
to `path-to` at disk `disk_2` (defaults to the current disk; the `disk` parameter in non-interactive mode).
* `current_disk_with_path (current, current_disk, current_path)`
Print current state in format:
`Disk: "current_disk" Path: "current path on current disk"`
* `help [<command>]`
Print help message about command `command`. If `command` is not specified print information about all commands.
* `move (mv) <path-from> <path-to>`.
Move a file or directory from `path-from` to `path-to` within the current disk.
* `remove (rm, delete) <path>`.
Remove `path` recursively on the current disk.
* `link (ln) <path-from> <path-to>`.
Create a hardlink from `path-from` to `path-to` on the current disk.
* `list (ls) [--recursive] <path>`
List files at `path` on the current disk. Non-recursive by default.
* `list-disks (list_disks, ls-disks, ls_disks)`.
List disks names.
* `mkdir [--recursive] <PATH>`.
* `mkdir [--recursive] <path>`
Create a directory on the current disk. Non-recursive by default.
* `read: <FROM_PATH> [<TO_PATH>]`
Read a file from `FROM_PATH` to `TO_PATH` (`stdout` if not supplied).
* `write [FROM_PATH] <TO_PATH>`.
Write a file from `FROM_PATH` (`stdin` if not supplied) to `TO_PATH`.
* `read (r) <path-from> [--path-to path]`
Read a file from `path-from` to `path` (`stdout` if not supplied).
* `switch-disk [--path path] <disk>`
Switch to disk `disk` on path `path` (if `path` is not specified, the default value is the previous path on disk `disk`).
* `write (w) [--path-from path] <path-to>`.
Write a file from `path` (`stdin` if `path` is not supplied; input must be terminated with Ctrl+D) to `path-to`.

View File

@ -16,7 +16,7 @@ sidebar_label: clickhouse-local
While `clickhouse-local` is a great tool for development and testing purposes, and for processing files, it is not suitable for serving end users or applications. In these scenarios, it is recommended to use the open-source [ClickHouse](https://clickhouse.com/docs/en/install). ClickHouse is a powerful OLAP database that is designed to handle large-scale analytical workloads. It provides fast and efficient processing of complex queries on large datasets, making it ideal for use in production environments where high-performance is critical. Additionally, ClickHouse offers a wide range of features such as replication, sharding, and high availability, which are essential for scaling up to handle large datasets and serving applications. If you need to handle larger datasets or serve end users or applications, we recommend using open-source ClickHouse instead of `clickhouse-local`.
Please read the docs below that show example use cases for `clickhouse-local`, such as [querying local CSVs](#query-data-in-a-csv-file-using-sql) or [reading a parquet file in S3](#query-data-in-a-parquet-file-in-aws-s3).
Please read the docs below that show example use cases for `clickhouse-local`, such as [querying local file](#query_data_in_file) or [reading a parquet file in S3](#query-data-in-a-parquet-file-in-aws-s3).
## Download clickhouse-local

View File

@ -18,7 +18,7 @@ ClickHouse also supports:
During aggregation, all `NULL` arguments are skipped. If the aggregation has several arguments it will ignore any row in which one or more of them are NULL.
There is an exception to this rule, which are the functions [`first_value`](../../sql-reference/aggregate-functions/reference/first_value.md), [`last_value`](../../sql-reference/aggregate-functions/reference/last_value.md) and their aliases when followed by the modifier `RESPECT NULLS`: `FIRST_VALUE(b) RESPECT NULLS`.
There is an exception to this rule: the functions [`first_value`](../../sql-reference/aggregate-functions/reference/first_value.md), [`last_value`](../../sql-reference/aggregate-functions/reference/last_value.md) and their aliases (`any` and `anyLast` respectively) when followed by the modifier `RESPECT NULLS`. For example, `FIRST_VALUE(b) RESPECT NULLS`.
**Examples:**

View File

@ -0,0 +1,37 @@
---
slug: /en/sql-reference/aggregate-functions/reference/aggthrow
sidebar_position: 101
---
# aggThrow
This function can be used to test exception safety. It will throw an exception on creation with the specified probability.
**Syntax**
```sql
aggThrow(throw_prob)
```
**Arguments**
- `throw_prob` — Probability to throw on creation. [Float64](../../data-types/float.md).
**Returned value**
- An exception: `Code: 503. DB::Exception: Aggregate function aggThrow has thrown exception successfully`.
**Example**
Query:
```sql
SELECT number % 2 AS even, aggThrow(number) FROM numbers(10) GROUP BY even;
```
Result:
```response
Received exception:
Code: 503. DB::Exception: Aggregate function aggThrow has thrown exception successfully: While executing AggregatingTransform. (AGGREGATE_FUNCTION_THROW)
```

View File

@ -5,12 +5,12 @@ sidebar_position: 102
# any
Selects the first encountered value of a column.
Selects the first encountered value of a column, ignoring any `NULL` values.
**Syntax**
```sql
any(column)
any(column) [RESPECT NULLS]
```
Aliases: `any_value`, [`first_value`](../reference/first_value.md).
@ -20,7 +20,9 @@ Aliases: `any_value`, [`first_value`](../reference/first_value.md).
**Returned value**
By default, it ignores NULL values and returns the first NOT NULL value found in the column. Like [`first_value`](../../../sql-reference/aggregate-functions/reference/first_value.md) it supports `RESPECT NULLS`, in which case it will select the first value passed, independently on whether it's NULL or not.
:::note
Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the first value passed, regardless of whether it is `NULL` or not.
:::
:::note
The return type of the function is the same as the input, except for LowCardinality, which is discarded. This means that given no rows as input it will return the default value of that type (0 for integers, or Null for a Nullable() column). You might use the `-OrNull` [combinator](../../../sql-reference/aggregate-functions/combinators.md) to modify this behaviour.

View File

@ -1,44 +0,0 @@
---
slug: /en/sql-reference/aggregate-functions/reference/any_respect_nulls
sidebar_position: 103
---
# any_respect_nulls
Selects the first encountered value of a column, irregardless of whether it is a `NULL` value or not.
Alias: `any_value_respect_nulls`, `first_value_repect_nulls`.
**Syntax**
```sql
any_respect_nulls(column)
```
**Parameters**
- `column`: The column name.
**Returned value**
- The last value encountered, irregardless of whether it is a `NULL` value or not.
**Example**
Query:
```sql
CREATE TABLE any_nulls (city Nullable(String)) ENGINE=Log;
INSERT INTO any_nulls (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);
SELECT any(city), any_respect_nulls(city) FROM any_nulls;
```
```response
┌─any(city)─┬─any_respect_nulls(city)─┐
│ Amsterdam │ ᴺᵁᴸᴸ │
└───────────┴─────────────────────────┘
```
**See Also**
- [any](../reference/any.md)

View File

@ -5,17 +5,21 @@ sidebar_position: 105
# anyLast
Selects the last value encountered. The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.
Selects the last value encountered, ignoring any `NULL` values by default. The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.
**Syntax**
```sql
anyLast(column)
anyLast(column) [RESPECT NULLS]
```
**Parameters**
- `column`: The column name.
:::note
Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the last value passed, regardless of whether it is `NULL` or not.
:::
**Returned value**
- The last value encountered.

View File

@ -1,39 +0,0 @@
---
slug: /en/sql-reference/aggregate-functions/reference/anylast_respect_nulls
sidebar_position: 106
---
# anyLast_respect_nulls
Selects the last value encountered, irregardless of whether it is `NULL` or not.
**Syntax**
```sql
anyLast_respect_nulls(column)
```
**Parameters**
- `column`: The column name.
**Returned value**
- The last value encountered, irregardless of whether it is `NULL` or not.
**Example**
Query:
```sql
CREATE TABLE any_last_nulls (city Nullable(String)) ENGINE=Log;
INSERT INTO any_last_nulls (city) VALUES ('Amsterdam'),(NULL),('New York'),('Tokyo'),('Valencia'),(NULL);
SELECT anyLast(city), anyLast_respect_nulls(city) FROM any_last_nulls;
```
```response
┌─anyLast(city)─┬─anyLast_respect_nulls(city)─┐
│ Valencia │ ᴺᵁᴸᴸ │
└───────────────┴─────────────────────────────┘
```

View File

@ -43,11 +43,11 @@ Standard aggregate functions:
ClickHouse-specific aggregate functions:
- [aggThrow](../reference/aggthrow.md)
- [analysisOfVariance](../reference/analysis_of_variance.md)
- [any](../reference/any_respect_nulls.md)
- [any](../reference/any.md)
- [anyHeavy](../reference/anyheavy.md)
- [anyLast](../reference/anylast.md)
- [anyLast](../reference/anylast_respect_nulls.md)
- [boundingRatio](../reference/boundrat.md)
- [first_value](../reference/first_value.md)
- [last_value](../reference/last_value.md)

View File

@ -5,23 +5,45 @@ sidebar_position: 165
# maxMap
Syntax: `maxMap(key, value)` or `maxMap(Tuple(key, value))`
Calculates the maximum from `value` array according to the keys specified in the `key` array.
Passing a tuple of keys and value arrays is identical to passing two arrays of keys and values.
**Syntax**
The number of elements in `key` and `value` must be the same for each row that is totaled.
```sql
maxMap(key, value)
```
or
```sql
maxMap(Tuple(key, value))
```
Returns a tuple of two arrays: keys and values calculated for the corresponding keys.
Alias: `maxMappedArrays`
Example:
:::note
- Passing a tuple of keys and value arrays is identical to passing two arrays of keys and values.
- The number of elements in `key` and `value` must be the same for each row that is totaled.
:::
**Parameters**
- `key` — Array of keys. [Array](../../data-types/array.md).
- `value` — Array of values. [Array](../../data-types/array.md).
**Returned value**
- Returns a tuple of two arrays: keys in sorted order, and values calculated for the corresponding keys. [Tuple](../../data-types/tuple.md)([Array](../../data-types/array.md), [Array](../../data-types/array.md)).
**Example**
Query:
``` sql
SELECT maxMap(a, b)
FROM values('a Array(Char), b Array(Int64)', (['x', 'y'], [2, 2]), (['y', 'z'], [3, 1]))
```
Result:
``` text
┌─maxMap(a, b)───────────┐
│ [['x','y','z'],[2,3,1]]│

View File

@ -5,23 +5,45 @@ sidebar_position: 169
# minMap
Syntax: `minMap(key, value)` or `minMap(Tuple(key, value))`
Calculates the minimum from `value` array according to the keys specified in the `key` array.
Passing a tuple of keys and value arrays is identical to passing two arrays of keys and values.
**Syntax**
The number of elements in `key` and `value` must be the same for each row that is totaled.
```sql
minMap(key, value)
```
or
```sql
minMap(Tuple(key, value))
```
Returns a tuple of two arrays: keys in sorted order, and values calculated for the corresponding keys.
Alias: `minMappedArrays`
Example:
:::note
- Passing a tuple of keys and value arrays is identical to passing an array of keys and an array of values.
- The number of elements in `key` and `value` must be the same for each row that is totaled.
:::
**Parameters**
- `key` — Array of keys. [Array](../../data-types/array.md).
- `value` — Array of values. [Array](../../data-types/array.md).
**Returned value**
- Returns a tuple of two arrays: keys in sorted order, and values calculated for the corresponding keys. [Tuple](../../data-types/tuple.md)([Array](../../data-types/array.md), [Array](../../data-types/array.md)).
**Example**
Query:
``` sql
SELECT minMap(a, b)
FROM values('a Array(Int32), b Array(Int64)', ([1, 2], [2, 2]), ([2, 3], [1, 1]))
```
Result:
``` text
┌─minMap(a, b)──────┐
│ ([1,2,3],[2,1,1]) │

View File

@ -16,7 +16,7 @@ singleValueOrNull(x)
**Parameters**
- `x` — Column of any [data type](../../data-types/index.md).
- `x` — Column of any [data type](../../data-types/index.md) (except [Map](../../data-types/map.md), [Array](../../data-types/array.md) or [Tuple](../../data-types/tuple) which cannot be of type [Nullable](../../data-types/nullable.md)).
**Returned values**

View File

@ -0,0 +1,115 @@
---
slug: /en/sql-reference/data-types/data-types-binary-encoding
sidebar_position: 56
sidebar_label: Data types binary encoding specification.
---
# Data types binary encoding specification
This specification describes the binary format that can be used for binary encoding and decoding of ClickHouse data types. This format is used in `Dynamic` column [binary serialization](dynamic.md#binary-output-format) and can be used in input/output formats [RowBinaryWithNamesAndTypes](../../interfaces/formats.md#rowbinarywithnamesandtypes) and [Native](../../interfaces/formats.md#native) under corresponding settings.
The table below describes how each data type is represented in binary format. Each data type encoding consists of 1 byte that indicates the type, followed by optional additional information.
`var_uint` in the binary encoding means that the size is encoded using Variable-Length Quantity compression.
| ClickHouse data type | Binary encoding |
|--------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `Nothing` | `0x00` |
| `UInt8` | `0x01` |
| `UInt16` | `0x02` |
| `UInt32` | `0x03` |
| `UInt64` | `0x04` |
| `UInt128` | `0x05` |
| `UInt256` | `0x06` |
| `Int8` | `0x07` |
| `Int16` | `0x08` |
| `Int32` | `0x09` |
| `Int64` | `0x0A` |
| `Int128` | `0x0B` |
| `Int256` | `0x0C` |
| `Float32` | `0x0D` |
| `Float64` | `0x0E` |
| `Date` | `0x0F` |
| `Date32` | `0x10` |
| `DateTime` | `0x11` |
| `DateTime(time_zone)` | `0x12<var_uint_time_zone_name_size><time_zone_name_data>` |
| `DateTime64(P)` | `0x13<uint8_precision>` |
| `DateTime64(P, time_zone)` | `0x14<uint8_precision><var_uint_time_zone_name_size><time_zone_name_data>` |
| `String` | `0x15` |
| `FixedString(N)` | `0x16<var_uint_size>` |
| `Enum8` | `0x17<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int8_value_1>...<var_uint_name_size_N><name_data_N><int8_value_N>` |
| `Enum16` | `0x18<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int16_little_endian_value_1>...><var_uint_name_size_N><name_data_N><int16_little_endian_value_N>` |
| `Decimal32(P, S)` | `0x19<uint8_precision><uint8_scale>` |
| `Decimal64(P, S)` | `0x1A<uint8_precision><uint8_scale>` |
| `Decimal128(P, S)` | `0x1B<uint8_precision><uint8_scale>` |
| `Decimal256(P, S)` | `0x1C<uint8_precision><uint8_scale>` |
| `UUID` | `0x1D` |
| `Array(T)` | `0x1E<nested_type_encoding>` |
| `Tuple(T1, ..., TN)` | `0x1F<var_uint_number_of_elements><nested_type_encoding_1>...<nested_type_encoding_N>` |
| `Tuple(name1 T1, ..., nameN TN)` | `0x20<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N>` |
| `Set` | `0x21` |
| `Interval` | `0x22<interval_kind>` (see [interval kind binary encoding](#interval-kind-binary-encoding)) |
| `Nullable(T)` | `0x23<nested_type_encoding>` |
| `Function` | `0x24<var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N><return_type_encoding>` |
| `AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x25<var_uint_version><var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N>` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) |
| `LowCardinality(T)` | `0x26<nested_type_encoding>` |
| `Map(K, V)` | `0x27<key_type_encoding><value_type_encoding>` |
| `IPv4` | `0x28` |
| `IPv6` | `0x29` |
| `Variant(T1, ..., TN)` | `0x2A<var_uint_number_of_variants><variant_type_encoding_1>...<variant_type_encoding_N>` |
| `Dynamic(max_types=N)` | `0x2B<uint8_max_types>` |
| `Custom type` (`Ring`, `Polygon`, etc) | `0x2C<var_uint_type_name_size><type_name_data>` |
| `Bool` | `0x2D` |
| `SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x2E<var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N>` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) |
| `Nested(name1 T1, ..., nameN TN)` | `0x2F<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N>` |
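For example, reading from the table above, the type `Map(String, Array(Nullable(UInt64)))` is encoded as the byte sequence `0x27 0x15 0x1E 0x23 0x04`: the `Map` byte, then `String` for the key type, then `Array`, `Nullable` and `UInt64` for the value type.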
### Interval kind binary encoding
The table below describes how different interval kinds of `Interval` data type are encoded.
| Interval kind | Binary encoding |
|---------------|-----------------|
| `Nanosecond` | `0x00` |
| `Microsecond` | `0x01` |
| `Millisecond` | `0x02` |
| `Second` | `0x03` |
| `Minute` | `0x04` |
| `Hour` | `0x05` |
| `Day` | `0x06` |
| `Week` | `0x07` |
| `Month` | `0x08` |
| `Quarter` | `0x09` |
| `Year`        | `0x0A`          |
### Aggregate function parameter binary encoding
The table below describes how parameters of `AggregateFunction` and `SimpleAggregateFunction` are encoded.
The encoding of a parameter consists of 1 byte indicating the type of the parameter and the value itself.
| Parameter type | Binary encoding |
|--------------------------|--------------------------------------------------------------------------------------------------------------------------------|
| `Null` | `0x00` |
| `UInt64` | `0x01<var_uint_value>` |
| `Int64` | `0x02<var_int_value>` |
| `UInt128` | `0x03<uint128_little_endian_value>` |
| `Int128` | `0x04<int128_little_endian_value>` |
| `UInt256`                | `0x05<uint256_little_endian_value>`                                                                                              |
| `Int256`                 | `0x06<int256_little_endian_value>`                                                                                              |
| `Float64` | `0x07<float64_little_endian_value>` |
| `Decimal32` | `0x08<var_uint_scale><int32_little_endian_value>` |
| `Decimal64` | `0x09<var_uint_scale><int64_little_endian_value>` |
| `Decimal128` | `0x0A<var_uint_scale><int128_little_endian_value>` |
| `Decimal256` | `0x0B<var_uint_scale><int256_little_endian_value>` |
| `String` | `0x0C<var_uint_size><data>` |
| `Array` | `0x0D<var_uint_size><value_encoding_1>...<value_encoding_N>` |
| `Tuple` | `0x0E<var_uint_size><value_encoding_1>...<value_encoding_N>` |
| `Map`                    | `0x0F<var_uint_size><key_encoding_1><value_encoding_1>...<key_encoding_N><value_encoding_N>`                                    |
| `IPv4` | `0x10<uint32_little_endian_value>` |
| `IPv6` | `0x11<uint128_little_endian_value>` |
| `UUID` | `0x12<uuid_value>` |
| `Bool` | `0x13<bool_value>` |
| `Object` | `0x14<var_uint_size><var_uint_key_size_1><key_data_1><value_encoding_1>...<var_uint_key_size_N><key_data_N><value_encoding_N>` |
| `AggregateFunctionState` | `0x15<var_uint_name_size><name_data><var_uint_data_size><data>` |
| `Negative infinity` | `0xFE` |
| `Positive infinity` | `0xFF` |
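For example, following the table above, a `String` parameter with the value `'abc'` is encoded as `0x0C 0x03 0x61 0x62 0x63` (the type byte, the size `3` as `var_uint`, then the raw bytes), and a `UInt64` parameter with the value `42` is encoded as `0x01 0x2A`.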

View File

@ -1,6 +1,6 @@
---
slug: /en/sql-reference/data-types/dynamic
sidebar_position: 56
sidebar_position: 62
sidebar_label: Dynamic
---
@ -493,3 +493,44 @@ SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) O
```
As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` and casted all other types to `String`.
## JSONExtract functions with Dynamic
All `JSONExtract*` functions support `Dynamic` type:
```sql
SELECT JSONExtract('{"a" : [1, 2, 3]}', 'a', 'Dynamic') AS dynamic, dynamicType(dynamic) AS dynamic_type;
```
```text
┌─dynamic─┬─dynamic_type───────────┐
│ [1,2,3] │ Array(Nullable(Int64)) │
└─────────┴────────────────────────┘
```
```sql
SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Variant(UInt32, String, Array(UInt32)))') AS map_of_dynamics, mapApply((k, v) -> (k, variantType(v)), map_of_dynamics) AS map_of_dynamic_types```
```text
┌─map_of_dynamics──────────────────┬─map_of_dynamic_types────────────────────────────┐
│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'UInt32','b':'String','c':'Array(UInt32)'} │
└──────────────────────────────────┴─────────────────────────────────────────────────┘
```
```sql
SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Variant(UInt32, String, Array(UInt32))') AS dynamics, arrayMap(x -> (x.1, variantType(x.2)), dynamics) AS dynamic_types
```
```text
┌─dynamics───────────────────────────────┬─dynamic_types─────────────────────────────────────────┐
│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','UInt32'),('b','String'),('c','Array(UInt32)')] │
└────────────────────────────────────────┴───────────────────────────────────────────────────────┘
```
### Binary output format
In RowBinary format values of `Dynamic` type are serialized in the following format:
```text
<binary_encoded_data_type><value_in_binary_format_according_to_the_data_type>
```
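For example, a `Dynamic` value that currently holds the `Int64` value `42` is written as the type byte `0x0A` (the binary encoding of `Int64` from the data types binary encoding specification) followed by the usual RowBinary representation of an `Int64` value (8 bytes, little-endian).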

View File

@ -5,11 +5,11 @@ sidebar_label: Object Data Type
keywords: [object, data type]
---
# Object Data Type
# Object Data Type (deprecated)
:::note
This feature is not production-ready and is now deprecated. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864)
:::
**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
<hr />
Stores JavaScript Object Notation (JSON) documents in a single column.

View File

@ -3080,4 +3080,4 @@ Result:
## Distance functions
All supported functions are described in [distance functions documentation](../../sql-reference/functions/distance-functions.md).
All supported functions are described in [distance functions documentation](../../sql-reference/functions/distance-functions.md).

View File

@ -83,7 +83,57 @@ Result:
```
## makeDate32
Like [makeDate](#makedate) but produces a [Date32](../data-types/date32.md).
Creates a date of type [Date32](../../sql-reference/data-types/date32.md) from a year, month and day (or, alternatively, from a year and a day of the year).
**Syntax**
```sql
makeDate32(year, [month,] day)
```
**Arguments**
- `year` — Year. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `month` — Month (optional). [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `day` — Day. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
:::note
If `month` is omitted then `day` should take a value between `1` and `365`, otherwise it should take a value between `1` and `31`.
:::
**Returned values**
- A date created from the arguments. [Date32](../../sql-reference/data-types/date32.md).
**Examples**
Create a date from a year, month, and day:
Query:
```sql
SELECT makeDate32(2024, 1, 1);
```
Result:
```response
2024-01-01
```
Create a Date from a year and day of year:
Query:
``` sql
SELECT makeDate32(2024, 100);
```
Result:
```response
2024-04-09
```
## makeDateTime
@ -125,12 +175,38 @@ Result:
## makeDateTime64
Like [makeDateTime](#makedatetime) but produces a [DateTime64](../data-types/datetime64.md).
Creates a [DateTime64](../../sql-reference/data-types/datetime64.md) data type value from its components: year, month, day, hour, minute, second, with an optional sub-second fraction and precision.
**Syntax**
```sql
makeDateTime64(year, month, day, hour, minute, second[, fraction[, precision]])
```
**Arguments**
- `year` — Year (0-9999). [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `month` — Month (1-12). [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `day` — Day (1-31). [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `hour` — Hour (0-23). [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `minute` — Minute (0-59). [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `second` — Second (0-59). [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `fraction` — Optional fraction of a second in units of the given `precision`. [Integer](../../sql-reference/data-types/int-uint.md).
- `precision` — Optional precision of the sub-second component (0-9). [Integer](../../sql-reference/data-types/int-uint.md).
**Returned value**
- A date and time created from the supplied arguments. [DateTime64](../../sql-reference/data-types/datetime64.md).
**Example**
``` sql
makeDateTime64(year, month, day, hour, minute, second[, fraction[, precision[, timezone]]])
SELECT makeDateTime64(2023, 5, 15, 10, 30, 45, 779, 5);
```
```response
┌─makeDateTime64(2023, 5, 15, 10, 30, 45, 779, 5)─┐
│ 2023-05-15 10:30:45.00779 │
└─────────────────────────────────────────────────┘
```
## timestamp

View File

@ -314,10 +314,71 @@ SELECT groupBitXor(cityHash64(*)) FROM table
Calculates a 32-bit hash code from any type of integer.
This is a relatively fast non-cryptographic hash function of average quality for numbers.
**Syntax**
```sql
intHash32(int)
```
**Arguments**
- `int` — Integer to hash. [(U)Int*](../data-types/int-uint.md).
**Returned value**
- 32-bit hash code. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT intHash32(42);
```
Result:
```response
┌─intHash32(42)─┐
│ 1228623923 │
└───────────────┘
```
## intHash64
Calculates a 64-bit hash code from any type of integer.
It works faster than intHash32. Average quality.
This is a relatively fast non-cryptographic hash function of average quality for numbers.
It works faster than [intHash32](#inthash32).
**Syntax**
```sql
intHash64(int)
```
**Arguments**
- `int` — Integer to hash. [(U)Int*](../data-types/int-uint.md).
**Returned value**
- 64-bit hash code. [UInt64](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT intHash64(42);
```
Result:
```response
┌────────intHash64(42)─┐
│ 11490350930367293593 │
└──────────────────────┘
```
## SHA1, SHA224, SHA256, SHA512, SHA512_256

View File

@ -12,9 +12,7 @@ Functions for [searching](string-search-functions.md) in strings and for [replac
## empty
Checks whether the input string is empty.
A string is considered non-empty if it contains at least one byte, even if this byte is a space or the null byte.
Checks whether the input string is empty. A string is considered non-empty if it contains at least one byte, even if this byte is a space or the null byte.
The function is also available for [arrays](array-functions.md#function-empty) and [UUIDs](uuid-functions.md#empty).
@ -48,9 +46,7 @@ Result:
## notEmpty
Checks whether the input string is non-empty.
A string is considered non-empty if it contains at least one byte, even if this byte is a space or the null byte.
Checks whether the input string is non-empty. A string is considered non-empty if it contains at least one byte, even if this byte is a space or the null byte.
The function is also available for [arrays](array-functions.md#function-notempty) and [UUIDs](uuid-functions.md#notempty).
@ -96,7 +92,7 @@ length(s)
**Parameters**
- `s`: An input string or array. [String](../data-types/string)/[Array](../data-types/array).
- `s` An input string or array. [String](../data-types/string)/[Array](../data-types/array).
**Returned value**
@ -149,7 +145,7 @@ lengthUTF8(s)
**Parameters**
- `s`: String containing valid UTF-8 encoded text. [String](../data-types/string).
- `s` String containing valid UTF-8 encoded text. [String](../data-types/string).
**Returned value**
@ -183,8 +179,8 @@ left(s, offset)
**Parameters**
- `s`: The string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
- `offset`: The number of bytes of the offset. [UInt*](../data-types/int-uint).
- `s` The string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
- `offset` The number of bytes of the offset. [UInt*](../data-types/int-uint).
**Returned value**
@ -230,8 +226,8 @@ leftUTF8(s, offset)
**Parameters**
- `s`: The UTF-8 encoded string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
- `offset`: The number of bytes of the offset. [UInt*](../data-types/int-uint).
- `s` The UTF-8 encoded string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
- `offset` The number of bytes of the offset. [UInt*](../data-types/int-uint).
**Returned value**
@ -347,8 +343,8 @@ right(s, offset)
**Parameters**
- `s`: The string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
- `offset`: The number of bytes of the offset. [UInt*](../data-types/int-uint).
- `s` The string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
- `offset` The number of bytes of the offset. [UInt*](../data-types/int-uint).
**Returned value**
@ -394,8 +390,8 @@ rightUTF8(s, offset)
**Parameters**
- `s`: The UTF-8 encoded string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
- `offset`: The number of bytes of the offset. [UInt*](../data-types/int-uint).
- `s` The UTF-8 encoded string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
- `offset` The number of bytes of the offset. [UInt*](../data-types/int-uint).
**Returned value**
@ -547,7 +543,7 @@ Alias: `ucase`
**Parameters**
- `input`: A string type [String](../data-types/string.md).
- `input` A string type [String](../data-types/string.md).
**Returned value**
@ -571,16 +567,47 @@ SELECT upper('clickhouse');
Converts a string to lowercase, assuming that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
Does not detect the language, e.g. for Turkish the result might not be exactly correct (i/İ vs. i/I).
:::note
Does not detect the language, e.g. for Turkish the result might not be exactly correct (i/İ vs. i/I). If the length of the UTF-8 byte sequence is different for upper and lower case of a code point (such as `ẞ` and `ß`), the result may be incorrect for this code point.
:::
If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point.
**Syntax**
```sql
lowerUTF8(input)
```
**Parameters**
- `input` — A string type [String](../data-types/string.md).
**Returned value**
- A [String](../data-types/string.md) data type value.
**Example**
Query:
``` sql
SELECT lowerUTF8('MÜNCHEN') as Lowerutf8;
```
Result:
``` response
┌─Lowerutf8─┐
│ münchen │
└───────────┘
```
## upperUTF8
Converts a string to uppercase, assuming that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point.
:::note
Does not detect the language, e.g. for Turkish the result might not be exactly correct (i/İ vs. i/I). If the length of the UTF-8 byte sequence is different for upper and lower case of a code point (such as `ẞ` and `ß`), the result may be incorrect for this code point.
:::
**Syntax**
@ -590,7 +617,7 @@ upperUTF8(input)
**Parameters**
- `input`: A string type [String](../data-types/string.md).
- `input` A string type [String](../data-types/string.md).
**Returned value**
@ -604,6 +631,8 @@ Query:
SELECT upperUTF8('München') as Upperutf8;
```
Result:
``` response
┌─Upperutf8─┐
│ MÜNCHEN │
@ -614,6 +643,34 @@ SELECT upperUTF8('München') as Upperutf8;
Returns 1, if the set of bytes constitutes valid UTF-8-encoded text, otherwise 0.
**Syntax**
``` sql
isValidUTF8(input)
```
**Parameters**
- `input` — A string type [String](../data-types/string.md).
**Returned value**
- Returns `1`, if the set of bytes constitutes valid UTF-8-encoded text, otherwise `0`.
Query:
``` sql
SELECT isValidUTF8('\xc3\xb1') AS valid, isValidUTF8('\xc3\x28') AS invalid;
```
Result:
``` response
┌─valid─┬─invalid─┐
│ 1 │ 0 │
└───────┴─────────┘
```
## toValidUTF8
Replaces invalid UTF-8 characters by the `�` (U+FFFD) character. All consecutive invalid characters are collapsed into a single replacement character.
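A short sketch of the collapsing behaviour (the byte sequence is illustrative):

```sql
-- the four invalid bytes after 'a' should collapse into a single replacement character: 'a�b'
SELECT toValidUTF8('\x61\xF0\x80\x80\x80b');
```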
@ -883,7 +940,7 @@ Returns the substring of a string `s` which starts at the specified byte index `
substring(s, offset[, length])
```
Alias:
Aliases:
- `substr`
- `mid`
- `byteSlice`
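A brief sketch showing that the offset is 1-based and that the aliases are interchangeable:

```sql
-- expected: 'House' and 'Click'
SELECT substring('ClickHouse', 6) AS s1, substr('ClickHouse', 1, 5) AS s2;
```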
@ -926,9 +983,9 @@ substringUTF8(s, offset[, length])
**Arguments**
- `s`: The string to calculate a substring from. [String](../data-types/string.md), [FixedString](../data-types/fixedstring.md) or [Enum](../data-types/enum.md)
- `offset`: The starting position of the substring in `s` . [(U)Int*](../data-types/int-uint.md).
- `length`: The maximum length of the substring. [(U)Int*](../data-types/int-uint.md). Optional.
- `s` The string to calculate a substring from. [String](../data-types/string.md), [FixedString](../data-types/fixedstring.md) or [Enum](../data-types/enum.md)
- `offset` The starting position of the substring in `s` . [(U)Int*](../data-types/int-uint.md).
- `length` The maximum length of the substring. [(U)Int*](../data-types/int-uint.md). Optional.
**Returned value**
@ -964,9 +1021,9 @@ Alias: `SUBSTRING_INDEX`
**Arguments**
- s: The string to extract substring from. [String](../data-types/string.md).
- delim: The character to split. [String](../data-types/string.md).
- count: The number of occurrences of the delimiter to count before extracting the substring. If count is positive, everything to the left of the final delimiter (counting from the left) is returned. If count is negative, everything to the right of the final delimiter (counting from the right) is returned. [UInt or Int](../data-types/int-uint.md)
- s The string to extract substring from. [String](../data-types/string.md).
- delim The character to split. [String](../data-types/string.md).
- count The number of occurrences of the delimiter to count before extracting the substring. If count is positive, everything to the left of the final delimiter (counting from the left) is returned. If count is negative, everything to the right of the final delimiter (counting from the right) is returned. [UInt or Int](../data-types/int-uint.md)
**Example**
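For instance, a sketch contrasting positive and negative `count` values (the input string is illustrative):

```sql
-- expected: 'www.clickhouse' (everything left of the 2nd '.')
-- and 'clickhouse.com' (everything right of the 2nd '.' counting from the end)
SELECT
    substringIndex('www.clickhouse.com', '.', 2) AS from_left,
    substringIndex('www.clickhouse.com', '.', -2) AS from_right;
```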
@ -995,9 +1052,9 @@ substringIndexUTF8(s, delim, count)
**Arguments**
- `s`: The string to extract substring from. [String](../data-types/string.md).
- `delim`: The character to split. [String](../data-types/string.md).
- `count`: The number of occurrences of the delimiter to count before extracting the substring. If count is positive, everything to the left of the final delimiter (counting from the left) is returned. If count is negative, everything to the right of the final delimiter (counting from the right) is returned. [UInt or Int](../data-types/int-uint.md)
- `s` The string to extract substring from. [String](../data-types/string.md).
- `delim` The character to split. [String](../data-types/string.md).
- `count` The number of occurrences of the delimiter to count before extracting the substring. If count is positive, everything to the left of the final delimiter (counting from the left) is returned. If count is negative, everything to the right of the final delimiter (counting from the right) is returned. [UInt or Int](../data-types/int-uint.md)
**Returned value**
@ -1277,7 +1334,7 @@ tryBase64Decode(encoded)
**Arguments**
- `encoded`: [String](../data-types/string.md) column or constant. If the string is not a valid Base64-encoded value, returns an empty string.
- `encoded` [String](../data-types/string.md) column or constant. If the string is not a valid Base64-encoded value, returns an empty string.
**Returned value**
@ -1309,7 +1366,7 @@ tryBase64URLDecode(encodedUrl)
**Parameters**
- `encodedURL`: [String](../data-types/string.md) column or constant. If the string is not a valid Base64-encoded value with URL-specific modifications, returns an empty string.
- `encodedURL` [String](../data-types/string.md) column or constant. If the string is not a valid Base64-encoded value with URL-specific modifications, returns an empty string.
**Returned value**
@ -1555,7 +1612,7 @@ The result type is UInt64.
## normalizeQuery
Replaces literals, sequences of literals and complex aliases with placeholders.
Replaces literals, sequences of literals and complex aliases (containing whitespace, more than two digits or at least 36 bytes long such as UUIDs) with placeholder `?`.
**Syntax**
@ -1573,6 +1630,8 @@ normalizeQuery(x)
**Example**
Query:
``` sql
SELECT normalizeQuery('[1, 2, 3, x]') AS query;
```
@ -1585,9 +1644,44 @@ Result:
└──────────┘
```
## normalizeQueryKeepNames
Replaces literals, sequences of literals with placeholder `?` but does not replace complex aliases (containing whitespace, more than two digits
or at least 36 bytes long such as UUIDs). This helps better analyze complex query logs.
**Syntax**
``` sql
normalizeQueryKeepNames(x)
```
**Arguments**
- `x` — Sequence of characters. [String](../data-types/string.md).
**Returned value**
- Sequence of characters with placeholders. [String](../data-types/string.md).
**Example**
Query:
``` sql
SELECT normalizeQuery('SELECT 1 AS aComplexName123'), normalizeQueryKeepNames('SELECT 1 AS aComplexName123');
```
Result:
```result
┌─normalizeQuery('SELECT 1 AS aComplexName123')─┬─normalizeQueryKeepNames('SELECT 1 AS aComplexName123')─┐
│ SELECT ? AS `?` │ SELECT ? AS aComplexName123 │
└───────────────────────────────────────────────┴────────────────────────────────────────────────────────┘
```
## normalizedQueryHash
Returns identical 64bit hash values without the values of literals for similar queries. Can be helpful to analyze query log.
Returns identical 64bit hash values without the values of literals for similar queries. Can be helpful to analyze query logs.
**Syntax**
@ -1605,6 +1699,8 @@ normalizedQueryHash(x)
**Example**
Query:
``` sql
SELECT normalizedQueryHash('SELECT 1 AS `xyz`') != normalizedQueryHash('SELECT 1 AS `abc`') AS res;
```
@ -1617,6 +1713,43 @@ Result:
└─────┘
```
## normalizedQueryHashKeepNames
Like [normalizedQueryHash](#normalizedqueryhash) it returns identical 64bit hash values without the values of literals for similar queries but it does not replace complex aliases (containing whitespace, more than two digits
or at least 36 bytes long such as UUIDs) with a placeholder before hashing. Can be helpful to analyze query logs.
**Syntax**
``` sql
normalizedQueryHashKeepNames(x)
```
**Arguments**
- `x` — Sequence of characters. [String](../data-types/string.md).
**Returned value**
- Hash value. [UInt64](../data-types/int-uint.md#uint-ranges).
**Example**
``` sql
SELECT normalizedQueryHash('SELECT 1 AS `xyz123`') != normalizedQueryHash('SELECT 1 AS `abc123`') AS normalizedQueryHash;
SELECT normalizedQueryHashKeepNames('SELECT 1 AS `xyz123`') != normalizedQueryHashKeepNames('SELECT 1 AS `abc123`') AS normalizedQueryHashKeepNames;
```
Result:
```result
┌─normalizedQueryHash─┐
│ 0 │
└─────────────────────┘
┌─normalizedQueryHashKeepNames─┐
│ 1 │
└──────────────────────────────┘
```
## normalizeUTF8NFC
Converts a string to [NFC normalized form](https://en.wikipedia.org/wiki/Unicode_equivalence#Normal_forms), assuming the string is valid UTF8-encoded text.
@ -1935,7 +2068,7 @@ soundex(val)
**Arguments**
- `val` - Input value. [String](../data-types/string.md)
- `val` Input value. [String](../data-types/string.md)
**Returned value**
@ -1968,7 +2101,7 @@ punycodeEncode(val)
**Arguments**
- `val` - Input value. [String](../data-types/string.md)
- `val` Input value. [String](../data-types/string.md)
**Returned value**
@ -2001,7 +2134,7 @@ punycodeEncode(val)
**Arguments**
- `val` - Punycode-encoded string. [String](../data-types/string.md)
- `val` Punycode-encoded string. [String](../data-types/string.md)
**Returned value**
@ -2027,7 +2160,7 @@ Like `punycodeDecode` but returns an empty string if no valid Punycode-encoded s
## idnaEncode
Returns the the ASCII representation (ToASCII algorithm) of a domain name according to the [Internationalized Domain Names in Applications](https://en.wikipedia.org/wiki/Internationalized_domain_name#Internationalizing_Domain_Names_in_Applications) (IDNA) mechanism.
Returns the ASCII representation (ToASCII algorithm) of a domain name according to the [Internationalized Domain Names in Applications](https://en.wikipedia.org/wiki/Internationalized_domain_name#Internationalizing_Domain_Names_in_Applications) (IDNA) mechanism.
The input string must be UTF-8-encoded and translatable to an ASCII string, otherwise an exception is thrown.
Note: No percent decoding or trimming of tabs, spaces or control characters is performed.
@ -2039,7 +2172,7 @@ idnaEncode(val)
**Arguments**
- `val` - Input value. [String](../data-types/string.md)
- `val` Input value. [String](../data-types/string.md)
**Returned value**
@ -2065,7 +2198,7 @@ Like `idnaEncode` but returns an empty string in case of an error instead of thr
## idnaDecode
Returns the the Unicode (UTF-8) representation (ToUnicode algorithm) of a domain name according to the [Internationalized Domain Names in Applications](https://en.wikipedia.org/wiki/Internationalized_domain_name#Internationalizing_Domain_Names_in_Applications) (IDNA) mechanism.
Returns the Unicode (UTF-8) representation (ToUnicode algorithm) of a domain name according to the [Internationalized Domain Names in Applications](https://en.wikipedia.org/wiki/Internationalized_domain_name#Internationalizing_Domain_Names_in_Applications) (IDNA) mechanism.
In case of an error (e.g. because the input is invalid), the input string is returned.
Note that repeated application of `idnaEncode()` and `idnaDecode()` does not necessarily return the original string due to case normalization.
@ -2077,7 +2210,7 @@ idnaDecode(val)
**Arguments**
- `val` - Input value. [String](../data-types/string.md)
- `val` Input value. [String](../data-types/string.md)
**Returned value**
@ -2121,7 +2254,7 @@ Result:
└───────────────────────────────────────────┘
```
Alias: mismatches
Alias: `mismatches`
## stringJaccardIndex
@ -2175,7 +2308,7 @@ Result:
└─────────────────────────────────────┘
```
Alias: levenshteinDistance
Alias: `levenshteinDistance`
## editDistanceUTF8
@ -2201,7 +2334,7 @@ Result:
└─────────────────────────────────────┘
```
Alias: levenshteinDistanceUTF8
Alias: `levenshteinDistanceUTF8`
## damerauLevenshteinDistance
@ -2279,13 +2412,93 @@ Result:
Converts the first letter of each word to upper case and the rest to lower case. Words are sequences of alphanumeric characters separated by non-alphanumeric characters.
:::note
Because `initCap` converts only the first letter of each word to upper case, you may observe unexpected behaviour for words containing apostrophes or capital letters. For example:
```sql
SELECT initCap('mother''s daughter'), initCap('joe McAdam');
```
will return
```response
┌─initCap('mother\'s daughter')─┬─initCap('joe McAdam')─┐
│ Mother'S Daughter │ Joe Mcadam │
└───────────────────────────────┴───────────────────────┘
```
This is a known behaviour, with no plans currently to fix it.
:::
**Syntax**
```sql
initcap(val)
```
**Arguments**
- `val` — Input value. [String](../data-types/string.md).
**Returned value**
- `val` with the first letter of each word converted to upper case. [String](../data-types/string.md).
**Example**
Query:
```sql
SELECT initcap('building for fast');
```
Result:
```text
┌─initcap('building for fast')─┐
│ Building For Fast │
└──────────────────────────────┘
```
## initcapUTF8
Like [initcap](#initcap), assuming that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
Does not detect the language, e.g. for Turkish the result might not be exactly correct (i/İ vs. i/I).
Like [initcap](#initcap), `initcapUTF8` converts the first letter of each word to upper case and the rest to lower case. Assumes that the string contains valid UTF-8 encoded text.
If this assumption is violated, no exception is thrown and the result is undefined.
:::note
This function does not detect the language, e.g. for Turkish the result might not be exactly correct (i/İ vs. i/I).
If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point.
:::
**Syntax**
```sql
initcapUTF8(val)
```
**Arguments**
- `val` — Input value. [String](../data-types/string.md).
**Returned value**
- `val` with the first letter of each word converted to upper case. [String](../data-types/string.md).
**Example**
Query:
```sql
SELECT initcapUTF8('не тормозит');
```
Result:
```text
┌─initcapUTF8('не тормозит')─┐
│ Не Тормозит                │
└────────────────────────────┘
```
## firstLine
@ -2299,7 +2512,7 @@ firstLine(val)
**Arguments**
- `val` - Input value. [String](../data-types/string.md)
- `val` Input value. [String](../data-types/string.md)
**Returned value**

View File

@ -34,7 +34,7 @@ Alias: `replace`.
Replaces the first occurrence of the substring matching the regular expression `pattern` (in [re2 syntax](https://github.com/google/re2/wiki/Syntax)) in `haystack` by the `replacement` string.
`replacement` can containing substitutions `\0-\9`.
`replacement` can contain substitutions `\0-\9`.
Substitutions `\1-\9` correspond to the 1st to 9th capturing group (submatch), substitution `\0` corresponds to the entire match.
To use a verbatim `\` character in the `pattern` or `replacement` strings, escape it using `\`.

View File

@ -6,44 +6,122 @@ sidebar_label: Time Window
# Time Window Functions
Time window functions return the inclusive lower and exclusive upper bound of the corresponding window. The functions for working with WindowView are listed below:
Time window functions return the inclusive lower and exclusive upper bound of the corresponding window. The functions for working with [WindowView](../statements/create/view.md/#window-view-experimental) are listed below:
## tumble
A tumbling time window assigns records to non-overlapping, continuous windows with a fixed duration (`interval`).
**Syntax**
``` sql
tumble(time_attr, interval [, timezone])
```
**Arguments**
- `time_attr` - Date and time. [DateTime](../data-types/datetime.md) data type.
- `interval` - Window interval in [Interval](../data-types/special-data-types/interval.md) data type.
- `time_attr` Date and time. [DateTime](../data-types/datetime.md).
- `interval` Window interval in [Interval](../data-types/special-data-types/interval.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).
**Returned values**
- The inclusive lower and exclusive upper bound of the corresponding tumbling window. [Tuple](../data-types/tuple.md)([DateTime](../data-types/datetime.md), [DateTime](../data-types/datetime.md))`.
- The inclusive lower and exclusive upper bound of the corresponding tumbling window. [Tuple](../data-types/tuple.md)([DateTime](../data-types/datetime.md), [DateTime](../data-types/datetime.md)).
**Example**
Query:
``` sql
SELECT tumble(now(), toIntervalDay('1'))
SELECT tumble(now(), toIntervalDay('1'));
```
Result:
``` text
┌─tumble(now(), toIntervalDay('1'))─────────────┐
['2020-01-01 00:00:00','2020-01-02 00:00:00']
('2024-07-04 00:00:00','2024-07-05 00:00:00')
└───────────────────────────────────────────────┘
```
## tumbleStart
Returns the inclusive lower bound of the corresponding [tumbling window](#tumble).
**Syntax**
``` sql
tumbleStart(time_attr, interval [, timezone]);
```
**Arguments**
- `time_attr` — Date and time. [DateTime](../data-types/datetime.md).
- `interval` — Window interval in [Interval](../data-types/special-data-types/interval.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).
The parameters above can also be passed to the function as a [tuple](../data-types/tuple.md).
**Returned values**
- The inclusive lower bound of the corresponding tumbling window. [DateTime](../data-types/datetime.md), [Tuple](../data-types/tuple.md) or [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT tumbleStart(now(), toIntervalDay('1'));
```
Result:
```response
┌─tumbleStart(now(), toIntervalDay('1'))─┐
│ 2024-07-04 00:00:00 │
└────────────────────────────────────────┘
```
## tumbleEnd
Returns the exclusive upper bound of the corresponding [tumbling window](#tumble).
**Syntax**
``` sql
tumbleEnd(time_attr, interval [, timezone]);
```
**Arguments**
- `time_attr` — Date and time. [DateTime](../data-types/datetime.md).
- `interval` — Window interval in [Interval](../data-types/special-data-types/interval.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).
The parameters above can also be passed to the function as a [tuple](../data-types/tuple.md).
**Returned values**
- The exclusive upper bound of the corresponding tumbling window. [DateTime](../data-types/datetime.md), [Tuple](../data-types/tuple.md) or [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT tumbleEnd(now(), toIntervalDay('1'));
```
Result:
```response
┌─tumbleEnd(now(), toIntervalDay('1'))─┐
│ 2024-07-05 00:00:00 │
└──────────────────────────────────────┘
```
## hop
A hopping time window has a fixed duration (`window_interval`) and hops by a specified hop interval (`hop_interval`). If the `hop_interval` is smaller than the `window_interval`, hopping windows are overlapping. Thus, records can be assigned to multiple windows.
``` sql
hop(time_attr, hop_interval, window_interval [, timezone])
@ -51,65 +129,118 @@ hop(time_attr, hop_interval, window_interval [, timezone])
**Arguments**
- `time_attr` - Date and time. [DateTime](../data-types/datetime.md) data type.
- `hop_interval` - Hop interval in [Interval](../data-types/special-data-types/interval.md) data type. Should be a positive number.
- `window_interval` - Window interval in [Interval](../data-types/special-data-types/interval.md) data type. Should be a positive number.
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).
- `time_attr` Date and time. [DateTime](../data-types/datetime.md).
- `hop_interval` — Positive Hop interval. [Interval](../data-types/special-data-types/interval.md).
- `window_interval` — Positive Window interval. [Interval](../data-types/special-data-types/interval.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).
**Returned values**
- The inclusive lower and exclusive upper bound of the corresponding hopping window. Since one record can be assigned to multiple hop windows, the function only returns the bound of the **first** window when hop function is used **without** `WINDOW VIEW`. [Tuple](../data-types/tuple.md)([DateTime](../data-types/datetime.md), [DateTime](../data-types/datetime.md))`.
- The inclusive lower and exclusive upper bound of the corresponding hopping window. [Tuple](../data-types/tuple.md)([DateTime](../data-types/datetime.md), [DateTime](../data-types/datetime.md)).
:::note
Since one record can be assigned to multiple hop windows, the function only returns the bound of the **first** window when hop function is used **without** `WINDOW VIEW`.
:::
**Example**
Query:
``` sql
SELECT hop(now(), INTERVAL '1' SECOND, INTERVAL '2' SECOND)
SELECT hop(now(), INTERVAL '1' DAY, INTERVAL '2' DAY);
```
Result:
``` text
┌─hop(now(), toIntervalSecond('1'), toIntervalSecond('2'))──┐
│ ('2020-01-14 16:58:22','2020-01-14 16:58:24') │
└───────────────────────────────────────────────────────────┘
```
## tumbleStart
Returns the inclusive lower bound of the corresponding tumbling window.
``` sql
tumbleStart(bounds_tuple);
tumbleStart(time_attr, interval [, timezone]);
```
## tumbleEnd
Returns the exclusive upper bound of the corresponding tumbling window.
``` sql
tumbleEnd(bounds_tuple);
tumbleEnd(time_attr, interval [, timezone]);
┌─hop(now(), toIntervalDay('1'), toIntervalDay('2'))─┐
│ ('2024-07-03 00:00:00','2024-07-05 00:00:00') │
└────────────────────────────────────────────────────┘
```
## hopStart
Returns the inclusive lower bound of the corresponding hopping window.
Returns the inclusive lower bound of the corresponding [hopping window](#hop).
**Syntax**
``` sql
hopStart(bounds_tuple);
hopStart(time_attr, hop_interval, window_interval [, timezone]);
```
**Arguments**
- `time_attr` — Date and time. [DateTime](../data-types/datetime.md).
- `hop_interval` — Positive Hop interval. [Interval](../data-types/special-data-types/interval.md).
- `window_interval` — Positive Window interval. [Interval](../data-types/special-data-types/interval.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).
The parameters above can also be passed to the function as a [tuple](../data-types/tuple.md).
**Returned values**
- The inclusive lower bound of the corresponding hopping window. [DateTime](../data-types/datetime.md), [Tuple](../data-types/tuple.md) or [UInt32](../data-types/int-uint.md).
:::note
Since one record can be assigned to multiple hop windows, the function only returns the bound of the **first** window when hop function is used **without** `WINDOW VIEW`.
:::
**Example**
Query:
``` sql
SELECT hopStart(now(), INTERVAL '1' DAY, INTERVAL '2' DAY);
```
Result:
``` text
┌─hopStart(now(), toIntervalDay('1'), toIntervalDay('2'))─┐
│ 2024-07-03 00:00:00 │
└─────────────────────────────────────────────────────────┘
```
## hopEnd
Returns the exclusive upper bound of the corresponding hopping window.
Returns the exclusive upper bound of the corresponding [hopping window](#hop).
**Syntax**
``` sql
hopEnd(bounds_tuple);
hopEnd(time_attr, hop_interval, window_interval [, timezone]);
```
**Arguments**
- `time_attr` — Date and time. [DateTime](../data-types/datetime.md).
- `hop_interval` — Positive Hop interval. [Interval](../data-types/special-data-types/interval.md).
- `window_interval` — Positive Window interval. [Interval](../data-types/special-data-types/interval.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).
The parameters above can also be passed to the function as a [tuple](../data-types/tuple.md).
**Returned values**
- The exclusive upper bound of the corresponding hopping window. [DateTime](../data-types/datetime.md), [Tuple](../data-types/tuple.md) or [UInt32](../data-types/int-uint.md).
:::note
Since one record can be assigned to multiple hop windows, the function only returns the bound of the **first** window when hop function is used **without** `WINDOW VIEW`.
:::
**Example**
Query:
``` sql
SELECT hopEnd(now(), INTERVAL '1' DAY, INTERVAL '2' DAY);
```
Result:
``` text
┌─hopEnd(now(), toIntervalDay('1'), toIntervalDay('2'))─┐
│ 2024-07-05 00:00:00 │
└───────────────────────────────────────────────────────┘
```
## Related content

View File

@ -7,7 +7,7 @@ sidebar_label: Tuples
## tuple
A function that allows grouping multiple columns.
For columns with the types T1, T2, ..., it returns a Tuple(T1, T2, ...) type tuple containing these columns. There is no cost to execute the function.
For columns C1, C2, ... with the types T1, T2, ..., it returns a named Tuple(C1 T1, C2 T2, ...) type tuple containing these columns if their names are unique and can be treated as unquoted identifiers, otherwise a Tuple(T1, T2, ...) is returned. There is no cost to execute the function.
Tuples are normally used as intermediate values for an argument of IN operators, or for creating a list of formal parameters of lambda functions. Tuples can't be written to a table.
The function implements the operator `(x, y, ...)`.
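A minimal sketch of the naming behaviour described above (the table and column names are illustrative):

```sql
CREATE TABLE tuple_demo (a UInt32, b String) ENGINE = Memory;
INSERT INTO tuple_demo VALUES (1, 'x');
-- expected, per the description above: Tuple(a UInt32, b String)
SELECT toTypeName(tuple(a, b)) FROM tuple_demo;
```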
@ -259,6 +259,60 @@ Result:
└───────────────────────────────────────┘
```
## tupleNames
Converts a tuple into an array of column names. For a tuple in the form `Tuple(a T, b T, ...)`, it returns an array of strings representing the named columns of the tuple. If the tuple elements do not have explicit names, their indices will be used as the column names instead.
**Syntax**
``` sql
tupleNames(tuple)
```
**Arguments**
- `tuple` — Named tuple. [Tuple](../../sql-reference/data-types/tuple.md) with any types of values.
**Returned value**
- An array with strings.
Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).
**Example**
Query:
``` sql
CREATE TABLE tupletest (col Tuple(user_ID UInt64, session_ID UInt64)) ENGINE = Memory;
INSERT INTO tupletest VALUES (tuple(1, 2));
SELECT tupleNames(col) FROM tupletest;
```
Result:
``` text
┌─tupleNames(col)──────────┐
│ ['user_ID','session_ID'] │
└──────────────────────────┘
```
If you pass a simple tuple to the function, ClickHouse uses the indexes of the columns as their names:
``` sql
SELECT tupleNames(tuple(3, 2, 1));
```
Result:
``` text
┌─tupleNames((3, 2, 1))─┐
│ ['1','2','3'] │
└───────────────────────┘
```
## tuplePlus
Calculates the sum of corresponding values of two tuples of the same size.

View File

@ -600,7 +600,7 @@ mapApply(func, map)
**Arguments**
- `func` - [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `func` [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `map` — [Map](../data-types/map.md).
**Returned value**
@ -831,7 +831,39 @@ SELECT mapSort((k, v) -> v, map('key2', 2, 'key3', 1, 'key1', 3)) AS map;
└──────────────────────────────┘
```
For more details see the [reference](../../sql-reference/functions/array-functions.md#array_functions-sort) for `arraySort` function.
## mapPartialSort
Sorts the elements of a map in ascending order with additional `limit` argument allowing partial sorting.
If the `func` function is specified, the sorting order is determined by the result of the `func` function applied to the keys and values of the map.
**Syntax**
```sql
mapPartialSort([func,] limit, map)
```
**Arguments**
- `func` Optional function to apply to the keys and values of the map. [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `limit` Elements in range [1..limit] are sorted. [(U)Int](../data-types/int-uint.md).
- `map` Map to sort. [Map](../data-types/map.md).
**Returned value**
- Partially sorted map. [Map](../data-types/map.md).
**Example**
``` sql
SELECT mapPartialSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2));
```
``` text
┌─mapPartialSort(lambda(tuple(k, v), v), 2, map('k1', 3, 'k2', 1, 'k3', 2))─┐
│ {'k2':1,'k3':2,'k1':3} │
└───────────────────────────────────────────────────────────────────────────┘
```
## mapReverseSort(\[func,\], map)
@ -861,3 +893,35 @@ SELECT mapReverseSort((k, v) -> v, map('key2', 2, 'key3', 1, 'key1', 3)) AS map;
```
For more details see function [arrayReverseSort](../../sql-reference/functions/array-functions.md#array_functions-reverse-sort).
## mapPartialReverseSort
Sorts the elements of a map in descending order with additional `limit` argument allowing partial sorting.
If the `func` function is specified, the sorting order is determined by the result of the `func` function applied to the keys and values of the map.
**Syntax**
```sql
mapPartialReverseSort([func,] limit, map)
```
**Arguments**
- `func` Optional function to apply to the keys and values of the map. [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `limit` Elements in range [1..limit] are sorted. [(U)Int](../data-types/int-uint.md).
- `map` Map to sort. [Map](../data-types/map.md).
**Returned value**
- Partially sorted map. [Map](../data-types/map.md).
**Example**
``` sql
SELECT mapPartialReverseSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2));
```
``` text
┌─mapPartialReverseSort(lambda(tuple(k, v), v), 2, map('k1', 3, 'k2', 1, 'k3', 2))─┐
│ {'k1':3,'k3':2,'k2':1} │
└──────────────────────────────────────────────────────────────────────────────────┘
```

View File

@ -58,6 +58,8 @@ KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90'
KILL QUERY WHERE user='username' SYNC
```
:::tip
If you are killing a query in ClickHouse Cloud or in a self-managed cluster, then be sure to use the `ON CLUSTER [cluster-name]` option, in order to ensure the query is killed on all replicas.
:::
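For example, a sketch assuming a cluster named `default`:

```sql
KILL QUERY ON CLUSTER default WHERE user = 'username' SYNC
```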
Read-only users can only stop their own queries.
By default, the asynchronous version of queries is used (`ASYNC`), which does not wait for confirmation that queries have stopped.
@ -131,6 +133,7 @@ KILL MUTATION WHERE database = 'default' AND table = 'table'
-- Cancel the specific mutation:
KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt'
```
:::tip
If you are killing a mutation in ClickHouse Cloud or in a self-managed cluster, then be sure to use the `ON CLUSTER [cluster-name]` option, in order to ensure the mutation is killed on all replicas.
:::
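Similarly for mutations, a sketch assuming a cluster named `default`:

```sql
KILL MUTATION ON CLUSTER default WHERE database = 'default' AND table = 'table'
```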
The query is useful when a mutation is stuck and cannot finish (e.g. if some function in the mutation query throws an exception when applied to the data contained in the table).

View File

@ -130,7 +130,9 @@ SELECT * FROM file('user_files/archives/archive{1..2}.zip :: table.csv');
## Globs in path
Paths may use globbing. Files must match the whole path pattern, not only the suffix or prefix.
Paths may use globbing. Files must match the whole path pattern, not only the suffix or prefix. There is one exception: if the path refers to an existing directory and does not use globs, a `*` is implicitly appended to the path so that all files in the directory are selected.
- `*` — Represents arbitrarily many characters except `/` but including the empty string.
- `?` — Represents an arbitrary single character.
@ -163,6 +165,12 @@ An alternative path expression which achieves the same:
SELECT count(*) FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32');
```
Query the total number of rows in `some_dir` using the implicit `*`:
```sql
SELECT count(*) FROM file('some_dir', 'TSV', 'name String, value UInt32');
```
:::note
If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
:::
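A sketch of that workaround (the file names are illustrative): to match `file_01.tsv` through `file_09.tsv`, keep the leading zero literal and expand only the last digit, or replace each digit with `?`:

```sql
SELECT count(*) FROM file('file_0{1..9}.tsv', 'TSV', 'name String, value UInt32');
```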

View File

@ -0,0 +1,73 @@
---
slug: /en/sql-reference/window-functions/dense_rank
sidebar_label: dense_rank
sidebar_position: 7
---
# dense_rank
Ranks the current row within its partition without gaps. In other words, if the value of any new row encountered is equal to the value of one of the previous rows then it will receive the next successive rank without any gaps in ranking.
The [rank](./rank.md) function provides the same behaviour, but with gaps in ranking.
**Syntax**
```sql
dense_rank (column_name)
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
**Returned value**
- A number for the current row within its partition, without gaps in ranking. [UInt64](../data-types/int-uint.md).
**Example**
The following example is based on the example provided in the instructional video [Ranking window functions in ClickHouse](https://youtu.be/Yku9mmBYm_4?si=XIMu1jpYucCQEoXA).
Query:
```sql
CREATE TABLE salaries
(
`team` String,
`player` String,
`salary` UInt32,
`position` String
)
Engine = Memory;
INSERT INTO salaries FORMAT Values
('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 150000, 'D'),
('New Coreystad Archdukes', 'Scott Harrison', 150000, 'D'),
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
```
```sql
SELECT player, salary,
dense_rank() OVER (ORDER BY salary DESC) AS dense_rank
FROM salaries;
```
Result:
```response
┌─player──────────┬─salary─┬─dense_rank─┐
1. │ Gary Chen │ 195000 │ 1 │
2. │ Robert George │ 195000 │ 1 │
3. │ Charles Juarez │ 190000 │ 2 │
4. │ Michael Stanley │ 150000 │ 3 │
5. │ Douglas Benson │ 150000 │ 3 │
6. │ Scott Harrison │ 150000 │ 3 │
7. │ James Henderson │ 140000 │ 4 │
└─────────────────┴────────┴────────────┘
```

View File

@ -0,0 +1,79 @@
---
slug: /en/sql-reference/window-functions/first_value
sidebar_label: first_value
sidebar_position: 3
---
# first_value
Returns the first value evaluated within its ordered frame. By default, NULL arguments are skipped; however, the `RESPECT NULLS` modifier can be used to override this behaviour.
**Syntax**
```sql
first_value (column_name) [[RESPECT NULLS] | [IGNORE NULLS]]
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```
Alias: `any`.
:::note
Using the optional modifier `RESPECT NULLS` after `first_value(column_name)` will ensure that `NULL` arguments are not skipped.
See [NULL processing](../aggregate-functions/index.md/#null-processing) for more information.
:::
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
**Returned value**
- The first value evaluated within its ordered frame.
**Example**
In this example the `first_value` function is used to find the highest paid footballer from a fictional dataset of salaries of Premier League football players.
Query:
```sql
DROP TABLE IF EXISTS salaries;
CREATE TABLE salaries
(
`team` String,
`player` String,
`salary` UInt32,
`position` String
)
Engine = Memory;
INSERT INTO salaries FORMAT Values
('Port Elizabeth Barbarians', 'Gary Chen', 196000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
('New Coreystad Archdukes', 'Scott Harrison', 180000, 'D'),
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
```
```sql
SELECT player, salary,
first_value(player) OVER (ORDER BY salary DESC) AS highest_paid_player
FROM salaries;
```
Result:
```response
┌─player──────────┬─salary─┬─highest_paid_player─┐
1. │ Gary Chen │ 196000 │ Gary Chen │
2. │ Robert George │ 195000 │ Gary Chen │
3. │ Charles Juarez │ 190000 │ Gary Chen │
4. │ Scott Harrison │ 180000 │ Gary Chen │
5. │ Douglas Benson │ 150000 │ Gary Chen │
6. │ James Henderson │ 140000 │ Gary Chen │
7. │ Michael Stanley │ 100000 │ Gary Chen │
└─────────────────┴────────┴─────────────────────┘
```

View File

@ -1,10 +1,11 @@
---
slug: /en/sql-reference/window-functions/
sidebar_position: 62
sidebar_label: Window Functions
title: Window Functions
sidebar_position: 1
---
# Window Functions
Windows functions let you perform calculations across a set of rows that are related to the current row.
Some of the calculations that you can do are similar to those that can be done with an aggregate function, but a window function doesn't cause rows to be grouped into a single output - the individual rows are still returned.
@ -12,8 +13,8 @@ Some of the calculations that you can do are similar to those that can be done w
ClickHouse supports the standard grammar for defining windows and window functions. The table below indicates whether a feature is currently supported.
| Feature | Supported? |
|------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Feature | Supported? |
|--------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| ad hoc window specification (`count(*) over (partition by id order by time desc)`) | ✅ |
| expressions involving window functions, e.g. `(count(*) over ()) / 2)` | ✅ |
| `WINDOW` clause (`select ... from table window w as (partition by id)`) | ✅ |
@ -75,14 +76,14 @@ WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
These functions can be used only as a window function.
- `row_number()` - Number the current row within its partition starting from 1.
- `first_value(x)` - Return the first non-NULL value evaluated within its ordered frame.
- `last_value(x)` - Return the last non-NULL value evaluated within its ordered frame.
- `nth_value(x, offset)` - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
- `rank()` - Rank the current row within its partition with gaps.
- `dense_rank()` - Rank the current row within its partition without gaps.
- `lagInFrame(x[, offset[, default]])` - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame. The offset parameter, if not specified, defaults to 1, meaning it will fetch the value from the next row. If the calculated row exceeds the boundaries of the window frame, the specified default value is returned.
- `leadInFrame(x[, offset[, default]])` - Return a value evaluated at the row that is offset rows after the current row within the ordered frame. If offset is not provided, it defaults to 1. If the offset leads to a position outside the window frame, the specified default value is used.
- [`row_number()`](./row_number.md) - Number the current row within its partition starting from 1.
- [`first_value(x)`](./first_value.md) - Return the first value evaluated within its ordered frame.
- [`last_value(x)`](./last_value.md) - Return the last value evaluated within its ordered frame.
- [`nth_value(x, offset)`](./nth_value.md) - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
- [`rank()`](./rank.md) - Rank the current row within its partition with gaps.
- [`dense_rank()`](./dense_rank.md) - Rank the current row within its partition without gaps.
- [`lagInFrame(x)`](./lagInFrame.md) - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame.
- [`leadInFrame(x)`](./leadInFrame.md) - Return a value evaluated at the row that is offset rows after the current row within the ordered frame.
## Examples

View File

@ -0,0 +1,79 @@
---
slug: /en/sql-reference/window-functions/lagInFrame
sidebar_label: lagInFrame
sidebar_position: 8
---
# lagInFrame
Returns a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame.
**Syntax**
```sql
lagInFrame(x[, offset[, default]])
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
**Parameters**
- `x` — Column name.
- `offset` — Offset to apply. [(U)Int*](../data-types/int-uint.md). (Optional - `1` by default).
- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - `null` by default).
**Returned value**
- Value evaluated at the row that is at a specified physical offset before the current row within the ordered frame.
**Example**
This example looks at historical data for a specific stock and uses the `lagInFrame` function to calculate a day-to-day delta and percentage change in the closing price of the stock.
Query:
```sql
CREATE TABLE stock_prices
(
`date` Date,
`open` Float32, -- opening price
`high` Float32, -- daily high
`low` Float32, -- daily low
`close` Float32, -- closing price
`volume` UInt32 -- trade volume
)
Engine = Memory;
INSERT INTO stock_prices FORMAT Values
('2024-06-03', 113.62, 115.00, 112.00, 115.00, 438392000),
('2024-06-04', 115.72, 116.60, 114.04, 116.44, 403324000),
('2024-06-05', 118.37, 122.45, 117.47, 122.44, 528402000),
('2024-06-06', 124.05, 125.59, 118.32, 121.00, 664696000),
('2024-06-07', 119.77, 121.69, 118.02, 120.89, 412386000);
```
```sql
SELECT
date,
close,
lagInFrame(close, 1, close) OVER (ORDER BY date ASC) AS previous_day_close,
COALESCE(ROUND(close - previous_day_close, 2)) AS delta,
COALESCE(ROUND((delta / previous_day_close) * 100, 2)) AS percent_change
FROM stock_prices
ORDER BY date DESC;
```
Result:
```response
┌───────date─┬──close─┬─previous_day_close─┬─delta─┬─percent_change─┐
1. │ 2024-06-07 │ 120.89 │ 121 │ -0.11 │ -0.09 │
2. │ 2024-06-06 │ 121 │ 122.44 │ -1.44 │ -1.18 │
3. │ 2024-06-05 │ 122.44 │ 116.44 │ 6 │ 5.15 │
4. │ 2024-06-04 │ 116.44 │ 115 │ 1.44 │ 1.25 │
5. │ 2024-06-03 │ 115 │ 115 │ 0 │ 0 │
└────────────┴────────┴────────────────────┴───────┴────────────────┘
```

View File

@ -0,0 +1,79 @@
---
slug: /en/sql-reference/window-functions/last_value
sidebar_label: last_value
sidebar_position: 4
---
# last_value
Returns the last value evaluated within its ordered frame. By default, NULL arguments are skipped; however, the `RESPECT NULLS` modifier can be used to override this behaviour.
**Syntax**
```sql
last_value (column_name) [[RESPECT NULLS] | [IGNORE NULLS]]
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```
Alias: `anyLast`.
:::note
Using the optional modifier `RESPECT NULLS` after `last_value(column_name)` will ensure that `NULL` arguments are not skipped.
See [NULL processing](../aggregate-functions/index.md/#null-processing) for more information.
:::
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
**Returned value**
- The last value evaluated within its ordered frame.
**Example**
In this example the `last_value` function is used to find the lowest paid footballer from a fictional dataset of salaries of Premier League football players.
Query:
```sql
DROP TABLE IF EXISTS salaries;
CREATE TABLE salaries
(
`team` String,
`player` String,
`salary` UInt32,
`position` String
)
Engine = Memory;
INSERT INTO salaries FORMAT Values
('Port Elizabeth Barbarians', 'Gary Chen', 196000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
('New Coreystad Archdukes', 'Scott Harrison', 180000, 'D'),
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
```
```sql
SELECT player, salary,
last_value(player) OVER (ORDER BY salary DESC RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS lowest_paid_player
FROM salaries;
```
Result:
```response
┌─player──────────┬─salary─┬─lowest_paid_player─┐
1. │ Gary Chen │ 196000 │ Michael Stanley │
2. │ Robert George │ 195000 │ Michael Stanley │
3. │ Charles Juarez │ 190000 │ Michael Stanley │
4. │ Scott Harrison │ 180000 │ Michael Stanley │
5. │ Douglas Benson │ 150000 │ Michael Stanley │
6. │ James Henderson │ 140000 │ Michael Stanley │
7. │ Michael Stanley │ 100000 │ Michael Stanley │
└─────────────────┴────────┴────────────────────┘
```

View File

@ -0,0 +1,60 @@
---
slug: /en/sql-reference/window-functions/leadInFrame
sidebar_label: leadInFrame
sidebar_position: 9
---
# leadInFrame
Returns a value evaluated at the row that is offset rows after the current row within the ordered frame.
**Syntax**
```sql
leadInFrame(x[, offset[, default]])
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
**Parameters**
- `x` — Column name.
- `offset` — Offset to apply. [(U)Int*](../data-types/int-uint.md). (Optional - `1` by default).
- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - `null` by default).
**Returned value**
- Value evaluated at the row that is offset rows after the current row within the ordered frame.
**Example**
This example looks at [historical data](https://www.kaggle.com/datasets/sazidthe1/nobel-prize-data) for Nobel Prize winners and uses the `leadInFrame` function to return a list of successive winners in the physics category.
Query:
```sql
CREATE OR REPLACE VIEW nobel_prize_laureates AS FROM file('nobel_laureates_data.csv') SELECT *;
```
```sql
FROM nobel_prize_laureates SELECT fullName, leadInFrame(year, 1, year) OVER (PARTITION BY category ORDER BY year) AS year, category, motivation WHERE category == 'physics' ORDER BY year DESC LIMIT 9;
```
Result:
```response
┌─fullName─────────┬─year─┬─category─┬─motivation─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
1. │ Pierre Agostini │ 2023 │ physics │ for experimental methods that generate attosecond pulses of light for the study of electron dynamics in matter │
2. │ Ferenc Krausz │ 2023 │ physics │ for experimental methods that generate attosecond pulses of light for the study of electron dynamics in matter │
3. │ Anne L Huillier │ 2023 │ physics │ for experimental methods that generate attosecond pulses of light for the study of electron dynamics in matter │
4. │ Alain Aspect │ 2022 │ physics │ for experiments with entangled photons establishing the violation of Bell inequalities and pioneering quantum information science │
5. │ Anton Zeilinger │ 2022 │ physics │ for experiments with entangled photons establishing the violation of Bell inequalities and pioneering quantum information science │
6. │ John Clauser │ 2022 │ physics │ for experiments with entangled photons establishing the violation of Bell inequalities and pioneering quantum information science │
7. │ Syukuro Manabe │ 2021 │ physics │ for the physical modelling of Earths climate quantifying variability and reliably predicting global warming │
8. │ Klaus Hasselmann │ 2021 │ physics │ for the physical modelling of Earths climate quantifying variability and reliably predicting global warming │
9. │ Giorgio Parisi │ 2021 │ physics │ for the discovery of the interplay of disorder and fluctuations in physical systems from atomic to planetary scales │
└──────────────────┴──────┴──────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

View File

@ -0,0 +1,75 @@
---
slug: /en/sql-reference/window-functions/nth_value
sidebar_label: nth_value
sidebar_position: 5
---
# nth_value
Returns the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
**Syntax**
```sql
nth_value (x, offset)
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]])
```
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
**Parameters**
- `x` — Column name.
- `offset` — nth row to evaluate current row against.
**Returned value**
- The first non-NULL value evaluated against the nth row (offset) in its ordered frame.
**Example**
In this example the `nth_value` function is used to find the third-highest salary from a fictional dataset of salaries of Premier League football players.
Query:
```sql
DROP TABLE IF EXISTS salaries;
CREATE TABLE salaries
(
`team` String,
`player` String,
`salary` UInt32,
`position` String
)
Engine = Memory;
INSERT INTO salaries FORMAT Values
('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
('New Coreystad Archdukes', 'Scott Harrison', 180000, 'D'),
('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'),
('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'),
('South Hampton Seagulls', 'James Henderson', 140000, 'M');
```
```sql
SELECT player, salary, nth_value(player,3) OVER(ORDER BY salary DESC) AS third_highest_salary FROM salaries;
```
Result:
```response
┌─player──────────┬─salary─┬─third_highest_salary─┐
1. │ Gary Chen │ 195000 │ │
2. │ Robert George │ 195000 │ │
3. │ Charles Juarez │ 190000 │ Charles Juarez │
4. │ Scott Harrison │ 180000 │ Charles Juarez │
5. │ Douglas Benson │ 150000 │ Charles Juarez │
6. │ James Henderson │ 140000 │ Charles Juarez │
7. │ Michael Stanley │ 100000 │ Charles Juarez │
└─────────────────┴────────┴──────────────────────┘
```

Some files were not shown because too many files have changed in this diff.