Merge branch 'master' into user-grants-from-config
This commit is contained in commit ec011b9113.

2 changes: .github/PULL_REQUEST_TEMPLATE.md (vendored)
@@ -2,7 +2,7 @@
A technical comment, you are free to remove or leave it as it is when PR is created
The following categories are used in the next scripts, update them accordingly
utils/changelog/changelog.py
tests/ci/run_check.py
tests/ci/cancel_and_rerun_workflow_lambda/app.py
-->
### Changelog category (leave one):
- New Feature
34 changes: .github/workflows/master.yml (vendored)
@@ -1341,6 +1341,40 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  FunctionalStatelessTestReleaseAnalyzer:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/stateless_analyzer
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Stateless tests (release, analyzer)
          REPO_COPY=${{runner.temp}}/stateless_analyzer/ClickHouse
          KILL_TIMEOUT=10800
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Functional test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  FunctionalStatelessTestAarch64:
    needs: [BuilderDebAarch64]
    runs-on: [self-hosted, func-tester-aarch64]
6 changes: .gitmodules (vendored)
@@ -296,6 +296,9 @@
[submodule "contrib/libdivide"]
    path = contrib/libdivide
    url = https://github.com/ridiculousfish/libdivide
[submodule "contrib/libbcrypt"]
    path = contrib/libbcrypt
    url = https://github.com/rg3/libbcrypt.git
[submodule "contrib/ulid-c"]
    path = contrib/ulid-c
    url = https://github.com/ClickHouse/ulid-c.git
@@ -335,6 +338,9 @@
[submodule "contrib/liburing"]
    path = contrib/liburing
    url = https://github.com/axboe/liburing
[submodule "contrib/libfiu"]
    path = contrib/libfiu
    url = https://github.com/ClickHouse/libfiu.git
[submodule "contrib/isa-l"]
    path = contrib/isa-l
    url = https://github.com/ClickHouse/isa-l.git
@@ -57,7 +57,7 @@ if (ENABLE_CHECK_HEAVY_BUILDS)
    # set CPU time limit to 1000 seconds
    set (RLIMIT_CPU 1000)

    # gcc10/gcc10/clang -fsanitize=memory is too heavy
    # -fsanitize=memory is too heavy
    if (SANITIZE STREQUAL "memory")
        set (RLIMIT_DATA 10000000000) # 10G
    endif()
@@ -170,18 +170,6 @@ else ()
    set(NO_WHOLE_ARCHIVE --no-whole-archive)
endif ()

option(ENABLE_CURL_BUILD "Enable curl, azure, sentry build on by default except MacOS." ON)
if (OS_DARWIN)
    # Disable the curl, azure, senry build on MacOS
    set (ENABLE_CURL_BUILD OFF)
endif ()

option(ENABLE_ISAL_LIBRARY "Enable ISA-L library ON by default except on aarch64." ON)
if (ARCH_AARCH64)
    # Disable ISA-L libray on aarch64.
    set (ENABLE_ISAL_LIBRARY OFF)
endif ()

if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
    # Can be lld or ld-lld or lld-13 or /path/to/lld.
    if (LINKER_NAME MATCHES "lld")
@@ -292,7 +280,7 @@ set (CMAKE_C_STANDARD 11)
set (CMAKE_C_EXTENSIONS ON) # required by most contribs written in C
set (CMAKE_C_STANDARD_REQUIRED ON)

# Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
# Compiler-specific coverage flags e.g. -fcoverage-mapping
option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)

if (COMPILER_CLANG)
@@ -399,14 +387,16 @@ else()
endif ()

option (ENABLE_GWP_ASAN "Enable Gwp-Asan" ON)
# We use mmap for allocations more heavily in debug builds,
# but GWP-ASan also wants to use mmap frequently,
# and due to a large number of memory mappings,
# We use mmap for allocations more heavily in debug builds,
# but GWP-ASan also wants to use mmap frequently,
# and due to a large number of memory mappings,
# it does not work together well.
if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
    set(ENABLE_GWP_ASAN OFF)
endif ()

option (ENABLE_FIU "Enable Fiu" ON)

option(WERROR "Enable -Werror compiler option" ON)

if (WERROR)
@@ -534,6 +524,26 @@ include (cmake/print_flags.cmake)

if (ENABLE_RUST)
    add_subdirectory (rust)

    # With LTO Rust adds few symbols with global visiblity, the most common is
    # rust_eh_personality. And this leads to linking errors because multiple
    # Rust libraries contains the same symbol.
    #
    # If it was shared library, that we could use version script for linker to
    # hide this symbols, but libraries are static.
    #
    # we could in theory compile everything to one library but this will be a
    # mess
    #
    # But this should be OK since CI has lots of other builds that are done
    # without LTO and it will find multiple definitions if there will be any.
    #
    # More information about this behaviour in Rust can be found here
    # - https://github.com/rust-lang/rust/issues/44322
    # - https://alanwu.space/post/symbol-hygiene/
    if (ENABLE_THINLTO)
        set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--allow-multiple-definition")
    endif()
endif()

add_subdirectory (base)
12 changes: README.md
@@ -21,11 +21,17 @@ curl https://clickhouse.com/ | sh
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.

## Upcoming Events
* [**ClickHouse Spring Meetup in Manhattan**](https://www.meetup.com/clickhouse-new-york-user-group/events/292517734) - April 26 - It's spring, and it's time to meet again in the city! Talks include: "Building a domain specific query language on top of Clickhouse", "A Galaxy of Information", "Our Journey to ClickHouse Cloud from Redshift", and a ClickHouse update!

* [**v23.4 Release Webinar**](https://clickhouse.com/company/events/v23-4-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-04) - April 26 - 23.4 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**ClickHouse Meetup in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/292892466) - May 16 - Save the date! ClickHouse is coming back to Berlin. We’re excited to announce an upcoming ClickHouse Meetup that you won’t want to miss. Join us as we gather together to discuss the latest in the world of ClickHouse and share user stories.
* [**ClickHouse Meetup in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/292892466) - May 16
* [**ClickHouse Meetup in Barcelona**](https://www.meetup.com/clickhouse-barcelona-user-group/events/292892669) - May 25
* [**ClickHouse Meetup in London**](https://www.meetup.com/clickhouse-london-user-group/events/292892824) - May 25
* [**ClickHouse Meetup in San Francisco**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/293426725/) - Jun 7
* [**ClickHouse Meetup in Stockholm**](https://www.meetup.com/clickhouse-berlin-user-group/events/292892466) - Jun 13

Also, keep an eye out for upcoming meetups in Amsterdam, Boston, NYC, Beijing, and Toronto. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.

## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
* **Recording available**: [**v23.3 Release Webinar**](https://www.youtube.com/watch?v=ISaGUjvBNao) UNDROP TABLE, server settings introspection, nested dynamic disks, MySQL compatibility, parseDate Time, Lightweight Deletes, Parallel Replicas, integrations updates, and so much more! Watch it now!
* **Recording available**: [**v23.4 Release Webinar**]([https://www.youtube.com/watch?v=ISaGUjvBNao](https://www.youtube.com/watch?v=4rrf6bk_mOg)) UNDROP TABLE, server settings introspection, nested dynamic disks, MySQL compatibility, parseDate Time, Lightweight Deletes, Parallel Replicas, integrations updates, and so much more! Watch it now!
* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)
@@ -22,13 +22,7 @@ The following versions of ClickHouse server are currently being supported with s
| 22.10 | ❌ |
| 22.9 | ❌ |
| 22.8 | ✔️ |
| 22.7 | ❌ |
| 22.6 | ❌ |
| 22.5 | ❌ |
| 22.4 | ❌ |
| 22.3 | ❌ |
| 22.2 | ❌ |
| 22.1 | ❌ |
| 22.* | ❌ |
| 21.* | ❌ |
| 20.* | ❌ |
| 19.* | ❌ |
@@ -73,18 +73,6 @@
# endif
#endif

#if defined(ADDRESS_SANITIZER)
#    define BOOST_USE_ASAN 1
#    define BOOST_USE_UCONTEXT 1
#endif

#if defined(THREAD_SANITIZER)
#    define BOOST_USE_TSAN 1
#    define BOOST_USE_UCONTEXT 1
#endif

/// TODO: Strange enough, there is no way to detect UB sanitizer.

/// Explicitly allow undefined behaviour for certain functions. Use it as a function attribute.
/// It is useful in case when compiler cannot see (and exploit) it, but UBSan can.
/// Example: multiplication of signed integers with possibility of overflow when both sides are from user input.
@@ -314,7 +314,14 @@ struct integer<Bits, Signed>::_impl

const T alpha = t / static_cast<T>(max_int);

if (alpha <= static_cast<T>(max_int))
/** Here we have to use strict comparison.
  * The max_int is 2^64 - 1.
  * When casted to floating point type, it will be rounded to the closest representable number,
  * which is 2^64.
  * But 2^64 is not representable in uint64_t,
  * so the maximum representable number will be strictly less.
  */
if (alpha < static_cast<T>(max_int))
    self = static_cast<uint64_t>(alpha);
else // max(double) / 2^64 will surely contain less than 52 precision bits, so speed up computations.
    set_multiplier<double>(self, static_cast<double>(alpha));
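To make the new comment's argument concrete, here is a minimal standalone C++ sketch (not part of the patch) showing the rounding that the strict comparison guards against:

```cpp
#include <cstdint>
#include <iostream>

int main()
{
    // max_int = 2^64 - 1 has 64 significant bits; a double has only 53,
    // so the cast rounds it up to the nearest representable value: exactly 2^64.
    const uint64_t max_int = UINT64_MAX;
    const double rounded = static_cast<double>(max_int);

    std::cout << (rounded == 18446744073709551616.0) << '\n'; // prints 1 (2^64)
    // 2^64 itself does not fit back into uint64_t, so `alpha <= max_int` could
    // admit a value whose uint64_t conversion overflows; `alpha < max_int` cannot.
}
```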
@@ -53,7 +53,7 @@ float logf(float x)
    tmp = ix - OFF;
    i = (tmp >> (23 - LOGF_TABLE_BITS)) % N;
    k = (int32_t)tmp >> 23; /* arithmetic shift */
    iz = ix - (tmp & 0x1ff << 23);
    iz = ix - (tmp & 0xff800000);
    invc = T[i].invc;
    logc = T[i].logc;
    z = (double_t)asfloat(iz);
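Both masks are the same bit pattern, since `<<` binds tighter than `&` in C; the rewrite only spells the constant out. A quick standalone check (not part of the patch):

```cpp
#include <cassert>
#include <cstdint>

int main()
{
    // `tmp & 0x1ff << 23` parses as `tmp & (0x1ff << 23)`, and
    // 0x1ff << 23 == 0xff800000 (the sign bit plus the 8 exponent bits).
    const uint32_t tmp = 0x3f317218u; // arbitrary float bit pattern
    assert((0x1ffu << 23) == 0xff800000u);
    assert((tmp & (0x1ffu << 23)) == (tmp & 0xff800000u));
    return 0;
}
```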
@@ -31,7 +31,8 @@ TRAP(argp_state_help)
TRAP(argp_usage)
TRAP(asctime)
TRAP(clearenv)
TRAP(crypt)
// Redefined at contrib/libbcrypt/crypt_blowfish/wrapper.c:186
// TRAP(crypt)
TRAP(ctime)
TRAP(cuserid)
TRAP(drand48)
@@ -53,7 +53,7 @@

// Define if no <locale> header is available (such as on WinCE)
// #define POCO_NO_LOCALE
#define POCO_NO_LOCALE

// Define to desired default thread stack size
@@ -30,9 +30,6 @@
#include <cctype>
#include <cmath>
#include <limits>
#if !defined(POCO_NO_LOCALE)
#    include <locale>
#endif

// binary numbers are supported, thus 64 (bits) + 1 (string terminating zero)
@@ -53,11 +50,7 @@ inline char decimalSeparator()
/// Returns decimal separator from global locale or
/// default '.' for platforms where locale is unavailable.
{
#if !defined(POCO_NO_LOCALE)
    return std::use_facet<std::numpunct<char>>(std::locale()).decimal_point();
#else
    return '.';
#endif
}

@@ -65,11 +58,7 @@ inline char thousandSeparator()
/// Returns thousand separator from global locale or
/// default ',' for platforms where locale is unavailable.
{
#if !defined(POCO_NO_LOCALE)
    return std::use_facet<std::numpunct<char>>(std::locale()).thousands_sep();
#else
    return ',';
#endif
}
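For context, a standalone sketch (not Poco code) of the locale facet call that the removed branches relied on; with POCO_NO_LOCALE now defined, Poco falls back to the hard-coded '.' and ',' defaults instead:

```cpp
#include <iostream>
#include <locale>

int main()
{
    // Query the global locale's numpunct facet, as the removed
    // #if !defined(POCO_NO_LOCALE) branches did.
    const auto & np = std::use_facet<std::numpunct<char>>(std::locale());
    std::cout << np.decimal_point() << ' ' << np.thousands_sep() << '\n';
    // In the default "C" locale this prints the same '.' and ',' that the
    // #else branches return, so behavior only changes under other locales.
}
```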
@@ -16,9 +16,6 @@
#include "Poco/Exception.h"
#include "Poco/Ascii.h"
#include <sstream>
#if !defined(POCO_NO_LOCALE)
#include <locale>
#endif
#include <cstddef>

@@ -147,9 +144,6 @@ namespace
void formatOne(std::string& result, std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt, std::vector<Any>::const_iterator& itVal)
{
    std::ostringstream str;
#if !defined(POCO_NO_LOCALE)
    str.imbue(std::locale::classic());
#endif
    try
    {
        parseFlags(str, itFmt, endFmt);
@@ -15,9 +15,6 @@
#include "Poco/NumberFormatter.h"
#include "Poco/MemoryStream.h"
#include <iomanip>
#if !defined(POCO_NO_LOCALE)
#include <locale>
#endif
#include <cstdio>
@@ -19,9 +19,6 @@
#include <cstdio>
#include <cctype>
#include <stdlib.h>
#if !defined(POCO_NO_LOCALE)
#include <locale>
#endif

#if defined(POCO_LONG_IS_64_BIT)
@@ -9,27 +9,19 @@ if (CMAKE_CXX_COMPILER_LAUNCHER MATCHES "ccache" OR CMAKE_C_COMPILER_LAUNCHER MA
    return()
endif()

set(ENABLE_CCACHE "default" CACHE STRING "Deprecated, use COMPILER_CACHE=(auto|ccache|sccache|disabled)")
if (NOT ENABLE_CCACHE STREQUAL "default")
    message(WARNING "The -DENABLE_CCACHE is deprecated in favor of -DCOMPILER_CACHE")
endif()

set(COMPILER_CACHE "auto" CACHE STRING "Speedup re-compilations using the caching tools; valid options are 'auto' (ccache, then sccache), 'ccache', 'sccache', or 'disabled'")

# It has pretty complex logic, because the ENABLE_CCACHE is deprecated, but still should
# control the COMPILER_CACHE
# After it will be completely removed, the following block will be much simpler
if (COMPILER_CACHE STREQUAL "ccache" OR (ENABLE_CCACHE AND NOT ENABLE_CCACHE STREQUAL "default"))
    find_program (CCACHE_EXECUTABLE ccache)
elseif(COMPILER_CACHE STREQUAL "disabled" OR NOT ENABLE_CCACHE STREQUAL "default")
    message(STATUS "Using *ccache: no (disabled via configuration)")
    return()
elseif(COMPILER_CACHE STREQUAL "auto")
if(COMPILER_CACHE STREQUAL "auto")
    find_program (CCACHE_EXECUTABLE ccache sccache)
elseif (COMPILER_CACHE STREQUAL "ccache")
    find_program (CCACHE_EXECUTABLE ccache)
elseif(COMPILER_CACHE STREQUAL "sccache")
    find_program (CCACHE_EXECUTABLE sccache)
elseif(COMPILER_CACHE STREQUAL "disabled")
    message(STATUS "Using *ccache: no (disabled via configuration)")
    return()
else()
    message(${RECONFIGURE_MESSAGE_LEVEL} "The COMPILER_CACHE must be one of (auto|ccache|sccache|disabled), given '${COMPILER_CACHE}'")
    message(${RECONFIGURE_MESSAGE_LEVEL} "The COMPILER_CACHE must be one of (auto|ccache|sccache|disabled), value: '${COMPILER_CACHE}'")
endif()
@@ -21,7 +21,7 @@ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")

set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=bfd")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=bfd")

# Currently, lld does not work with the error:
# ld.lld: error: section size decrease is too large
34 changes: contrib/CMakeLists.txt (vendored)
@@ -105,6 +105,7 @@ add_contrib (libfarmhash)
add_contrib (icu-cmake icu)
add_contrib (h3-cmake h3)
add_contrib (mariadb-connector-c-cmake mariadb-connector-c)
add_contrib (libfiu-cmake libfiu)

if (ENABLE_TESTS)
    add_contrib (googletest-cmake googletest)
@@ -134,27 +135,28 @@ add_contrib (aws-cmake
)

add_contrib (base64-cmake base64)
if (NOT ARCH_S390X)
    add_contrib (simdjson-cmake simdjson)
endif()
add_contrib (rapidjson-cmake rapidjson)
add_contrib (fastops-cmake fastops)
add_contrib (libuv-cmake libuv)
add_contrib (liburing-cmake liburing)
add_contrib (amqpcpp-cmake AMQP-CPP) # requires: libuv
add_contrib (cassandra-cmake cassandra) # requires: libuv

if (ENABLE_CURL_BUILD)
if (NOT OS_DARWIN)
    add_contrib (curl-cmake curl)
    add_contrib (azure-cmake azure)
    add_contrib (sentry-native-cmake sentry-native) # requires: curl
endif()

add_contrib (fmtlib-cmake fmtlib)
add_contrib (krb5-cmake krb5)
add_contrib (cyrus-sasl-cmake cyrus-sasl) # for krb5
add_contrib (libgsasl-cmake libgsasl) # requires krb5
add_contrib (librdkafka-cmake librdkafka) # requires: libgsasl
add_contrib (nats-io-cmake nats-io)
add_contrib (libhdfs3-cmake libhdfs3) # requires: protobuf, krb5
add_contrib (isa-l-cmake isa-l)
add_contrib (libhdfs3-cmake libhdfs3) # requires: protobuf, krb5, isa-l
add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift/avro/arrow/libhdfs3
add_contrib (cppkafka-cmake cppkafka)
add_contrib (libpqxx-cmake libpqxx)
@@ -176,25 +178,31 @@ endif()
add_contrib (sqlite-cmake sqlite-amalgamation)
add_contrib (s2geometry-cmake s2geometry)
add_contrib (c-ares-cmake c-ares)
add_contrib (qpl-cmake qpl)
add_contrib (morton-nd-cmake morton-nd)

if (OS_LINUX AND ARCH_AMD64 AND (ENABLE_AVX2 OR ENABLE_AVX512))
    option (ENABLE_QPL "Enable Intel® Query Processing Library" ${ENABLE_LIBRARIES})
elseif(ENABLE_QPL)
    message (${RECONFIGURE_MESSAGE_LEVEL} "QPL library is only supported on x86_64 arch with avx2/avx512 support")
endif()
if (ENABLE_QPL)
    add_contrib (idxd-config-cmake idxd-config)
    add_contrib (qpl-cmake qpl) # requires: idxd-config
else()
    message(STATUS "Not using QPL")
endif ()

add_contrib (morton-nd-cmake morton-nd)
if (ARCH_S390X)
    add_contrib(crc32-s390x-cmake crc32-s390x)
endif()

add_contrib (annoy-cmake annoy)

add_contrib (xxHash-cmake xxHash)

add_contrib (libbcrypt-cmake libbcrypt)

add_contrib (google-benchmark-cmake google-benchmark)

add_contrib (ulid-c-cmake ulid-c)

if (ENABLE_ISAL_LIBRARY)
    add_contrib (isa-l-cmake isa-l)
endif()

# Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
# in "contrib/..." as originally planned, so we workaround this by fixing FOLDER properties of all targets manually,
@@ -92,6 +92,8 @@ add_library (boost::system ALIAS _boost_system)
target_include_directories (_boost_system PRIVATE ${LIBRARY_DIR})

# context
option (BOOST_USE_UCONTEXT "Use ucontext_t for context switching of boost::fiber within boost::context" OFF)

enable_language(ASM)
SET(ASM_OPTIONS "-x assembler-with-cpp")

@@ -100,20 +102,6 @@ set (SRCS_CONTEXT
    "${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp"
)

if (SANITIZE AND (SANITIZE STREQUAL "address" OR SANITIZE STREQUAL "thread"))
    add_compile_definitions(BOOST_USE_UCONTEXT)

    if (SANITIZE STREQUAL "address")
        add_compile_definitions(BOOST_USE_ASAN)
    elseif (SANITIZE STREQUAL "thread")
        add_compile_definitions(BOOST_USE_TSAN)
    endif()

    set (SRCS_CONTEXT ${SRCS_CONTEXT}
        "${LIBRARY_DIR}/libs/context/src/fiber.cpp"
        "${LIBRARY_DIR}/libs/context/src/continuation.cpp"
    )
endif()
if (ARCH_AARCH64)
    set (SRCS_CONTEXT ${SRCS_CONTEXT}
        "${LIBRARY_DIR}/libs/context/src/asm/jump_arm64_aapcs_elf_gas.S"
@@ -152,10 +140,27 @@ else()
    )
endif()

if (SANITIZE OR BOOST_USE_UCONTEXT)
    list (APPEND SRCS_CONTEXT
        "${LIBRARY_DIR}/libs/context/src/fiber.cpp"
        "${LIBRARY_DIR}/libs/context/src/continuation.cpp"
    )
endif()

add_library (_boost_context ${SRCS_CONTEXT})
add_library (boost::context ALIAS _boost_context)
target_include_directories (_boost_context PRIVATE ${LIBRARY_DIR})

if (SANITIZE OR BOOST_USE_UCONTEXT)
    target_compile_definitions(_boost_context PUBLIC BOOST_USE_UCONTEXT)
endif()

if (SANITIZE STREQUAL "address")
    target_compile_definitions(_boost_context PUBLIC BOOST_USE_ASAN)
elseif (SANITIZE STREQUAL "thread")
    target_compile_definitions(_boost_context PUBLIC BOOST_USE_TSAN)
endif()

# coroutine

set (SRCS_COROUTINE
@@ -111,6 +111,8 @@ elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "mips")
    set(ARCH "generic")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "ppc64le")
    set(ARCH "ppc64le")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "riscv64")
    set(ARCH "riscv64")
else()
    message(FATAL_ERROR "Unknown processor:" ${CMAKE_SYSTEM_PROCESSOR})
endif()
@@ -8,7 +8,7 @@

/*
 * (all numbers are written in big-endian manner: the least significant digit on the right)
 * (only bit representations are used - no hex or octal, leading zeroes are ommited)
 * (only bit representations are used - no hex or octal, leading zeroes are omitted)
 *
 * Consistent hashing scheme:
 *
23 changes: contrib/idxd-config-cmake/CMakeLists.txt (new file)
@@ -0,0 +1,23 @@
## accel_config is the utility library required by QPL-Deflate codec for controlling and configuring Intel® In-Memory Analytics Accelerator (Intel® IAA).
set (LIBACCEL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/idxd-config")
set (UUID_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl-cmake")
set (LIBACCEL_HEADER_DIR "${ClickHouse_SOURCE_DIR}/contrib/idxd-config-cmake/include")
set (SRCS
    "${LIBACCEL_SOURCE_DIR}/accfg/lib/libaccfg.c"
    "${LIBACCEL_SOURCE_DIR}/util/log.c"
    "${LIBACCEL_SOURCE_DIR}/util/sysfs.c"
)

add_library(_accel-config ${SRCS})

target_compile_options(_accel-config PRIVATE "-D_GNU_SOURCE")

target_include_directories(_accel-config BEFORE
    PRIVATE ${UUID_DIR}
    PRIVATE ${LIBACCEL_HEADER_DIR}
    PRIVATE ${LIBACCEL_SOURCE_DIR})

target_include_directories(_accel-config SYSTEM BEFORE
    PUBLIC ${LIBACCEL_SOURCE_DIR}/accfg)

add_library(ch_contrib::accel-config ALIAS _accel-config)
@@ -1,3 +1,14 @@
option(ENABLE_ISAL_LIBRARY "Enable ISA-L library" ${ENABLE_LIBRARIES})
if (ARCH_AARCH64)
    # Disable ISA-L libray on aarch64.
    set (ENABLE_ISAL_LIBRARY OFF)
endif ()

if (NOT ENABLE_ISAL_LIBRARY)
    message(STATUS "Not using isa-l")
    return()
endif()

set(ISAL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/isa-l")

# The YASM and NASM assembers are somewhat mutually compatible. ISAL specifically needs NASM. If only YASM is installed, then check_language(ASM_NASM)
1 change: contrib/libbcrypt (submodule, vendored)
@@ -0,0 +1 @@
Subproject commit 8aa32ad94ebe06b76853b0767c910c9fbf7ccef4
19 changes: contrib/libbcrypt-cmake/CMakeLists.txt (new file)
@@ -0,0 +1,19 @@
option(ENABLE_BCRYPT "Enable bcrypt" ${ENABLE_LIBRARIES})

if (NOT ENABLE_BCRYPT)
    message(STATUS "Not using bcrypt")
    return()
endif()

set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/libbcrypt")

set(SRCS
    "${LIBRARY_DIR}/bcrypt.c"
    "${LIBRARY_DIR}/crypt_blowfish/crypt_blowfish.c"
    "${LIBRARY_DIR}/crypt_blowfish/crypt_gensalt.c"
    "${LIBRARY_DIR}/crypt_blowfish/wrapper.c"
)

add_library(_bcrypt ${SRCS})
target_include_directories(_bcrypt SYSTEM PUBLIC "${LIBRARY_DIR}")
add_library(ch_contrib::bcrypt ALIAS _bcrypt)
1 change: contrib/libfiu (submodule, vendored)
@@ -0,0 +1 @@
Subproject commit b85edbde4cf974b1b40d27828a56f0505f4e2ee5
20 changes: contrib/libfiu-cmake/CMakeLists.txt (new file)
@@ -0,0 +1,20 @@
if (NOT ENABLE_FIU)
    message (STATUS "Not using fiu")
    return ()
endif ()

set(FIU_DIR "${ClickHouse_SOURCE_DIR}/contrib/libfiu/")

set(FIU_SOURCES
    ${FIU_DIR}/libfiu/fiu.c
    ${FIU_DIR}/libfiu/fiu-rc.c
    ${FIU_DIR}/libfiu/backtrace.c
    ${FIU_DIR}/libfiu/wtable.c
)

set(FIU_HEADERS "${FIU_DIR}/libfiu")

add_library(_fiu ${FIU_SOURCES})
target_compile_definitions(_fiu PUBLIC DUMMY_BACKTRACE)
target_include_directories(_fiu PUBLIC ${FIU_HEADERS})
add_library(ch_contrib::fiu ALIAS _fiu)
@@ -1,10 +0,0 @@
#include <string.h>

int main()
{
    // We can't test "char *p = strerror_r()" because that only causes a
    // compiler warning when strerror_r returns an integer.
    char *buf = 0;
    int i = strerror_r(0, buf, 100);
    return i;
}
@@ -1,46 +0,0 @@
FUNCTION(AUTO_SOURCES RETURN_VALUE PATTERN SOURCE_SUBDIRS)

    IF ("${SOURCE_SUBDIRS}" STREQUAL "RECURSE")
        SET(PATH ".")
        IF (${ARGC} EQUAL 4)
            LIST(GET ARGV 3 PATH)
        ENDIF ()
    ENDIF()

    IF ("${SOURCE_SUBDIRS}" STREQUAL "RECURSE")
        UNSET(${RETURN_VALUE})
        FILE(GLOB SUBDIR_FILES "${PATH}/${PATTERN}")
        LIST(APPEND ${RETURN_VALUE} ${SUBDIR_FILES})

        FILE(GLOB SUBDIRS RELATIVE ${PATH} ${PATH}/*)

        FOREACH(DIR ${SUBDIRS})
            IF (IS_DIRECTORY ${PATH}/${DIR})
                IF (NOT "${DIR}" STREQUAL "CMAKEFILES")
                    FILE(GLOB_RECURSE SUBDIR_FILES "${PATH}/${DIR}/${PATTERN}")
                    LIST(APPEND ${RETURN_VALUE} ${SUBDIR_FILES})
                ENDIF()
            ENDIF()
        ENDFOREACH()
    ELSE ()
        FILE(GLOB ${RETURN_VALUE} "${PATTERN}")

        FOREACH (PATH ${SOURCE_SUBDIRS})
            FILE(GLOB SUBDIR_FILES "${PATH}/${PATTERN}")
            LIST(APPEND ${RETURN_VALUE} ${SUBDIR_FILES})
        ENDFOREACH(PATH ${SOURCE_SUBDIRS})
    ENDIF ()

    IF (${FILTER_OUT})
        LIST(REMOVE_ITEM ${RETURN_VALUE} ${FILTER_OUT})
    ENDIF()

    SET(${RETURN_VALUE} ${${RETURN_VALUE}} PARENT_SCOPE)
ENDFUNCTION(AUTO_SOURCES)

FUNCTION(CONTAINS_STRING FILE SEARCH RETURN_VALUE)
    FILE(STRINGS ${FILE} FILE_CONTENTS REGEX ".*${SEARCH}.*")
    IF (FILE_CONTENTS)
        SET(${RETURN_VALUE} TRUE PARENT_SCOPE)
    ENDIF()
ENDFUNCTION(CONTAINS_STRING)
@@ -1,44 +0,0 @@
OPTION(ENABLE_SSE "enable SSE4.2 builtin function" ON)

INCLUDE (CheckFunctionExists)
CHECK_FUNCTION_EXISTS(dladdr HAVE_DLADDR)
CHECK_FUNCTION_EXISTS(nanosleep HAVE_NANOSLEEP)

SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-strict-aliasing")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-strict-aliasing")

IF(ENABLE_SSE STREQUAL ON AND ARCH_AMD64)
    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2")
ENDIF()

IF(NOT TEST_HDFS_PREFIX)
    SET(TEST_HDFS_PREFIX "./" CACHE STRING "default directory prefix used for test." FORCE)
ENDIF(NOT TEST_HDFS_PREFIX)

ADD_DEFINITIONS(-DTEST_HDFS_PREFIX="${TEST_HDFS_PREFIX}")
ADD_DEFINITIONS(-D__STDC_FORMAT_MACROS)
ADD_DEFINITIONS(-D_GNU_SOURCE)
ADD_DEFINITIONS(-D_GLIBCXX_USE_NANOSLEEP)

TRY_COMPILE(STRERROR_R_RETURN_INT
    ${CMAKE_CURRENT_BINARY_DIR}
    "${CMAKE_CURRENT_SOURCE_DIR}/CMake/CMakeTestCompileStrerror.c"
    CMAKE_FLAGS "-DCMAKE_CXX_LINK_EXECUTABLE='echo not linking now...'"
    OUTPUT_VARIABLE OUTPUT)

MESSAGE(STATUS "Checking whether strerror_r returns an int")

IF(STRERROR_R_RETURN_INT)
    MESSAGE(STATUS "Checking whether strerror_r returns an int -- yes")
ELSE(STRERROR_R_RETURN_INT)
    MESSAGE(STATUS "Checking whether strerror_r returns an int -- no")
ENDIF(STRERROR_R_RETURN_INT)

set(HAVE_STEADY_CLOCK 1)
set(HAVE_NESTED_EXCEPTION 1)

SET(HAVE_BOOST_CHRONO 0)
SET(HAVE_BOOST_ATOMIC 0)

SET(HAVE_STD_CHRONO 1)
SET(HAVE_STD_ATOMIC 1)
@@ -1,42 +0,0 @@
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux")
    SET(OS_LINUX true CACHE INTERNAL "Linux operating system")
ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
    SET(OS_MACOSX true CACHE INTERNAL "Mac Darwin operating system")
ELSE(CMAKE_SYSTEM_NAME STREQUAL "Linux")
    MESSAGE(FATAL_ERROR "Unsupported OS: \"${CMAKE_SYSTEM_NAME}\"")
ENDIF(CMAKE_SYSTEM_NAME STREQUAL "Linux")

IF(CMAKE_COMPILER_IS_GNUCXX)
    EXECUTE_PROCESS(COMMAND ${CMAKE_CXX_COMPILER} -dumpfullversion OUTPUT_VARIABLE GCC_COMPILER_VERSION)

    IF (NOT GCC_COMPILER_VERSION)
        EXECUTE_PROCESS(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_COMPILER_VERSION)

        IF (NOT GCC_COMPILER_VERSION)
            MESSAGE(FATAL_ERROR "Cannot get gcc version")
        ENDIF (NOT GCC_COMPILER_VERSION)
    ENDIF (NOT GCC_COMPILER_VERSION)

    STRING(REGEX MATCHALL "[0-9]+" GCC_COMPILER_VERSION ${GCC_COMPILER_VERSION})

    LIST(LENGTH GCC_COMPILER_VERSION GCC_COMPILER_VERSION_LENGTH)
    LIST(GET GCC_COMPILER_VERSION 0 GCC_COMPILER_VERSION_MAJOR)
    if (GCC_COMPILER_VERSION_LENGTH GREATER 1)
        LIST(GET GCC_COMPILER_VERSION 1 GCC_COMPILER_VERSION_MINOR)
    else ()
        set (GCC_COMPILER_VERSION_MINOR 0)
    endif ()

    SET(GCC_COMPILER_VERSION_MAJOR ${GCC_COMPILER_VERSION_MAJOR} CACHE INTERNAL "gcc major version")
    SET(GCC_COMPILER_VERSION_MINOR ${GCC_COMPILER_VERSION_MINOR} CACHE INTERNAL "gcc minor version")

    MESSAGE(STATUS "checking compiler: GCC (${GCC_COMPILER_VERSION_MAJOR}.${GCC_COMPILER_VERSION_MINOR}.${GCC_COMPILER_VERSION_PATCH})")
ELSE(CMAKE_COMPILER_IS_GNUCXX)
    EXECUTE_PROCESS(COMMAND ${CMAKE_C_COMPILER} --version OUTPUT_VARIABLE COMPILER_OUTPUT)
    IF(COMPILER_OUTPUT MATCHES "clang")
        SET(CMAKE_COMPILER_IS_CLANG true CACHE INTERNAL "using clang as compiler")
        MESSAGE(STATUS "checking compiler: CLANG")
    ELSE(COMPILER_OUTPUT MATCHES "clang")
        MESSAGE(FATAL_ERROR "Unsupported compiler: \"${CMAKE_CXX_COMPILER}\"")
    ENDIF(COMPILER_OUTPUT MATCHES "clang")
ENDIF(CMAKE_COMPILER_IS_GNUCXX)
@@ -21,10 +21,17 @@ set(HDFS3_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/libhdfs3")
set(HDFS3_SOURCE_DIR "${HDFS3_ROOT_DIR}/src")
set(HDFS3_COMMON_DIR "${HDFS3_SOURCE_DIR}/common")

# module
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake" ${CMAKE_MODULE_PATH})
include(Platform)
include(Options)
ADD_DEFINITIONS(-DTEST_HDFS_PREFIX="${TEST_HDFS_PREFIX}")
ADD_DEFINITIONS(-D__STDC_FORMAT_MACROS)
ADD_DEFINITIONS(-D_GNU_SOURCE)
ADD_DEFINITIONS(-D_GLIBCXX_USE_NANOSLEEP)
ADD_DEFINITIONS(-DHAVE_NANOSLEEP)
set(HAVE_STEADY_CLOCK 1)
set(HAVE_NESTED_EXCEPTION 1)
SET(HAVE_BOOST_CHRONO 0)
SET(HAVE_BOOST_ATOMIC 0)
SET(HAVE_STD_CHRONO 1)
SET(HAVE_STD_ATOMIC 1)

# source
set(PROTO_FILES
@@ -172,7 +179,7 @@ if (TARGET OpenSSL::SSL)
    target_link_libraries(_hdfs3 PRIVATE OpenSSL::Crypto OpenSSL::SSL)
endif()

if (ENABLE_ISAL_LIBRARY)
if (TARGET ch_contrib::isal)
    target_link_libraries(_hdfs3 PRIVATE ch_contrib::isal)
    add_definitions(-DHADOOP_ISAL_LIBRARY)
endif()
2 changes: contrib/llvm-project (submodule, vendored)
@@ -1 +1 @@
Subproject commit 2aedf7598a4040b23881dbe05b6afaca25a337ef
Subproject commit d857c707fccd50423bea1c4710dc469cf89607a9
@@ -1,36 +1,5 @@
## The Intel® QPL provides high performance implementations of data processing functions for existing hardware accelerator, and/or software path in case if hardware accelerator is not available.
if (OS_LINUX AND ARCH_AMD64 AND (ENABLE_AVX2 OR ENABLE_AVX512))
    option (ENABLE_QPL "Enable Intel® Query Processing Library" ${ENABLE_LIBRARIES})
elseif(ENABLE_QPL)
    message (${RECONFIGURE_MESSAGE_LEVEL} "QPL library is only supported on x86_64 arch with avx2/avx512 support")
endif()

if (NOT ENABLE_QPL)
    message(STATUS "Not using QPL")
    return()
endif()

## QPL has build dependency on libaccel-config. Here is to build libaccel-config which is required by QPL.
## libaccel-config is the utility library for controlling and configuring Intel® In-Memory Analytics Accelerator (Intel® IAA).
set (LIBACCEL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/idxd-config")
set (UUID_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl-cmake")
set (LIBACCEL_HEADER_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl-cmake/idxd-header")
set (SRCS
    "${LIBACCEL_SOURCE_DIR}/accfg/lib/libaccfg.c"
    "${LIBACCEL_SOURCE_DIR}/util/log.c"
    "${LIBACCEL_SOURCE_DIR}/util/sysfs.c"
)

add_library(accel-config ${SRCS})

target_compile_options(accel-config PRIVATE "-D_GNU_SOURCE")

target_include_directories(accel-config BEFORE
    PRIVATE ${UUID_DIR}
    PRIVATE ${LIBACCEL_HEADER_DIR}
    PRIVATE ${LIBACCEL_SOURCE_DIR})

## QPL build start here.
set (QPL_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl")
set (QPL_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl/sources")
set (QPL_BINARY_DIR "${ClickHouse_BINARY_DIR}/build/contrib/qpl")
@@ -187,7 +156,9 @@ target_include_directories(qplcore_avx512
set_target_properties(qplcore_avx512 PROPERTIES
    $<$<C_COMPILER_ID:GNU>:C_STANDARD 17>)

target_link_libraries(qplcore_avx512 ${CMAKE_DL_LIBS} isal)
target_link_libraries(qplcore_avx512
    PRIVATE isal
    PRIVATE ${CMAKE_DL_LIBS})

target_compile_options(qplcore_avx512
    PRIVATE ${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}
@@ -217,7 +188,9 @@ target_include_directories(qplcore_px
set_target_properties(qplcore_px PROPERTIES
    $<$<C_COMPILER_ID:GNU>:C_STANDARD 17>)

target_link_libraries(qplcore_px isal ${CMAKE_DL_LIBS})
target_link_libraries(qplcore_px
    PRIVATE isal
    PRIVATE ${CMAKE_DL_LIBS})

target_compile_options(qplcore_px
    PRIVATE ${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}
@@ -338,11 +311,12 @@ target_compile_definitions(_qpl
    PUBLIC -DENABLE_QPL_COMPRESSION)

target_link_libraries(_qpl
    PRIVATE accel-config
    PRIVATE ch_contrib::accel-config
    PRIVATE ch_contrib::isal
    PRIVATE ${CMAKE_DL_LIBS})

add_library (ch_contrib::qpl ALIAS _qpl)
target_include_directories(_qpl SYSTEM BEFORE
    PUBLIC "${QPL_PROJECT_DIR}/include"
    PUBLIC "${LIBACCEL_SOURCE_DIR}/accfg"
    PUBLIC ${UUID_DIR})

add_library (ch_contrib::qpl ALIAS _qpl)
@@ -1,6 +1,6 @@
## The bare minimum ClickHouse Docker image.

It is intented as a showcase to check the amount of implicit dependencies of ClickHouse from the OS in addition to the OS kernel.
It is intended as a showcase to check the amount of implicit dependencies of ClickHouse from the OS in addition to the OS kernel.

Example usage:
@@ -123,7 +123,8 @@
        "docker/test/stateless",
        "docker/test/integration/base",
        "docker/test/fuzzer",
        "docker/test/keeper-jepsen"
        "docker/test/keeper-jepsen",
        "docker/test/server-jepsen"
    ]
},
"docker/test/integration/kerberized_hadoop": {
@@ -139,6 +140,10 @@
    "name": "clickhouse/keeper-jepsen-test",
    "dependent": []
},
"docker/test/server-jepsen": {
    "name": "clickhouse/server-jepsen-test",
    "dependent": []
},
"docker/test/install/deb": {
    "name": "clickhouse/install-deb-test",
    "dependent": []
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
    esac

ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
ARG VERSION="23.4.1.1943"
ARG VERSION="23.4.2.11"
ARG PACKAGES="clickhouse-keeper"

# user/group precreated explicitly with fixed uid/gid on purpose.
@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.4.1.1943"
ARG VERSION="23.4.2.11"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# user/group precreated explicitly with fixed uid/gid on purpose.
@@ -22,7 +22,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.4.1.1943"
ARG VERSION="23.4.2.11"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image
@@ -36,12 +36,10 @@ RUN arch=${TARGETARCH:-amd64} \
# repo versions doesn't work correctly with C++17
# also we push reports to s3, so we add index.html to subfolder urls
# https://github.com/ClickHouse-Extras/woboq_codebrowser/commit/37e15eaf377b920acb0b48dbe82471be9203f76b
RUN git clone https://github.com/ClickHouse/woboq_codebrowser \
    && cd woboq_codebrowser \
RUN git clone --depth=1 https://github.com/ClickHouse/woboq_codebrowser /woboq_codebrowser \
    && cd /woboq_codebrowser \
    && cmake . -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-${LLVM_VERSION} -DCMAKE_C_COMPILER=clang-${LLVM_VERSION} \
    && ninja \
    && cd .. \
    && rm -rf woboq_codebrowser
    && ninja

ENV CODEGEN=/woboq_codebrowser/generator/codebrowser_generator
ENV CODEINDEX=/woboq_codebrowser/indexgenerator/codebrowser_indexgenerator
@@ -147,6 +147,7 @@ function clone_submodules
    contrib/xxHash
    contrib/simdjson
    contrib/liburing
    contrib/libfiu
)

git submodule sync
@@ -16,6 +16,11 @@ ENV TESTS_TO_RUN="8"
ENV TIME_LIMIT="30"

ENV KEEPER_NODE=""
ENV NEMESIS=""
ENV WORKLOAD=""
ENV WITH_LOCAL_BINARY=""
ENV RATE=""
ENV CONCURRENCY=""

# volumes
@@ -15,8 +15,38 @@ if [ -z "$CLICKHOUSE_REPO_PATH" ]; then
    ls -lath ||:
fi

clickhouse_source="--clickhouse-source $CLICKHOUSE_PACKAGE"
if [ -n "$WITH_LOCAL_BINARY" ]; then
    clickhouse_source="--clickhouse-source /clickhouse"
fi

tests_count="--test-count $TESTS_TO_RUN"
tests_to_run="test-all"
workload=""
if [ -n "$WORKLOAD" ]; then
    tests_to_run="test"
    workload="--workload $WORKLOAD"
    tests_count=""
fi

nemesis=""
if [ -n "$NEMESIS" ]; then
    nemesis="--nemesis $NEMESIS"
fi

rate=""
if [ -n "$RATE" ]; then
    rate="--rate $RATE"
fi

concurrency=""
if [ -n "$CONCURRENCY" ]; then
    concurrency="--concurrency $CONCURRENCY"
fi

cd "$CLICKHOUSE_REPO_PATH/tests/jepsen.clickhouse"

(lein run server test-all --keeper "$KEEPER_NODE" --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" --time-limit "$TIME_LIMIT" --concurrency 50 -r 50 --clickhouse-source "$CLICKHOUSE_PACKAGE" --test-count "$TESTS_TO_RUN" || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log"
(lein run server $tests_to_run $workload --keeper "$KEEPER_NODE" $concurrency $nemesis $rate --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" --time-limit "$TIME_LIMIT" --concurrency 50 $clickhouse_source $tests_count --reuse-binary || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log"

mv store "$TEST_OUTPUT/"
@@ -59,6 +59,12 @@ install_packages previous_release_package_folder
# available for dump via clickhouse-local
configure

# local_blob_storage disk type does not exist in older versions
sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \
    | sed "s|<type>local_blob_storage</type>|<type>local</type>|" \
    > /etc/clickhouse-server/config.d/storage_conf.xml.tmp
sudo mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml

start
stop
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log
@@ -83,6 +89,11 @@ export USE_S3_STORAGE_FOR_MERGE_TREE=1
export ZOOKEEPER_FAULT_INJECTION=0
configure

sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \
    | sed "s|<type>local_blob_storage</type>|<type>local</type>|" \
    > /etc/clickhouse-server/config.d/storage_conf.xml.tmp
sudo mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml

start

clickhouse-client --query="SELECT 'Server version: ', version()"
@@ -59,12 +59,16 @@ def process_test_log(log_path, broken_tests):

        total += 1
        if TIMEOUT_SIGN in line:
            failed += 1
            test_results.append((test_name, "Timeout", test_time, []))
            if test_name in broken_tests:
                success += 1
                test_results.append((test_name, "BROKEN", test_time, []))
            else:
                failed += 1
                test_results.append((test_name, "Timeout", test_time, []))
        elif FAIL_SIGN in line:
            if test_name in broken_tests:
                success += 1
                test_results.append((test_name, "OK", test_time, []))
                test_results.append((test_name, "BROKEN", test_time, []))
            else:
                failed += 1
                test_results.append((test_name, "FAIL", test_time, []))
@@ -76,11 +80,11 @@ def process_test_log(log_path, broken_tests):
            test_results.append((test_name, "SKIPPED", test_time, []))
        else:
            if OK_SIGN in line and test_name in broken_tests:
                failed += 1
                skipped += 1
                test_results.append(
                    (
                        test_name,
                        "SKIPPED",
                        "NOT_FAILED",
                        test_time,
                        ["This test passed. Update broken_tests.txt.\n"],
                    )
20 changes: docs/changelogs/v23.4.2.11-stable.md (new file)
@@ -0,0 +1,20 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.4.2.11-stable (b6442320f9d) FIXME as compared to v23.4.1.1943-stable (3920eb987f7)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Revert "Fix GCS native copy ([#48981](https://github.com/ClickHouse/ClickHouse/issues/48981))" [#49194](https://github.com/ClickHouse/ClickHouse/pull/49194) ([Raúl Marín](https://github.com/Algunenano)).
* Fix race on Outdated parts loading [#49223](https://github.com/ClickHouse/ClickHouse/pull/49223) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Implement status comment [#48468](https://github.com/ClickHouse/ClickHouse/pull/48468) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update curl to 8.0.1 (for CVEs) [#48765](https://github.com/ClickHouse/ClickHouse/pull/48765) ([Boris Kuschel](https://github.com/bkuschel)).
* Fallback auth gh api [#49314](https://github.com/ClickHouse/ClickHouse/pull/49314) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
@@ -98,7 +98,7 @@ A hand-written recursive descent parser parses a query. For example, `ParserSele

## Interpreters {#interpreters}

Interpreters are responsible for creating the query execution pipeline from an `AST`. There are simple interpreters, such as `InterpreterExistsQuery` and `InterpreterDropQuery`, or the more sophisticated `InterpreterSelectQuery`. The query execution pipeline is a combination of block input or output streams. For example, the result of interpreting the `SELECT` query is the `IBlockInputStream` to read the result set from; the result of the INSERT query is the `IBlockOutputStream` to write data for insertion to, and the result of interpreting the `INSERT SELECT` query is the `IBlockInputStream` that returns an empty result set on the first read, but that copies data from `SELECT` to `INSERT` at the same time.
Interpreters are responsible for creating the query execution pipeline from an `AST`. There are simple interpreters, such as `InterpreterExistsQuery` and `InterpreterDropQuery`, or the more sophisticated `InterpreterSelectQuery`. The query execution pipeline is a combination of block input or output streams. For example, the result of interpreting the `SELECT` query is the `IBlockInputStream` to read the result set from; the result of the `INSERT` query is the `IBlockOutputStream` to write data for insertion to, and the result of interpreting the `INSERT SELECT` query is the `IBlockInputStream` that returns an empty result set on the first read, but that copies data from `SELECT` to `INSERT` at the same time.

`InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are done. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted to separate classes to allow modular transformations of query.
@@ -155,6 +155,9 @@ The following settings can be specified in configuration file for given endpoint
- `no_sign_request` - Ignore all the credentials so requests are not signed. Useful for accessing public buckets.
- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times.
- `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set. Optional.
- `server_side_encryption_kms_key_id` - If specified, required headers for accessing S3 objects with [SSE-KMS encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) will be set. If an empty string is specified, the AWS managed S3 key will be used. Optional.
- `server_side_encryption_kms_encryption_context` - If specified alongside `server_side_encryption_kms_key_id`, the given encryption context header for SSE-KMS will be set. Optional.
- `server_side_encryption_kms_bucket_key_enabled` - If specified alongside `server_side_encryption_kms_key_id`, the header to enable S3 bucket keys for SSE-KMS will be set. Optional, can be `true` or `false`, defaults to nothing (matches the bucket-level setting).
- `max_single_read_retries` — The maximum number of attempts during single read. Default value is `4`. Optional.
- `max_put_rps`, `max_put_burst`, `max_get_rps` and `max_get_burst` - Throttling settings (see description above) to use for specific endpoint instead of per query. Optional.

@@ -173,6 +176,9 @@ The following settings can be specified in configuration file for given endpoint
<!-- <no_sign_request>false</no_sign_request> -->
<!-- <header>Authorization: Bearer SOME-TOKEN</header> -->
<!-- <server_side_encryption_customer_key_base64>BASE64-ENCODED-KEY</server_side_encryption_customer_key_base64> -->
<!-- <server_side_encryption_kms_key_id>KMS_KEY_ID</server_side_encryption_kms_key_id> -->
<!-- <server_side_encryption_kms_encryption_context>KMS_ENCRYPTION_CONTEXT</server_side_encryption_kms_encryption_context> -->
<!-- <server_side_encryption_kms_bucket_key_enabled>true</server_side_encryption_kms_bucket_key_enabled> -->
<!-- <max_single_read_retries>4</max_single_read_retries> -->
</endpoint-name>
</s3>
@@ -148,7 +148,7 @@ Valid values:
- `all` (default) - a universal rule, used when `rule_type` is omitted.
- `plain` - a rule for plain metrics. The field `regexp` is processed as regular expression.
- `tagged` - a rule for tagged metrics (metrics are stored in DB in the format of `someName?tag1=value1&tag2=value2&tag3=value3`). Regular expression must be sorted by tags' names, first tag must be `__name__` if exists. The field `regexp` is processed as regular expression.
- `tag_list` - a rule for tagged matrics, a simple DSL for easier metric description in graphite format `someName;tag1=value1;tag2=value2`, `someName`, or `tag1=value1;tag2=value2`. The field `regexp` is translated into a `tagged` rule. The sorting by tags' names is unnecessary, ti will be done automatically. A tag's value (but not a name) can be set as a regular expression, e.g. `env=(dev|staging)`.
- `tag_list` - a rule for tagged metrics, a simple DSL for easier metric description in graphite format `someName;tag1=value1;tag2=value2`, `someName`, or `tag1=value1;tag2=value2`. The field `regexp` is translated into a `tagged` rule. The sorting by tags' names is unnecessary, ti will be done automatically. A tag's value (but not a name) can be set as a regular expression, e.g. `env=(dev|staging)`.
- `regexp` – A pattern for the metric name (a regular or DSL).
- `age` – The minimum age of the data in seconds.
- `precision`– How precisely to define the age of the data in seconds. Should be a divisor for 86400 (seconds in a day).
@@ -439,6 +439,50 @@ Syntax: `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions,
- `number_of_hash_functions` — The number of hash functions used in the Bloom filter.
- `random_seed` — The seed for Bloom filter hash functions.

Users can create [UDF](/docs/en/sql-reference/statements/create/function.md) to estimate the parameters set of `ngrambf_v1`. Query statements are as follows:

```sql
CREATE FUNCTION bfEstimateFunctions [ON CLUSTER cluster]
AS
(total_nubmer_of_all_grams, size_of_bloom_filter_in_bits) -> round((size_of_bloom_filter_in_bits / total_nubmer_of_all_grams) * log(2));

CREATE FUNCTION bfEstimateBmSize [ON CLUSTER cluster]
AS
(total_nubmer_of_all_grams, probability_of_false_positives) -> ceil((total_nubmer_of_all_grams * log(probability_of_false_positives)) / log(1 / pow(2, log(2))));

CREATE FUNCTION bfEstimateFalsePositive [ON CLUSTER cluster]
AS
(total_nubmer_of_all_grams, number_of_hash_functions, size_of_bloom_filter_in_bytes) -> pow(1 - exp(-number_of_hash_functions/ (size_of_bloom_filter_in_bytes / total_nubmer_of_all_grams)), number_of_hash_functions);

CREATE FUNCTION bfEstimateGramNumber [ON CLUSTER cluster]
AS
(number_of_hash_functions, probability_of_false_positives, size_of_bloom_filter_in_bytes) -> ceil(size_of_bloom_filter_in_bytes / (-number_of_hash_functions / log(1 - exp(log(probability_of_false_positives) / number_of_hash_functions))))

```
To use those functions,we need to specify two parameter at least.
For example, if there 4300 ngrams in the granule and we expect false positives to be less than 0.0001. The other parameters can be estimated by executing following queries:

```sql
--- estimate number of bits in the filter
SELECT bfEstimateBmSize(4300, 0.0001) / 8 as size_of_bloom_filter_in_bytes;

┌─size_of_bloom_filter_in_bytes─┐
│                         10304 │
└───────────────────────────────┘

--- estimate number of hash functions
SELECT bfEstimateFunctions(4300, bfEstimateBmSize(4300, 0.0001)) as number_of_hash_functions

┌─number_of_hash_functions─┐
│                       13 │
└──────────────────────────┘

```
Of course, you can also use those functions to estimate parameters by other conditions.
The functions refer to the content [here](https://hur.st/bloomfilter).
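As a cross-check (not part of the patch), the same two estimates can be reproduced outside ClickHouse with the standard Bloom-filter formulas the UDFs encode, m = ceil(n * ln(p) / ln(1 / 2^ln 2)) bits and k = round((m / n) * ln 2):

```cpp
#include <cmath>
#include <iostream>

int main()
{
    const double n = 4300.0;  // ngrams per granule
    const double p = 0.0001;  // target false-positive probability

    // bfEstimateBmSize: filter size in bits, reported in bytes below
    const double m_bits = std::ceil((n * std::log(p)) / std::log(1.0 / std::pow(2.0, std::log(2.0))));
    // bfEstimateFunctions: number of hash functions
    const double k = std::round((m_bits / n) * std::log(2.0));

    std::cout << m_bits / 8 << " bytes, " << k << " hash functions\n";
    // prints: 10304 bytes, 13 hash functions, matching the queries above
}
```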
#### Token Bloom Filter

The same as `ngrambf_v1`, but stores tokens instead of ngrams. Tokens are sequences separated by non-alphanumeric characters.
|
||||
SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0;
|
||||
```
|
||||
|
||||
Creating a table, where expired rows are aggregated. In result rows `x` contains the maximum value accross the grouped rows, `y` — the minimum value, and `d` — any occasional value from grouped rows.
|
||||
Creating a table, where expired rows are aggregated. In result rows `x` contains the maximum value across the grouped rows, `y` — the minimum value, and `d` — any occasional value from grouped rows.
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_for_aggregation
|
||||
@@ -1011,7 +1055,11 @@ Configuration markup:
    <access_key_id>your_access_key_id</access_key_id>
    <secret_access_key>your_secret_access_key</secret_access_key>
    <region></region>
    <header>Authorization: Bearer SOME-TOKEN</header>
    <server_side_encryption_customer_key_base64>your_base64_encoded_customer_key</server_side_encryption_customer_key_base64>
    <server_side_encryption_kms_key_id>your_kms_key_id</server_side_encryption_kms_key_id>
    <server_side_encryption_kms_encryption_context>your_kms_encryption_context</server_side_encryption_kms_encryption_context>
    <server_side_encryption_kms_bucket_key_enabled>true</server_side_encryption_kms_bucket_key_enabled>
    <proxy>
        <uri>http://proxy1</uri>
        <uri>http://proxy2</uri>
|
||||
- `min_bytes_for_seek` — Minimal number of bytes to use seek operation instead of sequential read. Default value is `1 Mb`.
|
||||
- `metadata_path` — Path on local FS to store metadata files for S3. Default value is `/var/lib/clickhouse/disks/<disk_name>/`.
|
||||
- `skip_access_check` — If true, disk access checks will not be performed on disk start-up. Default value is `false`.
|
||||
- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times.
|
||||
- `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set.
|
||||
- `server_side_encryption_kms_key_id` - If specified, required headers for accessing S3 objects with [SSE-KMS encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) will be set. If an empty string is specified, the AWS managed S3 key will be used. Optional.
|
||||
- `server_side_encryption_kms_encryption_context` - If specified alongside `server_side_encryption_kms_key_id`, the given encryption context header for SSE-KMS will be set. Optional.
|
||||
- `server_side_encryption_kms_bucket_key_enabled` - If specified alongside `server_side_encryption_kms_key_id`, the header to enable S3 bucket keys for SSE-KMS will be set. Optional, can be `true` or `false`, defaults to nothing (matches the bucket-level setting).
|
||||
- `s3_max_put_rps` — Maximum PUT requests per second rate before throttling. Default value is `0` (unlimited).
|
||||
- `s3_max_put_burst` — Max number of requests that can be issued simultaneously before hitting request per second limit. By default (`0` value) equals to `s3_max_put_rps`.
|
||||
- `s3_max_get_rps` — Maximum GET requests per second rate before throttling. Default value is `0` (unlimited).
|
||||
|
@ -90,15 +90,17 @@ SELECT * FROM mySecondReplacingMT FINAL;
|
||||
|
||||
### is_deleted
|
||||
|
||||
`is_deleted` — Name of the column with the type of row: `1` is a “deleted“ row, `0` is a “state“ row.
|
||||
`is_deleted` — Name of a column used during a merge to determine whether the data in this row represents the state or is to be deleted; `1` is a “deleted“ row, `0` is a “state“ row.
|
||||
|
||||
Column data type — `Int8`.
|
||||
Column data type — `UInt8`.
|
||||
|
||||
Can only be enabled when `ver` is used.
|
||||
The row is deleted when use the `OPTIMIZE ... FINAL CLEANUP`, or `OPTIMIZE ... FINAL` if the engine settings `clean_deleted_rows` has been set to `Always`.
|
||||
No matter the operation on the data, the version must be increased. If two inserted rows have the same version number, the last inserted one is the one kept.
|
||||
:::note
|
||||
`is_deleted` can only be enabled when `ver` is used.
|
||||
|
||||
The row is deleted when `OPTIMIZE ... FINAL CLEANUP` or `OPTIMIZE ... FINAL` is used, or if the engine setting `clean_deleted_rows` has been set to `Always`.
|
||||
|
||||
No matter the operation on the data, the version must be increased. If two inserted rows have the same version number, the last inserted row is the one kept.
|
||||
:::
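
For orientation, a minimal sketch of how `ver` and `is_deleted` fit together, reusing the `mySecondReplacingMT` table name from the surrounding example (the column names are illustrative):

```sql
-- the second ReplacingMergeTree parameter names the is_deleted column
CREATE TABLE mySecondReplacingMT
(
    key Int64,
    someCol String,
    eventTime DateTime,
    is_deleted UInt8
)
ENGINE = ReplacingMergeTree(eventTime, is_deleted)
ORDER BY key;

-- insert a row, then a newer version of the same key marked as deleted
INSERT INTO mySecondReplacingMT VALUES (1, 'first', '2020-01-01 00:00:00', 0);
INSERT INTO mySecondReplacingMT VALUES (1, 'first', '2020-01-01 01:00:00', 1);

-- force the merge that removes the deleted row
OPTIMIZE TABLE mySecondReplacingMT FINAL CLEANUP;
```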

## Query clauses


@ -242,7 +242,7 @@ When querying a `Distributed` table, `SELECT` queries are sent to all shards and

When the `max_parallel_replicas` option is enabled, query processing is parallelized across all replicas within a single shard. For more information, see the section [max_parallel_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas).

To learn more about how distibuted `in` and `global in` queries are processed, refer to [this](../../../sql-reference/operators/in.md#select-distributed-subqueries) documentation.
To learn more about how distributed `in` and `global in` queries are processed, refer to [this](../../../sql-reference/operators/in.md#select-distributed-subqueries) documentation.

## Virtual Columns {#virtual-columns}


@ -120,7 +120,7 @@ Some comments about the `sentiment` table:
- The `TabSeparated` format means our Python script needs to generate rows of raw data that contain tab-separated values
- The query selects two columns from `hackernews`. The Python script will need to parse out those column values from the incoming rows

Here is the defintion of `sentiment.py`:
Here is the definition of `sentiment.py`:

```python
#!/usr/local/bin/python3.9

@ -14,7 +14,7 @@ Syntax: `URL(URL [,Format] [,CompressionMethod])`

- The `Format` must be one that ClickHouse can use in `SELECT` queries and, if necessary, in `INSERTs`. For the full list of supported formats, see [Formats](../../../interfaces/formats.md#formats).

If this argument is not specified, ClickHouse detectes the format automatically from the suffix of the `URL` parameter. If the suffix of `URL` parameter does not match any supported formats, it fails to create table. For example, for engine expression `URL('http://localhost/test.json')`, `JSON` format is applied.
If this argument is not specified, ClickHouse detects the format automatically from the suffix of the `URL` parameter. If the suffix of `URL` parameter does not match any supported formats, it fails to create table. For example, for engine expression `URL('http://localhost/test.json')`, `JSON` format is applied.

- `CompressionMethod` indicates whether the HTTP body should be compressed. If the compression is enabled, the HTTP packets sent by the URL engine contain 'Content-Encoding' header to indicate which compression method is used.


@ -308,7 +308,7 @@ To build a Superset dashboard using the OpenCelliD dataset you should:
![Choose clickhouse connect as database type](@site/docs/en/getting-started/example-datasets/images/superset-choose-a-database.png)

:::note
If **ClickHouse Connect** is not one of your options, then you will need to install it. The comand is `pip install clickhouse-connect`, and more info is [available here](https://pypi.org/project/clickhouse-connect/).
If **ClickHouse Connect** is not one of your options, then you will need to install it. The command is `pip install clickhouse-connect`, and more info is [available here](https://pypi.org/project/clickhouse-connect/).
:::

#### Add your connection details:

@ -261,5 +261,5 @@ The results look like
```

:::note
As mentioned in the [GitHub repo](https://github.com/GoogleCloudPlatform/covid-19-open-data), the datset is no longer updated as of September 15, 2022.
As mentioned in the [GitHub repo](https://github.com/GoogleCloudPlatform/covid-19-open-data), the dataset is no longer updated as of September 15, 2022.
:::
@ -184,6 +184,15 @@ sudo yum install -y yum-utils
sudo yum-config-manager --add-repo https://packages.clickhouse.com/rpm/clickhouse.repo
```

For systems with `zypper` package manager (openSUSE, SLES):

``` bash
sudo zypper addrepo -r https://packages.clickhouse.com/rpm/clickhouse.repo -g
sudo zypper --gpg-auto-import-keys refresh clickhouse-stable
```

Later any `yum install` can be replaced by `zypper install`. To specify a particular version, add `-$VERSION` to the end of the package name, e.g. `clickhouse-client-22.2.2.22`.

#### Install ClickHouse server and client

```bash

@ -30,7 +30,7 @@ description: In order to effectively mitigate possible human errors, you should
```

:::note ALL
`ALL` is only applicable to the `RESTORE` command.
`ALL` is only applicable to the `RESTORE` command prior to version 23.4 of ClickHouse.
:::

## Background

@ -23,6 +23,6 @@ Additional cache types:
- [Dictionaries](../sql-reference/dictionaries/index.md) data cache.
- Schema inference cache.
- [Filesystem cache](storing-data.md) over S3, Azure, Local and other disks.
- [(Experimental) Query cache](query-cache.md).
- [Query cache](query-cache.md).

To drop one of the caches, use [SYSTEM DROP ... CACHE](../sql-reference/statements/system.md#drop-mark-cache) statements.

@ -1,10 +1,10 @@
---
slug: /en/operations/query-cache
sidebar_position: 65
sidebar_label: Query Cache [experimental]
sidebar_label: Query Cache
---

# Query Cache [experimental]
# Query Cache

The query cache allows to compute `SELECT` queries just once and to serve further executions of the same query directly from the cache.
Depending on the type of the queries, this can dramatically reduce latency and resource consumption of the ClickHouse server.
@ -29,21 +29,10 @@ Transactionally inconsistent caching is traditionally provided by client tools o
the same caching logic and configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side.
This reduces maintenance effort and avoids redundancy.

:::note
The query cache is an experimental feature that should not be used in production. There are known cases (e.g. in distributed query
processing) where wrong results are returned.
:::

## Configuration Settings and Usage

As long as the result cache is experimental it must be activated using the following configuration setting:

```sql
SET allow_experimental_query_cache = true;
```

Afterwards, setting [use_query_cache](settings/settings.md#use-query-cache) can be used to control whether a specific query or all queries
of the current session should utilize the query cache. For example, the first execution of query
Setting [use_query_cache](settings/settings.md#use-query-cache) can be used to control whether a specific query or all queries of the
current session should utilize the query cache. For example, the first execution of query

```sql
SELECT some_expensive_calculation(column_1, column_2)

@ -208,7 +208,7 @@ Default value: `3600` (1 hour).
## database_catalog_unused_dir_rm_timeout_sec {#database_catalog_unused_dir_rm_timeout_sec}

Parameter of a task that cleans up garbage from `store/` directory.
If some subdirectory is not used by clickhouse-server and it was previousely "hidden"
If some subdirectory is not used by clickhouse-server and it was previously "hidden"
(see [database_catalog_unused_dir_hide_timeout_sec](../../operations/server-configuration-parameters/settings.md#database_catalog_unused_dir_hide_timeout_sec))
and this directory was not modified for last
`database_catalog_unused_dir_rm_timeout_sec` seconds, the task will remove this directory.
@ -1045,7 +1045,7 @@ Default value: `0`.

## background_pool_size {#background_pool_size}

Sets the number of threads performing background merges and mutations for tables with MergeTree engines. This setting is also could be applied at server startup from the `default` profile configuration for backward compatibility at the ClickHouse server start. You can only increase the number of threads at runtime. To lower the number of threads you have to restart the server. By adjusting this setting, you manage CPU and disk load. Smaller pool size utilizes less CPU and disk resources, but background processes advance slower which might eventually impact query performance.
Sets the number of threads performing background merges and mutations for tables with MergeTree engines. This setting is also could be applied at server startup from the `default` profile configuration for backward compatibility at the ClickHouse server start. You can only increase the number of threads at runtime. To lower the number of threads you have to restart the server. By adjusting this setting, you manage CPU and disk load. Smaller pool size utilizes less CPU and disk resources, but background processes advance slower which might eventually impact query performance.

Before changing it, please also take a look at related MergeTree settings, such as [number_of_free_entries_in_pool_to_lower_max_size_of_merge](../../operations/settings/merge-tree-settings.md#number-of-free-entries-in-pool-to-lower-max-size-of-merge) and [number_of_free_entries_in_pool_to_execute_mutation](../../operations/settings/merge-tree-settings.md#number-of-free-entries-in-pool-to-execute-mutation).

@ -1063,8 +1063,8 @@ Default value: 16.

## background_merges_mutations_concurrency_ratio {#background_merges_mutations_concurrency_ratio}

Sets a ratio between the number of threads and the number of background merges and mutations that can be executed concurrently. For example if the ratio equals to 2 and
`background_pool_size` is set to 16 then ClickHouse can execute 32 background merges concurrently. This is possible, because background operation could be suspended and postponed. This is needed to give small merges more execution priority. You can only increase this ratio at runtime. To lower it you have to restart the server.
Sets a ratio between the number of threads and the number of background merges and mutations that can be executed concurrently. For example, if the ratio equals to 2 and
`background_pool_size` is set to 16 then ClickHouse can execute 32 background merges concurrently. This is possible, because background operations could be suspended and postponed. This is needed to give small merges more execution priority. You can only increase this ratio at runtime. To lower it you have to restart the server.
The same as for `background_pool_size` setting `background_merges_mutations_concurrency_ratio` could be applied from the `default` profile for backward compatibility.

Possible values:
@ -1079,6 +1079,33 @@ Default value: 2.
<background_merges_mutations_concurrency_ratio>3</background_merges_mutations_concurrency_ratio>
```

## merges_mutations_memory_usage_soft_limit {#merges_mutations_memory_usage_soft_limit}

Sets the limit on how much RAM is allowed to be used for performing merge and mutation operations.
Zero means unlimited.
If ClickHouse reaches this limit, it won't schedule any new background merge or mutation operations but will continue to execute already scheduled tasks.

Possible values:

- Any positive integer.

**Example**

```xml
<merges_mutations_memory_usage_soft_limit>0</merges_mutations_memory_usage_soft_limit>
```

## merges_mutations_memory_usage_to_ram_ratio {#merges_mutations_memory_usage_to_ram_ratio}

The default `merges_mutations_memory_usage_soft_limit` value is calculated as `memory_amount * merges_mutations_memory_usage_to_ram_ratio`.

Default value: `0.5`.

**See also**

- [max_memory_usage](../../operations/settings/query-complexity.md#settings_max_memory_usage)
- [merges_mutations_memory_usage_soft_limit](#merges_mutations_memory_usage_soft_limit)

## background_merges_mutations_scheduling_policy {#background_merges_mutations_scheduling_policy}

Algorithm used to select next merge or mutation to be executed by background thread pool. Policy may be changed at runtime without server restart.
@ -2068,3 +2095,20 @@ Possible values:
- Positive integer.

Default value: `10000`.

## display_secrets_in_show_and_select {#display_secrets_in_show_and_select}

Enables or disables showing secrets in `SHOW` and `SELECT` queries for tables, databases,
table functions, and dictionaries.

Users wishing to see secrets must also have
[`format_display_secrets_in_show_and_select` format setting](../settings/formats#format_display_secrets_in_show_and_select)
turned on and a
[`displaySecretsInShowAndSelect`](../../sql-reference/statements/grant#grant-display-secrets) privilege.

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 0.

@ -7,6 +7,23 @@ toc_max_heading_level: 2

# Format settings {#format-settings}

## format_display_secrets_in_show_and_select {#format_display_secrets_in_show_and_select}

Enables or disables showing secrets in `SHOW` and `SELECT` queries for tables, databases,
table functions, and dictionaries.

Users wishing to see secrets must also have
[`display_secrets_in_show_and_select` server setting](../server-configuration-parameters/settings#display_secrets_in_show_and_select)
turned on and a
[`displaySecretsInShowAndSelect`](../../sql-reference/statements/grant#grant-display-secrets) privilege.

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 0.
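
As a hedged sketch of how the two switches combine (`sensitive_table` is an assumed name; the server setting and the privilege referenced above must both be in place):

```sql
-- assumes display_secrets_in_show_and_select is enabled on the server
-- and the user holds the displaySecretsInShowAndSelect privilege
SHOW CREATE TABLE sensitive_table
SETTINGS format_display_secrets_in_show_and_select = 1;
```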

## input_format_skip_unknown_fields {#input_format_skip_unknown_fields}

Enables or disables skipping insertion of extra data.

@ -1027,7 +1027,7 @@ Timeout to close idle TCP connections after specified number of seconds.

Possible values:

- Positive integer (0 - close immediatly, after 0 seconds).
- Positive integer (0 - close immediately, after 0 seconds).

Default value: 3600.

@ -1125,6 +1125,12 @@ If unsuccessful, several attempts are made to connect to various replicas.

Default value: 1000.

## connect_timeout_with_failover_secure_ms

Connection timeout for selecting first healthy replica (for secure connections).

Default value: 1000.

## connection_pool_max_wait_ms {#connection-pool-max-wait-ms}

The wait time in milliseconds for a connection when the connection pool is full.
@ -1410,8 +1416,8 @@ and [enable_writes_to_query_cache](#enable-writes-to-query-cache) control in mor

Possible values:

- 0 - Yes
- 1 - No
- 0 - Disabled
- 1 - Enabled

Default value: `0`.

@ -1514,7 +1520,7 @@ Default value: `0`.

## query_cache_max_size_in_bytes {#query-cache-max-size-in-bytes}

The maximum amount of memory (in bytes) the current user may allocate in the query cache. 0 means unlimited.
The maximum amount of memory (in bytes) the current user may allocate in the [query cache](../query-cache.md). 0 means unlimited.

Possible values:

@ -1524,7 +1530,7 @@ Default value: 0 (no restriction).

## query_cache_max_entries {#query-cache-max-entries}

The maximum number of query results the current user may store in the query cache. 0 means unlimited.
The maximum number of query results the current user may store in the [query cache](../query-cache.md). 0 means unlimited.

Possible values:

@ -1630,7 +1636,7 @@ For not replicated tables see [non_replicated_deduplication_window](merge-tree-s

### async_insert {#async-insert}

Enables or disables asynchronous inserts. This makes sense only for insertion over HTTP protocol. Note that deduplication isn't working for such inserts.
Enables or disables asynchronous inserts. Note that deduplication isn't working for such inserts.

If enabled, the data is combined into batches before the insertion into tables, so it is possible to do small and frequent insertions into ClickHouse (up to 15000 queries per second) without buffer tables.

@ -1733,7 +1739,7 @@ Possible values:

Default value: 1.

By default, async inserts are inserted into replicated tables by the `INSERT` statement enabling [async_isnert](#async-insert) are deduplicated (see [Data Replication](../../engines/table-engines/mergetree-family/replication.md)).
By default, async inserts are inserted into replicated tables by the `INSERT` statement enabling [async_insert](#async-insert) are deduplicated (see [Data Replication](../../engines/table-engines/mergetree-family/replication.md)).
For the replicated tables, by default, only 10000 of the most recent inserts for each partition are deduplicated (see [replicated_deduplication_window_for_async_inserts](merge-tree-settings.md/#replicated-deduplication-window-async-inserts), [replicated_deduplication_window_seconds_for_async_inserts](merge-tree-settings.md/#replicated-deduplication-window-seconds-async-inserts)).
We recommend enabling the [async_block_ids_cache](merge-tree-settings.md/#use-async-block-ids-cache) to increase the efficiency of deduplication.
This function does not work for non-replicated tables.
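
A minimal sketch of turning asynchronous inserts on for a session; `replicated_table` is a hypothetical name, and `wait_for_async_insert` is the companion setting that makes the `INSERT` wait until the buffered batch is flushed:

```sql
-- enable asynchronous inserts for the current session
SET async_insert = 1, wait_for_async_insert = 1;

-- small, frequent inserts are now buffered server-side and flushed in batches
INSERT INTO replicated_table VALUES (1, 'a');
```
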
@ -1939,8 +1945,8 @@ Do not merge aggregation states from different servers for distributed query pro
Possible values:

- `0` — Disabled (final query processing is done on the initiator node).
- `1` - Do not merge aggregation states from different servers for distributed query processing (query completelly processed on the shard, initiator only proxy the data), can be used in case it is for certain that there are different keys on different shards.
- `2` - Same as `1` but applies `ORDER BY` and `LIMIT` (it is not possible when the query processed completelly on the remote node, like for `distributed_group_by_no_merge=1`) on the initiator (can be used for queries with `ORDER BY` and/or `LIMIT`).
- `1` - Do not merge aggregation states from different servers for distributed query processing (query completely processed on the shard, initiator only proxy the data), can be used in case it is for certain that there are different keys on different shards.
- `2` - Same as `1` but applies `ORDER BY` and `LIMIT` (it is not possible when the query processed completely on the remote node, like for `distributed_group_by_no_merge=1`) on the initiator (can be used for queries with `ORDER BY` and/or `LIMIT`).

Default value: `0`

@ -3562,7 +3568,7 @@ Default value: `1`.

If the setting is set to `0`, the table function does not make Nullable columns and inserts default values instead of NULL. This is also applicable for NULL values inside arrays.

## allow_experimental_projection_optimization {#allow-experimental-projection-optimization}
## optimize_use_projections {#optimize_use_projections}

Enables or disables [projection](../../engines/table-engines/mergetree-family/mergetree.md/#projections) optimization when processing `SELECT` queries.

@ -3575,7 +3581,7 @@ Default value: `1`.

## force_optimize_projection {#force-optimize-projection}

Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md/#projections) in `SELECT` queries, when projection optimization is enabled (see [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting).
Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md/#projections) in `SELECT` queries, when projection optimization is enabled (see [optimize_use_projections](#optimize_use_projections) setting).

Possible values:

@ -4110,7 +4116,7 @@ Enabled by default.

## use_hedged_requests {#use_hedged_requests}

Enables hadged requests logic for remote queries. It allows to establish many connections with different replicas for query.
Enables hedged requests logic for remote queries. It allows to establish many connections with different replicas for query.
New connection is enabled in case existent connection(s) with replica(s) were not established within `hedged_connection_timeout`
or no data was received within `receive_data_timeout`. Query uses the first connection which sends a non-empty progress packet (or data packet, if `allow_changing_replica_until_first_data_packet`);
other connections are cancelled. Queries with `max_parallel_replicas > 1` are supported.
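
A hypothetical session-level sketch (`distributed_table` is an assumed name; the timeouts mentioned above keep their defaults):

```sql
-- allow hedged connections and up to two replicas per shard for this query
SET use_hedged_requests = 1, max_parallel_replicas = 2;
SELECT count() FROM distributed_table;
```
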

@ -20,6 +20,9 @@ Columns:
- `errors_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of times this host failed to reach replica.
- `slowdowns_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of slowdowns that led to changing replica when establishing a connection with hedged requests.
- `estimated_recovery_time` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Seconds remaining until the replica error count is zeroed and it is considered to be back to normal.
- `database_shard_name` ([String](../../sql-reference/data-types/string.md)) — The name of the `Replicated` database shard (for clusters that belong to a `Replicated` database).
- `database_replica_name` ([String](../../sql-reference/data-types/string.md)) — The name of the `Replicated` database replica (for clusters that belong to a `Replicated` database).
- `is_active` ([Nullable(UInt8)](../../sql-reference/data-types/int-uint.md)) — The status of the `Replicated` database replica (for clusters that belong to a `Replicated` database): 1 means "replica is online", 0 means "replica is offline", `NULL` means "unknown".

**Example**

@ -47,6 +50,9 @@ default_database:
errors_count:            0
slowdowns_count:         0
estimated_recovery_time: 0
database_shard_name:
database_replica_name:
is_active:               NULL

Row 2:
──────
@ -63,6 +69,9 @@ default_database:
errors_count:            0
slowdowns_count:         0
estimated_recovery_time: 0
database_shard_name:
database_replica_name:
is_active:               NULL
```

**See Also**

@ -12,7 +12,7 @@ Columns:

- `storage` ([String](../../sql-reference/data-types/string.md)) — Path to the storage of users. Configured in the `access_control_path` parameter.

- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)('no_password' = 0,'plaintext_password' = 1, 'sha256_password' = 2, 'double_sha1_password' = 3, 'ldap' = 4, 'kerberos' = 5, 'ssl_certificate' = 6)) — Shows the authentication type. There are multiple ways of user identification: with no password, with plain text password, with [SHA256](https://ru.wikipedia.org/wiki/SHA-2)-encoded password or with [double SHA-1](https://ru.wikipedia.org/wiki/SHA-1)-encoded password.
- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)('no_password' = 0, 'plaintext_password' = 1, 'sha256_password' = 2, 'double_sha1_password' = 3, 'ldap' = 4, 'kerberos' = 5, 'ssl_certificate' = 6, 'bcrypt_password' = 7)) — Shows the authentication type. There are multiple ways of user identification: with no password, with plain text password, with [SHA256](https://en.wikipedia.org/wiki/SHA-2)-encoded password, with [double SHA-1](https://en.wikipedia.org/wiki/SHA-1)-encoded password or with [bcrypt](https://en.wikipedia.org/wiki/Bcrypt)-encoded password.

- `auth_params` ([String](../../sql-reference/data-types/string.md)) — Authentication parameters in the JSON format depending on the `auth_type`.


@ -41,9 +41,9 @@ If the file is sitting on the same machine as `clickhouse-local`, use the `file`
```

ClickHouse knows the file uses a tab-separated format from the filename extension. If you need to explicitly specify the format, simply add one of the [many ClickHouse input formats](../../interfaces/formats.md):
```bash
./clickhouse local -q "SELECT * FROM file('reviews.tsv', 'TabSeparated')"
```
```bash
./clickhouse local -q "SELECT * FROM file('reviews.tsv', 'TabSeparated')"
```

The `file` table function creates a table, and you can use `DESCRIBE` to see the inferred schema:

@ -183,7 +183,7 @@ Arguments:
- `-S`, `--structure` — table structure for input data.
- `--input-format` — input format, `TSV` by default.
- `-f`, `--file` — path to data, `stdin` by default.
- `-q`, `--query` — queries to execute with `;` as delimeter. You must specify either `query` or `queries-file` option.
- `-q`, `--query` — queries to execute with `;` as delimiter. You must specify either `query` or `queries-file` option.
- `--queries-file` - file path with queries to execute. You must specify either `query` or `queries-file` option.
- `-N`, `--table` — table name where to put output data, `table` by default.
- `--format`, `--output-format` — output format, `TSV` by default.
@ -0,0 +1,55 @@
---
slug: /en/sql-reference/aggregate-functions/reference/first_value
sidebar_position: 7
---

# first_value

Selects the first encountered value, similar to `any`, but could accept NULL.

## examples

```sql
insert into test_data (a,b) values (1,null), (2,3), (4, 5), (6,null)
```

### example1
The NULL value is ignored by default.
```sql
select first_value(b) from test_data
```

```text
┌─first_value_ignore_nulls(b)─┐
│                           3 │
└─────────────────────────────┘

```

### example2
The NULL value is ignored.
```sql
select first_value(b) ignore nulls from test_data
```

```text
┌─first_value_ignore_nulls(b)─┐
│                           3 │
└─────────────────────────────┘

```

### example3
The NULL value is accepted.
```sql
select first_value(b) respect nulls from test_data
```

```text

┌─first_value_respect_nulls(b)─┐
│                         ᴺᵁᴸᴸ │
└──────────────────────────────┘
```

@ -13,11 +13,11 @@ groupBitAnd(expr)

**Arguments**

`expr` – An expression that results in `UInt*` type.
`expr` – An expression that results in `UInt*` or `Int*` type.

**Return value**

Value of the `UInt*` type.
Value of the `UInt*` or `Int*` type.

**Example**


@ -13,11 +13,11 @@ groupBitOr(expr)

**Arguments**

`expr` – An expression that results in `UInt*` type.
`expr` – An expression that results in `UInt*` or `Int*` type.

**Returned value**

Value of the `UInt*` type.
Value of the `UInt*` or `Int*` type.

**Example**


@ -13,11 +13,11 @@ groupBitXor(expr)

**Arguments**

`expr` – An expression that results in `UInt*` type.
`expr` – An expression that results in `UInt*` or `Int*` type.

**Return value**

Value of the `UInt*` type.
Value of the `UInt*` or `Int*` type.

**Example**


@ -26,6 +26,8 @@ ClickHouse-specific aggregate functions:

- [anyHeavy](../../../sql-reference/aggregate-functions/reference/anyheavy.md)
- [anyLast](../../../sql-reference/aggregate-functions/reference/anylast.md)
- [first_value](../../../sql-reference/aggregate-functions/reference/first_value.md)
- [last_value](../../../sql-reference/aggregate-functions/reference/last_value.md)
- [argMin](../../../sql-reference/aggregate-functions/reference/argmin.md)
- [argMax](../../../sql-reference/aggregate-functions/reference/argmax.md)
- [avgWeighted](../../../sql-reference/aggregate-functions/reference/avgweighted.md)
@ -0,0 +1,53 @@
---
slug: /en/sql-reference/aggregate-functions/reference/last_value
sidebar_position: 8
---

# last_value

Selects the last encountered value, similar to `anyLast`, but could accept NULL.


## examples

```sql
insert into test_data (a,b) values (1,null), (2,3), (4, 5), (6,null)
```

### example1
The NULL value is ignored by default.
```sql
select last_value(b) from test_data
```

```text
┌─last_value_ignore_nulls(b)─┐
│                          5 │
└────────────────────────────┘
```

### example2
The NULL value is ignored.
```sql
select last_value(b) ignore nulls from test_data
```

```text
┌─last_value_ignore_nulls(b)─┐
│                          5 │
└────────────────────────────┘
```

### example3
The NULL value is accepted.
```sql
select last_value(b) respect nulls from test_data
```

```text
┌─last_value_respect_nulls(b)─┐
│                        ᴺᵁᴸᴸ │
└─────────────────────────────┘
```


@ -23,7 +23,7 @@ Alias: `medianDeterministic`.

- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
- `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
- `determinator` — Number whose hash is used instead of a random number generator in the reservoir sampling algorithm to make the result of sampling deterministic. As a determinator you can use any deterministic positive number, for example, a user id or an event id. If the same determinator value occures too often, the function works incorrectly.
- `determinator` — Number whose hash is used instead of a random number generator in the reservoir sampling algorithm to make the result of sampling deterministic. As a determinator you can use any deterministic positive number, for example, a user id or an event id. If the same determinator value occurs too often, the function works incorrectly.

**Returned value**

@ -8,7 +8,7 @@ sidebar_label: Nullable

Allows to store special marker ([NULL](../../sql-reference/syntax.md)) that denotes “missing value” alongside normal values allowed by `TypeName`. For example, a `Nullable(Int8)` type column can store `Int8` type values, and the rows that do not have a value will store `NULL`.

For a `TypeName`, you can’t use composite data types [Array](../../sql-reference/data-types/array.md) and [Tuple](../../sql-reference/data-types/tuple.md). Composite data types can contain `Nullable` type values, such as `Array(Nullable(Int8))`.
For a `TypeName`, you can’t use composite data types [Array](../../sql-reference/data-types/array.md), [Map](../../sql-reference/data-types/map.md) and [Tuple](../../sql-reference/data-types/tuple.md). Composite data types can contain `Nullable` type values, such as `Array(Nullable(Int8))`.

A `Nullable` type field can’t be included in table indexes.
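
A minimal sketch (the table name is hypothetical):

```sql
-- a Nullable column stores normal values and the NULL marker side by side
CREATE TABLE nullable_demo (x Nullable(Int8)) ENGINE = Memory;
INSERT INTO nullable_demo VALUES (1), (NULL);
SELECT x, isNull(x) FROM nullable_demo;
```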

@ -8,10 +8,6 @@ sidebar_label: Interval

The family of data types representing time and date intervals. The resulting types of the [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) operator.

:::note
`Interval` data type values can’t be stored in tables.
:::

Structure:

- Time interval as an unsigned integer value.
@ -19,6 +15,9 @@ Structure:

Supported interval types:

- `NANOSECOND`
- `MICROSECOND`
- `MILLISECOND`
- `SECOND`
- `MINUTE`
- `HOUR`

@ -949,7 +949,7 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher
...
```

For `Cache`, `ComplexKeyCache`, `SSDCache`, and `SSDComplexKeyCache` dictionaries both synchronious and asynchronious updates are supported.
For `Cache`, `ComplexKeyCache`, `SSDCache`, and `SSDComplexKeyCache` dictionaries both synchronous and asynchronous updates are supported.

It is also possible for `Flat`, `Hashed`, `ComplexKeyHashed` dictionaries to only request data that was changed after the previous update. If `update_field` is specified as part of the dictionary source configuration, value of the previous update time in seconds will be added to the data request. Depends on source type (Executable, HTTP, MySQL, PostgreSQL, ClickHouse, or ODBC) different logic will be applied to `update_field` before request data from an external source.


@ -78,6 +78,22 @@ GROUP BY
│           1 │ Bobruisk │ Firefox │
└─────────────┴──────────┴─────────┘
```
### Important note!
Using multiple `arrayJoin` with the same expression may not produce expected results due to optimizations.
In such cases, consider modifying the repeated array expression with extra operations that do not affect the join result - e.g. `arrayJoin(arraySort(arr))`, `arrayJoin(arrayConcat(arr, []))`

Example:
```sql
SELECT
    arrayJoin(dice) as first_throw,
    /* arrayJoin(dice) as second_throw */ -- is technically correct, but will annihilate result set
    arrayJoin(arrayConcat(dice, [])) as second_throw -- intentionally changed expression to force re-evaluation
FROM (
    SELECT [1, 2, 3, 4, 5, 6] as dice
);
```


Note the [ARRAY JOIN](../statements/select/array-join.md) syntax in the SELECT query, which provides broader possibilities.
`ARRAY JOIN` allows you to convert multiple arrays with the same number of elements at a time.
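
For contrast, a minimal `ARRAY JOIN` sketch unfolding two parallel arrays in one pass (the column names are illustrative):

```sql
SELECT throw_id, face
FROM (SELECT [1, 2, 3] AS throw_ids, [6, 2, 4] AS faces)
ARRAY JOIN throw_ids AS throw_id, faces AS face;
```
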
@ -314,7 +314,7 @@ SELECT bitTestAny(number, index1, index2, index3, index4, ...)

**Returned values**

Returns result of logical disjuction.
Returns result of logical disjunction.

Type: `UInt8`.


@ -256,7 +256,7 @@ Result:

## bitmapCardinality

Rerturn the cardinality of a bitmap.
Returns the cardinality of a bitmap.

**Syntax**


@ -14,7 +14,7 @@ The following types can be compared:
- dates
- dates with times

Only values within the same group can be compared (e.g. UInt16 and UInt64) but not accross groups (e.g. UInt16 and DateTime).
Only values within the same group can be compared (e.g. UInt16 and UInt64) but not across groups (e.g. UInt16 and DateTime).

Strings are compared byte-by-byte. Note that this may lead to unexpected results if one of the strings contains UTF-8 encoded multi-byte characters.


@ -26,19 +26,27 @@ SELECT

## makeDate

Creates a [Date](../../sql-reference/data-types/date.md) from a year, month and day argument.
Creates a [Date](../../sql-reference/data-types/date.md)
- from a year, month and day argument, or
- from a year and day of year argument.

**Syntax**

``` sql
makeDate(year, month, day)
makeDate(year, month, day);
makeDate(year, day_of_year);
```

Alias:
- `MAKEDATE(year, month, day);`
- `MAKEDATE(year, day_of_year);`

**Arguments**

- `year` — Year. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `month` — Month. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `day` — Day. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `day_of_year` — Day of the year. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).

**Returned value**

@ -48,6 +56,8 @@ Type: [Date](../../sql-reference/data-types/date.md).

**Example**

Create a Date from a year, month and day:

``` sql
SELECT makeDate(2023, 2, 28) AS Date;
```
@ -60,6 +70,19 @@ Result:
└────────────┘
```

Create a Date from a year and day of year argument:

``` sql
SELECT makeDate(2023, 42) AS Date;
```

Result:

``` text
┌───────date─┐
│ 2023-02-11 │
└────────────┘
```
## makeDate32

Like [makeDate](#makeDate) but produces a [Date32](../../sql-reference/data-types/date32.md).
@ -108,6 +131,12 @@ Result:

Like [makeDateTime](#makedatetime) but produces a [DateTime64](../../sql-reference/data-types/datetime64.md).

**Syntax**

``` sql
makeDateTime64(year, month, day, hour, minute, second[, fraction[, precision[, timezone]]])
```
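
To make the added syntax concrete, an illustrative call (the argument values are arbitrary; the last two parameters set the second fraction and its precision):

```sql
SELECT makeDateTime64(2023, 5, 15, 10, 30, 45, 779, 5) AS dt64;
```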

## timeZone

Returns the timezone of the server.
@ -289,7 +318,7 @@ Aliases: `DAYOFMONTH`, `DAY`.

Converts a date or date with time to the number of the day in the week as UInt8 value.

The two-argument form of `toDayOfWeek()` enables you to specify whether the week starts on Monday or Sunday, and whether the return value should be in the range from 0 to 6 or 1 to 7. If the mode argument is ommited, the default mode is 0. The time zone of the date can be specified as the third argument.
The two-argument form of `toDayOfWeek()` enables you to specify whether the week starts on Monday or Sunday, and whether the return value should be in the range from 0 to 6 or 1 to 7. If the mode argument is omitted, the default mode is 0. The time zone of the date can be specified as the third argument.

| Mode | First day of week | Range                                          |
|------|-------------------|------------------------------------------------|

@ -84,7 +84,7 @@ Result:

## s2GetNeighbors

Returns S2 neighbor indixes corresponding to the provided [S2](#s2index). Each cell in the S2 system is a quadrilateral bounded by four geodesics. So, each cell has 4 neighbors.
Returns S2 neighbor indexes corresponding to the provided [S2](#s2index). Each cell in the S2 system is a quadrilateral bounded by four geodesics. So, each cell has 4 neighbors.

**Syntax**

@ -206,7 +206,7 @@ s2CapUnion(center1, radius1, center2, radius2)

**Arguments**

- `center1`, `center2` — S2 point indixes corresponding to the two input caps. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `center1`, `center2` — S2 point indexes corresponding to the two input caps. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `radius1`, `radius2` — Radius of the two input caps in degrees. [Float64](../../../sql-reference/data-types/float.md).

**Returned values**

@ -64,7 +64,7 @@ This is a cryptographic hash function. It works at least three times faster than
The function [interprets](/docs/en/sql-reference/functions/type-conversion-functions.md/#type_conversion_functions-reinterpretAsString) all the input parameters as strings and calculates the hash value for each of them. It then combines the hashes by the following algorithm:

1. The first and the second hash value are concatenated to an array which is hashed.
2. The previously calculated hash value and the hash of the third input paramter are hashed in a similar way.
2. The previously calculated hash value and the hash of the third input parameter are hashed in a similar way.
3. This calculation is repeated for all remaining hash values of the original input.
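
A small sketch of the multi-argument form, assuming this section describes `sipHash64` (any hash of this family combines its arguments the same way):

```sql
-- the three arguments are hashed individually, then combined pairwise as described
SELECT sipHash64(array('e', 'x', 'a'), 'mple', 10) AS combined_hash;
```
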

**Arguments**

@ -84,7 +84,7 @@ Alias: The [OR Operator](../../sql-reference/operators/index.md#logical-or-opera

**Returned value**

- `1`, if at least one argument evalutes to `true`,
- `1`, if at least one argument evaluates to `true`,
- `0`, if all arguments evaluate to `false`,
- `NULL`, if all arguments evaluate to `false` and at least one argument is `NULL`.

@ -173,7 +173,7 @@ xor(val1, val2...)
**Returned value**

- `1`, for two values: if one of the values evaluates to `false` and other does not,
- `0`, for two values: if both values evalute to `false` or to both `true`,
- `0`, for two values: if both values evaluate to `false` or to both `true`,
- `NULL`, if at least one of the inputs is `NULL`

Type: [UInt8](../../sql-reference/data-types/int-uint.md) or [Nullable](../../sql-reference/data-types/nullable.md)([UInt8](../../sql-reference/data-types/int-uint.md)).

@ -187,7 +187,7 @@ detectLanguageMixed('text_to_be_analyzed')

**Returned value**

- `Map(String, Float32)`: The keys are 2-letter ISO codes and the values are a perentage of text found for that language
- `Map(String, Float32)`: The keys are 2-letter ISO codes and the values are a percentage of text found for that language


**Examples**

@ -306,7 +306,7 @@ You can use this function in table engine parameters in a CREATE TABLE query whe

## currentUser()

Returns the login of current user. Login of user, that initiated query, will be returned in case distibuted query.
Returns the login of current user. Login of user, that initiated query, will be returned in case of distributed query.

``` sql
SELECT currentUser();
@ -317,7 +317,7 @@ Alias: `user()`, `USER()`.
**Returned values**

- Login of current user.
- Login of user that initiated query in case of disributed query.
- Login of user that initiated query in case of distributed query.

Type: `String`.


@ -19,13 +19,13 @@ The random numbers are generated by non-cryptographic algorithms.

## rand, rand32

Returns a random UInt32 number, evenly distributed accross the range of all possible UInt32 numbers.
Returns a random UInt32 number, evenly distributed across the range of all possible UInt32 numbers.

Uses a linear congruential generator.

## rand64

Returns a random UInt64 number, evenly distributed accross the range of all possible UInt64 numbers.
Returns a random UInt64 number, evenly distributed across the range of all possible UInt64 numbers.

Uses a linear congruential generator.


@ -310,7 +310,7 @@ SELECT toValidUTF8('\x61\xF0\x80\x80\x80b');

## repeat

Conatenates a string as many times with itself as specified.
Concatenates a string as many times with itself as specified.

**Syntax**

@ -1215,96 +1215,3 @@ Result:
│ A240             │
└──────────────────┘
```

## extractKeyValuePairs

Extracts key-value pairs from any string. The string does not need to be 100% structured in a key value pair format;

It can contain noise (e.g. log files). The key-value pair format to be interpreted should be specified via function arguments.

A key-value pair consists of a key followed by a `key_value_delimiter` and a value. Quoted keys and values are also supported. Key value pairs must be separated by pair delimiters.

**Syntax**
``` sql
extractKeyValuePairs(data, [key_value_delimiter], [pair_delimiter], [quoting_character])
```

**Arguments**
- `data` - String to extract key-value pairs from. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `key_value_delimiter` - Character to be used as delimiter between the key and the value. Defaults to `:`. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `pair_delimiters` - Set of character to be used as delimiters between pairs. Defaults to `\space`, `,` and `;`. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `quoting_character` - Character to be used as quoting character. Defaults to `"`. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).

**Returned values**
- The extracted key-value pairs in a Map(String, String).

**Examples**

Query:

**Simple case**
``` sql
arthur :) select extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') as kv

SELECT extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') as kv

Query id: f9e0ca6f-3178-4ee2-aa2c-a5517abb9cee

┌─kv──────────────────────────────────────────────────────────────────────┐
│ {'name':'neymar','age':'31','team':'psg','nationality':'brazil'}        │
└─────────────────────────────────────────────────────────────────────────┘
```

**Single quote as quoting character**
``` sql
arthur :) select extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') as kv

SELECT extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') as kv

Query id: 0e22bf6b-9844-414a-99dc-32bf647abd5e

┌─kv───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ {'name':'neymar','age':'31','team':'psg','nationality':'brazil','last_key':'last_value'}                                  │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

**Escape sequences without escape sequences support**
``` sql
arthur :) select extractKeyValuePairs('age:a\\x0A\\n\\0') as kv

SELECT extractKeyValuePairs('age:a\\x0A\\n\\0') AS kv

Query id: e9fd26ee-b41f-4a11-b17f-25af6fd5d356

┌─kv─────────────────────┐
│ {'age':'a\\x0A\\n\\0'} │
└────────────────────────┘
```

## extractKeyValuePairsWithEscaping

Same as `extractKeyValuePairs` but with escaping support.

Escape sequences supported: `\x`, `\N`, `\a`, `\b`, `\e`, `\f`, `\n`, `\r`, `\t`, `\v` and `\0`.
Non standard escape sequences are returned as it is (including the backslash) unless they are one of the following:
`\\`, `'`, `"`, `backtick`, `/`, `=` or ASCII control characters (c <= 31).

This function will satisfy the use case where pre-escaping and post-escaping are not suitable. For instance, consider the following
input string: `a: "aaaa\"bbb"`. The expected output is: `a: aaaa\"bbbb`.
- Pre-escaping: Pre-escaping it will output: `a: "aaaa"bbb"` and `extractKeyValuePairs` will then output: `a: aaaa`
- Post-escaping: `extractKeyValuePairs` will output `a: aaaa\` and post-escaping will keep it as it is.

Leading escape sequences will be skipped in keys and will be considered invalid for values.

**Escape sequences with escape sequence support turned on**
``` sql
arthur :) select extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') as kv

SELECT extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') AS kv

Query id: 44c114f0-5658-4c75-ab87-4574de3a1645

┌─kv────────────────┐
│ {'age':'a\n\n\0'} │
└───────────────────┘
```

@ -133,7 +133,7 @@ Tuples should have the same type of the elements.

- The Hamming distance.

Type: The result type is calculed the same way it is for [Arithmetic functions](../../sql-reference/functions/arithmetic-functions.md), based on the number of elements in the input tuples.
Type: The result type is calculated the same way it is for [Arithmetic functions](../../sql-reference/functions/arithmetic-functions.md), based on the number of elements in the input tuples.

``` sql
SELECT
@ -223,7 +223,7 @@ Result:
└───────────────────────────────────────┘
```

It is possible to transform colums to rows using this function:
It is possible to transform columns to rows using this function:

``` sql
CREATE TABLE tupletest (col Tuple(CPU Float64, Memory Float64, Disk Float64)) ENGINE = Memory;

@ -109,6 +109,108 @@ SELECT mapFromArrays([1, 2, 3], map('a', 1, 'b', 2, 'c', 3))
└───────────────────────────────────────────────────────┘
```

## extractKeyValuePairs

Extracts key-value pairs, i.e. a [Map(String, String)](../../sql-reference/data-types/map.md), from a string. Parsing is robust towards noise (e.g. log files).

A key-value pair consists of a key, followed by a `key_value_delimiter` and a value. Key value pairs must be separated by `pair_delimiter`. Quoted keys and values are also supported.

**Syntax**

``` sql
extractKeyValuePairs(data[, key_value_delimiter[, pair_delimiter[, quoting_character]]])
```

Alias:
- `str_to_map`
- `mapFromString`

**Arguments**

- `data` - String to extract key-value pairs from. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `key_value_delimiter` - Character to be used as delimiter between the key and the value. Defaults to `:`. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `pair_delimiters` - Set of characters to be used as delimiters between pairs. Defaults to ` `, `,` and `;`. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `quoting_character` - Character to be used as quoting character. Defaults to `"`. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).

**Returned values**

- A [Map(String, String)](../../sql-reference/data-types/map.md) of key-value pairs.

**Examples**

Simple case:

``` sql
SELECT extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') as kv
```

Result:

``` text
┌─kv──────────────────────────────────────────────────────────────────────┐
│ {'name':'neymar','age':'31','team':'psg','nationality':'brazil'}        │
└─────────────────────────────────────────────────────────────────────────┘
```

Single quote as quoting character:

``` sql
SELECT extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') as kv
```

Result:

``` text
┌─kv───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ {'name':'neymar','age':'31','team':'psg','nationality':'brazil','last_key':'last_value'}                                  │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

Escape sequences without escape sequence support:

``` sql
SELECT extractKeyValuePairs('age:a\\x0A\\n\\0') AS kv
```

Result:

``` text
┌─kv─────────────────────┐
│ {'age':'a\\x0A\\n\\0'} │
└────────────────────────┘
```

## extractKeyValuePairsWithEscaping

Same as `extractKeyValuePairs` but with escaping support.

Supported escape sequences: `\x`, `\N`, `\a`, `\b`, `\e`, `\f`, `\n`, `\r`, `\t`, `\v` and `\0`.
Non standard escape sequences are returned as-is (including the backslash) unless they are one of the following:
`\\`, `'`, `"`, `backtick`, `/`, `=` or ASCII control characters (c <= 31).

This function will satisfy the use case where pre-escaping and post-escaping are not suitable. For instance, consider the following
input string: `a: "aaaa\"bbb"`. The expected output is: `a: aaaa\"bbbb`.
- Pre-escaping: Pre-escaping it will output: `a: "aaaa"bbb"` and `extractKeyValuePairs` will then output: `a: aaaa`
- Post-escaping: `extractKeyValuePairs` will output `a: aaaa\` and post-escaping will keep it as it is.

Leading escape sequences will be skipped in keys and will be considered invalid for values.

**Examples**

Escape sequences with escape sequence support turned on:

``` sql
SELECT extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') AS kv
```

Result:

``` text
┌─kv────────────────┐
│ {'age':'a\n\n\0'} │
└───────────────────┘
```
|
||||
|
||||
## mapAdd

Collect all the keys and sum corresponding values.
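
A minimal sketch of the behaviour, assuming two `Map` arguments with one overlapping key (the map literals are illustrative):

```sql
SELECT mapAdd(map('a', 1, 'b', 2), map('b', 3, 'c', 4)) AS res;
-- res = {'a':1,'b':5,'c':4}: the shared key 'b' has its values summed
```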

@ -449,7 +551,7 @@ mapExtractKeyLike(map, pattern)

**Returned value**

- A map containing the elements whose key matches the specified pattern. If no elements match the pattern, an empty map is returned.

**Example**
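
The example body is cut off by this diff; a hedged sketch of a call (the map literal and the pattern are illustrative):

```sql
SELECT mapExtractKeyLike(map('abc', 1, 'abe', 2, 'cde', 3), 'a%') AS res;
-- res = {'abc':1,'abe':2}: only entries whose key matches the LIKE pattern 'a%' are kept
```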

@ -103,7 +103,11 @@ ALTER TABLE table2 [ON CLUSTER cluster] ATTACH PARTITION partition_expr FROM tab
```

This query copies the data partition from `table1` to `table2`.

Note that:

- Data will be deleted neither from `table1` nor from `table2`.
- `table1` may be a temporary table.
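
For example, a hedged sketch (the partition value `202305` is a hypothetical partition ID; both tables must meet the conditions listed below):

```sql
-- copy partition 202305 from table1 into table2; table1 keeps its data
ALTER TABLE table2 ATTACH PARTITION 202305 FROM table1;
```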

For the query to run successfully, the following conditions must be met:

@ -117,7 +121,12 @@ For the query to run successfully, the following conditions must be met:

ALTER TABLE table2 [ON CLUSTER cluster] REPLACE PARTITION partition_expr FROM table1
```

This query copies the data partition from `table1` to `table2` and replaces the existing partition in `table2`.

Note that:

- Data won’t be deleted from `table1`.
- `table1` may be a temporary table.
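
For example, a hedged sketch (again with a hypothetical partition value `202305`):

```sql
-- replace partition 202305 of table2 with the partition of the same ID from table1
ALTER TABLE table2 REPLACE PARTITION 202305 FROM table1;
```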

For the query to run successfully, the following conditions must be met:

@ -116,7 +116,7 @@ The column description can specify a default value expression in the form of `DE

The expression `expr` is optional. If it is omitted, the column type must be specified explicitly and the default value will be `0` for numeric columns, `''` (the empty string) for string columns, `[]` (the empty array) for array columns, `1970-01-01` for date columns, or `NULL` for nullable columns.

The column type of a default value column can be omitted in which case it is inferred from `expr`'s type. For example the type of column `EventDate DEFAULT toDate(EventTime)` will be `Date`.

If both a data type and a default value expression are specified, an implicit type casting function is inserted which converts the expression to the specified type. Example: `Hits UInt32 DEFAULT 0` is internally represented as `Hits UInt32 DEFAULT toUInt32(0)`.
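
A small sketch combining both cases (the table and column names are illustrative):

```sql
CREATE TABLE events
(
    EventTime DateTime,
    EventDate DEFAULT toDate(EventTime), -- type omitted: inferred as Date from the expression
    Hits UInt32 DEFAULT 0                -- type given: internally stored as toUInt32(0)
)
ENGINE = MergeTree
ORDER BY EventTime;
```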

@ -32,9 +32,12 @@ There are multiple ways of user identification:

- `IDENTIFIED WITH sha256_hash BY 'hash'` or `IDENTIFIED WITH sha256_hash BY 'hash' SALT 'salt'`
- `IDENTIFIED WITH double_sha1_password BY 'qwerty'`
- `IDENTIFIED WITH double_sha1_hash BY 'hash'`
- `IDENTIFIED WITH bcrypt_password BY 'qwerty'`
- `IDENTIFIED WITH bcrypt_hash BY 'hash'`
- `IDENTIFIED WITH ldap SERVER 'server_name'`
- `IDENTIFIED WITH kerberos` or `IDENTIFIED WITH kerberos REALM 'realm'`
- `IDENTIFIED WITH ssl_certificate CN 'mysite.com:user'`
- `IDENTIFIED BY 'qwerty'`

## Examples

@ -54,21 +57,12 @@ There are multiple ways of user identification:

The password is stored in a SQL text file in `/var/lib/clickhouse/access`, so it's not a good idea to use `plaintext_password`. Try `sha256_password` instead, as demonstrated next...
:::

3. The most common option is to use a password that is hashed using SHA-256. ClickHouse will hash the password for you when you specify `IDENTIFIED WITH sha256_password`. For example:

```sql
CREATE USER name3 IDENTIFIED WITH sha256_password BY 'my_password'
```

Notice ClickHouse generates and runs the following command for you:

```response
CREATE USER name3
IDENTIFIED WITH sha256_hash
BY '8B3404953FCAA509540617F082DB13B3E0734F90FF6365C19300CC6A6EA818D6'
SALT 'D6489D8B5692D82FF944EA6415785A8A8A1AF33825456AFC554487725A74A609'
```

The `name3` user can now log in using `my_password`, but the password is stored as the hashed value above. The following SQL file was created in `/var/lib/clickhouse/access` and gets executed at server startup:

```bash

@ -92,10 +86,24 @@ There are multiple ways of user identification:

CREATE USER name4 IDENTIFIED WITH double_sha1_hash BY 'CCD3A959D6A004B9C3807B728BC2E55B67E10518'
```

5. The `bcrypt_password` is the most secure option for storing passwords. It uses the [bcrypt](https://en.wikipedia.org/wiki/Bcrypt) algorithm, which is resilient against brute-force attacks even if the password hash is compromised.

```sql
CREATE USER name5 IDENTIFIED WITH bcrypt_password BY 'my_password'
```

The length of the password is limited to 72 characters with this method. The bcrypt work factor parameter, which defines the amount of computation and time needed to compute the hash and verify the password, can be modified in the server configuration:

```xml
<bcrypt_workfactor>12</bcrypt_workfactor>
```

The work factor must be between 4 and 31, with a default value of 12.

6. The type of the password can also be omitted:

```sql
CREATE USER name6 IDENTIFIED BY 'my_password'
```

In this case, ClickHouse will use the default password type specified in the server configuration:

@ -200,6 +200,7 @@ Hierarchy of privileges:

- `HDFS`
- `S3`
- [dictGet](#grant-dictget)
- [displaySecretsInShowAndSelect](#grant-display-secrets)

Examples of how this hierarchy is treated:

@ -485,6 +486,15 @@ Privilege level: `DICTIONARY`.

- `GRANT dictGet ON mydb.mydictionary TO john`
- `GRANT dictGet ON mydictionary TO john`

### displaySecretsInShowAndSelect {#grant-display-secrets}

Allows a user to view secrets in `SHOW` and `SELECT` queries if both the
[`display_secrets_in_show_and_select` server setting](../../operations/server-configuration-parameters/settings#display_secrets_in_show_and_select)
and the
[`format_display_secrets_in_show_and_select` format setting](../../operations/settings/formats#format_display_secrets_in_show_and_select)
are turned on.
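
A hedged sketch of granting it (the user name `john` is illustrative; as a global privilege it is granted on `*.*`):

```sql
GRANT displaySecretsInShowAndSelect ON *.* TO john;
```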

### ALL

Grants all the privileges on a regulated entity to a user account or a role.

@ -34,7 +34,7 @@ If the `alter_sync` is set to `2` and some replicas are not active for more than

## BY expression

If you want to perform deduplication on a custom set of columns rather than on all of them, you can specify the list of columns explicitly or use any combination of [`*`](../../sql-reference/statements/select/index.md#asterisk), [`COLUMNS`](../../sql-reference/statements/select/index.md#columns-expression) or [`EXCEPT`](../../sql-reference/statements/select/index.md#except-modifier) expressions. The explicitly written or implicitly expanded list of columns must include all columns specified in the row ordering expression (both primary and sorting keys) and the partitioning expression (partitioning key).
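
For instance, a hedged sketch against a hypothetical table whose columns are `key`, `value` and `metadata`, with `key` in the sorting key:

```sql
-- deduplicate by every column except metadata; the expansion of * must still
-- cover the primary/sorting and partitioning key columns
OPTIMIZE TABLE example FINAL DEDUPLICATE BY * EXCEPT metadata;
```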

:::note
Notice that `*` behaves just like in `SELECT`: [MATERIALIZED](../../sql-reference/statements/create/table.md#materialized) and [ALIAS](../../sql-reference/statements/create/table.md#alias) columns are not used for expansion.

@ -12,7 +12,7 @@ Compressed files are supported. Compression type is detected by the extension of

**Syntax**

```sql
SELECT <expr_list> INTO OUTFILE file_name [AND STDOUT] [APPEND] [COMPRESSION type [LEVEL level]]
```

`file_name` and `type` are string literals. Supported compression types are: `'none'`, `'gzip'`, `'deflate'`, `'br'`, `'xz'`, `'zstd'`, `'lz4'`, `'bz2'`.

@ -25,6 +25,7 @@ SELECT <expr_list> INTO OUTFILE file_name [AND STDOUT] [COMPRESSION type [LEVEL

- The query will fail if a file with the same file name already exists.
- The default [output format](../../../interfaces/formats.md) is `TabSeparated` (like in the command-line client batch mode). Use the [FORMAT](format.md) clause to change it.
- If `AND STDOUT` is mentioned in the query then the output that is written to the file is also displayed on standard output. If used with compression, the plaintext is displayed on standard output.
- If `APPEND` is mentioned in the query then the output is appended to an existing file. `APPEND` cannot be combined with compression.

**Example**
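
A hedged sketch of the `APPEND` behaviour (the file name is illustrative; without `APPEND`, the second query would fail because the file already exists):

```sql
SELECT 1 INTO OUTFILE 'out.tsv';
SELECT 2 INTO OUTFILE 'out.tsv' APPEND;
```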

@ -6,6 +6,13 @@ sidebar_label: SHOW

# SHOW Statements

N.B. `SHOW CREATE (TABLE|DATABASE|USER)` hides secrets unless the
[`display_secrets_in_show_and_select` server setting](../../operations/server-configuration-parameters/settings#display_secrets_in_show_and_select)
is turned on, the
[`format_display_secrets_in_show_and_select` format setting](../../operations/settings/formats#format_display_secrets_in_show_and_select)
is turned on, and the user has the
[`displaySecretsInShowAndSelect`](grant.md#grant-display-secrets) privilege.

## SHOW CREATE TABLE | DICTIONARY | VIEW | DATABASE

``` sql

@ -98,22 +105,6 @@ Result:

- [CREATE DATABASE](https://clickhouse.com/docs/en/sql-reference/statements/create/database/#query-language-create-database)

## SHOW PROCESSLIST

``` sql
SHOW PROCESSLIST [INTO OUTFILE filename] [FORMAT format]
```

Outputs the content of the [system.processes](../../operations/system-tables/processes.md#system_tables-processes) table, which contains a list of queries that are currently being processed, except for `SHOW PROCESSLIST` queries.

The `SELECT * FROM system.processes` query returns data about all the current queries.

Tip (execute in the console):

``` bash
$ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'"
```

## SHOW TABLES

Displays a list of tables.

@ -277,6 +268,77 @@ SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2

└──────────────┘
```

## SHOW INDEX

Displays a list of primary and data skipping indexes of a table.

```sql
SHOW [EXTENDED] {INDEX | INDEXES | KEYS } {FROM | IN} <table> [{FROM | IN} <db>] [WHERE <expr>] [INTO OUTFILE <filename>] [FORMAT <format>]
```

The database and table name can be specified in abbreviated form as `<db>.<table>`, i.e. `FROM tab FROM db` and `FROM db.tab` are
equivalent. If no database is specified, the query assumes the current database.

The optional keyword `EXTENDED` currently has no effect; it exists only for MySQL compatibility.

`SHOW INDEX` produces a result table with the following structure:
- table - The name of the table (String)
- non_unique - 0 if the index can contain duplicates, 1 otherwise (UInt8)
- key_name - The name of the index, `PRIMARY` if the index is a primary key index (String)
- seq_in_index - Currently unused
- column_name - Currently unused
- collation - The sorting of the column in the index, `A` if ascending, `D` if descending, `NULL` if unsorted (Nullable(String))
- cardinality - Currently unused
- sub_part - Currently unused
- packed - Currently unused
- null - Currently unused
- index_type - The index type, e.g. `primary`, `minmax`, `bloom_filter` etc. (String)
- comment - Currently unused
- index_comment - Currently unused
- visible - If the index is visible to the optimizer, always `YES` (String)
- expression - The index expression (String)

**Examples**

Getting information about all indexes in table 'tbl'

```sql
SHOW INDEX FROM 'tbl'
```

Result:

``` text
┌─table─┬─non_unique─┬─key_name─┬─seq_in_index─┬─column_name─┬─collation─┬─cardinality─┬─sub_part─┬─packed─┬─null─┬─index_type───┬─comment─┬─index_comment─┬─visible─┬─expression─┐
│ tbl │ 0 │ blf_idx │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ bloom_filter │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ d, b │
│ tbl │ 0 │ mm1_idx │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ minmax │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ a, c, d │
│ tbl │ 0 │ mm2_idx │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ minmax │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ c, d, e │
│ tbl │ 0 │ PRIMARY │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ A │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ primary │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ c, a │
│ tbl │ 0 │ set_idx │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ set │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ e │
└───────┴────────────┴──────────┴──────────────┴─────────────┴───────────┴─────────────┴──────────┴────────┴──────┴──────────────┴─────────┴───────────────┴─────────┴────────────┘
```

**See also**

- [system.tables](../../operations/system-tables/tables.md)
- [system.data_skipping_indices](../../operations/system-tables/data_skipping_indices.md)

## SHOW PROCESSLIST

``` sql
SHOW PROCESSLIST [INTO OUTFILE filename] [FORMAT format]
```

Outputs the content of the [system.processes](../../operations/system-tables/processes.md#system_tables-processes) table, which contains a list of queries that are currently being processed, except for `SHOW PROCESSLIST` queries.

The `SELECT * FROM system.processes` query returns data about all the current queries.

Tip (execute in the console):

``` bash
$ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'"
```

## SHOW GRANTS

Shows privileges for a user.

@ -293,8 +355,6 @@ If user is not specified, the query returns privileges for the current user.

Shows parameters that were used at a [user creation](../../sql-reference/statements/create/user.md).

`SHOW CREATE USER` does not output user passwords.

**Syntax**

``` sql

@ -76,7 +76,7 @@ Resets the mark cache.

## DROP REPLICA

Dead replicas of `ReplicatedMergeTree` tables can be dropped using the following syntax:

``` sql
SYSTEM DROP REPLICA 'replica_name' FROM TABLE database.table;

@ -85,13 +85,25 @@ SYSTEM DROP REPLICA 'replica_name';

SYSTEM DROP REPLICA 'replica_name' FROM ZKPATH '/path/to/table/in/zk';
```

Queries will remove the `ReplicatedMergeTree` replica path in ZooKeeper. This is useful when the replica is dead and its metadata cannot be removed from ZooKeeper by `DROP TABLE` because there is no such table anymore. It will only drop the inactive/stale replica; it cannot drop the local replica, so use `DROP TABLE` for that. `DROP REPLICA` does not drop any tables and does not remove any data or metadata from disk.

The first one removes metadata of the `'replica_name'` replica of the `database.table` table.
The second one does the same for all replicated tables in the database.
The third one does the same for all replicated tables on the local server.
The fourth one is useful to remove metadata of a dead replica when all other replicas of a table were dropped. It requires the table path to be specified explicitly. It must be the same path as was passed to the first argument of the `ReplicatedMergeTree` engine on table creation.

## DROP DATABASE REPLICA

Dead replicas of `Replicated` databases can be dropped using the following syntax:

``` sql
SYSTEM DROP DATABASE REPLICA 'replica_name' [FROM SHARD 'shard_name'] FROM DATABASE database;
SYSTEM DROP DATABASE REPLICA 'replica_name' [FROM SHARD 'shard_name'];
SYSTEM DROP DATABASE REPLICA 'replica_name' [FROM SHARD 'shard_name'] FROM ZKPATH '/path/to/table/in/zk';
```

Similar to `SYSTEM DROP REPLICA`, but removes the `Replicated` database replica path from ZooKeeper when there's no database to run `DROP DATABASE`. Please note that it does not remove `ReplicatedMergeTree` replicas (so you may need `SYSTEM DROP REPLICA` as well). Shard and replica names are the names that were specified in the `Replicated` engine arguments when creating the database. Also, these names can be obtained from the `database_shard_name` and `database_replica_name` columns in `system.clusters`. If the `FROM SHARD` clause is missing, then `replica_name` must be a full replica name in `shard_name|replica_name` format.
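
To illustrate the two equivalent spellings (shard `s1`, replica `r1` and database `db` are hypothetical names):

```sql
-- full replica name in 'shard_name|replica_name' format
SYSTEM DROP DATABASE REPLICA 's1|r1' FROM DATABASE db;
-- the same replica, using the FROM SHARD clause
SYSTEM DROP DATABASE REPLICA 'r1' FROM SHARD 's1' FROM DATABASE db;
```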

## DROP UNCOMPRESSED CACHE

Reset the uncompressed data cache.

@ -1,7 +1,7 @@

---
slug: /en/sql-reference/table-functions/dictionary
sidebar_position: 54
sidebar_label: dictionary
title: dictionary
---

Some files were not shown because too many files have changed in this diff.