mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-10 01:25:21 +00:00
Merge branch 'master' of github.com:ClickHouse/ClickHouse
This commit is contained in:
commit
c51a1b8e87
37
.github/workflows/master.yml
vendored
37
.github/workflows/master.yml
vendored
@ -59,16 +59,17 @@ jobs:
|
||||
uses: ./.github/workflows/reusable_docker.yml
|
||||
with:
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
StyleCheck:
|
||||
needs: [RunConfig, BuildDockers]
|
||||
if: ${{ !failure() && !cancelled() }}
|
||||
uses: ./.github/workflows/reusable_test.yml
|
||||
with:
|
||||
test_name: Style check
|
||||
runner_type: style-checker
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
run_command: |
|
||||
python3 style_check.py --no-push
|
||||
# Tested in MQ
|
||||
# StyleCheck:
|
||||
# needs: [RunConfig, BuildDockers]
|
||||
# if: ${{ !failure() && !cancelled() }}
|
||||
# uses: ./.github/workflows/reusable_test.yml
|
||||
# with:
|
||||
# test_name: Style check
|
||||
# runner_type: style-checker
|
||||
# data: ${{ needs.RunConfig.outputs.data }}
|
||||
# run_command: |
|
||||
# python3 style_check.py --no-push
|
||||
CompatibilityCheckX86:
|
||||
needs: [RunConfig, BuilderDebRelease]
|
||||
if: ${{ !failure() && !cancelled() }}
|
||||
@ -447,6 +448,14 @@ jobs:
|
||||
test_name: Stateless tests (debug)
|
||||
runner_type: func-tester
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
FunctionalStatelessTestAsanAzure:
|
||||
needs: [RunConfig, BuilderDebAsan]
|
||||
if: ${{ !failure() && !cancelled() }}
|
||||
uses: ./.github/workflows/reusable_test.yml
|
||||
with:
|
||||
test_name: Stateless tests (azure, asan)
|
||||
runner_type: func-tester
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
##############################################################################################
|
||||
############################ FUNCTIONAl STATEFUL TESTS #######################################
|
||||
##############################################################################################
|
||||
@ -597,6 +606,14 @@ jobs:
|
||||
test_name: Stress test (tsan)
|
||||
runner_type: stress-tester
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
StressTestTsanAzure:
|
||||
needs: [RunConfig, BuilderDebTsan]
|
||||
if: ${{ !failure() && !cancelled() }}
|
||||
uses: ./.github/workflows/reusable_test.yml
|
||||
with:
|
||||
test_name: Stress test (azure, tsan)
|
||||
runner_type: stress-tester
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
StressTestMsan:
|
||||
needs: [RunConfig, BuilderDebMsan]
|
||||
if: ${{ !failure() && !cancelled() }}
|
||||
|
4
.github/workflows/pull_request.yml
vendored
4
.github/workflows/pull_request.yml
vendored
@ -158,7 +158,7 @@ jobs:
|
||||
#
|
||||
FinishCheck:
|
||||
if: ${{ !failure() && !cancelled() }}
|
||||
needs: [Tests_1, Tests_2, Builds_1_Report, Builds_2_Report]
|
||||
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_1_Report, Builds_2_Report, Tests_1, Tests_2]
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Check out repository code
|
||||
@ -171,7 +171,7 @@ jobs:
|
||||
- name: Finish label
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 finish_check.py
|
||||
python3 finish_check.py ${{ (contains(needs.*.result, 'failure') && github.event_name == 'merge_group') && '--pipeline-failure' || '' }}
|
||||
- name: Auto merge if approved
|
||||
if: ${{ github.event_name != 'merge_group' }}
|
||||
run: |
|
||||
|
@ -16,7 +16,7 @@
|
||||
#ci_set_reduced
|
||||
#ci_set_arm
|
||||
#ci_set_integration
|
||||
#ci_set_analyzer
|
||||
#ci_set_old_analyzer
|
||||
|
||||
## To run specified job in CI:
|
||||
#job_<JOB NAME>
|
||||
|
@ -38,9 +38,9 @@
|
||||
* Optimized function `dotProduct` to omit unnecessary and expensive memory copies. [#60928](https://github.com/ClickHouse/ClickHouse/pull/60928) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* 30x faster printing for 256-bit integers. [#61100](https://github.com/ClickHouse/ClickHouse/pull/61100) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* If the table's primary key contains mostly useless columns, don't keep them in memory. This is controlled by a new setting `primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns` with the value `0.9` by default, which means: for a composite primary key, if a column changes its value for at least 0.9 of all the times, the next columns after it will be not loaded. [#60255](https://github.com/ClickHouse/ClickHouse/pull/60255) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Improve the performance of serialized aggregation method when involving multiple `Nullable` columns. [#55809](https://github.com/ClickHouse/ClickHouse/pull/55809) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Lazy build JSON's output to improve performance of ALL JOIN. [#58278](https://github.com/ClickHouse/ClickHouse/pull/58278) ([LiuNeng](https://github.com/liuneng1994)).
|
||||
* Make HTTP/HTTPs connections with external services, such as AWS S3 reusable for all uses cases. Even when response is 3xx or 4xx. [#58845](https://github.com/ClickHouse/ClickHouse/pull/58845) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
* Improve the performance of serialized aggregation methods when involving multiple `Nullable` columns. [#55809](https://github.com/ClickHouse/ClickHouse/pull/55809) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Lazy builds JSON's output to improve performance of ALL JOIN. [#58278](https://github.com/ClickHouse/ClickHouse/pull/58278) ([LiuNeng](https://github.com/liuneng1994)).
|
||||
* Make HTTP/HTTPs connections with external services, such as AWS S3 reusable for all use cases. Even when the response is 3xx or 4xx. [#58845](https://github.com/ClickHouse/ClickHouse/pull/58845) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
* Improvements to aggregate functions `argMin` / `argMax` / `any` / `anyLast` / `anyHeavy`, as well as `ORDER BY {u8/u16/u32/u64/i8/i16/u32/i64) LIMIT 1` queries. [#58640](https://github.com/ClickHouse/ClickHouse/pull/58640) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Trivial optimization for column's filter. Peak memory can be reduced to 44% of the original in some cases. [#59698](https://github.com/ClickHouse/ClickHouse/pull/59698) ([李扬](https://github.com/taiyang-li)).
|
||||
* Execute `multiIf` function in a columnar fashion when the result type's underlying type is a number. [#60384](https://github.com/ClickHouse/ClickHouse/pull/60384) ([李扬](https://github.com/taiyang-li)).
|
||||
@ -49,7 +49,7 @@
|
||||
* Optimize data movement between columns of a Nullable number or a Nullable string, which improves some micro-benchmarks. [#60846](https://github.com/ClickHouse/ClickHouse/pull/60846) ([李扬](https://github.com/taiyang-li)).
|
||||
* Operations with the filesystem cache will suffer less from the lock contention. [#61066](https://github.com/ClickHouse/ClickHouse/pull/61066) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Optimize array join and other JOINs by preventing a wrong compiler's optimization. Close [#61074](https://github.com/ClickHouse/ClickHouse/issues/61074). [#61075](https://github.com/ClickHouse/ClickHouse/pull/61075) ([李扬](https://github.com/taiyang-li)).
|
||||
* If a query with a syntax error contained `COLUMNS` matcher with a regular expression, the regular expression was compiled each time during the parser's backtracking, instead of being compiled once. This was a fundamental error. The compiled regexp was put to AST. But the letter A in AST means "abstract" which means it should not contain heavyweight objects. Parts of AST can be created and discarded during parsing, including a large number of backtracking. This leads to slowness on the parsing side and consequently allows DoS by a readonly user. But the main problem is that it prevents progress in fuzzers. [#61543](https://github.com/ClickHouse/ClickHouse/pull/61543) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* If a query with a syntax error contained the `COLUMNS` matcher with a regular expression, the regular expression was compiled each time during the parser's backtracking, instead of being compiled once. This was a fundamental error. The compiled regexp was put to AST. But the letter A in AST means "abstract" which means it should not contain heavyweight objects. Parts of AST can be created and discarded during parsing, including a large number of backtracking. This leads to slowness on the parsing side and consequently allows DoS by a readonly user. But the main problem is that it prevents progress in fuzzers. [#61543](https://github.com/ClickHouse/ClickHouse/pull/61543) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Add a new analyzer pass to optimize the IN operator for a single value. [#61564](https://github.com/ClickHouse/ClickHouse/pull/61564) ([LiuNeng](https://github.com/liuneng1994)).
|
||||
* DNSResolver shuffles set of resolved IPs which is needed to uniformly utilize multiple endpoints of AWS S3. [#60965](https://github.com/ClickHouse/ClickHouse/pull/60965) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
|
||||
|
@ -66,9 +66,11 @@ public:
|
||||
/// The thread and process ids are set.
|
||||
|
||||
Message(
|
||||
const std::string & source, const std::string & text, Priority prio, const char * file, int line, std::string_view fmt_str = {});
|
||||
const std::string & source, const std::string & text, Priority prio, const char * file, int line,
|
||||
std::string_view fmt_str = {}, const std::vector<std::string> & fmt_str_args = {});
|
||||
Message(
|
||||
std::string && source, std::string && text, Priority prio, const char * file, int line, std::string_view fmt_str);
|
||||
std::string && source, std::string && text, Priority prio, const char * file, int line,
|
||||
std::string_view fmt_str, std::vector<std::string> && fmt_str_args);
|
||||
/// Creates a Message with the given source, text, priority,
|
||||
/// source file path and line.
|
||||
///
|
||||
@ -161,6 +163,9 @@ public:
|
||||
std::string_view getFormatString() const;
|
||||
void setFormatString(std::string_view fmt_str);
|
||||
|
||||
const std::vector<std::string> & getFormatStringArgs() const;
|
||||
void setFormatStringArgs(const std::vector<std::string> & fmt_str_args);
|
||||
|
||||
int getSourceLine() const;
|
||||
/// Returns the source file line of the statement
|
||||
/// generating the log message. May be 0
|
||||
@ -210,6 +215,7 @@ private:
|
||||
int _line;
|
||||
StringMap * _pMap;
|
||||
std::string_view _fmt_str;
|
||||
std::vector<std::string> _fmt_str_args;
|
||||
};
|
||||
|
||||
|
||||
|
@ -46,7 +46,9 @@ Message::Message(const std::string& source, const std::string& text, Priority pr
|
||||
}
|
||||
|
||||
|
||||
Message::Message(const std::string& source, const std::string& text, Priority prio, const char* file, int line, std::string_view fmt_str):
|
||||
Message::Message(
|
||||
const std::string& source, const std::string& text, Priority prio, const char* file, int line,
|
||||
std::string_view fmt_str, const std::vector<std::string>& fmt_str_args):
|
||||
_source(source),
|
||||
_text(text),
|
||||
_prio(prio),
|
||||
@ -54,13 +56,16 @@ Message::Message(const std::string& source, const std::string& text, Priority pr
|
||||
_file(file),
|
||||
_line(line),
|
||||
_pMap(0),
|
||||
_fmt_str(fmt_str)
|
||||
_fmt_str(fmt_str),
|
||||
_fmt_str_args(fmt_str_args)
|
||||
{
|
||||
init();
|
||||
}
|
||||
|
||||
|
||||
Message::Message(std::string && source, std::string && text, Priority prio, const char * file, int line, std::string_view fmt_str):
|
||||
Message::Message(
|
||||
std::string && source, std::string && text, Priority prio, const char * file, int line,
|
||||
std::string_view fmt_str, std::vector<std::string> && fmt_str_args):
|
||||
_source(std::move(source)),
|
||||
_text(std::move(text)),
|
||||
_prio(prio),
|
||||
@ -68,7 +73,8 @@ Message::Message(std::string && source, std::string && text, Priority prio, cons
|
||||
_file(file),
|
||||
_line(line),
|
||||
_pMap(0),
|
||||
_fmt_str(fmt_str)
|
||||
_fmt_str(fmt_str),
|
||||
_fmt_str_args(std::move(fmt_str_args))
|
||||
{
|
||||
init();
|
||||
}
|
||||
@ -83,7 +89,8 @@ Message::Message(const Message& msg):
|
||||
_pid(msg._pid),
|
||||
_file(msg._file),
|
||||
_line(msg._line),
|
||||
_fmt_str(msg._fmt_str)
|
||||
_fmt_str(msg._fmt_str),
|
||||
_fmt_str_args(msg._fmt_str_args)
|
||||
{
|
||||
if (msg._pMap)
|
||||
_pMap = new StringMap(*msg._pMap);
|
||||
@ -102,7 +109,8 @@ Message::Message(const Message& msg, const std::string& text):
|
||||
_pid(msg._pid),
|
||||
_file(msg._file),
|
||||
_line(msg._line),
|
||||
_fmt_str(msg._fmt_str)
|
||||
_fmt_str(msg._fmt_str),
|
||||
_fmt_str_args(msg._fmt_str_args)
|
||||
{
|
||||
if (msg._pMap)
|
||||
_pMap = new StringMap(*msg._pMap);
|
||||
@ -154,6 +162,7 @@ void Message::swap(Message& msg)
|
||||
swap(_line, msg._line);
|
||||
swap(_pMap, msg._pMap);
|
||||
swap(_fmt_str, msg._fmt_str);
|
||||
swap(_fmt_str_args, msg._fmt_str_args);
|
||||
}
|
||||
|
||||
|
||||
@ -227,6 +236,17 @@ void Message::setFormatString(std::string_view fmt_str)
|
||||
}
|
||||
|
||||
|
||||
const std::vector<std::string>& Message::getFormatStringArgs() const
|
||||
{
|
||||
return _fmt_str_args;
|
||||
}
|
||||
|
||||
void Message::setFormatStringArgs(const std::vector<std::string>& fmt_str_args)
|
||||
{
|
||||
_fmt_str_args = fmt_str_args;
|
||||
}
|
||||
|
||||
|
||||
bool Message::has(const std::string& param) const
|
||||
{
|
||||
return _pMap && (_pMap->find(param) != _pMap->end());
|
||||
|
@ -8,9 +8,6 @@ option (SANITIZE "Enable one of the code sanitizers" "")
|
||||
|
||||
set (SAN_FLAGS "${SAN_FLAGS} -g -fno-omit-frame-pointer -DSANITIZER")
|
||||
|
||||
# It's possible to pass an ignore list to sanitizers (-fsanitize-ignorelist). Intentionally not doing this because
|
||||
# 1. out-of-source suppressions are awkward 2. it seems ignore lists don't work after the Clang v16 upgrade (#49829)
|
||||
|
||||
if (SANITIZE)
|
||||
if (SANITIZE STREQUAL "address")
|
||||
set (ASAN_FLAGS "-fsanitize=address -fsanitize-address-use-after-scope")
|
||||
|
2
contrib/curl
vendored
2
contrib/curl
vendored
@ -1 +1 @@
|
||||
Subproject commit 1a05e833f8f7140628b27882b10525fd9ec4b873
|
||||
Subproject commit de7b3e89218467159a7af72d58cea8425946e97d
|
@ -33,14 +33,15 @@ set (SRCS
|
||||
"${LIBRARY_DIR}/lib/curl_memrchr.c"
|
||||
"${LIBRARY_DIR}/lib/curl_multibyte.c"
|
||||
"${LIBRARY_DIR}/lib/curl_ntlm_core.c"
|
||||
"${LIBRARY_DIR}/lib/curl_ntlm_wb.c"
|
||||
"${LIBRARY_DIR}/lib/curl_path.c"
|
||||
"${LIBRARY_DIR}/lib/curl_range.c"
|
||||
"${LIBRARY_DIR}/lib/curl_rtmp.c"
|
||||
"${LIBRARY_DIR}/lib/curl_sasl.c"
|
||||
"${LIBRARY_DIR}/lib/curl_sha512_256.c"
|
||||
"${LIBRARY_DIR}/lib/curl_sspi.c"
|
||||
"${LIBRARY_DIR}/lib/curl_threads.c"
|
||||
"${LIBRARY_DIR}/lib/curl_trc.c"
|
||||
"${LIBRARY_DIR}/lib/cw-out.c"
|
||||
"${LIBRARY_DIR}/lib/dict.c"
|
||||
"${LIBRARY_DIR}/lib/doh.c"
|
||||
"${LIBRARY_DIR}/lib/dynbuf.c"
|
||||
@ -98,6 +99,7 @@ set (SRCS
|
||||
"${LIBRARY_DIR}/lib/psl.c"
|
||||
"${LIBRARY_DIR}/lib/rand.c"
|
||||
"${LIBRARY_DIR}/lib/rename.c"
|
||||
"${LIBRARY_DIR}/lib/request.c"
|
||||
"${LIBRARY_DIR}/lib/rtsp.c"
|
||||
"${LIBRARY_DIR}/lib/select.c"
|
||||
"${LIBRARY_DIR}/lib/sendf.c"
|
||||
|
@ -38,6 +38,7 @@
|
||||
|
||||
#define HAVE_ARPA_INET_H
|
||||
#define HAVE_ERRNO_H
|
||||
#define HAVE_GETSOCKNAME
|
||||
#define HAVE_FCNTL_H
|
||||
#define HAVE_NETDB_H
|
||||
#define HAVE_NETINET_IN_H
|
||||
|
@ -93,6 +93,7 @@ enable_language(ASM)
|
||||
|
||||
if(COMPILER_CLANG)
|
||||
add_definitions(-Wno-unused-command-line-argument)
|
||||
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=lld") # only relevant for -DENABLE_OPENSSL_DYNAMIC=1
|
||||
endif()
|
||||
|
||||
if(ARCH_AMD64)
|
||||
@ -960,11 +961,6 @@ set(CRYPTO_SRC
|
||||
${OPENSSL_SOURCE_DIR}/crypto/x509/x_req.c
|
||||
${OPENSSL_SOURCE_DIR}/crypto/x509/x_x509.c
|
||||
${OPENSSL_SOURCE_DIR}/crypto/x509/x_x509a.c
|
||||
${OPENSSL_SOURCE_DIR}/engines/e_capi.c
|
||||
${OPENSSL_SOURCE_DIR}/engines/e_dasync.c
|
||||
${OPENSSL_SOURCE_DIR}/engines/e_loader_attic.c
|
||||
${OPENSSL_SOURCE_DIR}/engines/e_ossltest.c
|
||||
${OPENSSL_SOURCE_DIR}/engines/e_padlock.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/baseprov.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/common/bio_prov.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/common/capabilities.c
|
||||
@ -985,8 +981,6 @@ set(CRYPTO_SRC
|
||||
${OPENSSL_SOURCE_DIR}/providers/common/securitycheck.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/common/securitycheck_default.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/defltprov.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/fips/fips_entry.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/fips/fipsprov.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/implementations/asymciphers/rsa_enc.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/implementations/asymciphers/sm2_enc.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/implementations/ciphers/cipher_aes.c
|
||||
@ -1145,11 +1139,19 @@ set(CRYPTO_SRC
|
||||
${OPENSSL_SOURCE_DIR}/providers/implementations/signature/sm2_sig.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/implementations/storemgmt/file_store.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/implementations/storemgmt/file_store_any2obj.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/legacyprov.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/nullprov.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/prov_running.c
|
||||
${OPENSSL_SOURCE_DIR}/ssl/record/methods/tls_pad.c
|
||||
${OPENSSL_SOURCE_DIR}/ssl/record/methods/ssl3_cbc.c
|
||||
)
|
||||
|
||||
if(NOT ENABLE_OPENSSL_DYNAMIC)
|
||||
set(CRYPTO_SRC ${CRYPTO_SRC}
|
||||
${OPENSSL_SOURCE_DIR}/providers/fips/fips_entry.c
|
||||
${OPENSSL_SOURCE_DIR}/providers/fips/fipsprov.c
|
||||
)
|
||||
endif()
|
||||
|
||||
if(ARCH_AMD64)
|
||||
if (OS_DARWIN)
|
||||
set(CRYPTO_SRC ${CRYPTO_SRC}
|
||||
@ -1376,8 +1378,6 @@ set(SSL_SRC
|
||||
${OPENSSL_SOURCE_DIR}/ssl/quic/uint_set.c
|
||||
${OPENSSL_SOURCE_DIR}/ssl/record/rec_layer_d1.c
|
||||
${OPENSSL_SOURCE_DIR}/ssl/record/rec_layer_s3.c
|
||||
${OPENSSL_SOURCE_DIR}/ssl/record/methods/tls_pad.c
|
||||
${OPENSSL_SOURCE_DIR}/ssl/record/methods/ssl3_cbc.c
|
||||
${OPENSSL_SOURCE_DIR}/ssl/record/methods/dtls_meth.c
|
||||
${OPENSSL_SOURCE_DIR}/ssl/record/methods/ssl3_meth.c
|
||||
${OPENSSL_SOURCE_DIR}/ssl/record/methods/tls13_meth.c
|
||||
|
@ -14,11 +14,14 @@ RUN curl -o krb5-libs-1.10.3-65.el6.x86_64.rpm ftp://ftp.pbone.net/mirror/vault.
|
||||
rpm -Uvh libkadm5-1.10.3-65.el6.x86_64.rpm libss-1.41.12-24.el6.x86_64.rpm krb5-libs-1.10.3-65.el6.x86_64.rpm krb5-workstation-1.10.3-65.el6.x86_64.rpm libcom_err-1.41.12-24.el6.x86_64.rpm && \
|
||||
rm -fr *.rpm
|
||||
|
||||
ADD https://archive.apache.org/dist/commons/daemon/source/commons-daemon-1.0.15-src.tar.gz /tmp/commons-daemon-1.0.15-src.tar.gz
|
||||
|
||||
RUN cd /tmp && \
|
||||
curl http://archive.apache.org/dist/commons/daemon/source/commons-daemon-1.0.15-src.tar.gz -o commons-daemon-1.0.15-src.tar.gz && \
|
||||
tar xzf commons-daemon-1.0.15-src.tar.gz && \
|
||||
cd commons-daemon-1.0.15-src/src/native/unix && \
|
||||
./configure && \
|
||||
make && \
|
||||
cp ./jsvc /usr/local/hadoop-2.7.0/sbin && \
|
||||
[ -e /usr/local/hadoop ] || ln -s ./hadoop-2.7.0 /usr/local/hadoop
|
||||
cd /tmp && \
|
||||
rm -rf commons-daemon-1.0.15-src* && \
|
||||
{ [ -e /usr/local/hadoop ] || ln -s ./hadoop-2.7.0 /usr/local/hadoop; }
|
||||
|
@ -19,7 +19,7 @@ ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
|
||||
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --silent --inMemoryPersistence &
|
||||
./setup_minio.sh stateful
|
||||
|
||||
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
|
||||
@ -87,7 +87,7 @@ function start()
|
||||
tail -n1000 /var/log/clickhouse-server/clickhouse-server.log
|
||||
break
|
||||
fi
|
||||
timeout 120 service clickhouse-server start
|
||||
timeout 120 sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon --pid-file /var/run/clickhouse-server/clickhouse-server.pid
|
||||
sleep 0.5
|
||||
counter=$((counter + 1))
|
||||
done
|
||||
|
@ -42,14 +42,6 @@ source /utils.lib
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
|
||||
echo "Azure is disabled"
|
||||
elif [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
|
||||
echo "Azure is disabled"
|
||||
else
|
||||
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
|
||||
fi
|
||||
|
||||
./setup_minio.sh stateless
|
||||
./setup_hdfs_minicluster.sh
|
||||
|
||||
@ -99,12 +91,11 @@ if [ "$NUM_TRIES" -gt "1" ]; then
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US_MAX=10000
|
||||
|
||||
mkdir -p /var/run/clickhouse-server
|
||||
# simplest way to forward env variables to server
|
||||
sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon --pid-file /var/run/clickhouse-server/clickhouse-server.pid
|
||||
else
|
||||
sudo clickhouse start
|
||||
fi
|
||||
|
||||
# simplest way to forward env variables to server
|
||||
sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon --pid-file /var/run/clickhouse-server/clickhouse-server.pid
|
||||
|
||||
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
|
||||
sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" /etc/clickhouse-server1/config.d/filesystem_caches_path.xml
|
||||
|
||||
@ -214,6 +205,14 @@ function run_tests()
|
||||
ADDITIONAL_OPTIONS+=('--s3-storage')
|
||||
fi
|
||||
|
||||
if [[ -n "$USE_AZURE_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_AZURE_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
|
||||
# to disable the same tests
|
||||
ADDITIONAL_OPTIONS+=('--s3-storage')
|
||||
# azurite is slow, but with these two settings it can be super slow
|
||||
ADDITIONAL_OPTIONS+=('--no-random-settings')
|
||||
ADDITIONAL_OPTIONS+=('--no-random-merge-tree-settings')
|
||||
fi
|
||||
|
||||
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
|
||||
ADDITIONAL_OPTIONS+=('--shared-catalog')
|
||||
fi
|
||||
@ -288,7 +287,7 @@ stop_logs_replication
|
||||
failed_to_save_logs=0
|
||||
for table in query_log zookeeper_log trace_log transactions_info_log metric_log
|
||||
do
|
||||
err=$( { clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst; } 2>&1 )
|
||||
err=$(clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.gz' format TSVWithNamesAndTypes")
|
||||
echo "$err"
|
||||
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
|
||||
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
|
||||
|
@ -279,7 +279,7 @@ function check_logs_for_critical_errors()
|
||||
|
||||
function collect_query_and_trace_logs()
|
||||
{
|
||||
for table in query_log trace_log
|
||||
for table in query_log trace_log metric_log
|
||||
do
|
||||
clickhouse-local --config-file=/etc/clickhouse-server/config.xml --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
|
||||
done
|
||||
|
@ -52,7 +52,6 @@ export ZOOKEEPER_FAULT_INJECTION=1
|
||||
# available for dump via clickhouse-local
|
||||
configure
|
||||
|
||||
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
|
||||
./setup_minio.sh stateless # to have a proper environment
|
||||
|
||||
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
|
||||
|
37
docs/changelogs/v23.8.13.25-lts.md
Normal file
37
docs/changelogs/v23.8.13.25-lts.md
Normal file
@ -0,0 +1,37 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2024
|
||||
---
|
||||
|
||||
# 2024 Changelog
|
||||
|
||||
### ClickHouse release v23.8.13.25-lts (37e034f903e) FIXME as compared to v23.8.12.13-lts (bdbd0d87e5d)
|
||||
|
||||
#### Improvement
|
||||
* Backported in [#61930](https://github.com/ClickHouse/ClickHouse/issues/61930): Fixed accounting of memory allocated before attaching thread to a query or a user. [#56089](https://github.com/ClickHouse/ClickHouse/pull/56089) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
|
||||
#### Build/Testing/Packaging Improvement
|
||||
* Backported in [#62007](https://github.com/ClickHouse/ClickHouse/issues/62007): Remove from the Keeper Docker image the volumes at /etc/clickhouse-keeper and /var/log/clickhouse-keeper. [#61683](https://github.com/ClickHouse/ClickHouse/pull/61683) ([Tristan](https://github.com/Tristan971)).
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
|
||||
* Fix REPLACE/MOVE PARTITION with zero-copy replication [#54193](https://github.com/ClickHouse/ClickHouse/pull/54193) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Fix ATTACH query with external ON CLUSTER [#61365](https://github.com/ClickHouse/ClickHouse/pull/61365) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Cancel merges before removing moved parts [#61610](https://github.com/ClickHouse/ClickHouse/pull/61610) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||
* Mark CANNOT_PARSE_ESCAPE_SEQUENCE error as parse error to be able to skip it in row input formats [#61883](https://github.com/ClickHouse/ClickHouse/pull/61883) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Try to fix segfault in Hive engine [#62578](https://github.com/ClickHouse/ClickHouse/pull/62578) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
|
||||
#### CI Fix or Improvement (changelog entry is not required)
|
||||
|
||||
* Backported in [#62201](https://github.com/ClickHouse/ClickHouse/issues/62201):. [#62190](https://github.com/ClickHouse/ClickHouse/pull/62190) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
||||
* Backported in [#62796](https://github.com/ClickHouse/ClickHouse/issues/62796): We won't fail the job when GH fails to retrieve the job ID and URLs. [#62651](https://github.com/ClickHouse/ClickHouse/pull/62651) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Backported in [#62968](https://github.com/ClickHouse/ClickHouse/issues/62968):. [#62932](https://github.com/ClickHouse/ClickHouse/pull/62932) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
|
||||
#### NO CL CATEGORY
|
||||
|
||||
* Backported in [#62585](https://github.com/ClickHouse/ClickHouse/issues/62585):. [#60078](https://github.com/ClickHouse/ClickHouse/pull/60078) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
|
||||
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Throw on query timeout in ZooKeeperRetries [#60922](https://github.com/ClickHouse/ClickHouse/pull/60922) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
|
@ -4,7 +4,7 @@ sidebar_position: 30
|
||||
sidebar_label: Replicated
|
||||
---
|
||||
|
||||
# [experimental] Replicated
|
||||
# Replicated
|
||||
|
||||
The engine is based on the [Atomic](../../engines/database-engines/atomic.md) engine. It supports replication of metadata via DDL log being written to ZooKeeper and executed on all of the replicas for a given database.
|
||||
|
||||
|
@ -8,6 +8,8 @@ sidebar_label: HDFS
|
||||
|
||||
This engine provides integration with the [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ecosystem by allowing to manage data on [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) via ClickHouse. This engine is similar to the [File](../../../engines/table-engines/special/file.md#table_engines-file) and [URL](../../../engines/table-engines/special/url.md#table_engines-url) engines, but provides Hadoop-specific features.
|
||||
|
||||
This feature is not supported by ClickHouse engineers, and it is known to have a sketchy quality. In case of any problems, fix them yourself and submit a pull request.
|
||||
|
||||
## Usage {#usage}
|
||||
|
||||
``` sql
|
||||
|
@ -304,10 +304,10 @@ We use the term `MergeTree` to refer to all table engines in the `MergeTree fami
|
||||
|
||||
If you had a `MergeTree` table that was manually replicated, you can convert it to a replicated table. You might need to do this if you have already collected a large amount of data in a `MergeTree` table and now you want to enable replication.
|
||||
|
||||
`MergeTree` table can be automatically converted on server restart if `convert_to_replicated` flag is set at the table's data directory (`/var/lib/clickhouse/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/` for `Atomic` database).
|
||||
`MergeTree` table can be automatically converted on server restart if `convert_to_replicated` flag is set at the table's data directory (`/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/` for `Atomic` database).
|
||||
Create empty `convert_to_replicated` file and the table will be loaded as replicated on next server restart.
|
||||
|
||||
This query can be used to get the table's data path.
|
||||
This query can be used to get the table's data path. If table has many data paths, you have to use the first one.
|
||||
|
||||
```sql
|
||||
SELECT data_paths FROM system.tables WHERE table = 'table_name' AND database = 'database_name';
|
||||
|
@ -10,7 +10,8 @@ The RecipeNLG dataset is available for download [here](https://recipenlg.cs.put.
|
||||
|
||||
1. Go to the download page [https://recipenlg.cs.put.poznan.pl/dataset](https://recipenlg.cs.put.poznan.pl/dataset).
|
||||
1. Accept Terms and Conditions and download zip file.
|
||||
1. Unpack the zip file with `unzip`. You will get the `full_dataset.csv` file.
|
||||
1. Option: Using the `md5sum dataset.zip` to validate the zip file and it should be equal to `3a168dfd0912bb034225619b3586ce76`.
|
||||
1. Unpack the zip file with `unzip dataset.zip`. You will get the `full_dataset.csv` file in the `dataset` directory.
|
||||
|
||||
## Create a Table
|
||||
|
||||
@ -72,7 +73,7 @@ Result:
|
||||
|
||||
``` text
|
||||
┌─count()─┐
|
||||
│ 2231141 │
|
||||
│ 2231142 │
|
||||
└─────────┘
|
||||
```
|
||||
|
||||
@ -115,7 +116,7 @@ Result:
|
||||
│ egg │ 160507 │
|
||||
│ baking powder │ 148277 │
|
||||
│ lemon juice │ 146414 │
|
||||
│ Salt │ 122557 │
|
||||
│ Salt │ 122558 │
|
||||
│ cinnamon │ 117927 │
|
||||
│ sour cream │ 116682 │
|
||||
│ cream cheese │ 114423 │
|
||||
|
@ -327,7 +327,9 @@ Use buffering to avoid situations where a query processing error occurred after
|
||||
|
||||
## Setting a role with query parameters {#setting-role-with-query-parameters}
|
||||
|
||||
In certain scenarios, it might be required to set the granted role first, before executing the statement itself.
|
||||
This is a new feature added in ClickHouse 24.4.
|
||||
|
||||
In specific scenarios, setting the granted role first might be required before executing the statement itself.
|
||||
However, it is not possible to send `SET ROLE` and the statement together, as multi-statements are not allowed:
|
||||
|
||||
```
|
||||
@ -346,7 +348,7 @@ To overcome this limitation, you could use the `role` query parameter instead:
|
||||
curl -sS "http://localhost:8123?role=my_role" --data-binary "SELECT * FROM my_table;"
|
||||
```
|
||||
|
||||
This will be an equivalent of executing `SET ROLE my_role` before the statement.
|
||||
This will be the equivalent of executing `SET ROLE my_role` before the statement.
|
||||
|
||||
Additionally, it is possible to specify multiple `role` query parameters:
|
||||
|
||||
|
@ -76,7 +76,7 @@ ClickHouse, Inc. does **not** maintain the tools and libraries listed below and
|
||||
- [clickhouse-maxmind-geoip](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip)
|
||||
- AutoML
|
||||
- [MindsDB](https://mindsdb.com/)
|
||||
- [MindsDB](https://github.com/mindsdb/mindsdb) - Predictive AI layer for ClickHouse database.
|
||||
- [MindsDB](https://github.com/mindsdb/mindsdb) - Integrates with ClickHouse, making data from ClickHouse accessible to a diverse range of AI/ML models.
|
||||
|
||||
## Programming Language Ecosystems {#programming-language-ecosystems}
|
||||
|
||||
|
@ -7,6 +7,8 @@ toc_max_heading_level: 2
|
||||
|
||||
# Core Settings
|
||||
|
||||
All below settings are also available in table [system.settings](/docs/en/operations/system-tables/settings).
|
||||
|
||||
## additional_table_filters
|
||||
|
||||
An additional filter expression that is applied after reading
|
||||
@ -3931,19 +3933,6 @@ For example, `avg(if(cond, col, null))` can be rewritten to `avgOrNullIf(cond, c
|
||||
Supported only with experimental analyzer (`allow_experimental_analyzer = 1`).
|
||||
:::
|
||||
|
||||
## allow_experimental_database_replicated {#allow_experimental_database_replicated}
|
||||
|
||||
Enables to create databases with [Replicated](../../engines/database-engines/replicated.md) engine.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Disabled.
|
||||
- 1 — Enabled.
|
||||
|
||||
Default value: `0`.
|
||||
|
||||
Cloud default value: `1`.
|
||||
|
||||
## database_replicated_initial_query_timeout_sec {#database_replicated_initial_query_timeout_sec}
|
||||
|
||||
Sets how long initial DDL query should wait for Replicated database to process previous DDL queue entries in seconds.
|
||||
|
@ -30,6 +30,16 @@ Columns:
|
||||
- `source_file` (LowCardinality(String)) — Source file from which the logging was done.
|
||||
- `source_line` (UInt64) — Source line from which the logging was done.
|
||||
- `message_format_string` (LowCardinality(String)) — A format string that was used to format the message.
|
||||
- `value1` (String) - Argument 1 that was used to format the message.
|
||||
- `value2` (String) - Argument 2 that was used to format the message.
|
||||
- `value3` (String) - Argument 3 that was used to format the message.
|
||||
- `value4` (String) - Argument 4 that was used to format the message.
|
||||
- `value5` (String) - Argument 5 that was used to format the message.
|
||||
- `value6` (String) - Argument 6 that was used to format the message.
|
||||
- `value7` (String) - Argument 7 that was used to format the message.
|
||||
- `value8` (String) - Argument 8 that was used to format the message.
|
||||
- `value9` (String) - Argument 9 that was used to format the message.
|
||||
- `value10` (String) - Argument 10 that was used to format the message.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -55,4 +65,14 @@ revision: 54440
|
||||
source_file: /ClickHouse/src/Interpreters/DNSCacheUpdater.cpp; void DB::DNSCacheUpdater::start()
|
||||
source_line: 45
|
||||
message_format_string: Update period {} seconds
|
||||
value1: 15
|
||||
value2:
|
||||
value3:
|
||||
value4:
|
||||
value5:
|
||||
value6:
|
||||
value7:
|
||||
value8:
|
||||
value9:
|
||||
value10:
|
||||
```
|
||||
|
@ -111,7 +111,7 @@ On newer Linux kernels transparent huge pages are alright.
|
||||
$ echo 'madvise' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
|
||||
```
|
||||
|
||||
If you want to modify the transparent huge pages setting permanently, editing the `/etc/default/grub` to add the `transparent_hugepage=never` to the `GRUB_CMDLINE_LINUX_DEFAULT` option:
|
||||
If you want to modify the transparent huge pages setting permanently, editing the `/etc/default/grub` to add the `transparent_hugepage=madvise` to the `GRUB_CMDLINE_LINUX_DEFAULT` option:
|
||||
|
||||
```bash
|
||||
$ GRUB_CMDLINE_LINUX_DEFAULT="transparent_hugepage=madvise ..."
|
||||
|
@ -1907,6 +1907,12 @@ If the addition results in a value outside the bounds of the data type, the resu
|
||||
date_add(unit, value, date)
|
||||
```
|
||||
|
||||
Alternative syntax:
|
||||
|
||||
``` sql
|
||||
date_add(date, INTERVAL value unit)
|
||||
```
|
||||
|
||||
Aliases: `dateAdd`, `DATE_ADD`.
|
||||
|
||||
**Arguments**
|
||||
@ -1946,6 +1952,20 @@ Result:
|
||||
└───────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT date_add(toDate('2018-01-01'), INTERVAL 3 YEAR);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─plus(toDate('2018-01-01'), toIntervalYear(3))─┐
|
||||
│ 2021-01-01 │
|
||||
└───────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
|
||||
|
||||
**See Also**
|
||||
|
||||
- [addDate](#addDate)
|
||||
@ -1962,6 +1982,13 @@ If the subtraction results in a value outside the bounds of the data type, the r
|
||||
date_sub(unit, value, date)
|
||||
```
|
||||
|
||||
Alternative syntax:
|
||||
|
||||
``` sql
|
||||
date_sub(date, INTERVAL value unit)
|
||||
```
|
||||
|
||||
|
||||
Aliases: `dateSub`, `DATE_SUB`.
|
||||
|
||||
**Arguments**
|
||||
@ -2002,6 +2029,19 @@ Result:
|
||||
└────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
``` sql
|
||||
SELECT date_sub(toDate('2018-01-01'), INTERVAL 3 YEAR);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─minus(toDate('2018-01-01'), toIntervalYear(3))─┐
|
||||
│ 2015-01-01 │
|
||||
└────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
|
||||
**See Also**
|
||||
|
||||
- [subDate](#subDate)
|
||||
|
@ -8,7 +8,7 @@ sidebar_label: Mathematical
|
||||
|
||||
## e
|
||||
|
||||
Returns e ([Euler's constant](https://en.wikipedia.org/wiki/Euler%27s_constant))
|
||||
Returns e ([Euler's constant](https://en.wikipedia.org/wiki/Euler%27s_constant)).
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -45,7 +45,7 @@ exp(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -65,7 +65,7 @@ Alias: `ln(x)`
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -83,7 +83,7 @@ exp2(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -111,7 +111,7 @@ log2(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -129,7 +129,7 @@ exp10(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -157,7 +157,7 @@ log10(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -173,7 +173,7 @@ sqrt(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -189,7 +189,7 @@ cbrt(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -207,7 +207,7 @@ erf(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -239,7 +239,7 @@ erfc(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -257,7 +257,7 @@ lgamma(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -275,7 +275,7 @@ gamma(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -293,7 +293,7 @@ sin(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -323,7 +323,7 @@ cos(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -341,7 +341,7 @@ tan(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -359,7 +359,7 @@ asin(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -377,7 +377,7 @@ acos(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -395,7 +395,7 @@ atan(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
|
||||
- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -434,7 +434,7 @@ cosh(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` — The angle, in radians. Values from the interval: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `x` — The angle, in radians. Values from the interval: `-∞ < x < +∞`. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -468,7 +468,7 @@ acosh(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` — Hyperbolic cosine of angle. Values from the interval: `1 <= x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `x` — Hyperbolic cosine of angle. Values from the interval: `1 <= x < +∞`. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -502,7 +502,7 @@ sinh(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` — The angle, in radians. Values from the interval: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `x` — The angle, in radians. Values from the interval: `-∞ < x < +∞`. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -536,7 +536,7 @@ asinh(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` — Hyperbolic sine of angle. Values from the interval: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `x` — Hyperbolic sine of angle. Values from the interval: `-∞ < x < +∞`. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -569,13 +569,13 @@ tanh(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` — The angle, in radians. Values from the interval: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `x` — The angle, in radians. Values from the interval: `-∞ < x < +∞`. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Values from the interval: `-1 < tanh(x) < 1`.
|
||||
|
||||
Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
Type: [Float*](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Example**
|
||||
|
||||
@ -601,7 +601,7 @@ atanh(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` — Hyperbolic tangent of angle. Values from the interval: `–1 < x < 1`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `x` — Hyperbolic tangent of angle. Values from the interval: `–1 < x < 1`. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -635,8 +635,8 @@ atan2(y, x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `y` — y-coordinate of the point through which the ray passes. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `x` — x-coordinate of the point through which the ray passes. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `y` — y-coordinate of the point through which the ray passes. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md).
|
||||
- `x` — x-coordinate of the point through which the ray passes. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -670,8 +670,8 @@ hypot(x, y)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` — The first cathetus of a right-angle triangle. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `y` — The second cathetus of a right-angle triangle. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `x` — The first cathetus of a right-angle triangle. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md).
|
||||
- `y` — The second cathetus of a right-angle triangle. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -705,7 +705,7 @@ log1p(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` — Values from the interval: `-1 < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `x` — Values from the interval: `-1 < x < +∞`. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -747,6 +747,8 @@ sign(x)
|
||||
- 0 for `x = 0`
|
||||
- 1 for `x > 0`
|
||||
|
||||
Type: [Int8](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Examples**
|
||||
|
||||
Sign for the zero value:
|
||||
@ -803,7 +805,7 @@ degrees(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` — Input in radians. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `x` — Input in radians. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -837,7 +839,7 @@ radians(x)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` — Input in degrees. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `x` — Input in degrees. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
|
@ -3213,6 +3213,74 @@ Result:
|
||||
└───────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## connectionId
|
||||
|
||||
Retrieves the connection ID of the client that submitted the current query and returns it as a UInt64 integer.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
connectionId()
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
None.
|
||||
|
||||
**Returned value**
|
||||
|
||||
Returns an integer of type UInt64.
|
||||
|
||||
**Implementation details**
|
||||
|
||||
This function is most useful in debugging scenarios or for internal purposes within the MySQL handler. It was created for compatibility with [MySQL's `CONNECTION_ID` function](https://dev.mysql.com/doc/refman/8.0/en/information-functions.html#function_connection-id). It is not typically used in production queries.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT connectionId();
|
||||
```
|
||||
|
||||
```response
|
||||
0
|
||||
```
|
||||
|
||||
## connection_id
|
||||
|
||||
An alias of `connectionId`. Retrieves the connection ID of the client that submitted the current query and returns it as a UInt64 integer.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
connection_id()
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
None.
|
||||
|
||||
**Returned value**
|
||||
|
||||
Returns an integer of type UInt64.
|
||||
|
||||
**Implementation details**
|
||||
|
||||
This function is most useful in debugging scenarios or for internal purposes within the MySQL handler. It was created for compatibility with [MySQL's `CONNECTION_ID` function](https://dev.mysql.com/doc/refman/8.0/en/information-functions.html#function_connection-id). It is not typically used in production queries.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT connection_id();
|
||||
```
|
||||
|
||||
```response
|
||||
0
|
||||
```
|
||||
|
||||
## getClientHTTPHeader
|
||||
|
||||
Get the value of an HTTP header.
|
||||
|
@ -79,9 +79,9 @@ round(expression [, decimal_places])
|
||||
|
||||
The rounded number of the same type as the input number.
|
||||
|
||||
### Examples
|
||||
**Examples**
|
||||
|
||||
**Example of use with Float**
|
||||
Example of usage with Float:
|
||||
|
||||
``` sql
|
||||
SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3;
|
||||
@ -95,7 +95,7 @@ SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3;
|
||||
└─────┴──────────────────────────┘
|
||||
```
|
||||
|
||||
**Example of use with Decimal**
|
||||
Example of usage with Decimal:
|
||||
|
||||
``` sql
|
||||
SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3;
|
||||
@ -124,9 +124,7 @@ SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIM
|
||||
└────────┴──────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Examples of rounding**
|
||||
|
||||
Rounding to the nearest number.
|
||||
Examples of rounding to the nearest number:
|
||||
|
||||
``` text
|
||||
round(3.2, 0) = 3
|
||||
@ -183,9 +181,7 @@ roundBankers(expression [, decimal_places])
|
||||
|
||||
A value rounded by the banker’s rounding method.
|
||||
|
||||
### Examples
|
||||
|
||||
**Example of use**
|
||||
**Examples**
|
||||
|
||||
Query:
|
||||
|
||||
@ -210,7 +206,7 @@ Result:
|
||||
└─────┴───┘
|
||||
```
|
||||
|
||||
**Examples of Banker’s rounding**
|
||||
Examples of Banker’s rounding:
|
||||
|
||||
``` text
|
||||
roundBankers(0.4) = 0
|
||||
@ -226,25 +222,180 @@ roundBankers(10.755, 2) = 10.76
|
||||
|
||||
- [round](#rounding_functions-round)
|
||||
|
||||
## roundToExp2(num)
|
||||
## roundToExp2
|
||||
|
||||
Accepts a number. If the number is less than one, it returns 0. Otherwise, it rounds the number down to the nearest (whole non-negative) degree of two.
|
||||
Accepts a number. If the number is less than one, it returns `0`. Otherwise, it rounds the number down to the nearest (whole non-negative) degree of two.
|
||||
|
||||
## roundDuration(num)
|
||||
**Syntax**
|
||||
|
||||
Accepts a number. If the number is less than one, it returns 0. Otherwise, it rounds the number down to numbers from the set: 1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000.
|
||||
```sql
|
||||
roundToExp2(num)
|
||||
```
|
||||
|
||||
## roundAge(num)
|
||||
**Parameters**
|
||||
|
||||
Accepts a number. If the number is
|
||||
- smaller than 1, it returns 0,
|
||||
- between 1 and 17, it returns 17,
|
||||
- between 18 and 24, it returns 18,
|
||||
- between 25 and 34, it returns 25,
|
||||
- between 35 and 44, it returns 35,
|
||||
- between 45 and 54, it returns 45,
|
||||
- larger than 55, it returns 55.
|
||||
- `num`: A number to round. [UInt](../data-types/int-uint.md)/[Float](../data-types/float.md).
|
||||
|
||||
## roundDown(num, arr)
|
||||
**Returned value**
|
||||
|
||||
- `0`, for `num` $\lt 1$. [UInt8](../data-types/int-uint.md).
|
||||
- `num` rounded down to the nearest (whole non-negative) degree of two. [UInt](../data-types/int-uint.md)/[Float](../data-types/float.md) equivalent to the input type.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT *, roundToExp2(*) FROM system.numbers WHERE number IN (0, 2, 5, 10, 19, 50)
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─number─┬─roundToExp2(number)─┐
|
||||
│ 0 │ 0 │
|
||||
│ 2 │ 2 │
|
||||
│ 5 │ 4 │
|
||||
│ 10 │ 8 │
|
||||
│ 19 │ 16 │
|
||||
│ 50 │ 32 │
|
||||
└────────┴─────────────────────┘
|
||||
```
|
||||
|
||||
## roundDuration
|
||||
|
||||
Accepts a number. If the number is less than one, it returns `0`. Otherwise, it rounds the number down to numbers from the set of commonly used durations: `1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000`.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
roundDuration(num)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `num`: A number to round to one of the numbers in the set of common durations. [UInt](../data-types/int-uint.md)/[Float](../data-types/float.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- `0`, for `num` $\lt 1$.
|
||||
- Otherwise, one of: `1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000`. [UInt16](../data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT *, roundDuration(*) FROM system.numbers WHERE number IN (0, 9, 19, 47, 101, 149, 205, 271, 421, 789, 1423, 2345, 4567, 9876, 24680, 42573)
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─number─┬─roundDuration(number)─┐
|
||||
│ 0 │ 0 │
|
||||
│ 9 │ 1 │
|
||||
│ 19 │ 10 │
|
||||
│ 47 │ 30 │
|
||||
│ 101 │ 60 │
|
||||
│ 149 │ 120 │
|
||||
│ 205 │ 180 │
|
||||
│ 271 │ 240 │
|
||||
│ 421 │ 300 │
|
||||
│ 789 │ 600 │
|
||||
│ 1423 │ 1200 │
|
||||
│ 2345 │ 1800 │
|
||||
│ 4567 │ 3600 │
|
||||
│ 9876 │ 7200 │
|
||||
│ 24680 │ 18000 │
|
||||
│ 42573 │ 36000 │
|
||||
└────────┴───────────────────────┘
|
||||
```
|
||||
|
||||
## roundAge
|
||||
|
||||
Accepts a number within various commonly used ranges of human age and returns either a maximum or a minimum within that range.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
roundAge(num)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `age`: A number representing an age in years. [UInt](../data-types/int-uint.md)/[Float](../data-types/float.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Returns `0`, for $age \lt 1$.
|
||||
- Returns `17`, for $1 \leq age \leq 17$.
|
||||
- Returns `18`, for $18 \leq age \leq 24$.
|
||||
- Returns `25`, for $25 \leq age \leq 34$.
|
||||
- Returns `35`, for $35 \leq age \leq 44$.
|
||||
- Returns `45`, for $45 \leq age \leq 54$.
|
||||
- Returns `55`, for $age \geq 55$.
|
||||
|
||||
Type: [UInt8](../data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT *, roundAge(*) FROM system.numbers WHERE number IN (0, 5, 20, 31, 37, 54, 72);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─number─┬─roundAge(number)─┐
|
||||
│ 0 │ 0 │
|
||||
│ 5 │ 17 │
|
||||
│ 20 │ 18 │
|
||||
│ 31 │ 25 │
|
||||
│ 37 │ 35 │
|
||||
│ 54 │ 45 │
|
||||
│ 72 │ 55 │
|
||||
└────────┴──────────────────┘
|
||||
```
|
||||
|
||||
## roundDown
|
||||
|
||||
Accepts a number and rounds it down to an element in the specified array. If the value is less than the lowest bound, the lowest bound is returned.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
roundDown(num, arr)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `num`: A number to round down. [Numeric](../data-types/int-uint.md).
|
||||
- `arr`: Array of elements to round `num` down to. [Array](../data-types/array.md) of [UInt](../data-types/int-uint.md)/[Float](../data-types/float.md) type.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Number rounded down to an element in `arr`. If the value is less than the lowest bound, the lowest bound is returned. [UInt](../data-types/int-uint.md)/[Float](../data-types/float.md) type deduced from the type of `arr`.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT *, roundDown(*, [3, 4, 5]) FROM system.numbers WHERE number IN (0, 1, 2, 3, 4, 5)
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─number─┬─roundDown(number, [3, 4, 5])─┐
|
||||
│ 0 │ 3 │
|
||||
│ 1 │ 3 │
|
||||
│ 2 │ 3 │
|
||||
│ 3 │ 3 │
|
||||
│ 4 │ 4 │
|
||||
│ 5 │ 5 │
|
||||
└────────┴──────────────────────────────┘
|
||||
```
|
||||
|
@ -88,20 +88,93 @@ Result:
|
||||
|
||||
## length
|
||||
|
||||
Returns the length of a string in bytes (not: in characters or Unicode code points).
|
||||
|
||||
The function also works for arrays.
|
||||
Returns the length of a string in bytes rather than in characters or Unicode code points. The function also works for arrays.
|
||||
|
||||
Alias: `OCTET_LENGTH`
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
length(s)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `s`: An input string or array. [String](../data-types/string)/[Array](../data-types/array).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Length of the string or array `s` in bytes. [UInt64](../data-types/int-uint).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT length('Hello, world!');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─length('Hello, world!')─┐
|
||||
│ 13 │
|
||||
└─────────────────────────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT length([1, 2, 3, 4]);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─length([1, 2, 3, 4])─┐
|
||||
│ 4 │
|
||||
└──────────────────────┘
|
||||
```
|
||||
|
||||
|
||||
## lengthUTF8
|
||||
|
||||
Returns the length of a string in Unicode code points (not: in bytes or characters). It assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
|
||||
Returns the length of a string in Unicode code points rather than in bytes or characters. It assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
|
||||
|
||||
Alias:
|
||||
Aliases:
|
||||
- `CHAR_LENGTH`
|
||||
- `CHARACTER_LENGTH`
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
lengthUTF8(s)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `s`: String containing valid UTF-8 encoded text. [String](../data-types/string).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Length of the string `s` in Unicode code points. [UInt64](../data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT lengthUTF8('Здравствуй, мир!');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─lengthUTF8('Здравствуй, мир!')─┐
|
||||
│ 16 │
|
||||
└────────────────────────────────┘
|
||||
```
|
||||
|
||||
## left
|
||||
|
||||
Returns a substring of string `s` with a specified `offset` starting from the left.
|
||||
@ -1055,6 +1128,34 @@ Result:
|
||||
|
||||
Like `base58Decode` but returns an empty string in case of error.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
tryBase58Decode(encoded)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `encoded`: [String](../../sql-reference/data-types/string.md) column or constant. If the string is not a valid Base58-encoded value, an empty string is returned.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- A string containing the decoded value of the argument.
|
||||
|
||||
**Examples**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT tryBase58Decode('3dc8KtHrwM') as res;
|
||||
```
|
||||
|
||||
```response
|
||||
┌─res─────┐
|
||||
│ Encoded │
|
||||
└─────────┘
|
||||
```
|
||||
|
||||
## base64Encode
|
||||
|
||||
Encodes a String or FixedString as base64.
|
||||
@ -1071,6 +1172,30 @@ Alias: `FROM_BASE64`.
|
||||
|
||||
Like `base64Decode` but returns an empty string in case of error.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
tryBase64Decode(encoded)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `encoded`: [String](../../sql-reference/data-types/string.md) column or constant. If the string is not a valid Base64-encoded value, an empty string is returned.
|
||||
|
||||
**Examples**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT tryBase64Decode('RW5jb2RlZA==') as res;
|
||||
```
|
||||
|
||||
```response
|
||||
┌─res─────┐
|
||||
│ Encoded │
|
||||
└─────────┘
|
||||
```
|
||||
|
||||
## endsWith {#endswith}
|
||||
|
||||
Returns whether string `str` ends with `suffix`.
|
||||
|
@ -817,6 +817,42 @@ Result:
|
||||
└─────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## flattenTuple
|
||||
|
||||
Returns a flattened `output` tuple from a nested named `input` tuple. Elements of the `output` tuple are the paths from the original `input` tuple. For instance: `Tuple(a Int, Tuple(b Int, c Int)) -> Tuple(a Int, b Int, c Int)`. `flattenTuple` can be used to select all paths from type `Object` as separate columns.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
flattenTuple(input)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `input`: Nested named tuple to flatten. [Tuple](../data-types/tuple).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- `output` tuple whose elements are paths from the original `input`. [Tuple](../data-types/tuple).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE t_flatten_tuple(t Tuple(t1 Nested(a UInt32, s String), b UInt32, t2 Tuple(k String, v UInt32))) ENGINE = Memory;
|
||||
INSERT INTO t_flatten_tuple VALUES (([(1, 'a'), (2, 'b')], 3, ('c', 4)));
|
||||
SELECT flattenTuple(t) FROM t_flatten_tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─flattenTuple(t)───────────┐
|
||||
│ ([1,2],['a','b'],3,'c',4) │
|
||||
└───────────────────────────┘
|
||||
```
|
||||
|
||||
## Distance functions
|
||||
|
||||
All supported functions are described in [distance functions documentation](../../sql-reference/functions/distance-functions.md).
|
||||
|
@ -6,24 +6,29 @@ sidebar_label: Embedded Dictionaries
|
||||
|
||||
# Functions for Working with Embedded Dictionaries
|
||||
|
||||
:::note
|
||||
In order for the functions below to work, the server config must specify the paths and addresses for getting all the embedded dictionaries. The dictionaries are loaded at the first call of any of these functions. If the reference lists can’t be loaded, an exception is thrown.
|
||||
|
||||
For information about creating reference lists, see the section “Dictionaries”.
|
||||
As such, the examples shown in this section will throw an exception in [ClickHouse Fiddle](https://fiddle.clickhouse.com/) and in quick release and production deployments by default, unless first configured.
|
||||
:::
|
||||
|
||||
For information about creating reference lists, see the section [“Dictionaries”](../dictionaries#embedded-dictionaries).
|
||||
|
||||
## Multiple Geobases
|
||||
|
||||
ClickHouse supports working with multiple alternative geobases (regional hierarchies) simultaneously, in order to support various perspectives on which countries certain regions belong to.
|
||||
|
||||
The ‘clickhouse-server’ config specifies the file with the regional hierarchy::`<path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file>`
|
||||
The ‘clickhouse-server’ config specifies the file with the regional hierarchy:
|
||||
|
||||
Besides this file, it also searches for files nearby that have the _ symbol and any suffix appended to the name (before the file extension).
|
||||
For example, it will also find the file `/opt/geo/regions_hierarchy_ua.txt`, if present.
|
||||
```<path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file>```
|
||||
|
||||
`ua` is called the dictionary key. For a dictionary without a suffix, the key is an empty string.
|
||||
Besides this file, it also searches for files nearby that have the `_` symbol and any suffix appended to the name (before the file extension).
|
||||
For example, it will also find the file `/opt/geo/regions_hierarchy_ua.txt`, if present. Here `ua` is called the dictionary key. For a dictionary without a suffix, the key is an empty string.
|
||||
|
||||
All the dictionaries are re-loaded in runtime (once every certain number of seconds, as defined in the builtin_dictionaries_reload_interval config parameter, or once an hour by default). However, the list of available dictionaries is defined one time, when the server starts.
|
||||
All the dictionaries are re-loaded during runtime (once every certain number of seconds, as defined in the [`builtin_dictionaries_reload_interval`](../../operations/server-configuration-parameters/settings#builtin-dictionaries-reload-interval) config parameter, or once an hour by default). However, the list of available dictionaries is defined once, when the server starts.
|
||||
|
||||
All functions for working with regions have an optional argument at the end – the dictionary key. It is referred to as the geobase.
|
||||
|
||||
Example:
|
||||
|
||||
``` sql
|
||||
@ -32,13 +37,116 @@ regionToCountry(RegionID, '') – Uses the default dictionary: /opt/geo/regions_
|
||||
regionToCountry(RegionID, 'ua') – Uses the dictionary for the 'ua' key: /opt/geo/regions_hierarchy_ua.txt
|
||||
```
|
||||
|
||||
### regionToCity(id\[, geobase\])
|
||||
### regionToName
|
||||
|
||||
Accepts a UInt32 number – the region ID from the geobase. If this region is a city or part of a city, it returns the region ID for the appropriate city. Otherwise, returns 0.
|
||||
Accepts a region ID and geobase and returns a string of the name of the region in the corresponding language. If the region with the specified ID does not exist, an empty string is returned.
|
||||
|
||||
### regionToArea(id\[, geobase\])
|
||||
**Syntax**
|
||||
|
||||
Converts a region to an area (type 5 in the geobase). In every other way, this function is the same as ‘regionToCity’.
|
||||
``` sql
|
||||
regionToName(id[, lang])
|
||||
```
|
||||
**Parameters**
|
||||
|
||||
- `id` — Region ID from the geobase. [UInt32](../data-types/int-uint).
|
||||
- `lang` — Language of the returned region name, e.g. `'en'`. [String](../data-types/string). Optional.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Name of the region in the language specified by the second argument. [String](../data-types/string).
|
||||
- Otherwise, an empty string.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT regionToName(number::UInt32,'en') FROM numbers(0,5);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─regionToName(CAST(number, 'UInt32'), 'en')─┐
|
||||
│ │
|
||||
│ World │
|
||||
│ USA │
|
||||
│ Colorado │
|
||||
│ Boulder County │
|
||||
└────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### regionToCity
|
||||
|
||||
Accepts a region ID from the geobase. If this region is a city or part of a city, it returns the region ID for the appropriate city. Otherwise, returns 0.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
regionToCity(id [, geobase])
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `id` — Region ID from the geobase. [UInt32](../data-types/int-uint).
|
||||
- `geobase` — Dictionary key. See [Multiple Geobases](#multiple-geobases). [String](../data-types/string). Optional.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Region ID for the appropriate city, if it exists. [UInt32](../data-types/int-uint).
|
||||
- 0, if there is none.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT regionToName(number::UInt32, 'en'), regionToCity(number::UInt32) AS id, regionToName(id, 'en') FROM numbers(13);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─regionToName(CAST(number, 'UInt32'), 'en')─┬─id─┬─regionToName(regionToCity(CAST(number, 'UInt32')), 'en')─┐
|
||||
│ │ 0 │ │
|
||||
│ World │ 0 │ │
|
||||
│ USA │ 0 │ │
|
||||
│ Colorado │ 0 │ │
|
||||
│ Boulder County │ 0 │ │
|
||||
│ Boulder │ 5 │ Boulder │
|
||||
│ China │ 0 │ │
|
||||
│ Sichuan │ 0 │ │
|
||||
│ Chengdu │ 8 │ Chengdu │
|
||||
│ America │ 0 │ │
|
||||
│ North America │ 0 │ │
|
||||
│ Eurasia │ 0 │ │
|
||||
│ Asia │ 0 │ │
|
||||
└────────────────────────────────────────────┴────┴──────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### regionToArea
|
||||
|
||||
Converts a region to an area (type 5 in the geobase). In every other way, this function is the same as [‘regionToCity’](#regiontocity).
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
regionToArea(id [, geobase])
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `id` — Region ID from the geobase. [UInt32](../data-types/int-uint).
|
||||
- `geobase` — Dictionary key. See [Multiple Geobases](#multiple-geobases). [String](../data-types/string). Optional.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Region ID for the appropriate area, if it exists. [UInt32](../data-types/int-uint).
|
||||
- 0, if there is none.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT DISTINCT regionToName(regionToArea(toUInt32(number), 'ua'))
|
||||
@ -46,6 +154,8 @@ FROM system.numbers
|
||||
LIMIT 15
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─regionToName(regionToArea(toUInt32(number), \'ua\'))─┐
|
||||
│ │
|
||||
@ -66,16 +176,38 @@ LIMIT 15
|
||||
└──────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### regionToDistrict(id\[, geobase\])
|
||||
### regionToDistrict
|
||||
|
||||
Converts a region to a federal district (type 4 in the geobase). In every other way, this function is the same as ‘regionToCity’.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
regionToDistrict(id [, geobase])
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `id` — Region ID from the geobase. [UInt32](../data-types/int-uint).
|
||||
- `geobase` — Dictionary key. See [Multiple Geobases](#multiple-geobases). [String](../data-types/string). Optional.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Region ID for the appropriate district, if it exists. [UInt32](../data-types/int-uint).
|
||||
- 0, if there is none.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT DISTINCT regionToName(regionToDistrict(toUInt32(number), 'ua'))
|
||||
FROM system.numbers
|
||||
LIMIT 15
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─regionToName(regionToDistrict(toUInt32(number), \'ua\'))─┐
|
||||
│ │
|
||||
@ -96,17 +228,103 @@ LIMIT 15
|
||||
└──────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### regionToCountry(id\[, geobase\])
|
||||
### regionToCountry
|
||||
|
||||
Converts a region to a country. In every other way, this function is the same as ‘regionToCity’.
|
||||
Example: `regionToCountry(toUInt32(213)) = 225` converts Moscow (213) to Russia (225).
|
||||
Converts a region to a country (type 3 in the geobase). In every other way, this function is the same as ‘regionToCity’.
|
||||
|
||||
### regionToContinent(id\[, geobase\])
|
||||
**Syntax**
|
||||
|
||||
Converts a region to a continent. In every other way, this function is the same as ‘regionToCity’.
|
||||
Example: `regionToContinent(toUInt32(213)) = 10001` converts Moscow (213) to Eurasia (10001).
|
||||
```sql
|
||||
regionToCountry(id [, geobase])
|
||||
```
|
||||
|
||||
### regionToTopContinent(id\[, geobase\])
|
||||
**Parameters**
|
||||
|
||||
- `id` — Region ID from the geobase. [UInt32](../data-types/int-uint).
|
||||
- `geobase` — Dictionary key. See [Multiple Geobases](#multiple-geobases). [String](../data-types/string). Optional.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Region ID for the appropriate country, if it exists. [UInt32](../data-types/int-uint).
|
||||
- 0, if there is none.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT regionToName(number::UInt32, 'en'), regionToCountry(number::UInt32) AS id, regionToName(id, 'en') FROM numbers(13);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─regionToName(CAST(number, 'UInt32'), 'en')─┬─id─┬─regionToName(regionToCountry(CAST(number, 'UInt32')), 'en')─┐
|
||||
│ │ 0 │ │
|
||||
│ World │ 0 │ │
|
||||
│ USA │ 2 │ USA │
|
||||
│ Colorado │ 2 │ USA │
|
||||
│ Boulder County │ 2 │ USA │
|
||||
│ Boulder │ 2 │ USA │
|
||||
│ China │ 6 │ China │
|
||||
│ Sichuan │ 6 │ China │
|
||||
│ Chengdu │ 6 │ China │
|
||||
│ America │ 0 │ │
|
||||
│ North America │ 0 │ │
|
||||
│ Eurasia │ 0 │ │
|
||||
│ Asia │ 0 │ │
|
||||
└────────────────────────────────────────────┴────┴─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### regionToContinent
|
||||
|
||||
Converts a region to a continent (type 1 in the geobase). In every other way, this function is the same as ‘regionToCity’.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
regionToContinent(id [, geobase])
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `id` — Region ID from the geobase. [UInt32](../data-types/int-uint).
|
||||
- `geobase` — Dictionary key. See [Multiple Geobases](#multiple-geobases). [String](../data-types/string). Optional.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Region ID for the appropriate continent, if it exists. [UInt32](../data-types/int-uint).
|
||||
- 0, if there is none.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT regionToName(number::UInt32, 'en'), regionToContinent(number::UInt32) AS id, regionToName(id, 'en') FROM numbers(13);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─regionToName(CAST(number, 'UInt32'), 'en')─┬─id─┬─regionToName(regionToContinent(CAST(number, 'UInt32')), 'en')─┐
|
||||
│ │ 0 │ │
|
||||
│ World │ 0 │ │
|
||||
│ USA │ 10 │ North America │
|
||||
│ Colorado │ 10 │ North America │
|
||||
│ Boulder County │ 10 │ North America │
|
||||
│ Boulder │ 10 │ North America │
|
||||
│ China │ 12 │ Asia │
|
||||
│ Sichuan │ 12 │ Asia │
|
||||
│ Chengdu │ 12 │ Asia │
|
||||
│ America │ 9 │ America │
|
||||
│ North America │ 10 │ North America │
|
||||
│ Eurasia │ 11 │ Eurasia │
|
||||
│ Asia │ 12 │ Asia │
|
||||
└────────────────────────────────────────────┴────┴───────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### regionToTopContinent
|
||||
|
||||
Finds the highest continent in the hierarchy for the region.
|
||||
|
||||
@ -116,37 +334,175 @@ Finds the highest continent in the hierarchy for the region.
|
||||
regionToTopContinent(id[, geobase])
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
**Parameters**
|
||||
|
||||
- `id` — Region ID from the geobase. [UInt32](../../sql-reference/data-types/int-uint.md).
|
||||
- `geobase` — Dictionary key. See [Multiple Geobases](#multiple-geobases). [String](../../sql-reference/data-types/string.md). Optional.
|
||||
- `id` — Region ID from the geobase. [UInt32](../data-types/int-uint).
|
||||
- `geobase` — Dictionary key. See [Multiple Geobases](#multiple-geobases). [String](../data-types/string). Optional.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Identifier of the top level continent (the latter when you climb the hierarchy of regions).
|
||||
- Identifier of the top-level continent (the last one when you climb the hierarchy of regions). [UInt32](../data-types/int-uint).
|
||||
- 0, if there is none.
|
||||
|
||||
Type: `UInt32`.
|
||||
**Example**
|
||||
|
||||
### regionToPopulation(id\[, geobase\])
|
||||
Query:
|
||||
|
||||
Gets the population for a region.
|
||||
The population can be recorded in files with the geobase. See the section “Dictionaries”.
|
||||
If the population is not recorded for the region, it returns 0.
|
||||
In the geobase, the population might be recorded for child regions, but not for parent regions.
|
||||
``` sql
|
||||
SELECT regionToName(number::UInt32, 'en'), regionToTopContinent(number::UInt32) AS id, regionToName(id, 'en') FROM numbers(13);
|
||||
```
|
||||
|
||||
### regionIn(lhs, rhs\[, geobase\])
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─regionToName(CAST(number, 'UInt32'), 'en')─┬─id─┬─regionToName(regionToTopContinent(CAST(number, 'UInt32')), 'en')─┐
|
||||
│ │ 0 │ │
|
||||
│ World │ 0 │ │
|
||||
│ USA │ 9 │ America │
|
||||
│ Colorado │ 9 │ America │
|
||||
│ Boulder County │ 9 │ America │
|
||||
│ Boulder │ 9 │ America │
|
||||
│ China │ 11 │ Eurasia │
|
||||
│ Sichuan │ 11 │ Eurasia │
|
||||
│ Chengdu │ 11 │ Eurasia │
|
||||
│ America │ 9 │ America │
|
||||
│ North America │ 9 │ America │
|
||||
│ Eurasia │ 11 │ Eurasia │
|
||||
│ Asia │ 11 │ Eurasia │
|
||||
└────────────────────────────────────────────┴────┴──────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### regionToPopulation
|
||||
|
||||
Gets the population for a region. The population can be recorded in files with the geobase. See the section [“Dictionaries”](../dictionaries#embedded-dictionaries). If the population is not recorded for the region, it returns 0. In the geobase, the population might be recorded for child regions, but not for parent regions.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
regionToPopulation(id[, geobase])
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `id` — Region ID from the geobase. [UInt32](../data-types/int-uint).
|
||||
- `geobase` — Dictionary key. See [Multiple Geobases](#multiple-geobases). [String](../data-types/string). Optional.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Population for the region. [UInt32](../data-types/int-uint).
|
||||
- 0, if there is none.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT regionToName(number::UInt32, 'en'), regionToPopulation(number::UInt32) AS population FROM numbers(13);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─regionToName(CAST(number, 'UInt32'), 'en')─┬─population─┐
|
||||
│ │ 0 │
|
||||
│ World │ 4294967295 │
|
||||
│ USA │ 330000000 │
|
||||
│ Colorado │ 5700000 │
|
||||
│ Boulder County │ 330000 │
|
||||
│ Boulder │ 100000 │
|
||||
│ China │ 1500000000 │
|
||||
│ Sichuan │ 83000000 │
|
||||
│ Chengdu │ 20000000 │
|
||||
│ America │ 1000000000 │
|
||||
│ North America │ 600000000 │
|
||||
│ Eurasia │ 4294967295 │
|
||||
│ Asia │ 4294967295 │
|
||||
└────────────────────────────────────────────┴────────────┘
|
||||
```
|
||||
|
||||
### regionIn
|
||||
|
||||
Checks whether a `lhs` region belongs to a `rhs` region. Returns a UInt8 number equal to 1 if it belongs, or 0 if it does not belong.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
regionIn(lhs, rhs[, geobase])
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `lhs` — Lhs region ID from the geobase. [UInt32](../../sql-reference/data-types/int-uint).
|
||||
- `rhs` — Rhs region ID from the geobase. [UInt32](../../sql-reference/data-types/int-uint).
|
||||
- `geobase` — Dictionary key. See [Multiple Geobases](#multiple-geobases). [String](../data-types/string). Optional.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- 1, if it belongs. [UInt8](../../sql-reference/data-types/int-uint).
|
||||
- 0, if it doesn't belong.
|
||||
|
||||
**Implementation details**
|
||||
|
||||
Checks whether a ‘lhs’ region belongs to a ‘rhs’ region. Returns a UInt8 number equal to 1 if it belongs, or 0 if it does not belong.
|
||||
The relationship is reflexive – any region also belongs to itself.
|
||||
|
||||
### regionHierarchy(id\[, geobase\])
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT regionToName(n1.number::UInt32, 'en') || (regionIn(n1.number::UInt32, n2.number::UInt32) ? ' is in ' : ' is not in ') || regionToName(n2.number::UInt32, 'en') FROM numbers(1,2) AS n1 CROSS JOIN numbers(1,5) AS n2;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
World is in World
|
||||
World is not in USA
|
||||
World is not in Colorado
|
||||
World is not in Boulder County
|
||||
World is not in Boulder
|
||||
USA is in World
|
||||
USA is in USA
|
||||
USA is not in Colorado
|
||||
USA is not in Boulder County
|
||||
USA is not in Boulder
|
||||
```
|
||||
|
||||
### regionHierarchy
|
||||
|
||||
Accepts a UInt32 number – the region ID from the geobase. Returns an array of region IDs consisting of the passed region and all parents along the chain.
|
||||
Example: `regionHierarchy(toUInt32(213)) = [213,1,3,225,10001,10000]`.
|
||||
|
||||
### regionToName(id\[, lang\])
|
||||
**Syntax**
|
||||
|
||||
Accepts a UInt32 number – the region ID from the geobase. A string with the name of the language can be passed as a second argument. Supported languages are: ru, en, ua, uk, by, kz, tr. If the second argument is omitted, the language ‘ru’ is used. If the language is not supported, an exception is thrown. Returns a string – the name of the region in the corresponding language. If the region with the specified ID does not exist, an empty string is returned.
|
||||
``` sql
|
||||
regionHierarchy(id[, geobase])
|
||||
```
|
||||
|
||||
`ua` and `uk` both mean Ukrainian.
|
||||
**Parameters**
|
||||
|
||||
- `id` — Region ID from the geobase. [UInt32](../data-types/int-uint).
|
||||
- `geobase` — Dictionary key. See [Multiple Geobases](#multiple-geobases). [String](../data-types/string). Optional.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Array of region IDs consisting of the passed region and all parents along the chain. [Array](../data-types/array)([UInt32](../data-types/int-uint)).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT regionHierarchy(number::UInt32) AS arr, arrayMap(id -> regionToName(id, 'en'), arr) FROM numbers(5);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─arr────────────┬─arrayMap(lambda(tuple(id), regionToName(id, 'en')), regionHierarchy(CAST(number, 'UInt32')))─┐
|
||||
│ [] │ [] │
|
||||
│ [1] │ ['World'] │
|
||||
│ [2,10,9,1] │ ['USA','North America','America','World'] │
|
||||
│ [3,2,10,9,1] │ ['Colorado','USA','North America','America','World'] │
|
||||
│ [4,3,2,10,9,1] │ ['Boulder County','Colorado','USA','North America','America','World'] │
|
||||
└────────────────┴──────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
@ -19,25 +19,51 @@ Subquery is another `SELECT` query that may be specified in parenthesis inside `
|
||||
|
||||
## FINAL Modifier
|
||||
|
||||
When `FINAL` is specified, ClickHouse fully merges the data before returning the result and thus performs all data transformations that happen during merges for the given table engine.
|
||||
When `FINAL` is specified, ClickHouse fully merges the data before returning the result. This also performs all data transformations that happen during merges for the given table engine.
|
||||
|
||||
It is applicable when selecting data from ReplacingMergeTree, SummingMergeTree, AggregatingMergeTree, CollapsingMergeTree and VersionedCollapsingMergeTree tables.
|
||||
It is applicable when selecting data from tables using the following table engines:
|
||||
- `ReplacingMergeTree`
|
||||
- `SummingMergeTree`
|
||||
- `AggregatingMergeTree`
|
||||
- `CollapsingMergeTree`
|
||||
- `VersionedCollapsingMergeTree`
|
||||
|
||||
`SELECT` queries with `FINAL` are executed in parallel. The [max_final_threads](../../../operations/settings/settings.md#max-final-threads) setting limits the number of threads used.
|
||||
|
||||
There are drawbacks to using `FINAL` (see below).
|
||||
|
||||
### Drawbacks
|
||||
|
||||
Queries that use `FINAL` are executed slightly slower than similar queries that do not, because:
|
||||
Queries that use `FINAL` execute slightly slower than similar queries that do not use `FINAL` because:
|
||||
|
||||
- Data is merged during query execution.
|
||||
- Queries with `FINAL` read primary key columns in addition to the columns specified in the query.
|
||||
- Queries with `FINAL` may read primary key columns in addition to the columns specified in the query.
|
||||
|
||||
`FINAL` requires additional compute and memory resources, as the processing that normally would occur at merge time must occur in memory at the time of the query. However, using FINAL is sometimes necessary in order to produce accurate results, and is less expensive than running `OPTIMIZE` to force a merge. It is also sometimes possible to use different queries that assume the background processes of the `MergeTree` engine haven’t happened yet and deal with it by applying aggregation (for example, to discard duplicates). If you need to use FINAL in your queries in order to get the required results, then it is okay to do so but be aware of the additional processing required.
|
||||
`FINAL` requires additional compute and memory resources because the processing that normally would occur at merge time must occur in memory at the time of the query. However, using FINAL is sometimes necessary in order to produce accurate results (as data may not yet be fully merged). It is less expensive than running `OPTIMIZE` to force a merge.
|
||||
|
||||
As an alternative to using `FINAL`, it is sometimes possible to use different queries that assume the background processes of the `MergeTree` engine have not yet occurred and deal with it by applying an aggregation (for example, to discard duplicates). If you need to use `FINAL` in your queries in order to get the required results, it is okay to do so but be aware of the additional processing required.
|
||||
|
||||
`FINAL` can be applied automatically using [FINAL](../../../operations/settings/settings.md#final) setting to all tables in a query using a session or a user profile.
|
||||
|
||||
### Example Usage
|
||||
|
||||
**Using the `FINAL` keyword**
|
||||
|
||||
```sql
|
||||
SELECT x, y FROM mytable FINAL WHERE x > 1;
|
||||
```
|
||||
|
||||
**Using `FINAL` as a query-level setting**
|
||||
|
||||
```sql
|
||||
SELECT x, y FROM mytable WHERE x > 1 SETTINGS final = 1;
|
||||
```
|
||||
|
||||
**Using `FINAL` as a session-level setting**
|
||||
|
||||
```sql
|
||||
SET final = 1;
|
||||
SELECT x, y FROM mytable WHERE x > 1;
|
||||
```
|
||||
|
||||
## Implementation Details
|
||||
|
||||
If the `FROM` clause is omitted, data will be read from the `system.one` table.
|
||||
|
@ -3447,17 +3447,6 @@ SELECT
|
||||
FROM fuse_tbl
|
||||
```
|
||||
|
||||
## allow_experimental_database_replicated {#allow_experimental_database_replicated}
|
||||
|
||||
Позволяет создавать базы данных с движком [Replicated](../../engines/database-engines/replicated.md).
|
||||
|
||||
Возможные значения:
|
||||
|
||||
- 0 — Disabled.
|
||||
- 1 — Enabled.
|
||||
|
||||
Значение по умолчанию: `0`.
|
||||
|
||||
## database_replicated_initial_query_timeout_sec {#database_replicated_initial_query_timeout_sec}
|
||||
|
||||
Устанавливает, как долго начальный DDL-запрос должен ждать, пока реплицированная база данных обработает предыдущие записи очереди DDL, в секундах.
|
||||
|
@ -29,6 +29,16 @@ slug: /ru/operations/system-tables/text_log
|
||||
- `source_file` (LowCardinality(String)) — исходный файл, из которого была сделана запись.
|
||||
- `source_line` (UInt64) — исходная строка, из которой была сделана запись.
|
||||
- `message_format_string` (LowCardinality(String)) — форматная строка, с помощью которой было отформатировано сообщение.
|
||||
- `value1` (String) - аргумент 1, который использовался для форматирования сообщения.
|
||||
- `value2` (String) - аргумент 2, который использовался для форматирования сообщения.
|
||||
- `value3` (String) - аргумент 3, который использовался для форматирования сообщения.
|
||||
- `value4` (String) - аргумент 4, который использовался для форматирования сообщения.
|
||||
- `value5` (String) - аргумент 5, который использовался для форматирования сообщения.
|
||||
- `value6` (String) - аргумент 6, который использовался для форматирования сообщения.
|
||||
- `value7` (String) - аргумент 7, который использовался для форматирования сообщения.
|
||||
- `value8` (String) - аргумент 8, который использовался для форматирования сообщения.
|
||||
- `value9` (String) - аргумент 9, который использовался для форматирования сообщения.
|
||||
- `value10` (String) - аргумент 10, который использовался для форматирования сообщения.
|
||||
|
||||
**Пример**
|
||||
|
||||
@ -53,4 +63,14 @@ revision: 54440
|
||||
source_file: /ClickHouse/src/Interpreters/DNSCacheUpdater.cpp; void DB::DNSCacheUpdater::start()
|
||||
source_line: 45
|
||||
message_format_string: Update period {} seconds
|
||||
value1: 15
|
||||
value2:
|
||||
value3:
|
||||
value4:
|
||||
value5:
|
||||
value6:
|
||||
value7:
|
||||
value8:
|
||||
value9:
|
||||
value10:
|
||||
```
|
||||
|
@ -5,7 +5,7 @@ sidebar_position: 106
|
||||
|
||||
# argMax {#agg-function-argmax}
|
||||
|
||||
Вычисляет значение `arg` при максимальном значении `val`.
|
||||
Вычисляет значение `arg` при максимальном значении `val`. Если несколько строк имеют одинаковое максимальное значение `val`, то возвращаемое `arg` не детерминировано. Обе части, `arg` и `max`, ведут себя как агрегатные функции: они пропускают `NULL` во время обработки и возвращают не `NULL` значения, если такие доступны.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
@ -49,3 +49,60 @@ SELECT argMax(user, salary), argMax(tuple(user, salary), salary) FROM salary;
|
||||
│ director │ ('director',5000) │
|
||||
└──────────────────────┴─────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Дополнительный пример**
|
||||
|
||||
```sql
|
||||
CREATE TABLE test
|
||||
(
|
||||
a Nullable(String),
|
||||
b Nullable(Int64)
|
||||
)
|
||||
ENGINE = Memory AS
|
||||
SELECT *
|
||||
FROM VALUES(('a', 1), ('b', 2), ('c', 2), (NULL, 3), (NULL, NULL), ('d', NULL));
|
||||
|
||||
select * from test;
|
||||
┌─a────┬────b─┐
|
||||
│ a │ 1 │
|
||||
│ b │ 2 │
|
||||
│ c │ 2 │
|
||||
│ ᴺᵁᴸᴸ │ 3 │
|
||||
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │
|
||||
│ d │ ᴺᵁᴸᴸ │
|
||||
└──────┴──────┘
|
||||
|
||||
SELECT argMax(a, b), max(b) FROM test;
|
||||
┌─argMax(a, b)─┬─max(b)─┐
|
||||
│ b │ 3 │ -- argMax = 'b' потому что это первое not Null значение, max(b) из другой строки!
|
||||
└──────────────┴────────┘
|
||||
|
||||
SELECT argMax(tuple(a), b) FROM test;
|
||||
┌─argMax(tuple(a), b)─┐
|
||||
│ (NULL)              │ -- Кортеж `Tuple`, который содержит только `NULL` значения, является не `NULL` кортежем, поэтому агрегатные функции не будут пропускать эту строку с `NULL` значениями.
|
||||
└─────────────────────┘
|
||||
|
||||
SELECT (argMax((a, b), b) as t).1 argMaxA, t.2 argMaxB FROM test;
|
||||
┌─argMaxA─┬─argMaxB─┐
|
||||
│ ᴺᵁᴸᴸ    │       3 │ -- Вы можете использовать кортеж Tuple и получить оба значения для соответствующего max(b).
|
||||
└─────────┴─────────┘
|
||||
|
||||
SELECT argMax(a, b), max(b) FROM test WHERE a IS NULL AND b IS NULL;
|
||||
┌─argMax(a, b)─┬─max(b)─┐
|
||||
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ -- Все агрегированные строки содержат хотя бы одно `NULL` значение, поэтому все строки пропускаются и результатом будет `NULL`.
|
||||
└──────────────┴────────┘
|
||||
|
||||
SELECT argMax(a, (b,a)) FROM test;
|
||||
┌─argMax(a, tuple(b, a))─┐
|
||||
│ c │ -- Есть две строки с b=2, кортеж `Tuple` в функции `Max` позволяет получить не первый `arg`.
|
||||
└────────────────────────┘
|
||||
|
||||
SELECT argMax(a, tuple(b)) FROM test;
|
||||
┌─argMax(a, tuple(b))─┐
|
||||
│ b │ -- Кортеж `Tuple` может использоваться в `Max`, чтобы не пропускать `NULL` значения в `Max`.
|
||||
└─────────────────────┘
|
||||
```
|
||||
|
||||
**Смотрите также**
|
||||
|
||||
- [Tuple](/docs/ru/sql-reference/data-types/tuple.md)
|
||||
|
@ -918,11 +918,13 @@ bool Client::processWithFuzzing(const String & full_query)
|
||||
}
|
||||
|
||||
|
||||
void Client::printHelpMessage(const OptionsDescription & options_description)
|
||||
void Client::printHelpMessage(const OptionsDescription & options_description, bool verbose)
|
||||
{
|
||||
std::cout << options_description.main_description.value() << "\n";
|
||||
std::cout << options_description.external_description.value() << "\n";
|
||||
std::cout << options_description.hosts_and_ports_description.value() << "\n";
|
||||
if (verbose)
|
||||
std::cout << "All settings are documented at https://clickhouse.com/docs/en/operations/settings/settings.\n\n";
|
||||
std::cout << "In addition, --param_name=value can be specified for substitution of parameters for parametrized queries.\n";
|
||||
std::cout << "\nSee also: https://clickhouse.com/docs/en/integrations/sql-clients/cli\n";
|
||||
}
|
||||
|
@ -25,7 +25,7 @@ protected:
|
||||
|
||||
String getName() const override { return "client"; }
|
||||
|
||||
void printHelpMessage(const OptionsDescription & options_description) override;
|
||||
void printHelpMessage(const OptionsDescription & options_description, bool verbose) override;
|
||||
|
||||
void addOptions(OptionsDescription & options_description) override;
|
||||
|
||||
|
@ -729,6 +729,15 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
|
||||
}
|
||||
}
|
||||
|
||||
/// Don't allow relative paths because install script may cd to / when installing
|
||||
/// And having path=./ may break the system
|
||||
if (log_path.is_relative())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Log path is relative: {}", log_path.string());
|
||||
if (data_path.is_relative())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Data path is relative: {}", data_path.string());
|
||||
if (pid_path.is_relative())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Pid path is relative: {}", pid_path.string());
|
||||
|
||||
/// Create directories for data and log.
|
||||
|
||||
if (fs::exists(log_path))
|
||||
|
@ -774,10 +774,12 @@ void LocalServer::processConfig()
|
||||
}
|
||||
|
||||
|
||||
void LocalServer::printHelpMessage([[maybe_unused]] const OptionsDescription & options_description)
|
||||
void LocalServer::printHelpMessage(const OptionsDescription & options_description, bool verbose)
|
||||
{
|
||||
std::cout << getHelpHeader() << "\n";
|
||||
std::cout << options_description.main_description.value() << "\n";
|
||||
if (verbose)
|
||||
std::cout << "All settings are documented at https://clickhouse.com/docs/en/operations/settings/settings.\n\n";
|
||||
std::cout << getHelpFooter() << "\n";
|
||||
std::cout << "In addition, --param_name=value can be specified for substitution of parameters for parametrized queries.\n";
|
||||
std::cout << "\nSee also: https://clickhouse.com/docs/en/operations/utilities/clickhouse-local/\n";
|
||||
|
@ -36,7 +36,7 @@ protected:
|
||||
|
||||
String getName() const override { return "local"; }
|
||||
|
||||
void printHelpMessage(const OptionsDescription & options_description) override;
|
||||
void printHelpMessage(const OptionsDescription & options_description, bool verbose) override;
|
||||
|
||||
void addOptions(OptionsDescription & options_description) override;
|
||||
|
||||
|
@ -280,11 +280,11 @@ void AccessControl::setUpFromMainConfig(const Poco::Util::AbstractConfiguration
|
||||
|
||||
/// Optional improvements in access control system.
|
||||
/// The default values are false because we need to be compatible with earlier access configurations
|
||||
setEnabledUsersWithoutRowPoliciesCanReadRows(config_.getBool("access_control_improvements.users_without_row_policies_can_read_rows", false));
|
||||
setOnClusterQueriesRequireClusterGrant(config_.getBool("access_control_improvements.on_cluster_queries_require_cluster_grant", false));
|
||||
setSelectFromSystemDatabaseRequiresGrant(config_.getBool("access_control_improvements.select_from_system_db_requires_grant", false));
|
||||
setSelectFromInformationSchemaRequiresGrant(config_.getBool("access_control_improvements.select_from_information_schema_requires_grant", false));
|
||||
setSettingsConstraintsReplacePrevious(config_.getBool("access_control_improvements.settings_constraints_replace_previous", false));
|
||||
setEnabledUsersWithoutRowPoliciesCanReadRows(config_.getBool("access_control_improvements.users_without_row_policies_can_read_rows", true));
|
||||
setOnClusterQueriesRequireClusterGrant(config_.getBool("access_control_improvements.on_cluster_queries_require_cluster_grant", true));
|
||||
setSelectFromSystemDatabaseRequiresGrant(config_.getBool("access_control_improvements.select_from_system_db_requires_grant", true));
|
||||
setSelectFromInformationSchemaRequiresGrant(config_.getBool("access_control_improvements.select_from_information_schema_requires_grant", true));
|
||||
setSettingsConstraintsReplacePrevious(config_.getBool("access_control_improvements.settings_constraints_replace_previous", true));
|
||||
setTableEnginesRequireGrant(config_.getBool("access_control_improvements.table_engines_require_grant", false));
|
||||
|
||||
addStoragesFromMainConfig(config_, config_path_, get_zookeeper_function_);
|
||||
@ -607,7 +607,8 @@ AuthResult AccessControl::authenticate(const Credentials & credentials, const Po
|
||||
/// We use the same message for all authentication failures because we don't want to give away any unnecessary information for security reasons,
|
||||
/// only the log will show the exact reason.
|
||||
throw Exception(PreformattedMessage{message.str(),
|
||||
"{}: Authentication failed: password is incorrect, or there is no user with such name.{}"},
|
||||
"{}: Authentication failed: password is incorrect, or there is no user with such name.{}",
|
||||
std::vector<std::string>{credentials.getUserName()}},
|
||||
ErrorCodes::AUTHENTICATION_FAILED);
|
||||
}
|
||||
}
|
||||
|
@ -56,7 +56,9 @@ void IdentifierNode::updateTreeHashImpl(HashState & state, CompareOptions) const
|
||||
|
||||
QueryTreeNodePtr IdentifierNode::cloneImpl() const
|
||||
{
|
||||
return std::make_shared<IdentifierNode>(identifier);
|
||||
auto clone_identifier_node = std::make_shared<IdentifierNode>(identifier);
|
||||
clone_identifier_node->table_expression_modifiers = table_expression_modifiers;
|
||||
return clone_identifier_node;
|
||||
}
|
||||
|
||||
ASTPtr IdentifierNode::toASTImpl(const ConvertToASTOptions & /* options */) const
|
||||
|
@ -1,9 +1,10 @@
|
||||
#include <Analyzer/Passes/QueryAnalysisPass.h>
|
||||
|
||||
#include <boost/algorithm/string.hpp>
|
||||
|
||||
#include <Common/checkStackSize.h>
|
||||
#include <Common/NamePrompter.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Analyzer/FunctionSecretArgumentsFinderTreeNode.h>
|
||||
|
||||
#include <IO/WriteBuffer.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
@ -81,8 +82,8 @@
|
||||
#include <Analyzer/QueryTreeBuilder.h>
|
||||
#include <Analyzer/IQueryTreeNode.h>
|
||||
#include <Analyzer/Identifier.h>
|
||||
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <Analyzer/FunctionSecretArgumentsFinderTreeNode.h>
|
||||
#include <Analyzer/RecursiveCTE.h>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
@ -740,7 +741,7 @@ struct IdentifierResolveScope
|
||||
/// Identifier lookup to result
|
||||
std::unordered_map<IdentifierLookup, IdentifierResolveState, IdentifierLookupHash> identifier_lookup_to_resolve_state;
|
||||
|
||||
/// Lambda argument can be expression like constant, column, or it can be function
|
||||
/// Argument can be expression like constant, column, function or table expression
|
||||
std::unordered_map<std::string, QueryTreeNodePtr> expression_argument_name_to_node;
|
||||
|
||||
/// Alias name to query expression node
|
||||
@ -1464,7 +1465,8 @@ private:
|
||||
/// Lambdas that are currently in resolve process
|
||||
std::unordered_set<IQueryTreeNode *> lambdas_in_resolve_process;
|
||||
|
||||
std::unordered_set<std::string_view> cte_in_resolve_process;
|
||||
/// CTEs that are currently in resolve process
|
||||
std::unordered_set<std::string_view> ctes_in_resolve_process;
|
||||
|
||||
/// Function name to user defined lambda map
|
||||
std::unordered_map<std::string, QueryTreeNodePtr> function_name_to_user_defined_lambda;
|
||||
@ -2148,9 +2150,9 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden
|
||||
else
|
||||
{
|
||||
/** Make unique column names for tuple.
|
||||
*
|
||||
* Example: SELECT (SELECT 2 AS x, x)
|
||||
*/
|
||||
*
|
||||
* Example: SELECT (SELECT 2 AS x, x)
|
||||
*/
|
||||
makeUniqueColumnNamesInBlock(block);
|
||||
|
||||
scalar_block.insert({
|
||||
@ -3981,6 +3983,9 @@ IdentifierResolveResult QueryAnalyzer::tryResolveIdentifierInParentScopes(const
|
||||
auto * union_node = resolved_identifier->as<UnionNode>();
|
||||
|
||||
bool is_cte = (subquery_node && subquery_node->isCTE()) || (union_node && union_node->isCTE());
|
||||
bool is_table_from_expression_arguments = lookup_result.resolve_place == IdentifierResolvePlace::EXPRESSION_ARGUMENTS &&
|
||||
resolved_identifier->getNodeType() == QueryTreeNodeType::TABLE;
|
||||
bool is_valid_table_expression = is_cte || is_table_from_expression_arguments;
|
||||
|
||||
/** From parent scopes we can resolve table identifiers only as CTE.
|
||||
* Example: SELECT (SELECT 1 FROM a) FROM test_table AS a;
|
||||
@ -3988,14 +3993,10 @@ IdentifierResolveResult QueryAnalyzer::tryResolveIdentifierInParentScopes(const
|
||||
* During child scope table identifier resolve a, table node test_table with alias a from parent scope
|
||||
* is invalid.
|
||||
*/
|
||||
if (identifier_lookup.isTableExpressionLookup() && !is_cte)
|
||||
if (identifier_lookup.isTableExpressionLookup() && !is_valid_table_expression)
|
||||
continue;
|
||||
|
||||
if (is_cte)
|
||||
{
|
||||
return lookup_result;
|
||||
}
|
||||
else if (resolved_identifier->as<ConstantNode>())
|
||||
if (is_valid_table_expression || resolved_identifier->as<ConstantNode>())
|
||||
{
|
||||
return lookup_result;
|
||||
}
|
||||
@ -4071,13 +4072,9 @@ IdentifierResolveResult QueryAnalyzer::tryResolveIdentifier(const IdentifierLook
|
||||
|
||||
if (it->second.resolve_result.isResolved() &&
|
||||
scope.use_identifier_lookup_to_result_cache &&
|
||||
!scope.non_cached_identifier_lookups_during_expression_resolve.contains(identifier_lookup))
|
||||
{
|
||||
if (!it->second.resolve_result.isResolvedFromCTEs() || !cte_in_resolve_process.contains(identifier_lookup.identifier.getFullName()))
|
||||
{
|
||||
return it->second.resolve_result;
|
||||
}
|
||||
}
|
||||
!scope.non_cached_identifier_lookups_during_expression_resolve.contains(identifier_lookup) &&
|
||||
(!it->second.resolve_result.isResolvedFromCTEs() || !ctes_in_resolve_process.contains(identifier_lookup.identifier.getFullName())))
|
||||
return it->second.resolve_result;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -4150,7 +4147,7 @@ IdentifierResolveResult QueryAnalyzer::tryResolveIdentifier(const IdentifierLook
|
||||
/// To accomplish this behaviour it's not allowed to resolve identifiers to
|
||||
/// CTE that is being resolved.
|
||||
if (cte_query_node_it != scope.cte_name_to_query_node.end()
|
||||
&& !cte_in_resolve_process.contains(full_name))
|
||||
&& !ctes_in_resolve_process.contains(full_name))
|
||||
{
|
||||
resolve_result.resolved_identifier = cte_query_node_it->second;
|
||||
resolve_result.resolve_place = IdentifierResolvePlace::CTE;
|
||||
@ -6296,14 +6293,14 @@ ProjectionNames QueryAnalyzer::resolveExpressionNode(QueryTreeNodePtr & node, Id
|
||||
///
|
||||
/// In this example argument of function `in` is being resolve here. If CTE `test1` is not forbidden,
|
||||
/// `test1` is resolved to CTE (not to the table) in `initializeQueryJoinTreeNode` function.
|
||||
cte_in_resolve_process.insert(cte_name);
|
||||
ctes_in_resolve_process.insert(cte_name);
|
||||
|
||||
if (subquery_node)
|
||||
resolveQuery(resolved_identifier_node, subquery_scope);
|
||||
else
|
||||
resolveUnion(resolved_identifier_node, subquery_scope);
|
||||
|
||||
cte_in_resolve_process.erase(cte_name);
|
||||
ctes_in_resolve_process.erase(cte_name);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -7874,7 +7871,7 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
|
||||
auto & query_node_typed = query_node->as<QueryNode &>();
|
||||
|
||||
if (query_node_typed.isCTE())
|
||||
cte_in_resolve_process.insert(query_node_typed.getCTEName());
|
||||
ctes_in_resolve_process.insert(query_node_typed.getCTEName());
|
||||
|
||||
bool is_rollup_or_cube = query_node_typed.isGroupByWithRollup() || query_node_typed.isGroupByWithCube();
|
||||
|
||||
@ -7956,7 +7953,6 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
|
||||
auto * union_node = node->as<UnionNode>();
|
||||
|
||||
bool subquery_is_cte = (subquery_node && subquery_node->isCTE()) || (union_node && union_node->isCTE());
|
||||
|
||||
if (!subquery_is_cte)
|
||||
continue;
|
||||
|
||||
@ -8213,7 +8209,7 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
|
||||
query_node_typed.resolveProjectionColumns(std::move(projection_columns));
|
||||
|
||||
if (query_node_typed.isCTE())
|
||||
cte_in_resolve_process.erase(query_node_typed.getCTEName());
|
||||
ctes_in_resolve_process.erase(query_node_typed.getCTEName());
|
||||
}
|
||||
|
||||
void QueryAnalyzer::resolveUnion(const QueryTreeNodePtr & union_node, IdentifierResolveScope & scope)
|
||||
@ -8221,13 +8217,56 @@ void QueryAnalyzer::resolveUnion(const QueryTreeNodePtr & union_node, Identifier
|
||||
auto & union_node_typed = union_node->as<UnionNode &>();
|
||||
|
||||
if (union_node_typed.isCTE())
|
||||
cte_in_resolve_process.insert(union_node_typed.getCTEName());
|
||||
ctes_in_resolve_process.insert(union_node_typed.getCTEName());
|
||||
|
||||
auto & queries_nodes = union_node_typed.getQueries().getNodes();
|
||||
|
||||
for (auto & query_node : queries_nodes)
|
||||
std::optional<RecursiveCTETable> recursive_cte_table;
|
||||
TableNodePtr recursive_cte_table_node;
|
||||
|
||||
if (union_node_typed.isCTE() && union_node_typed.isRecursiveCTE())
|
||||
{
|
||||
auto & non_recursive_query = queries_nodes[0];
|
||||
bool non_recursive_query_is_query_node = non_recursive_query->getNodeType() == QueryTreeNodeType::QUERY;
|
||||
auto & non_recursive_query_mutable_context = non_recursive_query_is_query_node ? non_recursive_query->as<QueryNode &>().getMutableContext()
|
||||
: non_recursive_query->as<UnionNode &>().getMutableContext();
|
||||
|
||||
IdentifierResolveScope non_recursive_subquery_scope(non_recursive_query, &scope /*parent_scope*/);
|
||||
non_recursive_subquery_scope.subquery_depth = scope.subquery_depth + 1;
|
||||
|
||||
if (non_recursive_query_is_query_node)
|
||||
resolveQuery(non_recursive_query, non_recursive_subquery_scope);
|
||||
else
|
||||
resolveUnion(non_recursive_query, non_recursive_subquery_scope);
|
||||
|
||||
auto temporary_table_columns = non_recursive_query_is_query_node
|
||||
? non_recursive_query->as<QueryNode &>().getProjectionColumns()
|
||||
: non_recursive_query->as<UnionNode &>().computeProjectionColumns();
|
||||
|
||||
auto temporary_table_holder = std::make_shared<TemporaryTableHolder>(
|
||||
non_recursive_query_mutable_context,
|
||||
ColumnsDescription{NamesAndTypesList{temporary_table_columns.begin(), temporary_table_columns.end()}},
|
||||
ConstraintsDescription{},
|
||||
nullptr /*query*/,
|
||||
true /*create_for_global_subquery*/);
|
||||
auto temporary_table_storage = temporary_table_holder->getTable();
|
||||
|
||||
recursive_cte_table_node = std::make_shared<TableNode>(temporary_table_storage, non_recursive_query_mutable_context);
|
||||
recursive_cte_table_node->setTemporaryTableName(union_node_typed.getCTEName());
|
||||
|
||||
recursive_cte_table.emplace(std::move(temporary_table_holder), std::move(temporary_table_storage), std::move(temporary_table_columns));
|
||||
}
|
||||
|
||||
size_t queries_nodes_size = queries_nodes.size();
|
||||
for (size_t i = recursive_cte_table.has_value(); i < queries_nodes_size; ++i)
|
||||
{
|
||||
auto & query_node = queries_nodes[i];
|
||||
|
||||
IdentifierResolveScope subquery_scope(query_node, &scope /*parent_scope*/);
|
||||
|
||||
if (recursive_cte_table_node)
|
||||
subquery_scope.expression_argument_name_to_node[union_node_typed.getCTEName()] = recursive_cte_table_node;
|
||||
|
||||
auto query_node_type = query_node->getNodeType();
|
||||
|
||||
if (query_node_type == QueryTreeNodeType::QUERY)
|
||||
@ -8247,8 +8286,19 @@ void QueryAnalyzer::resolveUnion(const QueryTreeNodePtr & union_node, Identifier
|
||||
}
|
||||
}
|
||||
|
||||
if (recursive_cte_table && isStorageUsedInTree(recursive_cte_table->storage, union_node.get()))
|
||||
{
|
||||
if (union_node_typed.getUnionMode() != SelectUnionMode::UNION_ALL)
|
||||
throw Exception(ErrorCodes::UNSUPPORTED_METHOD,
|
||||
"Recursive CTE subquery {} with {} union mode is unsupported, only UNION ALL union mode is supported",
|
||||
union_node_typed.formatASTForErrorMessage(),
|
||||
toString(union_node_typed.getUnionMode()));
|
||||
|
||||
union_node_typed.setRecursiveCTETable(std::move(*recursive_cte_table));
|
||||
}
|
||||
|
||||
if (union_node_typed.isCTE())
|
||||
cte_in_resolve_process.erase(union_node_typed.getCTEName());
|
||||
ctes_in_resolve_process.erase(union_node_typed.getCTEName());
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -51,10 +51,15 @@ public:
|
||||
{
|
||||
const auto & second_const_value = second_const_node->getValue();
|
||||
if (second_const_value.isNull()
|
||||
|| (lower_name == "sum" && isInt64OrUInt64FieldType(second_const_value.getType()) && second_const_value.get<UInt64>() == 0))
|
||||
|| (lower_name == "sum" && isInt64OrUInt64FieldType(second_const_value.getType()) && second_const_value.get<UInt64>() == 0
|
||||
&& !function_node->getResultType()->isNullable()))
|
||||
{
|
||||
/// avg(if(cond, a, null)) -> avgIf(a, cond)
|
||||
/// avg(if(cond, nullable_a, null)) -> avgIfOrNull(a, cond)
|
||||
|
||||
/// sum(if(cond, a, 0)) -> sumIf(a, cond)
|
||||
/// sum(if(cond, nullable_a, 0)) **is not** equivalent to sumIfOrNull(cond, nullable_a) as
|
||||
/// it changes the output when no rows pass the condition (from 0 to NULL)
|
||||
function_arguments_nodes.resize(2);
|
||||
function_arguments_nodes[0] = std::move(if_arguments_nodes[1]);
|
||||
function_arguments_nodes[1] = std::move(if_arguments_nodes[0]);
|
||||
@ -66,10 +71,13 @@ public:
|
||||
{
|
||||
const auto & first_const_value = first_const_node->getValue();
|
||||
if (first_const_value.isNull()
|
||||
|| (lower_name == "sum" && isInt64OrUInt64FieldType(first_const_value.getType()) && first_const_value.get<UInt64>() == 0))
|
||||
|| (lower_name == "sum" && isInt64OrUInt64FieldType(first_const_value.getType()) && first_const_value.get<UInt64>() == 0
|
||||
&& !function_node->getResultType()->isNullable()))
|
||||
{
|
||||
/// avg(if(cond, null, a) -> avgIf(a, !cond))
|
||||
/// avg(if(cond, null, a) -> avgIfOrNullable(a, !cond))
|
||||
|
||||
/// sum(if(cond, 0, a) -> sumIf(a, !cond))
|
||||
/// sum(if(cond, 0, nullable_a) **is not** sumIf(a, !cond)) -> Same as above
|
||||
auto not_function = std::make_shared<FunctionNode>("not");
|
||||
auto & not_function_arguments = not_function->getArguments().getNodes();
|
||||
not_function_arguments.push_back(std::move(if_arguments_nodes[0]));
|
||||
|
@ -14,12 +14,14 @@
|
||||
|
||||
#include <Parsers/ASTExpressionList.h>
|
||||
#include <Parsers/ASTTablesInSelectQuery.h>
|
||||
#include <Parsers/ASTWithElement.h>
|
||||
#include <Parsers/ASTSubquery.h>
|
||||
#include <Parsers/ASTSelectQuery.h>
|
||||
#include <Parsers/ASTSelectWithUnionQuery.h>
|
||||
#include <Parsers/ASTSetQuery.h>
|
||||
|
||||
#include <Analyzer/Utils.h>
|
||||
#include <Analyzer/UnionNode.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -107,6 +109,9 @@ void QueryNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, s
|
||||
if (is_cte)
|
||||
buffer << ", is_cte: " << is_cte;
|
||||
|
||||
if (is_recursive_with)
|
||||
buffer << ", is_recursive_with: " << is_recursive_with;
|
||||
|
||||
if (is_distinct)
|
||||
buffer << ", is_distinct: " << is_distinct;
|
||||
|
||||
@ -259,6 +264,7 @@ bool QueryNode::isEqualImpl(const IQueryTreeNode & rhs, CompareOptions) const
|
||||
|
||||
return is_subquery == rhs_typed.is_subquery &&
|
||||
is_cte == rhs_typed.is_cte &&
|
||||
is_recursive_with == rhs_typed.is_recursive_with &&
|
||||
is_distinct == rhs_typed.is_distinct &&
|
||||
is_limit_with_ties == rhs_typed.is_limit_with_ties &&
|
||||
is_group_by_with_totals == rhs_typed.is_group_by_with_totals &&
|
||||
@ -291,6 +297,7 @@ void QueryNode::updateTreeHashImpl(HashState & state, CompareOptions) const
|
||||
state.update(projection_column_type_name);
|
||||
}
|
||||
|
||||
state.update(is_recursive_with);
|
||||
state.update(is_distinct);
|
||||
state.update(is_limit_with_ties);
|
||||
state.update(is_group_by_with_totals);
|
||||
@ -317,19 +324,20 @@ QueryTreeNodePtr QueryNode::cloneImpl() const
|
||||
{
|
||||
auto result_query_node = std::make_shared<QueryNode>(context);
|
||||
|
||||
result_query_node->is_subquery = is_subquery;
|
||||
result_query_node->is_cte = is_cte;
|
||||
result_query_node->is_distinct = is_distinct;
|
||||
result_query_node->is_limit_with_ties = is_limit_with_ties;
|
||||
result_query_node->is_group_by_with_totals = is_group_by_with_totals;
|
||||
result_query_node->is_group_by_with_rollup = is_group_by_with_rollup;
|
||||
result_query_node->is_group_by_with_cube = is_group_by_with_cube;
|
||||
result_query_node->is_subquery = is_subquery;
|
||||
result_query_node->is_cte = is_cte;
|
||||
result_query_node->is_recursive_with = is_recursive_with;
|
||||
result_query_node->is_distinct = is_distinct;
|
||||
result_query_node->is_limit_with_ties = is_limit_with_ties;
|
||||
result_query_node->is_group_by_with_totals = is_group_by_with_totals;
|
||||
result_query_node->is_group_by_with_rollup = is_group_by_with_rollup;
|
||||
result_query_node->is_group_by_with_cube = is_group_by_with_cube;
|
||||
result_query_node->is_group_by_with_grouping_sets = is_group_by_with_grouping_sets;
|
||||
result_query_node->is_group_by_all = is_group_by_all;
|
||||
result_query_node->is_order_by_all = is_order_by_all;
|
||||
result_query_node->cte_name = cte_name;
|
||||
result_query_node->projection_columns = projection_columns;
|
||||
result_query_node->settings_changes = settings_changes;
|
||||
result_query_node->is_group_by_all = is_group_by_all;
|
||||
result_query_node->is_order_by_all = is_order_by_all;
|
||||
result_query_node->cte_name = cte_name;
|
||||
result_query_node->projection_columns = projection_columns;
|
||||
result_query_node->settings_changes = settings_changes;
|
||||
|
||||
return result_query_node;
|
||||
}
|
||||
@ -337,6 +345,7 @@ QueryTreeNodePtr QueryNode::cloneImpl() const
|
||||
ASTPtr QueryNode::toASTImpl(const ConvertToASTOptions & options) const
|
||||
{
|
||||
auto select_query = std::make_shared<ASTSelectQuery>();
|
||||
select_query->recursive_with = is_recursive_with;
|
||||
select_query->distinct = is_distinct;
|
||||
select_query->limit_with_ties = is_limit_with_ties;
|
||||
select_query->group_by_with_totals = is_group_by_with_totals;
|
||||
@ -347,7 +356,41 @@ ASTPtr QueryNode::toASTImpl(const ConvertToASTOptions & options) const
|
||||
select_query->order_by_all = is_order_by_all;
|
||||
|
||||
if (hasWith())
|
||||
select_query->setExpression(ASTSelectQuery::Expression::WITH, getWith().toAST(options));
|
||||
{
|
||||
const auto & with = getWith();
|
||||
auto expression_list_ast = std::make_shared<ASTExpressionList>();
|
||||
expression_list_ast->children.reserve(with.getNodes().size());
|
||||
|
||||
for (const auto & with_node : with)
|
||||
{
|
||||
auto with_node_ast = with_node->toAST(options);
|
||||
expression_list_ast->children.push_back(with_node_ast);
|
||||
|
||||
const auto * with_query_node = with_node->as<QueryNode>();
|
||||
const auto * with_union_node = with_node->as<UnionNode>();
|
||||
if (!with_query_node && !with_union_node)
|
||||
continue;
|
||||
|
||||
bool is_with_node_cte = with_query_node ? with_query_node->isCTE() : with_union_node->isCTE();
|
||||
if (!is_with_node_cte)
|
||||
continue;
|
||||
|
||||
const auto & with_node_cte_name = with_query_node ? with_query_node->cte_name : with_union_node->getCTEName();
|
||||
|
||||
auto * with_node_ast_subquery = with_node_ast->as<ASTSubquery>();
|
||||
if (with_node_ast_subquery)
|
||||
with_node_ast_subquery->cte_name = "";
|
||||
|
||||
auto with_element_ast = std::make_shared<ASTWithElement>();
|
||||
with_element_ast->name = with_node_cte_name;
|
||||
with_element_ast->subquery = std::move(with_node_ast);
|
||||
with_element_ast->children.push_back(with_element_ast->subquery);
|
||||
|
||||
expression_list_ast->children.back() = std::move(with_element_ast);
|
||||
}
|
||||
|
||||
select_query->setExpression(ASTSelectQuery::Expression::WITH, std::move(expression_list_ast));
|
||||
}
|
||||
|
||||
auto projection_ast = getProjection().toAST(options);
|
||||
auto & projection_expression_list_ast = projection_ast->as<ASTExpressionList &>();
|
||||
|
@ -140,6 +140,18 @@ public:
|
||||
cte_name = std::move(cte_name_value);
|
||||
}
|
||||
|
||||
/// Returns true if query node has RECURSIVE WITH, false otherwise
|
||||
bool isRecursiveWith() const
|
||||
{
|
||||
return is_recursive_with;
|
||||
}
|
||||
|
||||
/// Set query node RECURSIVE WITH value
|
||||
void setIsRecursiveWith(bool is_recursive_with_value)
|
||||
{
|
||||
is_recursive_with = is_recursive_with_value;
|
||||
}
|
||||
|
||||
/// Returns true if query node has DISTINCT, false otherwise
|
||||
bool isDistinct() const
|
||||
{
|
||||
@ -618,6 +630,7 @@ protected:
|
||||
private:
|
||||
bool is_subquery = false;
|
||||
bool is_cte = false;
|
||||
bool is_recursive_with = false;
|
||||
bool is_distinct = false;
|
||||
bool is_limit_with_ties = false;
|
||||
bool is_group_by_with_totals = false;
|
||||
|
@ -271,6 +271,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q
|
||||
current_query_tree->setIsSubquery(is_subquery);
|
||||
current_query_tree->setIsCTE(!cte_name.empty());
|
||||
current_query_tree->setCTEName(cte_name);
|
||||
current_query_tree->setIsRecursiveWith(select_query_typed.recursive_with);
|
||||
current_query_tree->setIsDistinct(select_query_typed.distinct);
|
||||
current_query_tree->setIsLimitWithTies(select_query_typed.limit_with_ties);
|
||||
current_query_tree->setIsGroupByWithTotals(select_query_typed.group_by_with_totals);
|
||||
@ -287,8 +288,22 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q
|
||||
|
||||
auto select_with_list = select_query_typed.with();
|
||||
if (select_with_list)
|
||||
{
|
||||
current_query_tree->getWithNode() = buildExpressionList(select_with_list, current_context);
|
||||
|
||||
if (select_query_typed.recursive_with)
|
||||
{
|
||||
for (auto & with_node : current_query_tree->getWith().getNodes())
|
||||
{
|
||||
auto * with_union_node = with_node->as<UnionNode>();
|
||||
if (!with_union_node)
|
||||
continue;
|
||||
|
||||
with_union_node->setIsRecursiveCTE(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
auto select_expression_list = select_query_typed.select();
|
||||
if (select_expression_list)
|
||||
current_query_tree->getProjectionNode() = buildExpressionList(select_expression_list, current_context);
|
||||
|
@ -165,7 +165,6 @@ private:
|
||||
|
||||
/** ClickHouse query tree pass manager.
|
||||
*
|
||||
* TODO: Support setting optimize_monotonous_functions_in_order_by.
|
||||
* TODO: Add optimizations based on function semantics. Example: SELECT * FROM test_table WHERE id != id. (id is not nullable column).
|
||||
*/
|
||||
|
||||
|
21
src/Analyzer/RecursiveCTE.cpp
Normal file
21
src/Analyzer/RecursiveCTE.cpp
Normal file
@ -0,0 +1,21 @@
|
||||
#include <Analyzer/RecursiveCTE.h>
|
||||
|
||||
#include <Storages/IStorage.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
RecursiveCTETable::RecursiveCTETable(TemporaryTableHolderPtr holder_,
|
||||
StoragePtr storage_,
|
||||
NamesAndTypes columns_)
|
||||
: holder(std::move(holder_))
|
||||
, storage(std::move(storage_))
|
||||
, columns(std::move(columns_))
|
||||
{}
|
||||
|
||||
StorageID RecursiveCTETable::getStorageID() const
|
||||
{
|
||||
return storage->getStorageID();
|
||||
}
|
||||
|
||||
}
|
51
src/Analyzer/RecursiveCTE.h
Normal file
51
src/Analyzer/RecursiveCTE.h
Normal file
@ -0,0 +1,51 @@
|
||||
#pragma once
|
||||
|
||||
#include <Core/NamesAndTypes.h>
|
||||
|
||||
#include <Interpreters/DatabaseCatalog.h>
|
||||
|
||||
#include <Analyzer/IQueryTreeNode.h>
|
||||
#include <Analyzer/TableNode.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/** Recursive CTEs allow to recursively evaluate UNION subqueries.
|
||||
*
|
||||
* Overview:
|
||||
* https://www.postgresql.org/docs/current/queries-with.html#QUERIES-WITH-RECURSIVE
|
||||
*
|
||||
* Current implementation algorithm:
|
||||
*
|
||||
* During query analysis, when we resolve UNION node that is inside WITH RECURSIVE section of parent query we:
|
||||
* 1. First resolve non recursive subquery.
|
||||
* 2. Create temporary table using projection columns of resolved subquery from step 1.
|
||||
* 3. Create temporary table expression node using storage from step 2.
|
||||
* 4. Create resolution scope for recursive subquery. In that scope we add node from step 3 as expression argument with UNION node CTE name.
|
||||
* 5. Resolve recursive subquery.
|
||||
* 6. If in resolved UNION node temporary table expression storage from step 2 is used, we update UNION query with recursive CTE table.
|
||||
*
|
||||
* During query planning if UNION node contains recursive CTE table, we add ReadFromRecursiveCTEStep to query plan. That step is responsible for whole
|
||||
* recursive CTE query execution.
|
||||
*
|
||||
* TODO: Improve locking in ReadFromRecursiveCTEStep.
|
||||
* TODO: Improve query analysis if query contains aggregates, JOINS, GROUP BY, ORDER BY, LIMIT, OFFSET.
|
||||
* TODO: Support SEARCH DEPTH FIRST BY, SEARCH BREADTH FIRST BY syntax.
|
||||
* TODO: Support CYCLE syntax.
|
||||
* TODO: Support UNION DISTINCT recursive CTE mode.
|
||||
*/
|
||||
class RecursiveCTETable
|
||||
{
|
||||
public:
|
||||
RecursiveCTETable(TemporaryTableHolderPtr holder_,
|
||||
StoragePtr storage_,
|
||||
NamesAndTypes columns_);
|
||||
|
||||
StorageID getStorageID() const;
|
||||
|
||||
TemporaryTableHolderPtr holder;
|
||||
StoragePtr storage;
|
||||
NamesAndTypes columns;
|
||||
};
|
||||
|
||||
}
|
@ -33,6 +33,14 @@ TableNode::TableNode(StoragePtr storage_, const ContextPtr & context)
|
||||
{
|
||||
}
|
||||
|
||||
void TableNode::updateStorage(StoragePtr storage_value, const ContextPtr & context)
|
||||
{
|
||||
storage = std::move(storage_value);
|
||||
storage_id = storage->getStorageID();
|
||||
storage_lock = storage->lockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout);
|
||||
storage_snapshot = storage->getStorageSnapshot(storage->getInMemoryMetadataPtr(), context);
|
||||
}
|
||||
|
||||
void TableNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const
|
||||
{
|
||||
buffer << std::string(indent, ' ') << "TABLE id: " << format_state.getNodeId(this);
|
||||
|
@ -32,6 +32,11 @@ public:
|
||||
/// Construct table node with storage, context
|
||||
explicit TableNode(StoragePtr storage_, const ContextPtr & context);
|
||||
|
||||
/** Update table node storage.
|
||||
* After this call storage, storage_id, storage_lock, storage_snapshot will be updated using new storage.
|
||||
*/
|
||||
void updateStorage(StoragePtr storage_value, const ContextPtr & context);
|
||||
|
||||
/// Get storage
|
||||
const StoragePtr & getStorage() const
|
||||
{
|
||||
|
@ -9,6 +9,7 @@
|
||||
|
||||
#include <Parsers/ASTExpressionList.h>
|
||||
#include <Parsers/ASTTablesInSelectQuery.h>
|
||||
#include <Parsers/ASTWithElement.h>
|
||||
#include <Parsers/ASTSubquery.h>
|
||||
#include <Parsers/ASTSelectQuery.h>
|
||||
#include <Parsers/ASTSelectWithUnionQuery.h>
|
||||
@ -20,6 +21,8 @@
|
||||
|
||||
#include <DataTypes/getLeastSupertype.h>
|
||||
|
||||
#include <Storages/IStorage.h>
|
||||
|
||||
#include <Interpreters/Context.h>
|
||||
|
||||
#include <Analyzer/QueryNode.h>
|
||||
@ -49,6 +52,9 @@ UnionNode::UnionNode(ContextMutablePtr context_, SelectUnionMode union_mode_)
|
||||
|
||||
NamesAndTypes UnionNode::computeProjectionColumns() const
|
||||
{
|
||||
if (recursive_cte_table)
|
||||
return recursive_cte_table->columns;
|
||||
|
||||
std::vector<NamesAndTypes> projections;
|
||||
|
||||
NamesAndTypes query_node_projection;
|
||||
@ -90,6 +96,9 @@ NamesAndTypes UnionNode::computeProjectionColumns() const
|
||||
|
||||
void UnionNode::removeUnusedProjectionColumns(const std::unordered_set<std::string> & used_projection_columns)
|
||||
{
|
||||
if (recursive_cte_table)
|
||||
return;
|
||||
|
||||
auto projection_columns = computeProjectionColumns();
|
||||
size_t projection_columns_size = projection_columns.size();
|
||||
std::unordered_set<size_t> used_projection_column_indexes;
|
||||
@ -113,6 +122,9 @@ void UnionNode::removeUnusedProjectionColumns(const std::unordered_set<std::stri
|
||||
|
||||
void UnionNode::removeUnusedProjectionColumns(const std::unordered_set<size_t> & used_projection_columns_indexes)
|
||||
{
|
||||
if (recursive_cte_table)
|
||||
return;
|
||||
|
||||
auto & query_nodes = getQueries().getNodes();
|
||||
for (auto & query_node : query_nodes)
|
||||
{
|
||||
@ -136,6 +148,12 @@ void UnionNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, s
|
||||
if (is_cte)
|
||||
buffer << ", is_cte: " << is_cte;
|
||||
|
||||
if (is_recursive_cte)
|
||||
buffer << ", is_recursive_cte: " << is_recursive_cte;
|
||||
|
||||
if (recursive_cte_table)
|
||||
buffer << ", recursive_cte_table: " << recursive_cte_table->storage->getStorageID().getNameForLogs();
|
||||
|
||||
if (!cte_name.empty())
|
||||
buffer << ", cte_name: " << cte_name;
|
||||
|
||||
@ -149,14 +167,28 @@ bool UnionNode::isEqualImpl(const IQueryTreeNode & rhs, CompareOptions) const
|
||||
{
|
||||
const auto & rhs_typed = assert_cast<const UnionNode &>(rhs);
|
||||
|
||||
return is_subquery == rhs_typed.is_subquery && is_cte == rhs_typed.is_cte && cte_name == rhs_typed.cte_name &&
|
||||
union_mode == rhs_typed.union_mode;
|
||||
if (recursive_cte_table && rhs_typed.recursive_cte_table &&
|
||||
recursive_cte_table->getStorageID() != rhs_typed.recursive_cte_table->getStorageID())
|
||||
return false;
|
||||
else if ((recursive_cte_table && !rhs_typed.recursive_cte_table) || (!recursive_cte_table && rhs_typed.recursive_cte_table))
|
||||
return false;
|
||||
|
||||
return is_subquery == rhs_typed.is_subquery && is_cte == rhs_typed.is_cte && is_recursive_cte == rhs_typed.is_recursive_cte
|
||||
&& cte_name == rhs_typed.cte_name && union_mode == rhs_typed.union_mode;
|
||||
}
|
||||
|
||||
void UnionNode::updateTreeHashImpl(HashState & state, CompareOptions) const
|
||||
{
|
||||
state.update(is_subquery);
|
||||
state.update(is_cte);
|
||||
state.update(is_recursive_cte);
|
||||
|
||||
if (recursive_cte_table)
|
||||
{
|
||||
auto full_name = recursive_cte_table->getStorageID().getFullNameNotQuoted();
|
||||
state.update(full_name.size());
|
||||
state.update(full_name);
|
||||
}
|
||||
|
||||
state.update(cte_name.size());
|
||||
state.update(cte_name);
|
||||
@ -170,6 +202,8 @@ QueryTreeNodePtr UnionNode::cloneImpl() const
|
||||
|
||||
result_union_node->is_subquery = is_subquery;
|
||||
result_union_node->is_cte = is_cte;
|
||||
result_union_node->is_recursive_cte = is_recursive_cte;
|
||||
result_union_node->recursive_cte_table = recursive_cte_table;
|
||||
result_union_node->cte_name = cte_name;
|
||||
|
||||
return result_union_node;
|
||||
@ -183,14 +217,64 @@ ASTPtr UnionNode::toASTImpl(const ConvertToASTOptions & options) const
|
||||
select_with_union_query->children.push_back(getQueriesNode()->toAST(options));
|
||||
select_with_union_query->list_of_selects = select_with_union_query->children.back();
|
||||
|
||||
if (is_subquery)
|
||||
ASTPtr result_query = std::move(select_with_union_query);
|
||||
bool set_subquery_cte_name = true;
|
||||
|
||||
if (recursive_cte_table)
|
||||
{
|
||||
auto subquery = std::make_shared<ASTSubquery>(std::move(select_with_union_query));
|
||||
subquery->cte_name = cte_name;
|
||||
return subquery;
|
||||
auto recursive_select_query = std::make_shared<ASTSelectQuery>();
|
||||
recursive_select_query->recursive_with = true;
|
||||
|
||||
auto with_element_ast = std::make_shared<ASTWithElement>();
|
||||
with_element_ast->name = cte_name;
|
||||
with_element_ast->subquery = std::make_shared<ASTSubquery>(std::move(result_query));
|
||||
with_element_ast->children.push_back(with_element_ast->subquery);
|
||||
|
||||
auto with_expression_list_ast = std::make_shared<ASTExpressionList>();
|
||||
with_expression_list_ast->children.push_back(std::move(with_element_ast));
|
||||
|
||||
recursive_select_query->setExpression(ASTSelectQuery::Expression::WITH, std::move(with_expression_list_ast));
|
||||
|
||||
auto select_expression_list_ast = std::make_shared<ASTExpressionList>();
|
||||
select_expression_list_ast->children.reserve(recursive_cte_table->columns.size());
|
||||
for (const auto & recursive_cte_table_column : recursive_cte_table->columns)
|
||||
select_expression_list_ast->children.push_back(std::make_shared<ASTIdentifier>(recursive_cte_table_column.name));
|
||||
|
||||
recursive_select_query->setExpression(ASTSelectQuery::Expression::SELECT, std::move(select_expression_list_ast));
|
||||
|
||||
auto table_expression_ast = std::make_shared<ASTTableExpression>();
|
||||
table_expression_ast->children.push_back(std::make_shared<ASTTableIdentifier>(cte_name));
|
||||
table_expression_ast->database_and_table_name = table_expression_ast->children.back();
|
||||
|
||||
auto tables_in_select_query_element_ast = std::make_shared<ASTTablesInSelectQueryElement>();
|
||||
tables_in_select_query_element_ast->children.push_back(std::move(table_expression_ast));
|
||||
tables_in_select_query_element_ast->table_expression = tables_in_select_query_element_ast->children.back();
|
||||
|
||||
ASTPtr tables_in_select_query_ast = std::make_shared<ASTTablesInSelectQuery>();
|
||||
tables_in_select_query_ast->children.push_back(std::move(tables_in_select_query_element_ast));
|
||||
|
||||
recursive_select_query->setExpression(ASTSelectQuery::Expression::TABLES, std::move(tables_in_select_query_ast));
|
||||
|
||||
auto recursive_select_with_union_query = std::make_shared<ASTSelectWithUnionQuery>();
|
||||
auto recursive_select_with_union_query_list_of_selects = std::make_shared<ASTExpressionList>();
|
||||
recursive_select_with_union_query_list_of_selects->children.push_back(std::move(recursive_select_query));
|
||||
recursive_select_with_union_query->children.push_back(std::move(recursive_select_with_union_query_list_of_selects));
|
||||
recursive_select_with_union_query->list_of_selects = recursive_select_with_union_query->children.back();
|
||||
|
||||
result_query = std::move(recursive_select_with_union_query);
|
||||
set_subquery_cte_name = false;
|
||||
}
|
||||
|
||||
return select_with_union_query;
|
||||
if (is_subquery)
|
||||
{
|
||||
auto subquery = std::make_shared<ASTSubquery>(std::move(result_query));
|
||||
if (set_subquery_cte_name)
|
||||
subquery->cte_name = cte_name;
|
||||
|
||||
result_query = std::move(subquery);
|
||||
}
|
||||
|
||||
return result_query;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include <Analyzer/IQueryTreeNode.h>
|
||||
#include <Analyzer/ListNode.h>
|
||||
#include <Analyzer/TableExpressionModifiers.h>
|
||||
#include <Analyzer/RecursiveCTE.h>
|
||||
|
||||
#include <Interpreters/Context_fwd.h>
|
||||
|
||||
@ -84,6 +85,42 @@ public:
|
||||
is_cte = is_cte_value;
|
||||
}
|
||||
|
||||
/// Returns true if union node CTE is specified in WITH RECURSIVE, false otherwise
|
||||
bool isRecursiveCTE() const
|
||||
{
|
||||
return is_recursive_cte;
|
||||
}
|
||||
|
||||
/// Set union node is recursive CTE value
|
||||
void setIsRecursiveCTE(bool is_recursive_cte_value)
|
||||
{
|
||||
is_recursive_cte = is_recursive_cte_value;
|
||||
}
|
||||
|
||||
/// Returns true if union node has recursive CTE table, false otherwise
|
||||
bool hasRecursiveCTETable() const
|
||||
{
|
||||
return recursive_cte_table.has_value();
|
||||
}
|
||||
|
||||
/// Returns optional recursive CTE table
|
||||
const std::optional<RecursiveCTETable> & getRecursiveCTETable() const
|
||||
{
|
||||
return recursive_cte_table;
|
||||
}
|
||||
|
||||
/// Returns optional recursive CTE table
|
||||
std::optional<RecursiveCTETable> & getRecursiveCTETable()
|
||||
{
|
||||
return recursive_cte_table;
|
||||
}
|
||||
|
||||
/// Set union node recursive CTE table value
|
||||
void setRecursiveCTETable(RecursiveCTETable recursive_cte_table_value)
|
||||
{
|
||||
recursive_cte_table.emplace(std::move(recursive_cte_table_value));
|
||||
}
|
||||
|
||||
/// Get union node CTE name
|
||||
const std::string & getCTEName() const
|
||||
{
|
||||
@ -154,6 +191,8 @@ protected:
|
||||
private:
|
||||
bool is_subquery = false;
|
||||
bool is_cte = false;
|
||||
bool is_recursive_cte = false;
|
||||
std::optional<RecursiveCTETable> recursive_cte_table;
|
||||
std::string cte_name;
|
||||
ContextMutablePtr context;
|
||||
SelectUnionMode union_mode;
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include <Parsers/ASTSubquery.h>
|
||||
#include <Parsers/ASTFunction.h>
|
||||
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypeTuple.h>
|
||||
#include <DataTypes/DataTypeArray.h>
|
||||
@ -15,6 +16,8 @@
|
||||
#include <Functions/FunctionHelpers.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
|
||||
#include <Storages/IStorage.h>
|
||||
|
||||
#include <Interpreters/Context.h>
|
||||
|
||||
#include <Analyzer/InDepthQueryTreeVisitor.h>
|
||||
@ -61,6 +64,36 @@ bool isNodePartOfTree(const IQueryTreeNode * node, const IQueryTreeNode * root)
|
||||
return false;
|
||||
}
|
||||
|
||||
bool isStorageUsedInTree(const StoragePtr & storage, const IQueryTreeNode * root)
|
||||
{
|
||||
std::vector<const IQueryTreeNode *> nodes_to_process;
|
||||
nodes_to_process.push_back(root);
|
||||
|
||||
while (!nodes_to_process.empty())
|
||||
{
|
||||
const auto * subtree_node = nodes_to_process.back();
|
||||
nodes_to_process.pop_back();
|
||||
|
||||
const auto * table_node = subtree_node->as<TableNode>();
|
||||
const auto * table_function_node = subtree_node->as<TableFunctionNode>();
|
||||
|
||||
if (table_node || table_function_node)
|
||||
{
|
||||
const auto & table_storage = table_node ? table_node->getStorage() : table_function_node->getStorage();
|
||||
if (table_storage->getStorageID() == storage->getStorageID())
|
||||
return true;
|
||||
}
|
||||
|
||||
for (const auto & child : subtree_node->getChildren())
|
||||
{
|
||||
if (child)
|
||||
nodes_to_process.push_back(child.get());
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool isNameOfInFunction(const std::string & function_name)
|
||||
{
|
||||
bool is_special_function_in = function_name == "in" ||
|
||||
@ -808,26 +841,87 @@ QueryTreeNodePtr getExpressionSource(const QueryTreeNodePtr & node)
|
||||
return source;
|
||||
}
|
||||
|
||||
QueryTreeNodePtr buildSubqueryToReadColumnsFromTableExpression(QueryTreeNodePtr table_node, const ContextPtr & context)
|
||||
/** There are no limits on the maximum size of the result for the subquery.
|
||||
* Since the result of the query is not the result of the entire query.
|
||||
*/
|
||||
void updateContextForSubqueryExecution(ContextMutablePtr & mutable_context)
|
||||
{
|
||||
/** The subquery in the IN / JOIN section does not have any restrictions on the maximum size of the result.
|
||||
* Because the result of this query is not the result of the entire query.
|
||||
* Constraints work instead
|
||||
* max_rows_in_set, max_bytes_in_set, set_overflow_mode,
|
||||
* max_rows_in_join, max_bytes_in_join, join_overflow_mode,
|
||||
* which are checked separately (in the Set, Join objects).
|
||||
*/
|
||||
Settings subquery_settings = mutable_context->getSettings();
|
||||
subquery_settings.max_result_rows = 0;
|
||||
subquery_settings.max_result_bytes = 0;
|
||||
/// The calculation of extremes does not make sense and is not necessary (if you do it, then the extremes of the subquery can be taken for whole query).
|
||||
subquery_settings.extremes = false;
|
||||
mutable_context->setSettings(subquery_settings);
|
||||
}
|
||||
|
||||
QueryTreeNodePtr buildQueryToReadColumnsFromTableExpression(const NamesAndTypes & columns,
|
||||
const QueryTreeNodePtr & table_expression,
|
||||
ContextMutablePtr & context)
|
||||
{
|
||||
auto projection_columns = columns;
|
||||
|
||||
QueryTreeNodes subquery_projection_nodes;
|
||||
subquery_projection_nodes.reserve(projection_columns.size());
|
||||
|
||||
for (const auto & column : projection_columns)
|
||||
subquery_projection_nodes.push_back(std::make_shared<ColumnNode>(column, table_expression));
|
||||
|
||||
if (subquery_projection_nodes.empty())
|
||||
{
|
||||
auto constant_data_type = std::make_shared<DataTypeUInt64>();
|
||||
subquery_projection_nodes.push_back(std::make_shared<ConstantNode>(1UL, constant_data_type));
|
||||
projection_columns.push_back({"1", std::move(constant_data_type)});
|
||||
}
|
||||
|
||||
updateContextForSubqueryExecution(context);
|
||||
|
||||
auto query_node = std::make_shared<QueryNode>(std::move(context));
|
||||
|
||||
query_node->getProjection().getNodes() = std::move(subquery_projection_nodes);
|
||||
query_node->resolveProjectionColumns(projection_columns);
|
||||
query_node->getJoinTree() = table_expression;
|
||||
|
||||
return query_node;
|
||||
}
|
||||
|
||||
QueryTreeNodePtr buildSubqueryToReadColumnsFromTableExpression(const NamesAndTypes & columns,
|
||||
const QueryTreeNodePtr & table_expression,
|
||||
ContextMutablePtr & context)
|
||||
{
|
||||
auto result = buildQueryToReadColumnsFromTableExpression(columns, table_expression, context);
|
||||
result->as<QueryNode &>().setIsSubquery(true);
|
||||
return result;
|
||||
}
|
||||
|
||||
QueryTreeNodePtr buildQueryToReadColumnsFromTableExpression(const NamesAndTypes & columns,
|
||||
const QueryTreeNodePtr & table_expression,
|
||||
const ContextPtr & context)
|
||||
{
|
||||
auto context_copy = Context::createCopy(context);
|
||||
return buildQueryToReadColumnsFromTableExpression(columns, table_expression, context_copy);
|
||||
}
|
||||
|
||||
QueryTreeNodePtr buildSubqueryToReadColumnsFromTableExpression(const NamesAndTypes & columns,
|
||||
const QueryTreeNodePtr & table_expression,
|
||||
const ContextPtr & context)
|
||||
{
|
||||
auto context_copy = Context::createCopy(context);
|
||||
return buildSubqueryToReadColumnsFromTableExpression(columns, table_expression, context_copy);
|
||||
}
|
||||
|
||||
QueryTreeNodePtr buildSubqueryToReadColumnsFromTableExpression(const QueryTreeNodePtr & table_node, const ContextPtr & context)
|
||||
{
|
||||
const auto & storage_snapshot = table_node->as<TableNode>()->getStorageSnapshot();
|
||||
auto columns_to_select = storage_snapshot->getColumns(GetColumnsOptions(GetColumnsOptions::Ordinary));
|
||||
size_t columns_to_select_size = columns_to_select.size();
|
||||
auto column_nodes_to_select = std::make_shared<ListNode>();
|
||||
column_nodes_to_select->getNodes().reserve(columns_to_select_size);
|
||||
NamesAndTypes projection_columns;
|
||||
projection_columns.reserve(columns_to_select_size);
|
||||
for (auto & column : columns_to_select)
|
||||
{
|
||||
column_nodes_to_select->getNodes().emplace_back(std::make_shared<ColumnNode>(column, table_node));
|
||||
projection_columns.emplace_back(column.name, column.type);
|
||||
}
|
||||
auto subquery_for_table = std::make_shared<QueryNode>(Context::createCopy(context));
|
||||
subquery_for_table->setIsSubquery(true);
|
||||
subquery_for_table->getProjectionNode() = std::move(column_nodes_to_select);
|
||||
subquery_for_table->getJoinTree() = std::move(table_node);
|
||||
subquery_for_table->resolveProjectionColumns(std::move(projection_columns));
|
||||
return subquery_for_table;
|
||||
auto columns_to_select_list = storage_snapshot->getColumns(GetColumnsOptions(GetColumnsOptions::Ordinary));
|
||||
NamesAndTypes columns_to_select(columns_to_select_list.begin(), columns_to_select_list.end());
|
||||
return buildSubqueryToReadColumnsFromTableExpression(columns_to_select, table_node, context);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -1,9 +1,13 @@
|
||||
#pragma once
|
||||
|
||||
#include <Analyzer/IQueryTreeNode.h>
|
||||
#include <Core/NamesAndTypes.h>
|
||||
|
||||
#include <Storages/IStorage_fwd.h>
|
||||
|
||||
#include <Interpreters/Context_fwd.h>
|
||||
|
||||
#include <Analyzer/IQueryTreeNode.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -12,6 +16,9 @@ class FunctionNode;
|
||||
/// Returns true if node part of root tree, false otherwise
|
||||
bool isNodePartOfTree(const IQueryTreeNode * node, const IQueryTreeNode * root);
|
||||
|
||||
/// Returns true if storage is used in tree, false otherwise
|
||||
bool isStorageUsedInTree(const StoragePtr & storage, const IQueryTreeNode * root);
|
||||
|
||||
/// Returns true if function name is name of IN function or its variations, false otherwise
|
||||
bool isNameOfInFunction(const std::string & function_name);
|
||||
|
||||
@ -108,7 +115,41 @@ QueryTreeNodePtr createCastFunction(QueryTreeNodePtr node, DataTypePtr result_ty
|
||||
/// Checks that node has only one source and returns it
|
||||
QueryTreeNodePtr getExpressionSource(const QueryTreeNodePtr & node);
|
||||
|
||||
/// Build subquery which we execute for `IN table` function.
|
||||
QueryTreeNodePtr buildSubqueryToReadColumnsFromTableExpression(QueryTreeNodePtr table_node, const ContextPtr & context);
|
||||
/// Update mutable context for subquery execution
|
||||
void updateContextForSubqueryExecution(ContextMutablePtr & mutable_context);
|
||||
|
||||
/** Build query to read specified columns from table expression.
|
||||
* Specified mutable context will be used as query context.
|
||||
*/
|
||||
QueryTreeNodePtr buildQueryToReadColumnsFromTableExpression(const NamesAndTypes & columns,
|
||||
const QueryTreeNodePtr & table_expression,
|
||||
ContextMutablePtr & context);
|
||||
|
||||
/** Build subquery to read specified columns from table expression.
|
||||
* Specified mutable context will be used as query context.
|
||||
*/
|
||||
QueryTreeNodePtr buildSubqueryToReadColumnsFromTableExpression(const NamesAndTypes & columns,
|
||||
const QueryTreeNodePtr & table_expression,
|
||||
ContextMutablePtr & context);
|
||||
|
||||
/** Build query to read specified columns from table expression.
|
||||
* Specified context will be copied and used as query context.
|
||||
*/
|
||||
QueryTreeNodePtr buildQueryToReadColumnsFromTableExpression(const NamesAndTypes & columns,
|
||||
const QueryTreeNodePtr & table_expression,
|
||||
const ContextPtr & context);
|
||||
|
||||
/** Build subquery to read specified columns from table expression.
|
||||
* Specified context will be copied and used as query context.
|
||||
*/
|
||||
QueryTreeNodePtr buildSubqueryToReadColumnsFromTableExpression(const NamesAndTypes & columns,
|
||||
const QueryTreeNodePtr & table_expression,
|
||||
const ContextPtr & context);
|
||||
|
||||
/** Build subquery to read all columns from table expression.
|
||||
* Specified context will be copied and used as query context.
|
||||
*/
|
||||
QueryTreeNodePtr buildSubqueryToReadColumnsFromTableExpression(const QueryTreeNodePtr & table_node, const ContextPtr & context);
|
||||
|
||||
|
||||
}
|
||||
|
@ -221,7 +221,8 @@ std::unique_ptr<WriteBuffer> BackupWriterAzureBlobStorage::writeFile(const Strin
|
||||
key,
|
||||
DBMS_DEFAULT_BUFFER_SIZE,
|
||||
write_settings,
|
||||
settings);
|
||||
settings,
|
||||
threadPoolCallbackRunnerUnsafe<void>(getBackupsIOThreadPool().get(), "BackupWRAzure"));
|
||||
}
|
||||
|
||||
void BackupWriterAzureBlobStorage::removeFile(const String & file_name)
|
||||
|
@ -109,7 +109,7 @@ RestorerFromBackup::~RestorerFromBackup()
|
||||
if (getNumFutures() > 0)
|
||||
{
|
||||
LOG_INFO(log, "Waiting for {} tasks to finish", getNumFutures());
|
||||
waitFutures();
|
||||
waitFutures(/* throw_if_error= */ false);
|
||||
}
|
||||
}
|
||||
|
||||
@ -161,7 +161,7 @@ void RestorerFromBackup::run(Mode mode)
|
||||
setStage(Stage::COMPLETED);
|
||||
}
|
||||
|
||||
void RestorerFromBackup::waitFutures()
|
||||
void RestorerFromBackup::waitFutures(bool throw_if_error)
|
||||
{
|
||||
std::exception_ptr error;
|
||||
|
||||
@ -176,11 +176,7 @@ void RestorerFromBackup::waitFutures()
|
||||
if (futures_to_wait.empty())
|
||||
break;
|
||||
|
||||
/// Wait for all tasks.
|
||||
for (auto & future : futures_to_wait)
|
||||
future.wait();
|
||||
|
||||
/// Check if there is an exception.
|
||||
/// Wait for all tasks to finish.
|
||||
for (auto & future : futures_to_wait)
|
||||
{
|
||||
try
|
||||
@ -197,7 +193,12 @@ void RestorerFromBackup::waitFutures()
|
||||
}
|
||||
|
||||
if (error)
|
||||
std::rethrow_exception(error);
|
||||
{
|
||||
if (throw_if_error)
|
||||
std::rethrow_exception(error);
|
||||
else
|
||||
tryLogException(error, log);
|
||||
}
|
||||
}
|
||||
|
||||
size_t RestorerFromBackup::getNumFutures() const
|
||||
|
@ -130,7 +130,7 @@ private:
|
||||
|
||||
/// Waits until all tasks are processed (including the tasks scheduled while we're waiting).
|
||||
/// Throws an exception if any of the tasks throws an exception.
|
||||
void waitFutures();
|
||||
void waitFutures(bool throw_if_error = true);
|
||||
|
||||
/// Throws an exception if the RESTORE query was cancelled.
|
||||
void checkIsQueryCancelled() const;
|
||||
|
@ -2955,7 +2955,8 @@ void ClientBase::init(int argc, char ** argv)
|
||||
|
||||
/// Common options for clickhouse-client and clickhouse-local.
|
||||
options_description.main_description->add_options()
|
||||
("help", "produce help message")
|
||||
("help", "print usage summary, combine with --verbose to display all options")
|
||||
("verbose", "print query and other debugging info")
|
||||
("version,V", "print version information and exit")
|
||||
("version-clean", "print version in machine-readable format and exit")
|
||||
|
||||
@ -2979,7 +2980,6 @@ void ClientBase::init(int argc, char ** argv)
|
||||
("time,t", "print query execution time to stderr in non-interactive mode (for benchmarks)")
|
||||
|
||||
("echo", "in batch mode, print query before execution")
|
||||
("verbose", "print query and other debugging info")
|
||||
|
||||
("log-level", po::value<std::string>(), "log level")
|
||||
("server_logs_file", po::value<std::string>(), "put server logs into specified file")
|
||||
@ -3008,6 +3008,8 @@ void ClientBase::init(int argc, char ** argv)
|
||||
|
||||
addOptions(options_description);
|
||||
|
||||
OptionsDescription options_description_non_verbose = options_description;
|
||||
|
||||
auto getter = [](const auto & op)
|
||||
{
|
||||
String op_long_name = op->long_name();
|
||||
@ -3042,11 +3044,17 @@ void ClientBase::init(int argc, char ** argv)
|
||||
exit(0); // NOLINT(concurrency-mt-unsafe)
|
||||
}
|
||||
|
||||
if (options.count("verbose"))
|
||||
config().setBool("verbose", true);
|
||||
|
||||
/// Output of help message.
|
||||
if (options.count("help")
|
||||
|| (options.count("host") && options["host"].as<std::string>() == "elp")) /// If user writes -help instead of --help.
|
||||
{
|
||||
printHelpMessage(options_description);
|
||||
if (config().getBool("verbose", false))
|
||||
printHelpMessage(options_description, true);
|
||||
else
|
||||
printHelpMessage(options_description_non_verbose, false);
|
||||
exit(0); // NOLINT(concurrency-mt-unsafe)
|
||||
}
|
||||
|
||||
@ -3113,8 +3121,6 @@ void ClientBase::init(int argc, char ** argv)
|
||||
config().setBool("highlight", options["highlight"].as<bool>());
|
||||
if (options.count("history_file"))
|
||||
config().setString("history_file", options["history_file"].as<std::string>());
|
||||
if (options.count("verbose"))
|
||||
config().setBool("verbose", true);
|
||||
if (options.count("interactive"))
|
||||
config().setBool("interactive", true);
|
||||
if (options.count("pager"))
|
||||
|
@ -121,7 +121,7 @@ protected:
|
||||
};
|
||||
|
||||
virtual void updateLoggerLevel(const String &) {}
|
||||
virtual void printHelpMessage(const OptionsDescription & options_description) = 0;
|
||||
virtual void printHelpMessage(const OptionsDescription & options_description, bool verbose) = 0;
|
||||
virtual void addOptions(OptionsDescription & options_description) = 0;
|
||||
virtual void processOptions(const OptionsDescription & options_description,
|
||||
const CommandLineOptions & options,
|
||||
|
@ -28,28 +28,6 @@ namespace ErrorCodes
|
||||
extern const int USER_SESSION_LIMIT_EXCEEDED;
|
||||
}
|
||||
|
||||
Suggest::Suggest()
|
||||
{
|
||||
/// Keywords may be not up to date with ClickHouse parser.
|
||||
addWords({"CREATE", "DATABASE", "IF", "NOT", "EXISTS", "TEMPORARY", "TABLE", "ON",
|
||||
"CLUSTER", "DEFAULT", "MATERIALIZED", "ALIAS", "ENGINE", "AS", "VIEW", "POPULATE",
|
||||
"SETTINGS", "ATTACH", "DETACH", "DROP", "RENAME", "TO", "ALTER", "ADD",
|
||||
"MODIFY", "CLEAR", "COLUMN", "AFTER", "COPY", "PROJECT", "PRIMARY", "KEY",
|
||||
"CHECK", "PARTITION", "PART", "FREEZE", "FETCH", "FROM", "SHOW", "INTO",
|
||||
"OUTFILE", "FORMAT", "TABLES", "DATABASES", "LIKE", "PROCESSLIST", "CASE", "WHEN",
|
||||
"THEN", "ELSE", "END", "DESCRIBE", "DESC", "USE", "SET", "OPTIMIZE",
|
||||
"FINAL", "DEDUPLICATE", "INSERT", "VALUES", "SELECT", "DISTINCT", "SAMPLE", "ARRAY",
|
||||
"JOIN", "GLOBAL", "LOCAL", "ANY", "ALL", "INNER", "LEFT", "RIGHT",
|
||||
"FULL", "OUTER", "CROSS", "USING", "PREWHERE", "WHERE", "GROUP", "BY",
|
||||
"WITH", "TOTALS", "HAVING", "ORDER", "COLLATE", "LIMIT", "UNION", "AND",
|
||||
"OR", "ASC", "IN", "KILL", "QUERY", "SYNC", "ASYNC", "TEST",
|
||||
"BETWEEN", "TRUNCATE", "USER", "ROLE", "PROFILE", "QUOTA", "POLICY", "ROW",
|
||||
"GRANT", "REVOKE", "OPTION", "ADMIN", "EXCEPT", "REPLACE", "IDENTIFIED", "HOST",
|
||||
"NAME", "READONLY", "WRITABLE", "PERMISSIVE", "FOR", "RESTRICTIVE", "RANDOMIZED", "INTERVAL",
|
||||
"LIMITS", "ONLY", "TRACKING", "IP", "REGEXP", "ILIKE", "CLEANUP", "APPEND",
|
||||
"IGNORE NULLS", "RESPECT NULLS", "OVER", "PASTE", "WINDOW", "QUALIFY"});
|
||||
}
|
||||
|
||||
static String getLoadSuggestionQuery(Int32 suggestion_limit, bool basic_suggestion)
|
||||
{
|
||||
/// NOTE: Once you will update the completion list,
|
||||
@ -82,6 +60,7 @@ static String getLoadSuggestionQuery(Int32 suggestion_limit, bool basic_suggesti
|
||||
add_column("name", "data_type_families", false, {});
|
||||
add_column("name", "merge_tree_settings", false, {});
|
||||
add_column("name", "settings", false, {});
|
||||
add_column("keyword", "keywords", false, {});
|
||||
|
||||
if (!basic_suggestion)
|
||||
{
|
||||
|
@ -17,7 +17,7 @@ namespace DB
|
||||
class Suggest : public LineReader::Suggest, boost::noncopyable
|
||||
{
|
||||
public:
|
||||
Suggest();
|
||||
Suggest() = default;
|
||||
|
||||
~Suggest()
|
||||
{
|
||||
|
@ -391,6 +391,7 @@ PreformattedMessage getCurrentExceptionMessageAndPattern(bool with_stacktrace, b
|
||||
{
|
||||
WriteBufferFromOwnString stream;
|
||||
std::string_view message_format_string;
|
||||
std::vector<std::string> message_format_string_args;
|
||||
|
||||
try
|
||||
{
|
||||
@ -402,6 +403,7 @@ PreformattedMessage getCurrentExceptionMessageAndPattern(bool with_stacktrace, b
|
||||
<< (with_extra_info ? getExtraExceptionInfo(e) : "")
|
||||
<< " (version " << VERSION_STRING << VERSION_OFFICIAL << ")";
|
||||
message_format_string = e.tryGetMessageFormatString();
|
||||
message_format_string_args = e.getMessageFormatStringArgs();
|
||||
}
|
||||
catch (const Poco::Exception & e)
|
||||
{
|
||||
@ -462,7 +464,7 @@ PreformattedMessage getCurrentExceptionMessageAndPattern(bool with_stacktrace, b
|
||||
catch (...) {} // NOLINT(bugprone-empty-catch)
|
||||
}
|
||||
|
||||
return PreformattedMessage{stream.str(), message_format_string};
|
||||
return PreformattedMessage{stream.str(), message_format_string, message_format_string_args};
|
||||
}
|
||||
|
||||
|
||||
@ -581,7 +583,7 @@ PreformattedMessage getExceptionMessageAndPattern(const Exception & e, bool with
|
||||
}
|
||||
catch (...) {} // NOLINT(bugprone-empty-catch)
|
||||
|
||||
return PreformattedMessage{stream.str(), e.tryGetMessageFormatString()};
|
||||
return PreformattedMessage{stream.str(), e.tryGetMessageFormatString(), e.getMessageFormatStringArgs()};
|
||||
}
|
||||
|
||||
std::string getExceptionMessage(std::exception_ptr e, bool with_stacktrace)
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include <fmt/core.h>
|
||||
#include <fmt/format.h>
|
||||
#include <Poco/Exception.h>
|
||||
|
||||
@ -59,6 +60,7 @@ public:
|
||||
std::terminate();
|
||||
capture_thread_frame_pointers = thread_frame_pointers;
|
||||
message_format_string = msg.format_string;
|
||||
message_format_string_args = msg.format_string_args;
|
||||
}
|
||||
|
||||
Exception(PreformattedMessage && msg, int code): Exception(std::move(msg.text), code)
|
||||
@ -67,6 +69,7 @@ public:
|
||||
std::terminate();
|
||||
capture_thread_frame_pointers = thread_frame_pointers;
|
||||
message_format_string = msg.format_string;
|
||||
message_format_string_args = msg.format_string_args;
|
||||
}
|
||||
|
||||
/// Collect call stacks of all previous jobs' schedulings leading to this thread job's execution
|
||||
@ -107,12 +110,7 @@ public:
|
||||
|
||||
// Format message with fmt::format, like the logging functions.
|
||||
template <typename... Args>
|
||||
Exception(int code, FormatStringHelper<Args...> fmt, Args &&... args)
|
||||
: Exception(fmt::format(fmt.fmt_str, std::forward<Args>(args)...), code)
|
||||
{
|
||||
capture_thread_frame_pointers = thread_frame_pointers;
|
||||
message_format_string = fmt.message_format_string;
|
||||
}
|
||||
Exception(int code, FormatStringHelper<Args...> fmt, Args &&... args) : Exception(fmt.format(std::forward<Args>(args)...), code) {}
|
||||
|
||||
struct CreateFromPocoTag {};
|
||||
struct CreateFromSTDTag {};
|
||||
@ -152,6 +150,8 @@ public:
|
||||
|
||||
std::string_view tryGetMessageFormatString() const { return message_format_string; }
|
||||
|
||||
std::vector<std::string> getMessageFormatStringArgs() const { return message_format_string_args; }
|
||||
|
||||
private:
|
||||
#ifndef STD_EXCEPTION_HAS_STACK_TRACE
|
||||
StackTrace trace;
|
||||
@ -162,6 +162,7 @@ private:
|
||||
|
||||
protected:
|
||||
std::string_view message_format_string;
|
||||
std::vector<std::string> message_format_string_args;
|
||||
/// Local copy of static per-thread thread_frame_pointers, should be mutable to be unpoisoned on printout
|
||||
mutable std::vector<StackTrace::FramePointers> capture_thread_frame_pointers;
|
||||
};
|
||||
@ -193,26 +194,29 @@ public:
|
||||
// Format message with fmt::format, like the logging functions.
|
||||
template <typename... Args>
|
||||
ErrnoException(int code, FormatStringHelper<Args...> fmt, Args &&... args)
|
||||
: Exception(fmt::format(fmt.fmt_str, std::forward<Args>(args)...), code), saved_errno(errno)
|
||||
: Exception(fmt.format(std::forward<Args>(args)...), code), saved_errno(errno)
|
||||
{
|
||||
addMessage(", {}", errnoToString(saved_errno));
|
||||
}
|
||||
|
||||
template <typename... Args>
|
||||
ErrnoException(int code, int with_errno, FormatStringHelper<Args...> fmt, Args &&... args)
|
||||
: Exception(fmt.format(std::forward<Args>(args)...), code), saved_errno(with_errno)
|
||||
{
|
||||
capture_thread_frame_pointers = thread_frame_pointers;
|
||||
message_format_string = fmt.message_format_string;
|
||||
addMessage(", {}", errnoToString(saved_errno));
|
||||
}
|
||||
|
||||
template <typename... Args>
|
||||
[[noreturn]] static void throwWithErrno(int code, int with_errno, FormatStringHelper<Args...> fmt, Args &&... args)
|
||||
{
|
||||
auto e = ErrnoException(fmt::format(fmt.fmt_str, std::forward<Args>(args)...), code, with_errno);
|
||||
e.message_format_string = fmt.message_format_string;
|
||||
auto e = ErrnoException(code, with_errno, std::move(fmt), std::forward<Args>(args)...);
|
||||
throw e; /// NOLINT
|
||||
}
|
||||
|
||||
template <typename... Args>
|
||||
[[noreturn]] static void throwFromPath(int code, const std::string & path, FormatStringHelper<Args...> fmt, Args &&... args)
|
||||
{
|
||||
auto e = ErrnoException(fmt::format(fmt.fmt_str, std::forward<Args>(args)...), code, errno);
|
||||
e.message_format_string = fmt.message_format_string;
|
||||
auto e = ErrnoException(code, errno, std::move(fmt), std::forward<Args>(args)...);
|
||||
e.path = path;
|
||||
throw e; /// NOLINT
|
||||
}
|
||||
@ -221,8 +225,7 @@ public:
|
||||
[[noreturn]] static void
|
||||
throwFromPathWithErrno(int code, const std::string & path, int with_errno, FormatStringHelper<Args...> fmt, Args &&... args)
|
||||
{
|
||||
auto e = ErrnoException(fmt::format(fmt.fmt_str, std::forward<Args>(args)...), code, with_errno);
|
||||
e.message_format_string = fmt.message_format_string;
|
||||
auto e = ErrnoException(code, with_errno, std::move(fmt), std::forward<Args>(args)...);
|
||||
e.path = path;
|
||||
throw e; /// NOLINT
|
||||
}
|
||||
|
@ -39,6 +39,7 @@ static struct InitFiu
|
||||
REGULAR(replicated_merge_tree_commit_zk_fail_when_recovering_from_hw_fault) \
|
||||
REGULAR(use_delayed_remote_source) \
|
||||
REGULAR(cluster_discovery_faults) \
|
||||
REGULAR(replicated_sends_failpoint) \
|
||||
ONCE(smt_commit_merge_mutate_zk_fail_after_op) \
|
||||
ONCE(smt_commit_merge_mutate_zk_fail_before_op) \
|
||||
ONCE(smt_commit_write_zk_fail_after_op) \
|
||||
|
@ -2,8 +2,11 @@
|
||||
|
||||
#include <base/defines.h>
|
||||
#include <base/types.h>
|
||||
#include <fmt/args.h>
|
||||
#include <fmt/core.h>
|
||||
#include <fmt/format.h>
|
||||
#include <mutex>
|
||||
#include <type_traits>
|
||||
#include <unordered_map>
|
||||
#include <Poco/Logger.h>
|
||||
#include <Poco/Message.h>
|
||||
@ -14,6 +17,10 @@ struct PreformattedMessage;
|
||||
consteval void formatStringCheckArgsNumImpl(std::string_view str, size_t nargs);
|
||||
template <typename T> constexpr std::string_view tryGetStaticFormatString(T && x);
|
||||
|
||||
[[maybe_unused]] inline void tryGetFormattedArgs(std::vector<std::string>&) {};
|
||||
template <typename T, typename... Ts> [[maybe_unused]] inline void tryGetFormattedArgs(std::vector<std::string>&, T &&, Ts && ...);
|
||||
template <typename... Args> inline std::string tryGetArgsAndFormat(std::vector<std::string>&, fmt::format_string<Args...>, Args && ...);
|
||||
|
||||
/// Extract format string from a string literal and constructs consteval fmt::format_string
|
||||
template <typename... Args>
|
||||
struct FormatStringHelperImpl
|
||||
@ -39,6 +46,7 @@ struct PreformattedMessage
|
||||
{
|
||||
std::string text;
|
||||
std::string_view format_string;
|
||||
std::vector<std::string> format_string_args;
|
||||
|
||||
template <typename... Args>
|
||||
static PreformattedMessage create(FormatStringHelper<Args...> fmt, Args &&... args);
|
||||
@ -47,22 +55,26 @@ struct PreformattedMessage
|
||||
operator std::string () && { return std::move(text); } /// NOLINT
|
||||
operator fmt::format_string<> () const { UNREACHABLE(); } /// NOLINT
|
||||
|
||||
void apply(std::string & out_text, std::string_view & out_format_string) const &
|
||||
void apply(std::string & out_text, std::string_view & out_format_string, std::vector<std::string> & out_format_string_args) const &
|
||||
{
|
||||
out_text = text;
|
||||
out_format_string = format_string;
|
||||
out_format_string_args = format_string_args;
|
||||
}
|
||||
void apply(std::string & out_text, std::string_view & out_format_string) &&
|
||||
void apply(std::string & out_text, std::string_view & out_format_string, std::vector<std::string> & out_format_string_args) &&
|
||||
{
|
||||
out_text = std::move(text);
|
||||
out_format_string = format_string;
|
||||
out_format_string_args = std::move(format_string_args);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename... Args>
|
||||
PreformattedMessage FormatStringHelperImpl<Args...>::format(Args && ...args) const
|
||||
{
|
||||
return PreformattedMessage{fmt::format(fmt_str, std::forward<Args>(args)...), message_format_string};
|
||||
std::vector<std::string> out_format_string_args;
|
||||
std::string msg_text = tryGetArgsAndFormat(out_format_string_args, fmt_str, std::forward<Args>(args)...);
|
||||
return PreformattedMessage{msg_text, message_format_string, out_format_string_args};
|
||||
}
|
||||
|
||||
template <typename... Args>
|
||||
@ -113,12 +125,23 @@ template <typename T> constexpr std::string_view tryGetStaticFormatString(T && x
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T, typename... Ts> void tryGetFormattedArgs(std::vector<std::string>& out, T && x, Ts && ...rest)
|
||||
{
|
||||
if constexpr (std::is_base_of_v<fmt::detail::view, std::decay_t<T>>)
|
||||
out.push_back(fmt::format("{}", std::remove_reference_t<T>(x)));
|
||||
else
|
||||
out.push_back(fmt::format("{}", std::forward<T>(x)));
|
||||
|
||||
tryGetFormattedArgs(out, std::forward<Ts>(rest)...);
|
||||
}
|
||||
|
||||
/// Constexpr ifs are not like ifdefs, and compiler still checks that unneeded code can be compiled
|
||||
/// This template is useful to avoid compilation failures when condition of some "constexpr if" is false
|
||||
template<bool enable> struct ConstexprIfsAreNotIfdefs
|
||||
{
|
||||
template <typename T> constexpr static std::string_view getStaticFormatString(T &&) { return {}; }
|
||||
template <typename T> static PreformattedMessage getPreformatted(T &&) { return {}; }
|
||||
template <typename... Args> static std::string getArgsAndFormat(std::vector<std::string>&, fmt::format_string<Args...>, Args &&...) { return {}; }
|
||||
};
|
||||
|
||||
template<> struct ConstexprIfsAreNotIfdefs<true>
|
||||
@ -133,8 +156,19 @@ template<> struct ConstexprIfsAreNotIfdefs<true>
|
||||
}
|
||||
|
||||
template <typename T> static T && getPreformatted(T && x) { return std::forward<T>(x); }
|
||||
|
||||
template <typename... Args> static std::string getArgsAndFormat(std::vector<std::string>& out, fmt::format_string<Args...> fmt_str, Args && ...args)
|
||||
{
|
||||
return tryGetArgsAndFormat(out, std::move(fmt_str), std::forward<Args>(args)...);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename... Args> inline std::string tryGetArgsAndFormat(std::vector<std::string>& out, fmt::format_string<Args...> fmt_str, Args && ...args)
|
||||
{
|
||||
tryGetFormattedArgs(out, args...);
|
||||
return fmt::format(fmt_str, std::forward<Args>(args)...);
|
||||
}
|
||||
|
||||
template <typename... Ts> constexpr size_t numArgs(Ts &&...) { return sizeof...(Ts); }
|
||||
template <typename T, typename... Ts> constexpr auto firstArg(T && x, Ts &&...) { return std::forward<T>(x); }
|
||||
/// For implicit conversion of fmt::basic_runtime<> to char* for std::string ctor
|
||||
|
@ -518,7 +518,8 @@ bool ZooKeeper::existsWatch(const std::string & path, Coordination::Stat * stat,
|
||||
return code != Coordination::Error::ZNONODE;
|
||||
}
|
||||
|
||||
Coordination::Error ZooKeeper::getImpl(const std::string & path, std::string & res, Coordination::Stat * stat, Coordination::WatchCallback watch_callback)
|
||||
Coordination::Error ZooKeeper::getImpl(
|
||||
const std::string & path, std::string & res, Coordination::Stat * stat, Coordination::WatchCallbackPtr watch_callback)
|
||||
{
|
||||
auto future_result = asyncTryGetNoThrow(path, watch_callback);
|
||||
|
||||
@ -541,6 +542,11 @@ Coordination::Error ZooKeeper::getImpl(const std::string & path, std::string & r
|
||||
}
|
||||
}
|
||||
|
||||
Coordination::Error ZooKeeper::getImpl(const std::string & path, std::string & res, Coordination::Stat * stat, Coordination::WatchCallback watch_callback)
|
||||
{
|
||||
return getImpl(path, res, stat, watch_callback ? std::make_shared<Coordination::WatchCallback>(watch_callback) : Coordination::WatchCallbackPtr{});
|
||||
}
|
||||
|
||||
std::string ZooKeeper::get(const std::string & path, Coordination::Stat * stat, const EventPtr & watch)
|
||||
{
|
||||
Coordination::Error code = Coordination::Error::ZOK;
|
||||
@ -561,6 +567,17 @@ std::string ZooKeeper::getWatch(const std::string & path, Coordination::Stat * s
|
||||
throw KeeperException(code, "Can't get data for node '{}': node doesn't exist", path);
|
||||
}
|
||||
|
||||
|
||||
std::string ZooKeeper::getWatch(const std::string & path, Coordination::Stat * stat, Coordination::WatchCallbackPtr watch_callback)
|
||||
{
|
||||
Coordination::Error code = Coordination::Error::ZOK;
|
||||
std::string res;
|
||||
if (tryGetWatch(path, res, stat, watch_callback, &code))
|
||||
return res;
|
||||
else
|
||||
throw KeeperException(code, "Can't get data for node '{}': node doesn't exist", path);
|
||||
}
|
||||
|
||||
bool ZooKeeper::tryGet(
|
||||
const std::string & path,
|
||||
std::string & res,
|
||||
@ -571,6 +588,25 @@ bool ZooKeeper::tryGet(
|
||||
return tryGetWatch(path, res, stat, callbackForEvent(watch), return_code);
|
||||
}
|
||||
|
||||
bool ZooKeeper::tryGetWatch(
|
||||
const std::string & path,
|
||||
std::string & res,
|
||||
Coordination::Stat * stat,
|
||||
Coordination::WatchCallbackPtr watch_callback,
|
||||
Coordination::Error * return_code)
|
||||
{
|
||||
Coordination::Error code = getImpl(path, res, stat, watch_callback);
|
||||
|
||||
if (!(code == Coordination::Error::ZOK || code == Coordination::Error::ZNONODE))
|
||||
throw KeeperException::fromPath(code, path);
|
||||
|
||||
if (return_code)
|
||||
*return_code = code;
|
||||
|
||||
return code == Coordination::Error::ZOK;
|
||||
|
||||
}
|
||||
|
||||
bool ZooKeeper::tryGetWatch(
|
||||
const std::string & path,
|
||||
std::string & res,
|
||||
@ -589,6 +625,7 @@ bool ZooKeeper::tryGetWatch(
|
||||
return code == Coordination::Error::ZOK;
|
||||
}
|
||||
|
||||
|
||||
Coordination::Error ZooKeeper::setImpl(const std::string & path, const std::string & data,
|
||||
int32_t version, Coordination::Stat * stat)
|
||||
{
|
||||
@ -1062,6 +1099,11 @@ std::future<Coordination::GetResponse> ZooKeeper::asyncGet(const std::string & p
|
||||
}
|
||||
|
||||
std::future<Coordination::GetResponse> ZooKeeper::asyncTryGetNoThrow(const std::string & path, Coordination::WatchCallback watch_callback)
|
||||
{
|
||||
return asyncTryGetNoThrow(path, watch_callback ? std::make_shared<Coordination::WatchCallback>(watch_callback) : Coordination::WatchCallbackPtr{});
|
||||
}
|
||||
|
||||
std::future<Coordination::GetResponse> ZooKeeper::asyncTryGetNoThrow(const std::string & path, Coordination::WatchCallbackPtr watch_callback)
|
||||
{
|
||||
auto promise = std::make_shared<std::promise<Coordination::GetResponse>>();
|
||||
auto future = promise->get_future();
|
||||
@ -1071,8 +1113,7 @@ std::future<Coordination::GetResponse> ZooKeeper::asyncTryGetNoThrow(const std::
|
||||
promise->set_value(response);
|
||||
};
|
||||
|
||||
impl->get(path, std::move(callback),
|
||||
watch_callback ? std::make_shared<Coordination::WatchCallback>(watch_callback) : Coordination::WatchCallbackPtr{});
|
||||
impl->get(path, std::move(callback), watch_callback);
|
||||
return future;
|
||||
}
|
||||
|
||||
|
@ -306,6 +306,7 @@ public:
|
||||
|
||||
std::string get(const std::string & path, Coordination::Stat * stat = nullptr, const EventPtr & watch = nullptr);
|
||||
std::string getWatch(const std::string & path, Coordination::Stat * stat, Coordination::WatchCallback watch_callback);
|
||||
std::string getWatch(const std::string & path, Coordination::Stat * stat, Coordination::WatchCallbackPtr watch_callback);
|
||||
|
||||
using MultiGetResponse = MultiReadResponses<Coordination::GetResponse, false>;
|
||||
using MultiTryGetResponse = MultiReadResponses<Coordination::GetResponse, true>;
|
||||
@ -338,6 +339,13 @@ public:
|
||||
Coordination::WatchCallback watch_callback,
|
||||
Coordination::Error * code = nullptr);
|
||||
|
||||
bool tryGetWatch(
|
||||
const std::string & path,
|
||||
std::string & res,
|
||||
Coordination::Stat * stat,
|
||||
Coordination::WatchCallbackPtr watch_callback,
|
||||
Coordination::Error * code = nullptr);
|
||||
|
||||
template <typename TIter>
|
||||
MultiTryGetResponse tryGet(TIter start, TIter end)
|
||||
{
|
||||
@ -520,6 +528,8 @@ public:
|
||||
/// Like the previous one but don't throw any exceptions on future.get()
|
||||
FutureGet asyncTryGetNoThrow(const std::string & path, Coordination::WatchCallback watch_callback = {});
|
||||
|
||||
FutureGet asyncTryGetNoThrow(const std::string & path, Coordination::WatchCallbackPtr watch_callback = {});
|
||||
|
||||
using FutureExists = std::future<Coordination::ExistsResponse>;
|
||||
FutureExists asyncExists(const std::string & path, Coordination::WatchCallback watch_callback = {});
|
||||
/// Like the previous one but don't throw any exceptions on future.get()
|
||||
@ -625,6 +635,8 @@ private:
|
||||
Coordination::Error removeImpl(const std::string & path, int32_t version);
|
||||
Coordination::Error getImpl(
|
||||
const std::string & path, std::string & res, Coordination::Stat * stat, Coordination::WatchCallback watch_callback);
|
||||
Coordination::Error getImpl(
|
||||
const std::string & path, std::string & res, Coordination::Stat * stat, Coordination::WatchCallbackPtr watch_callback);
|
||||
Coordination::Error setImpl(const std::string & path, const std::string & data, int32_t version, Coordination::Stat * stat);
|
||||
Coordination::Error getChildrenImpl(
|
||||
const std::string & path,
|
||||
|
@ -22,13 +22,16 @@ ZooKeeperLock::ZooKeeperLock(
|
||||
const ZooKeeperPtr & zookeeper_,
|
||||
const std::string & lock_prefix_,
|
||||
const std::string & lock_name_,
|
||||
const std::string & lock_message_)
|
||||
const std::string & lock_message_,
|
||||
bool throw_if_lost_)
|
||||
: zookeeper(zookeeper_)
|
||||
, lock_path(fs::path(lock_prefix_) / lock_name_)
|
||||
, lock_message(lock_message_)
|
||||
, throw_if_lost(throw_if_lost_)
|
||||
, log(getLogger("zkutil::Lock"))
|
||||
{
|
||||
zookeeper->createIfNotExists(lock_prefix_, "");
|
||||
LOG_TRACE(log, "Trying to create zookeeper lock on path {} for session {}", lock_path, zookeeper->getClientID());
|
||||
}
|
||||
|
||||
ZooKeeperLock::~ZooKeeperLock()
|
||||
@ -45,7 +48,7 @@ ZooKeeperLock::~ZooKeeperLock()
|
||||
|
||||
bool ZooKeeperLock::isLocked() const
|
||||
{
|
||||
return locked;
|
||||
return locked && !zookeeper->expired();
|
||||
}
|
||||
|
||||
const std::string & ZooKeeperLock::getLockPath() const
|
||||
@ -56,7 +59,10 @@ const std::string & ZooKeeperLock::getLockPath() const
|
||||
void ZooKeeperLock::unlock()
|
||||
{
|
||||
if (!locked)
|
||||
{
|
||||
LOG_TRACE(log, "Lock on path {} for session {} is not locked, exiting", lock_path, zookeeper->getClientID());
|
||||
return;
|
||||
}
|
||||
|
||||
locked = false;
|
||||
|
||||
@ -71,12 +77,19 @@ void ZooKeeperLock::unlock()
|
||||
bool result = zookeeper->exists(lock_path, &stat);
|
||||
|
||||
if (result && stat.ephemeralOwner == zookeeper->getClientID())
|
||||
{
|
||||
zookeeper->remove(lock_path, -1);
|
||||
LOG_TRACE(log, "Lock on path {} for session {} is unlocked", lock_path, zookeeper->getClientID());
|
||||
}
|
||||
else if (result)
|
||||
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Lock is lost, it has another owner. Path: {}, message: {}, owner: {}, our id: {}",
|
||||
lock_path, lock_message, stat.ephemeralOwner, zookeeper->getClientID());
|
||||
else if (throw_if_lost)
|
||||
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Lock is lost, node does not exist. Path: {}, message: {}, our id: {}",
|
||||
lock_path, lock_message, zookeeper->getClientID());
|
||||
else
|
||||
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Lock is lost, node does not exist. Path: {}, message: {}", lock_path, lock_message);
|
||||
LOG_INFO(log, "Lock is lost, node does not exist. Path: {}, message: {}, our id: {}",
|
||||
lock_path, lock_message, zookeeper->getClientID());
|
||||
}
|
||||
|
||||
bool ZooKeeperLock::tryLock()
|
||||
@ -96,9 +109,9 @@ bool ZooKeeperLock::tryLock()
|
||||
}
|
||||
|
||||
std::unique_ptr<ZooKeeperLock> createSimpleZooKeeperLock(
|
||||
const ZooKeeperPtr & zookeeper, const String & lock_prefix, const String & lock_name, const String & lock_message)
|
||||
const ZooKeeperPtr & zookeeper, const String & lock_prefix, const String & lock_name, const String & lock_message, bool throw_if_lost)
|
||||
{
|
||||
return std::make_unique<ZooKeeperLock>(zookeeper, lock_prefix, lock_name, lock_message);
|
||||
return std::make_unique<ZooKeeperLock>(zookeeper, lock_prefix, lock_name, lock_message, throw_if_lost);
|
||||
}
|
||||
|
||||
|
||||
|
@ -32,7 +32,8 @@ public:
|
||||
const ZooKeeperPtr & zookeeper_,
|
||||
const std::string & lock_prefix_,
|
||||
const std::string & lock_name_,
|
||||
const std::string & lock_message_ = "");
|
||||
const std::string & lock_message_ = "",
|
||||
bool throw_if_lost_ = true);
|
||||
|
||||
~ZooKeeperLock();
|
||||
|
||||
@ -46,12 +47,13 @@ private:
|
||||
|
||||
std::string lock_path;
|
||||
std::string lock_message;
|
||||
bool throw_if_lost{true};
|
||||
LoggerPtr log;
|
||||
bool locked = false;
|
||||
|
||||
};
|
||||
|
||||
std::unique_ptr<ZooKeeperLock> createSimpleZooKeeperLock(
|
||||
const ZooKeeperPtr & zookeeper, const String & lock_prefix, const String & lock_name, const String & lock_message);
|
||||
const ZooKeeperPtr & zookeeper, const String & lock_prefix, const String & lock_name, const String & lock_message, bool throw_if_lost = true);
|
||||
|
||||
}
|
||||
|
@ -1,5 +1,6 @@
|
||||
#include <Common/COW.h>
|
||||
#include <iostream>
|
||||
#include <base/defines.h>
|
||||
|
||||
|
||||
class IColumn : public COW<IColumn>
|
||||
@ -15,8 +16,6 @@ public:
|
||||
|
||||
virtual int get() const = 0;
|
||||
virtual void set(int value) = 0;
|
||||
|
||||
virtual MutablePtr test() const = 0;
|
||||
};
|
||||
|
||||
using ColumnPtr = IColumn::Ptr;
|
||||
@ -31,58 +30,63 @@ private:
|
||||
explicit ConcreteColumn(int data_) : data(data_) {}
|
||||
ConcreteColumn(const ConcreteColumn &) = default;
|
||||
|
||||
MutableColumnPtr test() const override
|
||||
{
|
||||
MutableColumnPtr res = create(123);
|
||||
return res;
|
||||
}
|
||||
|
||||
public:
|
||||
int get() const override { return data; }
|
||||
void set(int value) override { data = value; }
|
||||
};
|
||||
|
||||
template <typename ColPtr>
|
||||
void print(const ColumnPtr & x, const ColPtr & y)
|
||||
{
|
||||
std::cerr << "values: " << x->get() << ", " << y->get() << "\n";
|
||||
std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << "\n";
|
||||
std::cerr << "addresses: " << x.get() << ", " << y.get() << "\n";
|
||||
}
|
||||
|
||||
int main(int, char **)
|
||||
{
|
||||
ColumnPtr x = ConcreteColumn::create(1);
|
||||
ColumnPtr y = x;//x->test();
|
||||
|
||||
std::cerr << "values: " << x->get() << ", " << y->get() << "\n";
|
||||
std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << "\n";
|
||||
std::cerr << "addresses: " << x.get() << ", " << y.get() << "\n";
|
||||
ColumnPtr y = x;
|
||||
print(x, y);
|
||||
chassert(x->get() == 1 && y->get() == 1);
|
||||
chassert(x->use_count() == 2 && y->use_count() == 2);
|
||||
chassert(x.get() == y.get());
|
||||
|
||||
{
|
||||
MutableColumnPtr mut = IColumn::mutate(std::move(y));
|
||||
mut->set(2);
|
||||
print(x, mut);
|
||||
chassert(x->get() == 1 && mut->get() == 2);
|
||||
chassert(x->use_count() == 1 && mut->use_count() == 1);
|
||||
chassert(x.get() != mut.get());
|
||||
|
||||
std::cerr << "refcounts: " << x->use_count() << ", " << mut->use_count() << "\n";
|
||||
std::cerr << "addresses: " << x.get() << ", " << mut.get() << "\n";
|
||||
y = std::move(mut);
|
||||
}
|
||||
|
||||
std::cerr << "values: " << x->get() << ", " << y->get() << "\n";
|
||||
std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << "\n";
|
||||
std::cerr << "addresses: " << x.get() << ", " << y.get() << "\n";
|
||||
print(x, y);
|
||||
chassert(x->get() == 1 && y->get() == 2);
|
||||
chassert(x->use_count() == 1 && y->use_count() == 1);
|
||||
chassert(x.get() != y.get());
|
||||
|
||||
x = ConcreteColumn::create(0);
|
||||
|
||||
std::cerr << "values: " << x->get() << ", " << y->get() << "\n";
|
||||
std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << "\n";
|
||||
std::cerr << "addresses: " << x.get() << ", " << y.get() << "\n";
|
||||
print(x, y);
|
||||
chassert(x->get() == 0 && y->get() == 2);
|
||||
chassert(x->use_count() == 1 && y->use_count() == 1);
|
||||
chassert(x.get() != y.get());
|
||||
|
||||
{
|
||||
MutableColumnPtr mut = IColumn::mutate(std::move(y));
|
||||
mut->set(3);
|
||||
print(x, mut);
|
||||
chassert(x->get() == 0 && mut->get() == 3);
|
||||
chassert(x->use_count() == 1 && mut->use_count() == 1);
|
||||
chassert(x.get() != mut.get());
|
||||
|
||||
std::cerr << "refcounts: " << x->use_count() << ", " << mut->use_count() << "\n";
|
||||
std::cerr << "addresses: " << x.get() << ", " << mut.get() << "\n";
|
||||
y = std::move(mut);
|
||||
}
|
||||
|
||||
std::cerr << "values: " << x->get() << ", " << y->get() << "\n";
|
||||
std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << "\n";
|
||||
print(x, y);
|
||||
chassert(x->get() == 0 && y->get() == 3);
|
||||
chassert(x->use_count() == 1 && y->use_count() == 1);
|
||||
chassert(x.get() != y.get());
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,6 @@
|
||||
#include <Common/COW.h>
|
||||
#include <iostream>
|
||||
#include <base/defines.h>
|
||||
|
||||
|
||||
class IColumn : public COW<IColumn>
|
||||
@ -61,47 +62,58 @@ public:
|
||||
void set(int value) override { wrapped->set(value); }
|
||||
};
|
||||
|
||||
template <typename ColPtr>
|
||||
void print(const ColumnPtr & x, const ColPtr & y)
|
||||
{
|
||||
std::cerr << "values: " << x->get() << ", " << y->get() << "\n";
|
||||
std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << "\n";
|
||||
std::cerr << "addresses: " << x.get() << ", " << y.get() << "\n";
|
||||
}
|
||||
|
||||
int main(int, char **)
|
||||
{
|
||||
ColumnPtr x = ColumnComposition::create(1);
|
||||
ColumnPtr y = x;
|
||||
|
||||
std::cerr << "values: " << x->get() << ", " << y->get() << "\n";
|
||||
std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << "\n";
|
||||
std::cerr << "addresses: " << x.get() << ", " << y.get() << "\n";
|
||||
print(x, y);
|
||||
chassert(x->get() == 1 && y->get() == 1);
|
||||
chassert(x->use_count() == 2 && y->use_count() == 2);
|
||||
chassert(x.get() == y.get());
|
||||
|
||||
{
|
||||
MutableColumnPtr mut = IColumn::mutate(std::move(y));
|
||||
mut->set(2);
|
||||
print(x, mut);
|
||||
chassert(x->get() == 1 && mut->get() == 2);
|
||||
chassert(x->use_count() == 1 && mut->use_count() == 1);
|
||||
chassert(x.get() != mut.get());
|
||||
|
||||
std::cerr << "refcounts: " << x->use_count() << ", " << mut->use_count() << "\n";
|
||||
std::cerr << "addresses: " << x.get() << ", " << mut.get() << "\n";
|
||||
y = std::move(mut);
|
||||
}
|
||||
|
||||
std::cerr << "values: " << x->get() << ", " << y->get() << "\n";
|
||||
std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << "\n";
|
||||
std::cerr << "addresses: " << x.get() << ", " << y.get() << "\n";
|
||||
print(x, y);
|
||||
chassert(x->get() == 1 && y->get() == 2);
|
||||
chassert(x->use_count() == 1 && y->use_count() == 1);
|
||||
chassert(x.get() != y.get());
|
||||
|
||||
x = ColumnComposition::create(0);
|
||||
|
||||
std::cerr << "values: " << x->get() << ", " << y->get() << "\n";
|
||||
std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << "\n";
|
||||
std::cerr << "addresses: " << x.get() << ", " << y.get() << "\n";
|
||||
print(x, y);
|
||||
chassert(x->get() == 0 && y->get() == 2);
|
||||
chassert(x->use_count() == 1 && y->use_count() == 1);
|
||||
chassert(x.get() != y.get());
|
||||
|
||||
{
|
||||
MutableColumnPtr mut = IColumn::mutate(std::move(y));
|
||||
mut->set(3);
|
||||
print(x, mut);
|
||||
chassert(x->get() == 0 && mut->get() == 3);
|
||||
chassert(x->use_count() == 1 && mut->use_count() == 1);
|
||||
chassert(x.get() != mut.get());
|
||||
|
||||
std::cerr << "refcounts: " << x->use_count() << ", " << mut->use_count() << "\n";
|
||||
std::cerr << "addresses: " << x.get() << ", " << mut.get() << "\n";
|
||||
y = std::move(mut);
|
||||
}
|
||||
|
||||
std::cerr << "values: " << x->get() << ", " << y->get() << "\n";
|
||||
std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << "\n";
|
||||
print(x, y);
|
||||
chassert(x->get() == 0 && y->get() == 3);
|
||||
chassert(x->use_count() == 1 && y->use_count() == 1);
|
||||
chassert(x.get() != y.get());
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2,6 +2,7 @@
|
||||
|
||||
/// Macros for convenient usage of Poco logger.
|
||||
#include <unistd.h>
|
||||
#include <fmt/args.h>
|
||||
#include <fmt/format.h>
|
||||
#include <Poco/Logger.h>
|
||||
#include <Poco/Message.h>
|
||||
@ -80,6 +81,7 @@ namespace impl
|
||||
\
|
||||
std::string_view _format_string; \
|
||||
std::string _formatted_message; \
|
||||
std::vector<std::string> _format_string_args; \
|
||||
\
|
||||
if constexpr (LogTypeInfo::is_static) \
|
||||
{ \
|
||||
@ -91,17 +93,17 @@ namespace impl
|
||||
if constexpr (is_preformatted_message) \
|
||||
{ \
|
||||
static_assert(_nargs == 1 || !is_preformatted_message); \
|
||||
ConstexprIfsAreNotIfdefs<is_preformatted_message>::getPreformatted(LOG_IMPL_FIRST_ARG(__VA_ARGS__)).apply(_formatted_message, _format_string); \
|
||||
ConstexprIfsAreNotIfdefs<is_preformatted_message>::getPreformatted(LOG_IMPL_FIRST_ARG(__VA_ARGS__)).apply(_formatted_message, _format_string, _format_string_args); \
|
||||
} \
|
||||
else \
|
||||
{ \
|
||||
_formatted_message = _nargs == 1 ? firstArg(__VA_ARGS__) : fmt::format(__VA_ARGS__); \
|
||||
_formatted_message = _nargs == 1 ? firstArg(__VA_ARGS__) : ConstexprIfsAreNotIfdefs<!is_preformatted_message>::getArgsAndFormat(_format_string_args, __VA_ARGS__); \
|
||||
} \
|
||||
\
|
||||
std::string _file_function = __FILE__ "; "; \
|
||||
_file_function += __PRETTY_FUNCTION__; \
|
||||
Poco::Message _poco_message(_logger->name(), std::move(_formatted_message), \
|
||||
(PRIORITY), _file_function.c_str(), __LINE__, _format_string); \
|
||||
(PRIORITY), _file_function.c_str(), __LINE__, _format_string, _format_string_args); \
|
||||
_channel->log(_poco_message); \
|
||||
} \
|
||||
catch (const Poco::Exception & logger_exception) \
|
||||
|
@ -132,7 +132,9 @@ static PollPidResult pollPid(pid_t pid, int timeout_in_ms)
|
||||
if (kq == -1)
|
||||
return PollPidResult::FAILED;
|
||||
|
||||
struct kevent change = {.ident = 0};
|
||||
struct kevent change;
|
||||
change.ident = 0;
|
||||
|
||||
EV_SET(&change, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
|
||||
|
||||
int event_add_result = HANDLE_EINTR(kevent(kq, &change, 1, NULL, 0, NULL));
|
||||
@ -144,7 +146,9 @@ static PollPidResult pollPid(pid_t pid, int timeout_in_ms)
|
||||
return PollPidResult::FAILED;
|
||||
}
|
||||
|
||||
struct kevent event = {.ident = 0};
|
||||
struct kevent event;
|
||||
event.ident = 0;
|
||||
|
||||
struct timespec remaining_timespec = {.tv_sec = timeout_in_ms / 1000, .tv_nsec = (timeout_in_ms % 1000) * 1000000};
|
||||
int ready = HANDLE_EINTR(kevent(kq, nullptr, 0, &event, 1, &remaining_timespec));
|
||||
PollPidResult result = ready < 0 ? PollPidResult::FAILED : PollPidResult::RESTART;
|
||||
|
@ -24,6 +24,7 @@ public:
|
||||
void updateHash(SipHash & hash) const override;
|
||||
|
||||
protected:
|
||||
/// 1 byte (`gcd_bytes_size` value) + 1 byte (`bytes_to_skip` value) + `bytes_to_skip` bytes (trash) + `gcd_bytes_size` bytes (gcd value) + (`source_size` - `bytes_to_skip`) bytes (data)
|
||||
UInt32 doCompressData(const char * source, UInt32 source_size, char * dest) const override;
|
||||
void doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size) const override;
|
||||
UInt32 getMaxCompressedDataSize(UInt32 uncompressed_size) const override;
|
||||
@ -54,7 +55,7 @@ UInt32 CompressionCodecGCD::getMaxCompressedDataSize(UInt32 uncompressed_size) c
|
||||
{
|
||||
return uncompressed_size
|
||||
+ gcd_bytes_size // To store gcd
|
||||
+ 2; // Local header
|
||||
+ 2; // Values of `gcd_bytes_size` and `bytes_to_skip`
|
||||
}
|
||||
|
||||
uint8_t CompressionCodecGCD::getMethodByte() const
|
||||
@ -147,7 +148,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest,
|
||||
if (source_size - sizeof(T) != output_size)
|
||||
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress GCD-encoded data");
|
||||
|
||||
memcpy(dest, source, source_size);
|
||||
memcpy(dest, source, source_size - sizeof(T));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -160,6 +161,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest,
|
||||
source += sizeof(T);
|
||||
dest += sizeof(T);
|
||||
}
|
||||
chassert(source == source_end);
|
||||
}
|
||||
|
||||
}
|
||||
@ -209,6 +211,8 @@ void CompressionCodecGCD::doDecompressData(const char * source, UInt32 source_si
|
||||
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress GCD-encoded data. File has wrong header");
|
||||
|
||||
UInt8 bytes_to_skip = uncompressed_size % bytes_size;
|
||||
chassert(bytes_to_skip == static_cast<UInt8>(source[1]));
|
||||
|
||||
UInt32 output_size = uncompressed_size - bytes_to_skip;
|
||||
|
||||
if (static_cast<UInt32>(2 + bytes_to_skip) > source_size)
|
||||
|
@ -67,6 +67,9 @@ static constexpr auto DBMS_DEFAULT_MAX_PARSER_DEPTH = 1000;
|
||||
/// Default limit on the amount of backtracking of recursive descend parser.
|
||||
static constexpr auto DBMS_DEFAULT_MAX_PARSER_BACKTRACKS = 1000000;
|
||||
|
||||
/// Default limit on recursive CTE evaluation depth.
|
||||
static constexpr auto DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH = 1000;
|
||||
|
||||
/// Default limit on query size.
|
||||
static constexpr auto DBMS_DEFAULT_MAX_QUERY_SIZE = 262144;
|
||||
|
||||
|
@ -112,6 +112,7 @@ class IColumn;
|
||||
M(Bool, azure_create_new_file_on_insert, false, "Enables or disables creating a new file on each insert in azure engine tables", 0) \
|
||||
M(Bool, s3_check_objects_after_upload, false, "Check each uploaded object to s3 with head request to be sure that upload was successful", 0) \
|
||||
M(Bool, s3_allow_parallel_part_upload, true, "Use multiple threads for s3 multipart upload. It may lead to slightly higher memory usage", 0) \
|
||||
M(Bool, azure_allow_parallel_part_upload, true, "Use multiple threads for azure multipart upload.", 0) \
|
||||
M(Bool, s3_throw_on_zero_files_match, false, "Throw an error, when ListObjects request cannot match any files", 0) \
|
||||
M(Bool, s3_disable_checksum, false, "Do not calculate a checksum when sending a file to S3. This speeds up writes by avoiding excessive processing passes on a file. It is mostly safe as the data of MergeTree tables is checksummed by ClickHouse anyway, and when S3 is accessed with HTTPS, the TLS layer already provides integrity while transferring through the network. While additional checksums on S3 give defense in depth.", 0) \
|
||||
M(UInt64, s3_retry_attempts, 100, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries", 0) \
|
||||
@ -603,7 +604,6 @@ class IColumn;
|
||||
M(Bool, optimize_if_chain_to_multiif, false, "Replace if(cond1, then1, if(cond2, ...)) chains to multiIf. Currently it's not beneficial for numeric types.", 0) \
|
||||
M(Bool, optimize_multiif_to_if, true, "Replace 'multiIf' with only one condition to 'if'.", 0) \
|
||||
M(Bool, optimize_if_transform_strings_to_enum, false, "Replaces string-type arguments in If and Transform to enum. Disabled by default cause it could make inconsistent change in distributed query that would lead to its fail.", 0) \
|
||||
M(Bool, optimize_monotonous_functions_in_order_by, false, "Replace monotonous function with its argument in ORDER BY", 0) \
|
||||
M(Bool, optimize_functions_to_subcolumns, false, "Transform functions to subcolumns, if possible, to reduce amount of read data. E.g. 'length(arr)' -> 'arr.size0', 'col IS NULL' -> 'col.null' ", 0) \
|
||||
M(Bool, optimize_using_constraints, false, "Use constraints for query optimization", 0) \
|
||||
M(Bool, optimize_substitute_columns, false, "Use constraints for column substitution", 0) \
|
||||
@ -622,6 +622,7 @@ class IColumn;
|
||||
M(Bool, validate_polygons, true, "Throw exception if polygon is invalid in function pointInPolygon (e.g. self-tangent, self-intersecting). If the setting is false, the function will accept invalid polygons but may silently return wrong result.", 0) \
|
||||
M(UInt64, max_parser_depth, DBMS_DEFAULT_MAX_PARSER_DEPTH, "Maximum parser depth (recursion depth of recursive descend parser).", 0) \
|
||||
M(UInt64, max_parser_backtracks, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS, "Maximum parser backtracking (how many times it tries different alternatives in the recursive descend parsing process).", 0) \
|
||||
M(UInt64, max_recursive_cte_evaluation_depth, DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, "Maximum limit on recursive CTE evaluation depth", 0) \
|
||||
M(Bool, allow_settings_after_format_in_insert, false, "Allow SETTINGS after FORMAT, but note, that this is not always safe (note: this is a compatibility setting).", 0) \
|
||||
M(Seconds, periodic_live_view_refresh, 60, "Interval after which periodically refreshed live view is forced to refresh.", 0) \
|
||||
M(Bool, transform_null_in, false, "If enabled, NULL values will be matched with 'IN' operator as if they are considered equal.", 0) \
|
||||
@ -706,7 +707,6 @@ class IColumn;
|
||||
M(Bool, engine_file_skip_empty_files, false, "Allows to skip empty files in file table engine", 0) \
|
||||
M(Bool, engine_url_skip_empty_files, false, "Allows to skip empty files in url table engine", 0) \
|
||||
M(Bool, enable_url_encoding, true, " Allows to enable/disable decoding/encoding path in uri in URL table engine", 0) \
|
||||
M(Bool, allow_experimental_database_replicated, false, "Allow to create databases with Replicated engine", 0) \
|
||||
M(UInt64, database_replicated_initial_query_timeout_sec, 300, "How long initial DDL query should wait for Replicated database to precess previous DDL queue entries", 0) \
|
||||
M(Bool, database_replicated_enforce_synchronous_settings, false, "Enforces synchronous waiting for some queries (see also database_atomic_wait_for_drop_and_detach_synchronously, mutation_sync, alter_sync). Not recommended to enable these settings.", 0) \
|
||||
M(UInt64, max_distributed_depth, 5, "Maximum distributed query depth", 0) \
|
||||
@ -739,6 +739,7 @@ class IColumn;
|
||||
M(Bool, query_plan_split_filter, true, "Allow to split filters in the query plan", 0) \
|
||||
M(Bool, query_plan_merge_expressions, true, "Allow to merge expressions in the query plan", 0) \
|
||||
M(Bool, query_plan_filter_push_down, true, "Allow to push down filter by predicate query plan step", 0) \
|
||||
M(Bool, query_plan_convert_outer_join_to_inner_join, true, "Allow to convert OUTER JOIN to INNER JOIN if filter after JOIN always filters default values", 0) \
|
||||
M(Bool, query_plan_optimize_prewhere, true, "Allow to push down filter to PREWHERE expression for supported storages", 0) \
|
||||
M(Bool, query_plan_execute_functions_after_sorting, true, "Allow to re-order functions after sorting", 0) \
|
||||
M(Bool, query_plan_reuse_storage_ordering_for_window_functions, true, "Allow to use the storage sorting for window functions", 0) \
|
||||
@ -933,6 +934,7 @@ class IColumn;
|
||||
MAKE_OBSOLETE(M, Bool, allow_experimental_query_cache, true) \
|
||||
MAKE_OBSOLETE(M, Bool, allow_experimental_alter_materialized_view_structure, true) \
|
||||
MAKE_OBSOLETE(M, Bool, allow_experimental_shared_merge_tree, true) \
|
||||
MAKE_OBSOLETE(M, Bool, allow_experimental_database_replicated, true) \
|
||||
\
|
||||
MAKE_OBSOLETE(M, Milliseconds, async_insert_stale_timeout_ms, 0) \
|
||||
MAKE_OBSOLETE(M, StreamingHandleErrorMode, handle_kafka_error_mode, StreamingHandleErrorMode::DEFAULT) \
|
||||
@ -977,6 +979,7 @@ class IColumn;
|
||||
MAKE_OBSOLETE(M, Bool, allow_experimental_undrop_table_query, true) \
|
||||
MAKE_OBSOLETE(M, Bool, allow_experimental_s3queue, true) \
|
||||
MAKE_OBSOLETE(M, Bool, query_plan_optimize_primary_key, true) \
|
||||
MAKE_OBSOLETE(M, Bool, optimize_monotonous_functions_in_order_by, false) \
|
||||
|
||||
/** The section above is for obsolete settings. Do not add anything there. */
|
||||
|
||||
|
@ -92,8 +92,12 @@ static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> sett
|
||||
{"input_format_json_ignore_unnecessary_fields", false, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields"},
|
||||
{"input_format_hive_text_allow_variable_number_of_columns", false, true, "Ignore extra columns in Hive Text input (if file has more columns than expected) and treat missing fields in Hive Text input as default values."},
|
||||
{"first_day_of_week", "Monday", "Monday", "Added a setting for the first day of the week for date/time functions"},
|
||||
{"allow_experimental_database_replicated", false, true, "Database engine Replicated is now in Beta stage"},
|
||||
{"temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds", (10 * 60 * 1000), (10 * 60 * 1000), "Wait time to lock cache for sapce reservation in temporary data in filesystem cache"},
|
||||
}},
|
||||
{"azure_allow_parallel_part_upload", "true", "true", "Use multiple threads for azure multipart upload."},
|
||||
{"max_recursive_cte_evaluation_depth", DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, "Maximum limit on recursive CTE evaluation depth"},
|
||||
{"query_plan_convert_outer_join_to_inner_join", false, true, "Allow to convert OUTER JOIN to INNER JOIN if filter after JOIN always filters default values"},
|
||||
}},
|
||||
{"24.3", {{"s3_connect_timeout_ms", 1000, 1000, "Introduce new dedicated setting for s3 connection timeout"},
|
||||
{"allow_experimental_shared_merge_tree", false, true, "The setting is obsolete"},
|
||||
{"use_page_cache_for_disks_without_file_cache", false, false, "Added userspace page cache"},
|
||||
|
@ -95,16 +95,21 @@ static void setReplicatedEngine(ASTCreateQuery * create_query, ContextPtr contex
|
||||
create_query->storage->set(create_query->storage->engine, engine->clone());
|
||||
}
|
||||
|
||||
String DatabaseOrdinary::getConvertToReplicatedFlagPath(const String & name, bool tableStarted)
|
||||
String DatabaseOrdinary::getConvertToReplicatedFlagPath(const String & name, const StoragePolicyPtr storage_policy, bool tableStarted)
|
||||
{
|
||||
fs::path data_path;
|
||||
if (storage_policy->getDisks().empty())
|
||||
data_path = getContext()->getPath();
|
||||
else
|
||||
data_path = storage_policy->getDisks()[0]->getPath();
|
||||
|
||||
if (!tableStarted)
|
||||
{
|
||||
auto create_query = tryGetCreateTableQuery(name, getContext());
|
||||
data_path = fs::path(getContext()->getPath()) / getTableDataPath(create_query->as<ASTCreateQuery &>());
|
||||
data_path = data_path / getTableDataPath(create_query->as<ASTCreateQuery &>());
|
||||
}
|
||||
else
|
||||
data_path = fs::path(getContext()->getPath()) / getTableDataPath(name);
|
||||
data_path = data_path / getTableDataPath(name);
|
||||
|
||||
return (data_path / CONVERT_TO_REPLICATED_FLAG_NAME).string();
|
||||
}
|
||||
@ -120,7 +125,14 @@ void DatabaseOrdinary::convertMergeTreeToReplicatedIfNeeded(ASTPtr ast, const Qu
|
||||
if (!create_query->storage || !create_query->storage->engine->name.ends_with("MergeTree") || create_query->storage->engine->name.starts_with("Replicated") || create_query->storage->engine->name.starts_with("Shared"))
|
||||
return;
|
||||
|
||||
auto convert_to_replicated_flag_path = getConvertToReplicatedFlagPath(qualified_name.table, false);
|
||||
/// Get table's storage policy
|
||||
MergeTreeSettings default_settings = getContext()->getMergeTreeSettings();
|
||||
auto policy = getContext()->getStoragePolicy(default_settings.storage_policy);
|
||||
if (auto * query_settings = create_query->storage->settings)
|
||||
if (Field * policy_setting = query_settings->changes.tryGet("storage_policy"))
|
||||
policy = getContext()->getStoragePolicy(policy_setting->safeGet<String>());
|
||||
|
||||
auto convert_to_replicated_flag_path = getConvertToReplicatedFlagPath(qualified_name.table, policy, false);
|
||||
|
||||
if (!fs::exists(convert_to_replicated_flag_path))
|
||||
return;
|
||||
@ -288,7 +300,7 @@ void DatabaseOrdinary::restoreMetadataAfterConvertingToReplicated(StoragePtr tab
|
||||
if (!rmt)
|
||||
return;
|
||||
|
||||
auto convert_to_replicated_flag_path = getConvertToReplicatedFlagPath(name.table, true);
|
||||
auto convert_to_replicated_flag_path = getConvertToReplicatedFlagPath(name.table, table->getStoragePolicy(), true);
|
||||
if (!fs::exists(convert_to_replicated_flag_path))
|
||||
return;
|
||||
|
||||
|
@ -86,7 +86,7 @@ protected:
|
||||
private:
|
||||
void convertMergeTreeToReplicatedIfNeeded(ASTPtr ast, const QualifiedTableName & qualified_name, const String & file_name);
|
||||
void restoreMetadataAfterConvertingToReplicated(StoragePtr table, const QualifiedTableName & name);
|
||||
String getConvertToReplicatedFlagPath(const String & name, bool tableStarted);
|
||||
String getConvertToReplicatedFlagPath(const String & name, StoragePolicyPtr storage_policy, bool tableStarted);
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -1139,8 +1139,10 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
|
||||
}
|
||||
LOG_INFO(log, "All tables are created successfully");
|
||||
|
||||
chassert(max_log_ptr_at_creation || our_log_ptr);
|
||||
UInt32 first_entry_to_mark_finished = new_replica ? max_log_ptr_at_creation : our_log_ptr;
|
||||
/// NOTE first_entry_to_mark_finished can be 0 if our replica has crashed just after creating its nodes in ZK,
|
||||
/// so it's a new replica, but after restarting we don't know max_log_ptr_at_creation anymore...
|
||||
/// It's a very rare case, and it's okay if some queries throw TIMEOUT_EXCEEDED when waiting for all replicas
|
||||
if (first_entry_to_mark_finished)
|
||||
{
|
||||
/// If the replica is new and some of the queries applied during recovery
|
||||
|
@ -154,6 +154,7 @@ void IDisk::copyThroughBuffers(
|
||||
/// Disable parallel write. We already copy in parallel.
|
||||
/// Avoid high memory usage. See test_s3_zero_copy_ttl/test.py::test_move_and_s3_memory_usage
|
||||
write_settings.s3_allow_parallel_part_upload = false;
|
||||
write_settings.azure_allow_parallel_part_upload = false;
|
||||
|
||||
asyncCopy(*this, from_path, *to_disk, to_path, copying_thread_pool, results, copy_root_dir, read_settings, write_settings, cancellation_hook);
|
||||
|
||||
|
@ -188,7 +188,8 @@ Azure::Storage::Blobs::BlobClientOptions getAzureBlobClientOptions(const Poco::U
|
||||
retry_options.MaxRetryDelay = std::chrono::milliseconds(config.getUInt(config_prefix + ".retry_max_backoff_ms", 1000));
|
||||
|
||||
using CurlOptions = Azure::Core::Http::CurlTransportOptions;
|
||||
CurlOptions curl_options{.NoSignal = true};
|
||||
CurlOptions curl_options;
|
||||
curl_options.NoSignal = true;
|
||||
|
||||
if (config.has(config_prefix + ".curl_ip_resolve"))
|
||||
{
|
||||
|
@ -282,12 +282,17 @@ std::unique_ptr<WriteBufferFromFileBase> AzureObjectStorage::writeObject( /// NO
|
||||
|
||||
LOG_TEST(log, "Writing file: {}", object.remote_path);
|
||||
|
||||
ThreadPoolCallbackRunnerUnsafe<void> scheduler;
|
||||
if (write_settings.azure_allow_parallel_part_upload)
|
||||
scheduler = threadPoolCallbackRunnerUnsafe<void>(getThreadPoolWriter(), "VFSWrite");
|
||||
|
||||
return std::make_unique<WriteBufferFromAzureBlobStorage>(
|
||||
client.get(),
|
||||
object.remote_path,
|
||||
buf_size,
|
||||
patchSettings(write_settings),
|
||||
settings.get());
|
||||
settings.get(),
|
||||
std::move(scheduler));
|
||||
}
|
||||
|
||||
void AzureObjectStorage::removeObjectImpl(const StoredObject & object, const SharedAzureClientPtr & client_ptr, bool if_exists)
|
||||
|
@ -3736,6 +3736,7 @@ namespace
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "ClickHouse doesn't support type recursion ({})", field_descriptor->full_name());
|
||||
}
|
||||
pending_resolution.emplace(field_descriptor);
|
||||
SCOPE_EXIT({ pending_resolution.erase(field_descriptor); });
|
||||
|
||||
if (allow_repeat && field_descriptor->is_map())
|
||||
{
|
||||
|
@ -1169,7 +1169,10 @@ struct ToStartOfHourImpl
|
||||
struct ToYearImpl
|
||||
{
|
||||
static constexpr auto name = "toYear";
|
||||
|
||||
static UInt16 execute(UInt64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toYear(t);
|
||||
}
|
||||
static UInt16 execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toYear(t);
|
||||
@ -1217,7 +1220,10 @@ struct ToWeekYearImpl
|
||||
static constexpr auto name = "toWeekYear";
|
||||
|
||||
static constexpr Int8 week_mode = 3;
|
||||
|
||||
static UInt16 execute(UInt64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toYearWeek(t, week_mode).first;
|
||||
}
|
||||
static UInt16 execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toYearWeek(t, week_mode).first;
|
||||
@ -1241,7 +1247,10 @@ struct ToWeekYearImpl
|
||||
struct ToWeekOfWeekYearImpl
|
||||
{
|
||||
static constexpr auto name = "toWeekOfWeekYear";
|
||||
|
||||
static UInt16 execute(UInt64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toISOWeek(t);
|
||||
}
|
||||
static UInt16 execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toISOWeek(t);
|
||||
@ -1265,7 +1274,10 @@ struct ToWeekOfWeekYearImpl
|
||||
struct ToQuarterImpl
|
||||
{
|
||||
static constexpr auto name = "toQuarter";
|
||||
|
||||
static UInt8 execute(UInt64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toQuarter(t);
|
||||
}
|
||||
static UInt8 execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toQuarter(t);
|
||||
@ -1290,7 +1302,10 @@ struct ToQuarterImpl
|
||||
struct ToMonthImpl
|
||||
{
|
||||
static constexpr auto name = "toMonth";
|
||||
|
||||
static UInt8 execute(UInt64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toMonth(t);
|
||||
}
|
||||
static UInt8 execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toMonth(t);
|
||||
@ -1315,7 +1330,10 @@ struct ToMonthImpl
|
||||
struct ToDayOfMonthImpl
|
||||
{
|
||||
static constexpr auto name = "toDayOfMonth";
|
||||
|
||||
static UInt8 execute(UInt64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toDayOfMonth(t);
|
||||
}
|
||||
static UInt8 execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toDayOfMonth(t);
|
||||
@ -1341,7 +1359,10 @@ struct ToDayOfWeekImpl
|
||||
{
|
||||
static constexpr auto name = "toDayOfWeek";
|
||||
static constexpr bool value_may_be_string = true;
|
||||
|
||||
static UInt8 execute(UInt64 t, UInt8 mode, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toDayOfWeek(t, mode);
|
||||
}
|
||||
static UInt8 execute(Int64 t, UInt8 mode, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toDayOfWeek(t, mode);
|
||||
@ -1365,7 +1386,10 @@ struct ToDayOfWeekImpl
|
||||
struct ToDayOfYearImpl
|
||||
{
|
||||
static constexpr auto name = "toDayOfYear";
|
||||
|
||||
static UInt16 execute(UInt64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toDayOfYear(t);
|
||||
}
|
||||
static UInt16 execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toDayOfYear(t);
|
||||
@ -1421,7 +1445,10 @@ public:
|
||||
struct ToHourImpl
|
||||
{
|
||||
static constexpr auto name = "toHour";
|
||||
|
||||
static UInt8 execute(UInt64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toHour(t);
|
||||
}
|
||||
static UInt8 execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toHour(t);
|
||||
@ -1446,7 +1473,10 @@ struct ToHourImpl
|
||||
struct TimezoneOffsetImpl
|
||||
{
|
||||
static constexpr auto name = "timezoneOffset";
|
||||
|
||||
static time_t execute(UInt64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.timezoneOffset(t);
|
||||
}
|
||||
static time_t execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.timezoneOffset(t);
|
||||
@ -1474,7 +1504,10 @@ struct TimezoneOffsetImpl
|
||||
struct ToMinuteImpl
|
||||
{
|
||||
static constexpr auto name = "toMinute";
|
||||
|
||||
static UInt8 execute(UInt64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toMinute(t);
|
||||
}
|
||||
static UInt8 execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toMinute(t);
|
||||
@ -1499,7 +1532,10 @@ struct ToMinuteImpl
|
||||
struct ToSecondImpl
|
||||
{
|
||||
static constexpr auto name = "toSecond";
|
||||
|
||||
static UInt8 execute(UInt64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toSecond(t);
|
||||
}
|
||||
static UInt8 execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toSecond(t);
|
||||
@ -1550,7 +1586,10 @@ struct ToMillisecondImpl
|
||||
struct ToISOYearImpl
|
||||
{
|
||||
static constexpr auto name = "toISOYear";
|
||||
|
||||
static UInt16 execute(UInt64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toISOYear(time_zone.toDayNum(t));
|
||||
}
|
||||
static UInt16 execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toISOYear(time_zone.toDayNum(t));
|
||||
@ -1607,7 +1646,10 @@ struct ToStartOfISOYearImpl
|
||||
struct ToISOWeekImpl
|
||||
{
|
||||
static constexpr auto name = "toISOWeek";
|
||||
|
||||
static UInt8 execute(UInt64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toISOWeek(time_zone.toDayNum(t));
|
||||
}
|
||||
static UInt8 execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return time_zone.toISOWeek(time_zone.toDayNum(t));
|
||||
|
@ -794,7 +794,7 @@ inline bool tryParseImpl<DataTypeIPv6>(DataTypeIPv6::FieldType & x, ReadBuffer &
|
||||
if (isNativeNumber(result_type) && !(result_type.getName() == "IPv4" || result_type.getName() == "IPv6"))
|
||||
message_buf << ". Note: there are to" << result_type.getName() << "OrZero and to" << result_type.getName() << "OrNull functions, which returns zero/NULL instead of throwing exception.";
|
||||
|
||||
throw Exception(PreformattedMessage{message_buf.str(), "Cannot parse string {} as {}: syntax error {}"}, ErrorCodes::CANNOT_PARSE_TEXT);
|
||||
throw Exception(PreformattedMessage{message_buf.str(), "Cannot parse string {} as {}: syntax error {}", {String(read_buffer.buffer().begin(), read_buffer.buffer().size()), result_type.getName()}}, ErrorCodes::CANNOT_PARSE_TEXT);
|
||||
}
|
||||
|
||||
|
||||
|
@ -62,8 +62,8 @@ template <> struct InstructionValueTypeMap<DataTypeInt16> { using Instructi
|
||||
template <> struct InstructionValueTypeMap<DataTypeUInt16> { using InstructionValueType = UInt32; };
|
||||
template <> struct InstructionValueTypeMap<DataTypeInt32> { using InstructionValueType = UInt32; };
|
||||
template <> struct InstructionValueTypeMap<DataTypeUInt32> { using InstructionValueType = UInt32; };
|
||||
template <> struct InstructionValueTypeMap<DataTypeInt64> { using InstructionValueType = UInt32; };
|
||||
template <> struct InstructionValueTypeMap<DataTypeUInt64> { using InstructionValueType = UInt32; };
|
||||
template <> struct InstructionValueTypeMap<DataTypeInt64> { using InstructionValueType = Int64; };
|
||||
template <> struct InstructionValueTypeMap<DataTypeUInt64> { using InstructionValueType = UInt64; };
|
||||
template <> struct InstructionValueTypeMap<DataTypeDate> { using InstructionValueType = UInt16; };
|
||||
template <> struct InstructionValueTypeMap<DataTypeDate32> { using InstructionValueType = Int32; };
|
||||
template <> struct InstructionValueTypeMap<DataTypeDateTime> { using InstructionValueType = UInt32; };
|
||||
@ -1017,7 +1017,7 @@ public:
|
||||
else
|
||||
{
|
||||
for (auto & instruction : instructions)
|
||||
instruction.perform(pos, static_cast<UInt32>(vec[i]), 0, 0, *time_zone);
|
||||
instruction.perform(pos, static_cast<T>(vec[i]), 0, 0, *time_zone);
|
||||
}
|
||||
*pos++ = '\0';
|
||||
|
||||
@ -1073,7 +1073,7 @@ public:
|
||||
{
|
||||
/// DateTime/DateTime64 --> insert instruction
|
||||
/// Other types cannot provide the requested data --> write out template
|
||||
if constexpr (is_any_of<T, UInt32, Int64>)
|
||||
if constexpr (is_any_of<T, UInt32, Int64, UInt64>)
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setMysqlFunc(std::move(func));
|
||||
@ -1539,7 +1539,7 @@ public:
|
||||
/// If the argument was DateTime, add instruction for printing. If it was date, just append default literal
|
||||
auto add_instruction = [&]([[maybe_unused]] typename Instruction<T>::FuncJoda && func, [[maybe_unused]] const String & default_literal)
|
||||
{
|
||||
if constexpr (is_any_of<T, UInt32, Int64>)
|
||||
if constexpr (is_any_of<T, UInt32, Int64, UInt64>)
|
||||
{
|
||||
Instruction<T> instruction;
|
||||
instruction.setJodaFunc(std::move(func));
|
||||
|
@ -23,6 +23,7 @@ struct WriteSettings
|
||||
size_t filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = 1000;
|
||||
|
||||
bool s3_allow_parallel_part_upload = true;
|
||||
bool azure_allow_parallel_part_upload = true;
|
||||
|
||||
/// Monitoring
|
||||
bool for_object_storage = false; // to choose which profile events should be incremented
|
||||
|
@ -109,4 +109,9 @@ void copyDataWithThrottler(ReadBuffer & from, WriteBuffer & to, size_t bytes, co
|
||||
copyDataImpl(from, to, true, bytes, &is_cancelled, throttler);
|
||||
}
|
||||
|
||||
void copyDataWithThrottler(ReadBuffer & from, WriteBuffer & to, std::function<void()> cancellation_hook, ThrottlerPtr throttler)
|
||||
{
|
||||
copyDataImpl(from, to, false, std::numeric_limits<size_t>::max(), cancellation_hook, throttler);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -33,5 +33,6 @@ void copyDataMaxBytes(ReadBuffer & from, WriteBuffer & to, size_t max_bytes);
|
||||
/// Same as above but also use throttler to limit maximum speed
|
||||
void copyDataWithThrottler(ReadBuffer & from, WriteBuffer & to, const std::atomic<int> & is_cancelled, ThrottlerPtr throttler);
|
||||
void copyDataWithThrottler(ReadBuffer & from, WriteBuffer & to, size_t bytes, const std::atomic<int> & is_cancelled, ThrottlerPtr throttler);
|
||||
void copyDataWithThrottler(ReadBuffer & from, WriteBuffer & to, std::function<void()> cancellation_hook, ThrottlerPtr throttler);
|
||||
|
||||
}
|
||||
|
@ -2013,6 +2013,63 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsBySortingDescription(const NameS
|
||||
return res;
|
||||
}
|
||||
|
||||
bool ActionsDAG::isFilterAlwaysFalseForDefaultValueInputs(const std::string & filter_name, const Block & input_stream_header)
|
||||
{
|
||||
const auto * filter_node = tryFindInOutputs(filter_name);
|
||||
if (!filter_node)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Outputs for ActionsDAG does not contain filter column name {}. DAG:\n{}",
|
||||
filter_name,
|
||||
dumpDAG());
|
||||
|
||||
std::unordered_map<std::string, ColumnWithTypeAndName> input_node_name_to_default_input_column;
|
||||
|
||||
for (const auto * input : inputs)
|
||||
{
|
||||
if (!input_stream_header.has(input->result_name))
|
||||
continue;
|
||||
|
||||
if (input->column)
|
||||
continue;
|
||||
|
||||
auto constant_column = input->result_type->createColumnConst(0, input->result_type->getDefault());
|
||||
auto constant_column_with_type_and_name = ColumnWithTypeAndName{constant_column, input->result_type, input->result_name};
|
||||
input_node_name_to_default_input_column.emplace(input->result_name, std::move(constant_column_with_type_and_name));
|
||||
}
|
||||
|
||||
ActionsDAGPtr filter_with_default_value_inputs;
|
||||
|
||||
try
|
||||
{
|
||||
filter_with_default_value_inputs = buildFilterActionsDAG({filter_node}, input_node_name_to_default_input_column);
|
||||
}
|
||||
catch (const Exception &)
|
||||
{
|
||||
/** It is possible that duing DAG construction, some functions cannot be executed for constant default value inputs
|
||||
* and exception will be thrown.
|
||||
*/
|
||||
return false;
|
||||
}
|
||||
|
||||
const auto * filter_with_default_value_inputs_filter_node = filter_with_default_value_inputs->getOutputs()[0];
|
||||
if (!filter_with_default_value_inputs_filter_node->column || !isColumnConst(*filter_with_default_value_inputs_filter_node->column))
|
||||
return false;
|
||||
|
||||
const auto & constant_type = filter_with_default_value_inputs_filter_node->result_type;
|
||||
auto which_constant_type = WhichDataType(constant_type);
|
||||
if (!which_constant_type.isUInt8() && !which_constant_type.isNothing())
|
||||
return false;
|
||||
|
||||
Field value;
|
||||
filter_with_default_value_inputs_filter_node->column->get(0, value);
|
||||
|
||||
if (value.isNull())
|
||||
return true;
|
||||
|
||||
UInt8 predicate_value = value.safeGet<UInt8>();
|
||||
return predicate_value == 0;
|
||||
}
|
||||
|
||||
ActionsDAG::SplitResult ActionsDAG::splitActionsForFilter(const std::string & column_name) const
|
||||
{
|
||||
const auto * node = tryFindInOutputs(column_name);
|
||||
|
@ -355,6 +355,13 @@ public:
|
||||
/// The second contains the rest.
|
||||
SplitResult splitActionsBySortingDescription(const NameSet & sort_columns) const;
|
||||
|
||||
/** Returns true if filter DAG is always false for inputs with default values.
|
||||
*
|
||||
* @param filter_name - name of filter node in current DAG.
|
||||
* @param input_stream_header - input stream header.
|
||||
*/
|
||||
bool isFilterAlwaysFalseForDefaultValueInputs(const std::string & filter_name, const Block & input_stream_header);
|
||||
|
||||
/// Create actions which may calculate part of filter using only available_inputs.
|
||||
/// If nothing may be calculated, returns nullptr.
|
||||
/// Otherwise, return actions which inputs are from available_inputs.
|
||||
|
@ -82,6 +82,7 @@ void EvictionCandidates::removeQueueEntries(const CachePriorityGuard::Lock & loc
|
||||
auto queue_iterator = candidate->getQueueIterator();
|
||||
queue_iterator->invalidate();
|
||||
|
||||
chassert(candidate->releasable());
|
||||
candidate->file_segment->resetQueueIterator();
|
||||
/// We need to set removed flag in file segment metadata,
|
||||
/// because in dynamic cache resize we first remove queue entries,
|
||||
@ -122,7 +123,13 @@ void EvictionCandidates::evict()
|
||||
while (!key_candidates.candidates.empty())
|
||||
{
|
||||
auto & candidate = key_candidates.candidates.back();
|
||||
chassert(candidate->releasable());
|
||||
if (!candidate->releasable())
|
||||
{
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Eviction candidate is not releasable: {} (evicting or removed flag: {})",
|
||||
candidate->file_segment->getInfoForLog(), candidate->isEvictingOrRemoved(*locked_key));
|
||||
}
|
||||
|
||||
const auto segment = candidate->file_segment;
|
||||
|
||||
IFileCachePriority::IteratorPtr iterator;
|
||||
|
@ -128,6 +128,11 @@ const FileCache::UserInfo & FileCache::getInternalUser()
|
||||
return user;
|
||||
}
|
||||
|
||||
bool FileCache::isInitialized() const
|
||||
{
|
||||
return is_initialized.load(std::memory_order_seq_cst);
|
||||
}
|
||||
|
||||
const String & FileCache::getBasePath() const
|
||||
{
|
||||
return metadata.getBaseDirectory();
|
||||
|
@ -80,6 +80,8 @@ public:
|
||||
|
||||
void initialize();
|
||||
|
||||
bool isInitialized() const;
|
||||
|
||||
const String & getBasePath() const;
|
||||
|
||||
static Key createKeyForPath(const String & path);
|
||||
|
@ -118,7 +118,7 @@ LockedKeyPtr KeyMetadata::lockNoStateCheck()
|
||||
return std::make_unique<LockedKey>(shared_from_this());
|
||||
}
|
||||
|
||||
bool KeyMetadata::createBaseDirectory()
|
||||
bool KeyMetadata::createBaseDirectory(bool throw_if_failed)
|
||||
{
|
||||
if (!created_base_directory.exchange(true))
|
||||
{
|
||||
@ -131,7 +131,7 @@ bool KeyMetadata::createBaseDirectory()
|
||||
{
|
||||
created_base_directory = false;
|
||||
|
||||
if (e.code() == std::errc::no_space_on_device)
|
||||
if (!throw_if_failed && e.code() == std::errc::no_space_on_device)
|
||||
{
|
||||
LOG_TRACE(cache_metadata->log, "Failed to create base directory for key {}, "
|
||||
"because no space left on device", key);
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user