Merge branch 'master' into issue-20309

vdimir 2021-05-10 16:28:18 +03:00
commit bd90b52f8e
No known key found for this signature in database
GPG Key ID: F57B3E10A21DBB31
325 changed files with 34489 additions and 1313 deletions

View File

@@ -169,8 +169,8 @@ endif ()
 option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
 option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)

-if (OS_LINUX AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
-    # Only for Linux, x86_64.
+if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
+    # Only for Linux, x86_64 or aarch64.
     option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
 elseif(GLIBC_COMPATIBILITY)
     message (${RECONFIGURE_MESSAGE_LEVEL} "Glibc compatibility cannot be enabled in current configuration")

View File

@@ -102,7 +102,7 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)
     auto * logger = &Poco::Logger::get("SentryWriter");
     if (config.getBool("send_crash_reports.enabled", false))
     {
-        if (debug || (strlen(VERSION_OFFICIAL) > 0))
+        if (debug || (strlen(VERSION_OFFICIAL) > 0)) //-V560
         {
             enabled = true;
         }

View File

@@ -15,7 +15,7 @@ if (GLIBC_COMPATIBILITY)
     add_headers_and_sources(glibc_compatibility .)
     add_headers_and_sources(glibc_compatibility musl)

-    if (ARCH_ARM)
+    if (ARCH_AARCH64)
        list (APPEND glibc_compatibility_sources musl/aarch64/syscall.s musl/aarch64/longjmp.s)
        set (musl_arch_include_dir musl/aarch64)
     elseif (ARCH_AMD64)

View File

@@ -78,6 +78,9 @@
  *
  */
+
+// Disable warnings by PVS-Studio
+//-V::GA
 static const double
 pi = 3.14159265358979311600e+00, /* 0x400921FB, 0x54442D18 */
 a0 = 7.72156649015328655494e-02, /* 0x3FB3C467, 0xE37DB0C8 */

View File

@@ -85,6 +85,9 @@
  *
  */
+
+// Disable warnings by PVS-Studio
+//-V::GA
 #include <stdint.h>
 #include <math.h>
 #include "libm.h"

View File

@@ -155,7 +155,7 @@ static inline long double fp_barrierl(long double x)
 static inline void fp_force_evalf(float x)
 {
     volatile float y;
-    y = x;
+    y = x; //-V1001
 }
 #endif

@@ -164,7 +164,7 @@ static inline void fp_force_evalf(float x)
 static inline void fp_force_eval(double x)
 {
     volatile double y;
-    y = x;
+    y = x; //-V1001
 }
 #endif

@@ -173,7 +173,7 @@ static inline void fp_force_eval(double x)
 static inline void fp_force_evall(long double x)
 {
     volatile long double y;
-    y = x;
+    y = x; //-V1001
 }
 #endif

View File

@@ -3,6 +3,9 @@
  * SPDX-License-Identifier: MIT
  */
+
+// Disable warnings by PVS-Studio
+//-V::GA
 #include <math.h>
 #include <stdint.h>
 #include "libm.h"

View File

@@ -40,7 +40,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
         split->addTextLog(log, text_log_max_priority);

     auto current_logger = config.getString("logger", "");
-    if (config_logger == current_logger)
+    if (config_logger == current_logger) //-V1051
         return;

     config_logger = current_logger;

View File

@@ -447,69 +447,6 @@ inline SrcIter uneven_copy(SrcIter src_first,
                        std::integral_constant<bool, DEST_IS_SMALLER>{});
 }

-/* generate_to, fill in a fixed-size array of integral type using a SeedSeq
- * (actually works for any random-access iterator)
- */
-
-template <size_t size, typename SeedSeq, typename DestIter>
-inline void generate_to_impl(SeedSeq&& generator, DestIter dest,
-                             std::true_type)
-{
-    generator.generate(dest, dest+size);
-}
-
-template <size_t size, typename SeedSeq, typename DestIter>
-void generate_to_impl(SeedSeq&& generator, DestIter dest,
-                      std::false_type)
-{
-    typedef typename std::iterator_traits<DestIter>::value_type dest_t;
-    constexpr auto DEST_SIZE = sizeof(dest_t);
-    constexpr auto GEN_SIZE = sizeof(uint32_t);
-
-    constexpr bool GEN_IS_SMALLER = GEN_SIZE < DEST_SIZE;
-
-    constexpr size_t FROM_ELEMS =
-        GEN_IS_SMALLER
-            ? size * ((DEST_SIZE+GEN_SIZE-1) / GEN_SIZE)
-            : (size + (GEN_SIZE / DEST_SIZE) - 1)
-                / ((GEN_SIZE / DEST_SIZE) + GEN_IS_SMALLER);
-    // this odd code ^^^^^^^^^^^^^^^^^ is work-around for
-    // a bug: http://llvm.org/bugs/show_bug.cgi?id=21287
-
-    if (FROM_ELEMS <= 1024) {
-        uint32_t buffer[FROM_ELEMS];
-        generator.generate(buffer, buffer+FROM_ELEMS);
-        uneven_copy(buffer, dest, dest+size);
-    } else {
-        uint32_t* buffer = static_cast<uint32_t*>(malloc(GEN_SIZE * FROM_ELEMS));
-        generator.generate(buffer, buffer+FROM_ELEMS);
-        uneven_copy(buffer, dest, dest+size);
-        free(static_cast<void*>(buffer));
-    }
-}
-
-template <size_t size, typename SeedSeq, typename DestIter>
-inline void generate_to(SeedSeq&& generator, DestIter dest)
-{
-    typedef typename std::iterator_traits<DestIter>::value_type dest_t;
-    constexpr bool IS_32BIT = sizeof(dest_t) == sizeof(uint32_t);
-
-    generate_to_impl<size>(std::forward<SeedSeq>(generator), dest,
-                           std::integral_constant<bool, IS_32BIT>{});
-}
-
-/* generate_one, produce a value of integral type using a SeedSeq
- * (optionally, we can have it produce more than one and pick which one
- * we want)
- */
-
-template <typename UInt, size_t i = 0UL, size_t N = i+1UL, typename SeedSeq>
-inline UInt generate_one(SeedSeq&& generator)
-{
-    UInt result[N];
-    generate_to<N>(std::forward<SeedSeq>(generator), result);
-    return result[i];
-}
-
 template <typename RngType>
 auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound)
     -> typename RngType::result_type
@@ -517,7 +454,7 @@ auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound)
     typedef typename RngType::result_type rtype;
     rtype threshold = (RngType::max() - RngType::min() + rtype(1) - upper_bound)
                     % upper_bound;
-    for (;;) {
+    for (;;) { //-V1044
         rtype r = rng() - RngType::min();
         if (r >= threshold)
             return r % upper_bound;

View File

@@ -928,7 +928,7 @@ struct rxs_m_xs_mixin {
         constexpr bitcount_t shift = bits - xtypebits;
         constexpr bitcount_t mask = (1 << opbits) - 1;
         bitcount_t rshift =
-            opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0;
+            opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; //-V547
         internal ^= internal >> (opbits + rshift);
         internal *= mcg_multiplier<itype>::multiplier();
         xtype result = internal >> shift;
@@ -950,7 +950,7 @@ struct rxs_m_xs_mixin {
         internal *= mcg_unmultiplier<itype>::unmultiplier();

-        bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0;
+        bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; //-V547
         internal = unxorshift(internal, bits, opbits + rshift);

         return internal;
@@ -975,7 +975,7 @@ struct rxs_m_mixin {
                                   : 2;
         constexpr bitcount_t shift = bits - xtypebits;
         constexpr bitcount_t mask = (1 << opbits) - 1;
-        bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0;
+        bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; //-V547
         internal ^= internal >> (opbits + rshift);
         internal *= mcg_multiplier<itype>::multiplier();
         xtype result = internal >> shift;
@@ -1366,7 +1366,7 @@ void extended<table_pow2,advance_pow2,baseclass,extvalclass,kdd>::selfinit()
     // - any strange correlations would only be apparent if we
     //   were to backstep the generator so that the base generator
     //   was generating the same values again
-    result_type xdiff = baseclass::operator()() - baseclass::operator()();
+    result_type xdiff = baseclass::operator()() - baseclass::operator()(); //-V501
     for (size_t i = 0; i < table_size; ++i) {
         data_[i] = baseclass::operator()() ^ xdiff;
     }

View File

@@ -35,7 +35,7 @@ RUN apt-get update \
 ENV TZ=Europe/Moscow
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

-RUN pip3 install urllib3 testflows==1.6.74 docker-compose docker dicttoxml kazoo tzlocal
+RUN pip3 install urllib3 testflows==1.6.74 docker-compose docker dicttoxml kazoo tzlocal python-dateutil numpy

 ENV DOCKER_CHANNEL stable
 ENV DOCKER_VERSION 17.09.1-ce
@@ -74,4 +74,3 @@ VOLUME /var/lib/docker
 EXPOSE 2375
 ENTRYPOINT ["dockerd-entrypoint.sh"]
 CMD ["sh", "-c", "python3 regression.py --no-color -o classic --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv"]
-

View File

@@ -31,10 +31,10 @@ toc_title: Cloud
 ## Alibaba Cloud {#alibaba-cloud}

-Alibaba Cloud Managed Service for ClickHouse. [China Site](https://www.aliyun.com/product/clickhouse) (will be available at the international site in May 2021). Provides the following key features:
+[Alibaba Cloud Managed Service for ClickHouse](https://www.alibabacloud.com/product/clickhouse) provides the following key features:

 - Highly reliable cloud disk storage engine based on [Alibaba Cloud Apsara](https://www.alibabacloud.com/product/apsara-stack) distributed system
-- Expand capacity on-demand without manual data migration
+- Expand capacity on demand without manual data migration
 - Support single-node, single-replica, multi-node, and multi-replica architectures, and support hot and cold data tiering
 - Support access allow-list, one-key recovery, multi-layer network security protection, cloud disk encryption
 - Seamless integration with cloud log systems, databases, and data application tools

View File

@@ -139,6 +139,7 @@ The following settings can be specified in configuration file for given endpoint
 - `endpoint` — Specifies prefix of an endpoint. Mandatory.
 - `access_key_id` and `secret_access_key` — Specifies credentials to use with given endpoint. Optional.
+- `region` — Specifies S3 region name. Optional.
 - `use_environment_credentials` — If set to `true`, S3 client will try to obtain credentials from environment variables and Amazon EC2 metadata for given endpoint. Optional, default value is `false`.
 - `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Optional, default value is `false`.
 - `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times.
@@ -152,6 +153,7 @@ The following settings can be specified in configuration file for given endpoint
     <endpoint>https://storage.yandexcloud.net/my-test-bucket-768/</endpoint>
     <!-- <access_key_id>ACCESS_KEY_ID</access_key_id> -->
     <!-- <secret_access_key>SECRET_ACCESS_KEY</secret_access_key> -->
+    <!-- <region>us-west-1</region> -->
     <!-- <use_environment_credentials>false</use_environment_credentials> -->
     <!-- <use_insecure_imds_request>false</use_insecure_imds_request> -->
     <!-- <header>Authorization: Bearer SOME-TOKEN</header> -->
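Editor's note (not part of this commit): a table whose URL falls under the configured `endpoint` prefix picks these settings up automatically, including the new `region`. A minimal sketch, assuming the bucket and object name:

```sql
-- Hypothetical table; region, credentials and headers come from the
-- matching <s3> endpoint block in the server configuration.
CREATE TABLE s3_test_table (name String, value UInt32)
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip');
```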

View File

@@ -739,6 +739,7 @@ Configuration markup:
         <endpoint>https://storage.yandexcloud.net/my-bucket/root-path/</endpoint>
         <access_key_id>your_access_key_id</access_key_id>
         <secret_access_key>your_secret_access_key</secret_access_key>
+        <region></region>
         <server_side_encryption_customer_key_base64>your_base64_encoded_customer_key</server_side_encryption_customer_key_base64>
         <proxy>
             <uri>http://proxy1</uri>
@@ -764,6 +765,7 @@ Required parameters:
 - `secret_access_key` — S3 secret access key.

 Optional parameters:
+- `region` — S3 region name.
 - `use_environment_credentials` — Reads AWS credentials from the Environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN if they exist. Default value is `false`.
 - `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Default value is `false`.
 - `proxy` — Proxy configuration for S3 endpoint. Each `uri` element inside `proxy` block should contain a proxy URL.
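Editor's note (not part of this commit): a usage sketch, assuming a disk declared with the markup above is wired into a storage policy hypothetically named `s3_main`:

```sql
-- Hypothetical table stored on the S3-backed storage policy.
CREATE TABLE s3_backed_table (dt Date, data String)
ENGINE = MergeTree()
ORDER BY dt
SETTINGS storage_policy = 's3_main';
```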

View File

@@ -101,6 +101,9 @@ Privileges can be granted to a role by the [GRANT](../sql-reference/statements/g
 Row policy is a filter that defines which of the rows are available to a user or a role. Row policy contains filters for one particular table, as well as a list of roles and/or users which should use this row policy.

+!!! note "Warning"
+    Row policies make sense only for users with readonly access. If a user can modify a table or copy partitions between tables, it defeats the restrictions of row policies.
+
 Management queries:

 - [CREATE ROW POLICY](../sql-reference/statements/create/row-policy.md)
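Editor's note (not part of this commit): to make the warning above concrete, a sketch of a row policy with hypothetical table, column, and user names:

```sql
-- Each user only sees rows whose user column matches their own login.
CREATE ROW POLICY filter_by_user ON mydb.hits
FOR SELECT USING user = currentUser() TO user1;
```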

View File

@@ -21,6 +21,7 @@ Columns:
 - `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary.
 - `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot.
 - `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache.
+- `found_rate` ([Float64](../../sql-reference/data-types/float.md)) — The percentage of uses for which the value was found.
 - `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of items stored in the dictionary.
 - `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table).
 - `source` ([String](../../sql-reference/data-types/string.md)) — Text describing the [data source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) for the dictionary.
@@ -60,4 +61,4 @@ SELECT * FROM system.dictionaries
 └──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘
 ```

 [Original article](https://clickhouse.tech/docs/en/operations/system_tables/dictionaries) <!--hide-->
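Editor's note (not part of this commit): an illustrative query for the new column:

```sql
-- found_rate complements hit_rate: it measures how often a looked-up key
-- existed in the dictionary at all, for any dictionary layout.
SELECT name, status, found_rate, hit_rate, element_count
FROM system.dictionaries;
```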

View File

@@ -12,6 +12,9 @@ The result depends on the order of running the query, and is nondeterministic.
 When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.

+!!! note "Note"
+    Using `quantileTDigestWeighted` [is not recommended for tiny data sets](https://github.com/tdunning/t-digest/issues/167#issuecomment-828650275) and can lead to significant error. In this case, consider the possibility of using [`quantileTDigest`](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md) instead.
+
 **Syntax**

 ``` sql
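Editor's note (not part of this commit): a toy comparison on a deliberately tiny input, where the note above applies:

```sql
-- With only ten values, quantileTDigestWeighted may deviate noticeably;
-- quantileTDigest avoids the weight-related bias described in the linked issue.
SELECT
    quantileTDigest(0.5)(number) AS median_tdigest,
    quantileTDigestWeighted(0.5)(number, 1) AS median_tdigest_weighted
FROM numbers(10);
```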

View File

@@ -7,6 +7,9 @@ toc_title: ROW POLICY
 Creates a [row policy](../../../operations/access-rights.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table.

+!!! note "Warning"
+    Row policies make sense only for users with readonly access. If a user can modify a table or copy partitions between tables, it defeats the restrictions of row policies.
+
 Syntax:

 ``` sql

View File

@@ -22,7 +22,7 @@ toc_title: "Cloud"
 ## Alibaba Cloud {#alibaba-cloud}

-Alibaba Cloud managed service for ClickHouse. [China site](https://www.aliyun.com/product/clickhouse) (will be available on the international site in May 2021). Provides the following key features:
+[Alibaba Cloud managed service for ClickHouse](https://www.alibabacloud.com/product/clickhouse) provides the following key features:

 - Highly reliable cloud disk storage engine based on the Alibaba Cloud Apsara distributed system
 - Expand capacity on demand without manual data migration

View File

@@ -82,6 +82,7 @@ SELECT * FROM s3_engine_table LIMIT 2;
 Optional settings:

 - `access_key_id` and `secret_access_key` — credentials to use with the given endpoint.
+- `region` — the S3 region name.
 - `use_environment_credentials` — if `true`, the S3 client will try to obtain credentials from environment variables and Amazon EC2 metadata for the given endpoint. Default value: `false`.
 - `header` — adds the specified HTTP header to a request to the given endpoint. May be specified multiple times.
 - `server_side_encryption_customer_key_base64` — sets the headers required to access S3 objects with SSE-C encryption.
@@ -94,6 +95,7 @@ SELECT * FROM s3_engine_table LIMIT 2;
     <endpoint>https://storage.yandexcloud.net/my-test-bucket-768/</endpoint>
     <!-- <access_key_id>ACCESS_KEY_ID</access_key_id> -->
     <!-- <secret_access_key>SECRET_ACCESS_KEY</secret_access_key> -->
+    <!-- <region>us-west-1</region> -->
     <!-- <use_environment_credentials>false</use_environment_credentials> -->
     <!-- <header>Authorization: Bearer SOME-TOKEN</header> -->
     <!-- <server_side_encryption_customer_key_base64>BASE64-ENCODED-KEY</server_side_encryption_customer_key_base64> -->

View File

@@ -727,6 +727,7 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
         <endpoint>https://storage.yandexcloud.net/my-bucket/root-path/</endpoint>
         <access_key_id>your_access_key_id</access_key_id>
         <secret_access_key>your_secret_access_key</secret_access_key>
+        <region></region>
         <proxy>
             <uri>http://proxy1</uri>
             <uri>http://proxy2</uri>
@@ -753,6 +754,7 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
 Optional parameters:

+- `region` — the S3 region name.
 - `use_environment_credentials` — whether to read AWS credentials from the network environment and from the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_SESSION_TOKEN` environment variables if they exist. Default value: `false`.
 - `use_insecure_imds_request` — whether to use a less secure connection for the IMDS request when obtaining credentials from Amazon EC2 metadata. Default value: `false`.
 - `proxy` — proxy configuration for the S3 endpoint. Each `uri` element inside the `proxy` block must contain a proxy URL.

View File

@@ -12,6 +12,9 @@ toc_priority: 208
 The internal states of the `quantile*` functions are not combined if they are used in the same query. If you need to compute quantiles of several levels, use the [quantiles](#quantiles) function; this makes the query more efficient.

+!!! note "Note"
+    Using `quantileTDigestWeighted` [is not recommended for small data sets](https://github.com/tdunning/t-digest/issues/167#issuecomment-828650275) and can lead to significant error. Consider using [`quantileTDigest`](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md) in such cases.
+
 **Syntax**

 ``` sql

View File

@@ -9,8 +9,9 @@ toc_title: "Index manipulations"
 An index can be added or removed with the following operations:

 ``` sql
-ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value [AFTER name]
-ALTER TABLE [db].name DROP INDEX name
+ALTER TABLE [db.]name ADD INDEX name expression TYPE type GRANULARITY value [AFTER name]
+ALTER TABLE [db.]name DROP INDEX name
+ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name
 ```

 Supported only by tables of the `*MergeTree` family.
@@ -18,6 +19,7 @@ ALTER TABLE [db].name DROP INDEX name
 The `ADD INDEX` command adds the index description to the table metadata, and `DROP INDEX` removes the index from the metadata and deletes the index files from disk, so these commands are lightweight and take effect immediately.

 If an index appears in the metadata, it is taken into account in subsequent merges and writes to the table, not immediately after the `ALTER` is executed.
+
+`MATERIALIZE INDEX` rebuilds the index in the specified partition. It is implemented as a mutation.

 The query to change indexes is replicated: the new metadata is saved in ZooKeeper and the changes are applied on all replicas.
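Editor's note (not part of this commit): a concrete invocation of the new statement, with hypothetical table, index, and partition names:

```sql
-- Rebuilds the secondary index idx_value for partition 202105 only;
-- the rebuild runs in the background as a mutation.
ALTER TABLE db.events MATERIALIZE INDEX idx_value IN PARTITION 202105;
```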

View File

@@ -31,7 +31,7 @@ toc_title: Cloud
 ## Alibaba Cloud {#alibaba-cloud}

-Alibaba Cloud managed service for ClickHouse. [China site](https://www.aliyun.com/product/clickhouse) (the international site opens in early May 2021). Provides the following key features:
+[Alibaba Cloud managed service for ClickHouse](https://www.alibabacloud.com/zh/product/clickhouse) provides the following key features:

 - Highly reliable cloud disk storage engine based on the Alibaba Apsara distributed system
 - Expand capacity on demand without manual data migration

View File

@@ -23,6 +23,8 @@ public:
     SharedLibraryHandler(const SharedLibraryHandler & other);

+    SharedLibraryHandler & operator=(const SharedLibraryHandler & other) = delete;
+
     ~SharedLibraryHandler();

     BlockInputStreamPtr loadAll();

View File

@@ -109,7 +109,7 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ
             validateODBCConnectionString(connection_string),
             getContext()->getSettingsRef().odbc_bridge_connection_pool_size);

-        nanodbc::catalog catalog(*connection);
+        nanodbc::catalog catalog(connection->get());
         std::string catalog_name;

         /// In XDBC tables it is allowed to pass either database_name or schema_name in table definition, but not both of them.

View File

@@ -46,7 +46,7 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ
             validateODBCConnectionString(connection_string),
             getContext()->getSettingsRef().odbc_bridge_connection_pool_size);

-        auto identifier = getIdentifierQuote(*connection);
+        auto identifier = getIdentifierQuote(connection->get());

         WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout);
         try

View File

@@ -18,13 +18,10 @@
 #include <Processors/Formats/InputStreamFromInputFormat.h>
 #include <common/logger_useful.h>
 #include <Server/HTTP/HTMLForm.h>
+#include "ODBCConnectionFactory.h"

 #include <mutex>
 #include <memory>
-#include <nanodbc/nanodbc.h>

 namespace DB
 {
@@ -133,12 +130,12 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
         auto quoting_style = IdentifierQuotingStyle::None;
 #if USE_ODBC
-        quoting_style = getQuotingStyle(*connection);
+        quoting_style = getQuotingStyle(connection->get());
 #endif

         auto & read_buf = request.getStream();
         auto input_format = FormatFactory::instance().getInput(format, read_buf, *sample_block, getContext(), max_block_size);
         auto input_stream = std::make_shared<InputStreamFromInputFormat>(input_format);
-        ODBCBlockOutputStream output_stream(*connection, db_name, table_name, *sample_block, getContext(), quoting_style);
+        ODBCBlockOutputStream output_stream(std::move(connection), db_name, table_name, *sample_block, getContext(), quoting_style);
         copyData(*input_stream, output_stream);
         writeStringBinary("Ok.", out);
     }
@@ -148,7 +145,7 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
         LOG_TRACE(log, "Query: {}", query);

         BlockOutputStreamPtr writer = FormatFactory::instance().getOutputStreamParallelIfPossible(format, out, *sample_block, getContext());
-        ODBCBlockInputStream inp(*connection, query, *sample_block, max_block_size);
+        ODBCBlockInputStream inp(std::move(connection), query, *sample_block, max_block_size);
         copyData(inp, *writer);
     }
 }

View File

@@ -21,14 +21,13 @@ namespace ErrorCodes
 ODBCBlockInputStream::ODBCBlockInputStream(
-    nanodbc::connection & connection_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_)
+    nanodbc::ConnectionHolderPtr connection, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_)
     : log(&Poco::Logger::get("ODBCBlockInputStream"))
     , max_block_size{max_block_size_}
-    , connection(connection_)
     , query(query_str)
 {
     description.init(sample_block);
-    result = execute(connection, NANODBC_TEXT(query));
+    result = execute(connection->get(), NANODBC_TEXT(query));
 }

View File

@@ -4,7 +4,7 @@
 #include <Core/Block.h>
 #include <DataStreams/IBlockInputStream.h>
 #include <Core/ExternalResultDescription.h>
-#include <nanodbc/nanodbc.h>
+#include "ODBCConnectionFactory.h"

 namespace DB
@@ -13,7 +13,7 @@ namespace DB
 class ODBCBlockInputStream final : public IBlockInputStream
 {
 public:
-    ODBCBlockInputStream(nanodbc::connection & connection_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_);
+    ODBCBlockInputStream(nanodbc::ConnectionHolderPtr connection, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_);

     String getName() const override { return "ODBC"; }

@@ -36,7 +36,6 @@ private:
     const UInt64 max_block_size;
     ExternalResultDescription description;

-    nanodbc::connection & connection;
     nanodbc::result result;
     String query;
     bool finished = false;

View File

@@ -40,14 +40,14 @@ namespace
 }

-ODBCBlockOutputStream::ODBCBlockOutputStream(nanodbc::connection & connection_,
+ODBCBlockOutputStream::ODBCBlockOutputStream(nanodbc::ConnectionHolderPtr connection_,
     const std::string & remote_database_name_,
     const std::string & remote_table_name_,
     const Block & sample_block_,
     ContextPtr local_context_,
     IdentifierQuotingStyle quoting_)
     : log(&Poco::Logger::get("ODBCBlockOutputStream"))
-    , connection(connection_)
+    , connection(std::move(connection_))
     , db_name(remote_database_name_)
     , table_name(remote_table_name_)
     , sample_block(sample_block_)
@@ -69,7 +69,7 @@ void ODBCBlockOutputStream::write(const Block & block)
         writer->write(block);

     std::string query = getInsertQuery(db_name, table_name, block.getColumnsWithTypeAndName(), quoting) + values_buf.str();
-    execute(connection, query);
+    execute(connection->get(), query);
 }
 }

View File

@@ -5,7 +5,7 @@
 #include <Core/ExternalResultDescription.h>
 #include <Parsers/IdentifierQuotingStyle.h>
 #include <Interpreters/Context_fwd.h>
-#include <nanodbc/nanodbc.h>
+#include "ODBCConnectionFactory.h"

 namespace DB
@@ -16,7 +16,7 @@ class ODBCBlockOutputStream : public IBlockOutputStream
 public:
     ODBCBlockOutputStream(
-        nanodbc::connection & connection_,
+        nanodbc::ConnectionHolderPtr connection_,
         const std::string & remote_database_name_,
         const std::string & remote_table_name_,
         const Block & sample_block_,
@@ -29,7 +29,7 @@ public:
 private:
     Poco::Logger * log;

-    nanodbc::connection & connection;
+    nanodbc::ConnectionHolderPtr connection;
     std::string db_name;
     std::string table_name;
     Block sample_block;

View File

@@ -6,53 +6,51 @@
 #include <common/BorrowedObjectPool.h>
 #include <unordered_map>

+namespace DB
+{
+namespace ErrorCodes
+{
+    extern const int NO_FREE_CONNECTION;
+}
+}
+
 namespace nanodbc
 {

-static constexpr inline auto ODBC_CONNECT_TIMEOUT = 100;
-
-using ConnectionPtr = std::shared_ptr<nanodbc::connection>;
+using ConnectionPtr = std::unique_ptr<nanodbc::connection>;
 using Pool = BorrowedObjectPool<ConnectionPtr>;
 using PoolPtr = std::shared_ptr<Pool>;

 class ConnectionHolder
 {
 public:
-    ConnectionHolder(const std::string & connection_string_, PoolPtr pool_) : connection_string(connection_string_), pool(pool_) {}
+    ConnectionHolder(PoolPtr pool_, ConnectionPtr connection_) : pool(pool_), connection(std::move(connection_)) {}

-    ~ConnectionHolder()
-    {
-        if (connection)
-            pool->returnObject(std::move(connection));
-    }
+    ConnectionHolder(const ConnectionHolder & other) = delete;

-    nanodbc::connection & operator*()
-    {
-        if (!connection)
-        {
-            pool->borrowObject(connection, [&]()
-            {
-                return std::make_shared<nanodbc::connection>(connection_string, ODBC_CONNECT_TIMEOUT);
-            });
-        }
+    ~ConnectionHolder() { pool->returnObject(std::move(connection)); }

+    nanodbc::connection & get() const
+    {
+        assert(connection != nullptr);
         return *connection;
     }

 private:
-    std::string connection_string;
     PoolPtr pool;
     ConnectionPtr connection;
 };

+using ConnectionHolderPtr = std::unique_ptr<ConnectionHolder>;
+
 }


 namespace DB
 {

+static constexpr inline auto ODBC_CONNECT_TIMEOUT = 100;
+static constexpr inline auto ODBC_POOL_WAIT_TIMEOUT = 10000;
+
 class ODBCConnectionFactory final : private boost::noncopyable
 {
 public:
@@ -62,14 +60,32 @@ public:
         return ret;
     }

-    nanodbc::ConnectionHolder get(const std::string & connection_string, size_t pool_size)
+    nanodbc::ConnectionHolderPtr get(const std::string & connection_string, size_t pool_size)
     {
         std::lock_guard lock(mutex);

         if (!factory.count(connection_string))
             factory.emplace(std::make_pair(connection_string, std::make_shared<nanodbc::Pool>(pool_size)));

-        return nanodbc::ConnectionHolder(connection_string, factory[connection_string]);
+        auto & pool = factory[connection_string];
+
+        nanodbc::ConnectionPtr connection;
+        auto connection_available = pool->tryBorrowObject(connection, []() { return nullptr; }, ODBC_POOL_WAIT_TIMEOUT);
+
+        if (!connection_available)
+            throw Exception("Unable to fetch connection within the timeout", ErrorCodes::NO_FREE_CONNECTION);
+
+        try
+        {
+            if (!connection || !connection->connected())
+                connection = std::make_unique<nanodbc::connection>(connection_string, ODBC_CONNECT_TIMEOUT);
+        }
+        catch (...)
+        {
+            pool->returnObject(std::move(connection));
+        }
+
+        return std::make_unique<nanodbc::ConnectionHolder>(factory[connection_string], std::move(connection));
     }

 private:
View File

@@ -53,7 +53,7 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer
             validateODBCConnectionString(connection_string),
             getContext()->getSettingsRef().odbc_bridge_connection_pool_size);

-        bool result = isSchemaAllowed(*connection);
+        bool result = isSchemaAllowed(connection->get());

         WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout);
         try

View File

@@ -3,7 +3,6 @@
 #if USE_ODBC

 #include <common/logger_useful.h>
-#include <nanodbc/nanodbc.h>
 #include <sql.h>
 #include <sqlext.h>

View File

@@ -13,6 +13,7 @@
 #include <Poco/Net/HTTPServer.h>
 #include <Poco/Net/NetException.h>
 #include <Poco/Util/HelpFormatter.h>
+#include <Poco/Environment.h>
 #include <ext/scope_guard.h>
 #include <common/defines.h>
 #include <common/logger_useful.h>
@@ -385,6 +386,11 @@ void Server::initialize(Poco::Util::Application & self)
 {
     BaseDaemon::initialize(self);
     logger().information("starting up");
+
+    LOG_INFO(&logger(), "OS Name = {}, OS Version = {}, OS Architecture = {}",
+        Poco::Environment::osName(),
+        Poco::Environment::osVersion(),
+        Poco::Environment::osArchitecture());
 }

 std::string Server::getDefaultCorePath() const

View File

@@ -518,6 +518,33 @@
     <!-- Reallocate memory for machine code ("text") using huge pages. Highly experimental. -->
     <remap_executable>false</remap_executable>

+    <![CDATA[
+         Uncomment below in order to use JDBC table engine and function.
+
+         To install and run JDBC bridge in background:
+         * [Debian/Ubuntu]
+           export MVN_URL=https://repo1.maven.org/maven2/ru/yandex/clickhouse/clickhouse-jdbc-bridge
+           export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
+           wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
+           apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
+           clickhouse-jdbc-bridge &
+
+         * [CentOS/RHEL]
+           export MVN_URL=https://repo1.maven.org/maven2/ru/yandex/clickhouse/clickhouse-jdbc-bridge
+           export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
+           wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
+           yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
+           clickhouse-jdbc-bridge &
+
+         Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information.
+    ]]>
+
+    <!--
+    <jdbc_bridge>
+        <host>127.0.0.1</host>
+        <port>9019</port>
+    </jdbc_bridge>
+    -->
+
     <!-- Configuration of clusters that could be used in Distributed tables.
          https://clickhouse.tech/docs/en/operations/table_engines/distributed/
       -->
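Editor's note (not part of this commit): once the bridge is running and the `<jdbc_bridge>` block above is uncommented, external tables become reachable through the `jdbc` table function. A sketch — the datasource name is hypothetical and would have to be defined on the bridge side:

```sql
-- 'self' is a datasource configured in the JDBC bridge, not in ClickHouse.
SELECT * FROM jdbc('self', 'schema_name', 'table_name') LIMIT 10;
```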

View File

@@ -355,8 +355,9 @@ String DiskAccessStorage::getStorageParamsJSON() const
     std::lock_guard lock{mutex};
     Poco::JSON::Object json;
     json.set("path", directory_path);
-    if (readonly)
-        json.set("readonly", readonly.load());
+    bool readonly_loaded = readonly;
+    if (readonly_loaded)
+        json.set("readonly", Poco::Dynamic::Var{true});
     std::ostringstream oss;     // STYLE_CHECK_ALLOW_STD_STRING_STREAM
     oss.exceptions(std::ios::failbit);
     Poco::JSON::Stringifier::stringify(json, oss);

View File

@@ -77,7 +77,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str
         if (enable_tls_lc_str == "starttls")
             params.enable_tls = LDAPClient::Params::TLSEnable::YES_STARTTLS;
         else if (config.getBool(ldap_server_config + ".enable_tls"))
-            params.enable_tls = LDAPClient::Params::TLSEnable::YES;
+            params.enable_tls = LDAPClient::Params::TLSEnable::YES; //-V1048
         else
             params.enable_tls = LDAPClient::Params::TLSEnable::NO;
     }
@@ -96,7 +96,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str
         else if (tls_minimum_protocol_version_lc_str == "tls1.1")
             params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_1;
         else if (tls_minimum_protocol_version_lc_str == "tls1.2")
-            params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2;
+            params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2; //-V1048
         else
             throw Exception("Bad value for 'tls_minimum_protocol_version' entry, allowed values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'", ErrorCodes::BAD_ARGUMENTS);
     }
@@ -113,7 +113,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str
         else if (tls_require_cert_lc_str == "try")
             params.tls_require_cert = LDAPClient::Params::TLSRequireCert::TRY;
         else if (tls_require_cert_lc_str == "demand")
-            params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND;
+            params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND; //-V1048
         else
             throw Exception("Bad value for 'tls_require_cert' entry, allowed values are: 'never', 'allow', 'try', 'demand'", ErrorCodes::BAD_ARGUMENTS);
     }

View File

@@ -136,7 +136,7 @@ GrantedRoles::Elements GrantedRoles::getElements() const
     boost::range::set_difference(roles, roles_with_admin_option, std::back_inserter(element.ids));
     if (!element.empty())
     {
-        element.admin_option = false;
+        element.admin_option = false; //-V1048
         elements.emplace_back(std::move(element));
     }

View File

@@ -45,7 +45,7 @@ struct Quota : public IAccessEntity
     struct ResourceTypeInfo
     {
-        const char * const raw_name;
+        const char * const raw_name = "";
         const String name;    /// Lowercased with underscores, e.g. "result_rows".
         const String keyword; /// Uppercased with spaces, e.g. "RESULT ROWS".
         const bool output_as_float = false;

View File

@@ -20,7 +20,7 @@ namespace ErrorCodes
 /** Tracks the leftmost and rightmost (x, y) data points.
   */
-struct AggregateFunctionBoundingRatioData
+struct AggregateFunctionBoundingRatioData //-V730
 {
     struct Point
     {

View File

@@ -220,7 +220,7 @@ private:
     }

 public:
-    AggregateFunctionHistogramData()
+    AggregateFunctionHistogramData() //-V730
         : size(0)
         , lower_bound(std::numeric_limits<Mean>::max())
         , upper_bound(std::numeric_limits<Mean>::lowest())

View File

@@ -181,7 +181,7 @@ public:
 /** For strings. Short strings are stored in the object itself, and long strings are allocated separately.
   * NOTE It could also be suitable for arrays of numbers.
   */
-struct SingleValueDataString
+struct SingleValueDataString //-V730
 {
 private:
     using Self = SingleValueDataString;

View File

@@ -13,7 +13,7 @@
 namespace DB
 {

-namespace
+namespace detail
 {

 /// This function returns true if both values are large and comparable.
@@ -72,7 +72,7 @@ public:
         Float64 factor = static_cast<Float64>(count * source.count) / total_count;
         Float64 delta = mean - source.mean;

-        if (areComparable(count, source.count))
+        if (detail::areComparable(count, source.count))
             mean = (source.count * source.mean + count * mean) / total_count;
         else
             mean = source.mean + delta * (static_cast<Float64>(count) / total_count);
@@ -302,7 +302,7 @@ public:
         Float64 left_delta = left_mean - source.left_mean;
         Float64 right_delta = right_mean - source.right_mean;

-        if (areComparable(count, source.count))
+        if (detail::areComparable(count, source.count))
         {
             left_mean = (source.count * source.left_mean + count * left_mean) / total_count;
             right_mean = (source.count * source.right_mean + count * right_mean) / total_count;

View File

@ -263,7 +263,7 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
{ {
// sort inputs in ascending order // sort inputs in ascending order
std::sort(array.begin(), array.end()); std::sort(array.begin(), array.end());
size_t n = level < 1 ? level * array.size() : (array.size() - 1);
         // if level is 0.5 then compute the "low" median of the sorted array
         // by the method of rounding.
         if (level == 0.5)
@@ -278,10 +278,14 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
                 return array[static_cast<size_t>((floor(s / 2)) - 1)];
             }
         }
-        // else quantile is the nth index of the sorted array obtained by multiplying
-        // level and size of array. Example if level = 0.1 and size of array is 10,
-        // then return array[1].
-        return array[n];
+        else
+        {
+            // else quantile is the nth index of the sorted array obtained by multiplying
+            // level and size of array. Example if level = 0.1 and size of array is 10,
+            // then return array[1].
+            size_t n = level < 1 ? level * array.size() : (array.size() - 1);
+            return array[n];
+        }
     }
     return std::numeric_limits<Value>::quiet_NaN();
 }
@@ -295,7 +299,7 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
         for (size_t i = 0; i < size; ++i)
         {
             auto level = levels[indices[i]];
-            size_t n = level < 1 ? level * array.size() : (array.size() - 1);
             // if level is 0.5 then compute the "low" median of the sorted array
             // by the method of rounding.
             if (level == 0.5)
@@ -310,9 +314,13 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
                     result[indices[i]] = array[static_cast<size_t>(floor((s / 2) - 1))];
                 }
             }
-            // else quantile is the nth index of the sorted array obtained by multiplying
-            // level and size of array. Example if level = 0.1 and size of array is 10.
-            result[indices[i]] = array[n];
+            else
+            {
+                // else quantile is the nth index of the sorted array obtained by multiplying
+                // level and size of array. Example if level = 0.1 and size of array is 10.
+                size_t n = level < 1 ? level * array.size() : (array.size() - 1);
+                result[indices[i]] = array[n];
+            }
         }
     }
     else
@@ -337,7 +345,7 @@ struct QuantileExactHigh : public QuantileExactBase<Value, QuantileExactHigh<Val
     {
         // sort inputs in ascending order
         std::sort(array.begin(), array.end());
-        size_t n = level < 1 ? level * array.size() : (array.size() - 1);
         // if level is 0.5 then compute the "high" median of the sorted array
         // by the method of rounding.
         if (level == 0.5)
@@ -345,9 +353,13 @@ struct QuantileExactHigh : public QuantileExactBase<Value, QuantileExactHigh<Val
             auto s = array.size();
             return array[static_cast<size_t>(floor(s / 2))];
         }
-        // else quantile is the nth index of the sorted array obtained by multiplying
-        // level and size of array. Example if level = 0.1 and size of array is 10.
-        return array[n];
+        else
+        {
+            // else quantile is the nth index of the sorted array obtained by multiplying
+            // level and size of array. Example if level = 0.1 and size of array is 10.
+            size_t n = level < 1 ? level * array.size() : (array.size() - 1);
+            return array[n];
+        }
     }
     return std::numeric_limits<Value>::quiet_NaN();
 }
@@ -361,7 +373,7 @@ struct QuantileExactHigh : public QuantileExactBase<Value, QuantileExactHigh<Val
         for (size_t i = 0; i < size; ++i)
        {
             auto level = levels[indices[i]];
-            size_t n = level < 1 ? level * array.size() : (array.size() - 1);
             // if level is 0.5 then compute the "high" median of the sorted array
             // by the method of rounding.
             if (level == 0.5)
@@ -369,9 +381,13 @@ struct QuantileExactHigh : public QuantileExactBase<Value, QuantileExactHigh<Val
                 auto s = array.size();
                 result[indices[i]] = array[static_cast<size_t>(floor(s / 2))];
             }
-            // else quantile is the nth index of the sorted array obtained by multiplying
-            // level and size of array. Example if level = 0.1 and size of array is 10.
-            result[indices[i]] = array[n];
+            else
+            {
+                // else quantile is the nth index of the sorted array obtained by multiplying
+                // level and size of array. Example if level = 0.1 and size of array is 10.
+                size_t n = level < 1 ? level * array.size() : (array.size() - 1);
+                result[indices[i]] = array[n];
+            }
         }
     }
     else
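For reference, the selection rule these hunks implement can be condensed into a few lines (a standalone sketch, not part of the diff; the function name is illustrative):

#include <algorithm>
#include <vector>

// "Low" median: for level == 0.5 and an even-sized sorted array, take the
// element just below the middle; otherwise index by level * size, clamped
// to the last element.
template <typename Value>
Value quantileExactLowSketch(std::vector<Value> array, double level)
{
    std::sort(array.begin(), array.end());
    size_t s = array.size();
    if (level == 0.5)
        return (s % 2 == 1) ? array[s / 2] : array[s / 2 - 1];
    size_t n = level < 1 ? static_cast<size_t>(level * s) : (s - 1);
    return array[n];
}

For {10, 20, 30, 40} the low median is 20, while the "high" variant (array[s / 2]) yields 30; for level = 0.1 and ten elements the result is array[1], matching the comment in the hunk.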
@@ -159,12 +159,12 @@ private:
     /// The number of bytes read.
     size_t read_count = 0;
     /// The content in the current position.
-    UInt8 value_l;
-    UInt8 value_r;
+    UInt8 value_l = 0;
+    UInt8 value_r = 0;
     ///
     bool is_eof = false;
     /// Does the cell fully fit into one byte?
-    bool fits_in_byte;
+    bool fits_in_byte = false;
 };
 
 /** TODO This code looks very suboptimal.
@@ -804,7 +804,7 @@ bool Dwarf::findLocation(
     findSubProgramDieForAddress(cu, die, address, base_addr_cu, subprogram);
 
     // Subprogram is the DIE of caller function.
-    if (check_inline && subprogram.abbr.has_children)
+    if (/*check_inline &&*/ subprogram.abbr.has_children)
     {
         // Use an extra location and get its call file and call line, so that
         // they can be used for the second last location when we don't have
@@ -832,7 +832,7 @@ bool Dwarf::findLocation(
         // file+line of the non-inlined outer function making the call.
         // locationInfo.name is already set by the caller by looking up the
         // non-inlined function @address belongs to.
-        info.has_file_and_line = true;
+        info.has_file_and_line = true; //-V1048
         info.file = call_locations[0].file;
         info.line = call_locations[0].line;
@@ -121,7 +121,7 @@ public:
     struct CallLocation
     {
         Path file = {};
-        uint64_t line;
+        uint64_t line = 0;
         std::string_view name;
     };
@@ -202,8 +202,8 @@ private:
     // Abbreviation for a Debugging Information Entry.
     struct DIEAbbreviation
     {
-        uint64_t code;
-        uint64_t tag;
+        uint64_t code = 0;
+        uint64_t tag = 0;
         bool has_children = false;
 
         std::string_view attributes;
@@ -29,7 +29,7 @@ namespace ErrorCodes
     struct Error
     {
         /// Number of times Exception with this ErrorCode had been throw.
-        Value count;
+        Value count = 0;
         /// Time of the last error.
         UInt64 error_time_ms = 0;
         /// Message for the last error.
@@ -44,7 +44,7 @@ struct ClearableHashTableCell : public BaseCell
     /// Do I need to store the zero key separately (that is, can a zero key be inserted into the hash table).
     static constexpr bool need_zero_value_storage = false;
 
-    ClearableHashTableCell() {}
+    ClearableHashTableCell() {} //-V730
     ClearableHashTableCell(const Key & key_, const State & state) : BaseCell(key_, state), version(state.version) {}
 };
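The //-V730 markers introduced here and in the hash-table headers below suppress PVS-Studio's V730 diagnostic ("not all members of a class are initialized inside the constructor"). A minimal sketch of why the omission is deliberate (illustrative struct, not from the diff):

// Hash-table cells sit in large pre-allocated buffers and are written only on
// insertion, so zeroing every member in the empty constructor would add a
// useless pass over memory on the hot path.
struct CellSketch
{
    int key;                  // intentionally left uninitialized
    CellSketch() {} //-V730   // suppression applies to this line only
    explicit CellSketch(int key_) : key(key_) {}
};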
@@ -13,7 +13,7 @@ struct FixedClearableHashTableCell
     using mapped_type = VoidMapped;
     UInt32 version;
 
-    FixedClearableHashTableCell() {}
+    FixedClearableHashTableCell() {} //-V730
     FixedClearableHashTableCell(const Key &, const State & state) : version(state.version) {}
 
     const VoidKey getKey() const { return {}; }
@@ -16,7 +16,7 @@ struct FixedHashMapCell
     bool full;
     Mapped mapped;
 
-    FixedHashMapCell() {}
+    FixedHashMapCell() {} //-V730
     FixedHashMapCell(const Key &, const State &) : full(true) {}
     FixedHashMapCell(const value_type & value_, const State &) : full(true), mapped(value_.second) {}
@@ -31,7 +31,7 @@ struct FixedHashMapCell
     /// Note that we have to assemble a continuous layout for the value_type on each call of getValue().
     struct CellExt
     {
-        CellExt() {}
+        CellExt() {} //-V730
         CellExt(Key && key_, const FixedHashMapCell * ptr_) : key(key_), ptr(const_cast<FixedHashMapCell *>(ptr_)) {}
         void update(Key && key_, const FixedHashMapCell * ptr_)
         {
@@ -76,7 +76,7 @@ struct FixedHashMapImplicitZeroCell
     /// Note that we have to assemble a continuous layout for the value_type on each call of getValue().
     struct CellExt
     {
-        CellExt() {}
+        CellExt() {} //-V730
         CellExt(Key && key_, const FixedHashMapImplicitZeroCell * ptr_) : key(key_), ptr(const_cast<FixedHashMapImplicitZeroCell *>(ptr_)) {}
         void update(Key && key_, const FixedHashMapImplicitZeroCell * ptr_)
         {
@@ -19,7 +19,7 @@ struct FixedHashTableCell
     using mapped_type = VoidMapped;
     bool full;
 
-    FixedHashTableCell() {}
+    FixedHashTableCell() {} //-V730
     FixedHashTableCell(const Key &, const State &) : full(true) {}
 
     const VoidKey getKey() const { return {}; }
@@ -267,7 +267,7 @@ public:
         DB::ReadBuffer & in;
         Cell cell;
         size_t read_count = 0;
-        size_t size;
+        size_t size = 0;
         bool is_eof = false;
         bool is_initialized = false;
     };
@@ -73,8 +73,8 @@ struct HashSetCellWithSavedHash : public HashTableCell<Key, Hash, TState>
     size_t saved_hash;
 
-    HashSetCellWithSavedHash() : Base() {}
-    HashSetCellWithSavedHash(const Key & key_, const typename Base::State & state) : Base(key_, state) {}
+    HashSetCellWithSavedHash() : Base() {} //-V730
+    HashSetCellWithSavedHash(const Key & key_, const typename Base::State & state) : Base(key_, state) {} //-V730
 
     bool keyEquals(const Key & key_) const { return bitEquals(this->key, key_); }
     bool keyEquals(const Key & key_, size_t hash_) const { return saved_hash == hash_ && bitEquals(this->key, key_); }
@@ -305,7 +305,7 @@ template <bool need_zero_value_storage, typename Cell>
 struct ZeroValueStorage;
 
 template <typename Cell>
-struct ZeroValueStorage<true, Cell>
+struct ZeroValueStorage<true, Cell> //-V730
 {
 private:
     bool has_zero = false;
@@ -80,7 +80,7 @@ public:
     {
     public:
         Reader(DB::ReadBuffer & in_)
             : in(in_)
         {
         }
@@ -124,15 +124,15 @@ public:
         DB::ReadBuffer & in;
         Cell cell;
         size_t read_count = 0;
-        size_t size;
+        size_t size = 0;
         bool is_eof = false;
         bool is_initialized = false;
     };
 
     class iterator
     {
-        Self * container;
-        Cell * ptr;
+        Self * container = nullptr;
+        Cell * ptr = nullptr;
 
         friend class SmallTable;
@@ -158,8 +158,8 @@ public:
     class const_iterator
     {
-        const Self * container;
-        const Cell * ptr;
+        const Self * container = nullptr;
+        const Cell * ptr = nullptr;
 
         friend class SmallTable;
@@ -184,16 +184,16 @@ public:
     };
 
     const_iterator begin() const { return iteratorTo(buf); }
     iterator begin() { return iteratorTo(buf); }
 
     const_iterator end() const { return iteratorTo(buf + m_size); }
     iterator end() { return iteratorTo(buf + m_size); }
 
 protected:
     const_iterator iteratorTo(const Cell * ptr) const { return const_iterator(this, ptr); }
     iterator iteratorTo(Cell * ptr) { return iterator(this, ptr); }
 
 public:
@@ -79,7 +79,7 @@ struct StringHashTableHash
 };
 
 template <typename Cell>
-struct StringHashTableEmpty
+struct StringHashTableEmpty //-V730
 {
     using Self = StringHashTableEmpty;
@@ -119,9 +119,9 @@ public:
     class iterator
     {
-        Self * container;
-        size_t bucket;
-        typename Impl::iterator current_it;
+        Self * container{};
+        size_t bucket{};
+        typename Impl::iterator current_it{};
 
         friend class TwoLevelHashTable;
@@ -156,9 +156,9 @@ public:
     class const_iterator
     {
-        Self * container;
-        size_t bucket;
-        typename Impl::const_iterator current_it;
+        Self * container{};
+        size_t bucket{};
+        typename Impl::const_iterator current_it{};
 
         friend class TwoLevelHashTable;
@@ -80,10 +80,7 @@ template <UInt64 MaxValue> struct MinCounterType
 /// Denominator of expression for HyperLogLog algorithm.
 template <UInt8 precision, int max_rank, typename HashValueType, typename DenominatorType,
     DenominatorMode denominator_mode, typename Enable = void>
-class __attribute__ ((packed)) Denominator;
-
-namespace
-{
+class Denominator;
 
 /// Returns true if rank storage is big.
 constexpr bool isBigRankStore(UInt8 precision)
@@ -91,8 +88,6 @@ constexpr bool isBigRankStore(UInt8 precision)
     return precision >= 12;
 }
 
-}
-
 /// Used to deduce denominator type depending on options provided.
 template <typename HashValueType, typename DenominatorType, DenominatorMode denominator_mode, typename Enable = void>
 struct IntermediateDenominator;
@@ -120,7 +115,7 @@ struct IntermediateDenominator<HashValueType, DenominatorType, DenominatorMode::
 /// Satisfiable when rank storage is small enough.
 template <UInt8 precision, int max_rank, typename HashValueType, typename DenominatorType,
     DenominatorMode denominator_mode>
-class __attribute__ ((packed)) Denominator<precision, max_rank, HashValueType, DenominatorType,
+class __attribute__((__packed__)) Denominator<precision, max_rank, HashValueType, DenominatorType,
     denominator_mode,
     std::enable_if_t<!details::isBigRankStore(precision) || !(denominator_mode == DenominatorMode::StableIfBig)>>
 {
@@ -164,7 +159,7 @@ private:
 /// Used when rank storage is big.
 template <UInt8 precision, int max_rank, typename HashValueType, typename DenominatorType,
     DenominatorMode denominator_mode>
-class __attribute__ ((packed)) Denominator<precision, max_rank, HashValueType, DenominatorType,
+class __attribute__((__packed__)) Denominator<precision, max_rank, HashValueType, DenominatorType,
     denominator_mode,
     std::enable_if_t<details::isBigRankStore(precision) && denominator_mode == DenominatorMode::StableIfBig>>
 {
@@ -252,6 +247,7 @@ struct RankWidth<UInt64>
 }
 
 /// Sets behavior of HyperLogLog class.
 enum class HyperLogLogMode
 {
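The change from `__attribute__ ((packed))` to `__attribute__((__packed__))` is not cosmetic: attribute names spelled with surrounding double underscores are reserved identifiers, so they cannot be broken by a user macro. A minimal sketch (the macro is hypothetical):

#define packed 1   // hypothetical macro that would corrupt the plain spelling

struct __attribute__((__packed__)) PackedSketch   // still parses correctly
{
    char c;
    int i;
};

static_assert(sizeof(PackedSketch) == sizeof(char) + sizeof(int), "no padding");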
@@ -208,7 +208,7 @@ public:
        static bool isBlocked(VariableContext current_level, bool fault_injection)
        {
-            return counter > 0 && current_level >= level && (!fault_injection || (fault_injection && block_fault_injections));
+            return counter > 0 && current_level >= level && (!fault_injection || block_fault_injections);
        }
    };
 };
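The rewritten condition drops a redundant term: `!f || (f && b)` is logically equivalent to `!f || b`. A throwaway exhaustive check (not from the diff):

#include <cassert>

int main()
{
    for (bool f : {false, true})
        for (bool b : {false, true})
            assert((!f || (f && b)) == (!f || b));
}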
@@ -4,48 +4,33 @@
 #include <limits>
 #include <type_traits>
-#include <common/extended_types.h>
 
-/// To be sure, that this function is zero-cost for non-floating point types.
-template <typename T>
-inline std::enable_if_t<std::is_floating_point_v<T>, bool> isNaN(T x)
-{
-    return std::isnan(x);
-}
 
 template <typename T>
-inline std::enable_if_t<!std::is_floating_point_v<T>, bool> isNaN(T)
+inline bool isNaN(T x)
 {
-    return false;
+    /// To be sure, that this function is zero-cost for non-floating point types.
+    if constexpr (std::is_floating_point_v<T>)
+        return std::isnan(x);
+    else
+        return false;
 }
 
-template <typename T>
-inline std::enable_if_t<std::is_floating_point_v<T>, bool> isFinite(T x)
-{
-    return std::isfinite(x);
-}
 
 template <typename T>
-inline std::enable_if_t<!std::is_floating_point_v<T>, bool> isFinite(T)
+inline bool isFinite(T x)
 {
-    return true;
+    if constexpr (std::is_floating_point_v<T>)
+        return std::isfinite(x);
+    else
+        return true;
 }
 
-template <typename T>
-std::enable_if_t<std::is_floating_point_v<T>, T> NaNOrZero()
-{
-    return std::numeric_limits<T>::quiet_NaN();
-}
 
 template <typename T>
-std::enable_if_t<is_integer_v<T>, T> NaNOrZero()
+T NaNOrZero()
 {
-    return T{0};
+    if constexpr (std::is_floating_point_v<T>)
+        return std::numeric_limits<T>::quiet_NaN();
+    else
+        return {};
 }
-
-template <typename T>
-std::enable_if_t<std::is_class_v<T> && !is_integer_v<T>, T> NaNOrZero()
-{
-    return T{};
-}
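The rewrite collapses each family of SFINAE overloads into one template whose branch is resolved at compile time; `if constexpr` discards the untaken branch per instantiation, so integral instantiations never touch `std::isnan`. Usage sketch (standalone, mirrors the new shape):

#include <cassert>
#include <cmath>
#include <limits>
#include <type_traits>

template <typename T>
bool isNaNSketch(T x)
{
    if constexpr (std::is_floating_point_v<T>)
        return std::isnan(x);   // only instantiated for floating-point T
    else
        return false;
}

int main()
{
    assert(isNaNSketch(std::numeric_limits<double>::quiet_NaN()));
    assert(!isNaNSketch(42));
}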
@@ -388,7 +388,7 @@ void PoolWithFailoverBase<TNestedPool>::updateErrorCounts(PoolWithFailoverBase<T
 {
     time_t current_time = time(nullptr);
 
-    if (last_decrease_time)
+    if (last_decrease_time) //-V1051
     {
         time_t delta = current_time - last_decrease_time;
@@ -44,7 +44,7 @@ using SharedBlockPtr = boost::intrusive_ptr<detail::SharedBlock>;
 struct SharedBlockRowRef
 {
     ColumnRawPtrs * columns = nullptr;
-    size_t row_num;
+    size_t row_num = 0;
     SharedBlockPtr shared_block;
 
     void swap(SharedBlockRowRef & other)
@@ -71,7 +71,7 @@ public:
     LogsLevel client_logs_level = LogsLevel::none;
 
     String query;
-    UInt64 normalized_query_hash;
+    UInt64 normalized_query_hash = 0;
 };
 
 using ThreadGroupStatusPtr = std::shared_ptr<ThreadGroupStatus>;
@@ -303,7 +303,7 @@ namespace VolnitskyTraits
             {
                 /// ngram for Ul
                 chars.c0 = c0u;
-                chars.c1 = c1l;
+                chars.c1 = c1l; //-V1048
                 putNGramBase(n, offset);
             }
@@ -198,7 +198,7 @@ std::pair<ResponsePtr, Undo> TestKeeperCreateRequest::process(TestKeeper::Contai
     else
     {
         TestKeeper::Node created_node;
-        created_node.seq_num = 0;
+        created_node.seq_num = 0; //-V1048
         created_node.stat.czxid = zxid;
         created_node.stat.mzxid = zxid;
         created_node.stat.ctime = std::chrono::system_clock::now().time_since_epoch() / std::chrono::milliseconds(1);
@@ -271,7 +271,7 @@ std::pair<ResponsePtr, Undo> TestKeeperRemoveRequest::process(TestKeeper::Contai
         auto & parent = container.at(parentPath(path));
         --parent.stat.numChildren;
         ++parent.stat.cversion;
-        response.error = Error::ZOK;
+        response.error = Error::ZOK; //-V1048
 
         undo = [prev_node, &container, path = path]
         {
@@ -293,7 +293,7 @@ std::pair<ResponsePtr, Undo> TestKeeperExistsRequest::process(TestKeeper::Contai
     if (it != container.end())
     {
         response.stat = it->second.stat;
-        response.error = Error::ZOK;
+        response.error = Error::ZOK; //-V1048
     }
     else
     {
@@ -316,7 +316,7 @@ std::pair<ResponsePtr, Undo> TestKeeperGetRequest::process(TestKeeper::Container
     {
         response.stat = it->second.stat;
         response.data = it->second.data;
-        response.error = Error::ZOK;
+        response.error = Error::ZOK; //-V1048
     }
 
     return { std::make_shared<GetResponse>(response), {} };
@@ -343,7 +343,7 @@ std::pair<ResponsePtr, Undo> TestKeeperSetRequest::process(TestKeeper::Container
         it->second.data = data;
         ++container.at(parentPath(path)).stat.cversion;
         response.stat = it->second.stat;
-        response.error = Error::ZOK;
+        response.error = Error::ZOK; //-V1048
 
         undo = [prev_node, &container, path = path]
         {
@@ -387,7 +387,7 @@ std::pair<ResponsePtr, Undo> TestKeeperListRequest::process(TestKeeper::Containe
         }
 
         response.stat = it->second.stat;
-        response.error = Error::ZOK;
+        response.error = Error::ZOK; //-V1048
     }
 
     return { std::make_shared<ListResponse>(response), {} };
@@ -407,7 +407,7 @@ std::pair<ResponsePtr, Undo> TestKeeperCheckRequest::process(TestKeeper::Contain
     }
     else
     {
-        response.error = Error::ZOK;
+        response.error = Error::ZOK; //-V1048
     }
 
     return { std::make_shared<CheckResponse>(response), {} };
@@ -422,7 +422,7 @@ std::pair<ResponsePtr, Undo> TestKeeperMultiRequest::process(TestKeeper::Contain
     try
     {
         auto request_it = requests.begin();
-        response.error = Error::ZOK;
+        response.error = Error::ZOK; //-V1048
 
         while (request_it != requests.end())
         {
             const TestKeeperRequest & concrete_request = dynamic_cast<const TestKeeperRequest &>(**request_it);
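The //-V1048 suppressions in this file mark assignments PVS-Studio reports as "the variable was assigned the same value": `response.error` already defaults to `Error::ZOK`, but writing the success path explicitly keeps each handler self-documenting. Illustrative shape (simplified types, not the real ones):

enum class Error { ZOK = 0, ZNONODE };

struct ResponseSketch
{
    Error error = Error::ZOK;   // default already means success
};

void markOk(ResponseSketch & response)
{
    response.error = Error::ZOK; //-V1048  intentional: documents the happy path
}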
@@ -25,7 +25,7 @@ namespace Coordination
 struct ZooKeeperResponse : virtual Response
 {
     XID xid = 0;
-    int64_t zxid;
+    int64_t zxid = 0;
 
     virtual ~ZooKeeperResponse() override = default;
     virtual void readImpl(ReadBuffer &) = 0;
@@ -79,7 +79,6 @@ std::filesystem::path getMountPoint(std::filesystem::path absolute_path)
         if (device_id != parent_device_id)
             return absolute_path;
         absolute_path = parent;
-        device_id = parent_device_id;
     }
 
     return absolute_path;
@@ -7,7 +7,7 @@
 namespace DB
 {
 
-/// Get 64 integer valuses, makes 64x64 bit matrix, transpose it and crop unused bits (most significant zeroes).
+/// Get 64 integer values, makes 64x64 bit matrix, transpose it and crop unused bits (most significant zeroes).
 /// In example, if we have UInt8 with only 0 and 1 inside 64xUInt8 would be compressed into 1xUInt64.
 /// It detects unused bits by calculating min and max values of data part, saving them in header in compression phase.
 /// There's a special case with signed integers parts with crossing zero data. Here it stores one more bit to detect sign of value.
@@ -35,29 +35,6 @@
 using namespace DB;
 
-namespace std
-{
-template <typename T>
-std::ostream & operator<<(std::ostream & ostr, const std::optional<T> & opt)
-{
-    if (!opt)
-    {
-        return ostr << "<empty optional>";
-    }
-
-    return ostr << *opt;
-}
-
-template <typename T>
-std::vector<T> operator+(std::vector<T> && left, std::vector<T> && right)
-{
-    std::vector<T> result(std::move(left));
-    std::move(std::begin(right), std::end(right), std::back_inserter(result));
-    return result;
-}
-}
-
 namespace
 {
@@ -337,6 +314,14 @@ CodecTestSequence operator+(CodecTestSequence && left, const CodecTestSequence &
     return left.append(right);
 }
 
+std::vector<CodecTestSequence> operator+(const std::vector<CodecTestSequence> & left, const std::vector<CodecTestSequence> & right)
+{
+    std::vector<CodecTestSequence> result(std::move(left));
+    std::move(std::begin(right), std::end(right), std::back_inserter(result));
+    return result;
+}
+
 template <typename T>
 CodecTestSequence operator*(CodecTestSequence && left, T times)
 {
@@ -362,7 +347,7 @@ std::ostream & operator<<(std::ostream & ostr, const Codec & codec)
 {
     return ostr << "Codec{"
                 << "name: " << codec.codec_statement
-                << ", expected_compression_ratio: " << codec.expected_compression_ratio
+                << ", expected_compression_ratio: " << *codec.expected_compression_ratio
                 << "}";
 }
@@ -775,15 +760,13 @@ auto FFand0Generator = []()
     return [step = 0](auto i) mutable
     {
         decltype(i) result;
-        if (step++ % 2 == 0)
-        {
-            memset(&result, 0, sizeof(result));
-        }
-        else
-        {
-            memset(&result, 0xFF, sizeof(result));
-        }
+        if (step % 2 == 0)
+            memset(&result, 0, sizeof(result));
+        else
+            memset(&result, 0xFF, sizeof(result));
+
+        ++step;
 
         return result;
     };
 };
@@ -1129,7 +1112,7 @@ template <typename ValueType>
 auto DDCompatibilityTestSequence()
 {
     // Generates sequences with double delta in given range.
-    auto dd_generator = [prev_delta = static_cast<Int64>(0), prev = static_cast<Int64>(0)](auto dd) mutable
+    auto dd_generator = [prev_delta = static_cast<Int64>(0), prev = static_cast<Int64>(0)](auto dd) mutable //-V788
     {
         const auto curr = dd + prev + prev_delta;
         prev = curr;
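The FFand0Generator rewrite above is behavior-preserving: reading `step` before a separate increment produces the same parity sequence as the old `step++` inside the condition. Quick standalone check:

#include <cassert>

int main()
{
    int step_old = 0, step_new = 0;
    for (int i = 0; i < 8; ++i)
    {
        bool zeros_old = (step_old++ % 2 == 0);
        bool zeros_new = (step_new % 2 == 0);
        ++step_new;
        assert(zeros_old == zeros_new);
    }
}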
@@ -30,10 +30,10 @@ static constexpr auto CURRENT_CHANGELOG_VERSION = ChangelogVersion::V0;
 struct ChangelogRecordHeader
 {
     ChangelogVersion version = CURRENT_CHANGELOG_VERSION;
-    uint64_t index; /// entry log number
-    uint64_t term;
-    nuraft::log_val_type value_type;
-    uint64_t blob_size;
+    uint64_t index = 0; /// entry log number
+    uint64_t term = 0;
+    nuraft::log_val_type value_type{};
+    uint64_t blob_size = 0;
 };
 
 /// Changelog record on disk
@@ -103,7 +103,8 @@ struct KeeperStorageSyncRequest final : public KeeperStorageRequest
     std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage::Container & /* container */, KeeperStorage::Ephemerals & /* ephemerals */, int64_t /* zxid */, int64_t /* session_id */) const override
     {
         auto response = zk_request->makeResponse();
-        dynamic_cast<Coordination::ZooKeeperSyncResponse *>(response.get())->path = dynamic_cast<Coordination::ZooKeeperSyncRequest *>(zk_request.get())->path;
+        dynamic_cast<Coordination::ZooKeeperSyncResponse &>(*response).path
+            = dynamic_cast<Coordination::ZooKeeperSyncRequest &>(*zk_request).path;
         return {response, {}};
     }
 };
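Casting through a reference instead of a pointer turns a failed downcast into a thrown std::bad_cast rather than a null pointer that the very next member access would dereference. Minimal sketch of the pattern (illustrative types):

struct Base { virtual ~Base() = default; };
struct Derived : Base { int path = 0; };

int readPath(Base & b)
{
    // old style: dynamic_cast<Derived *>(&b)->path is undefined behavior if the cast fails
    return dynamic_cast<Derived &>(b).path;   // throws std::bad_cast on failure
}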
@@ -64,7 +64,7 @@ TEST(CoordinationTest, BufferSerde)
 {
     Coordination::ZooKeeperRequestPtr request = Coordination::ZooKeeperRequestFactory::instance().get(Coordination::OpNum::Get);
     request->xid = 3;
-    dynamic_cast<Coordination::ZooKeeperGetRequest *>(request.get())->path = "/path/value";
+    dynamic_cast<Coordination::ZooKeeperGetRequest &>(*request).path = "/path/value";
 
     DB::WriteBufferFromNuraftBuffer wbuf;
     request->write(wbuf);
@@ -90,7 +90,7 @@ TEST(CoordinationTest, BufferSerde)
     EXPECT_EQ(request_read->getOpNum(), Coordination::OpNum::Get);
     EXPECT_EQ(request_read->xid, 3);
-    EXPECT_EQ(dynamic_cast<Coordination::ZooKeeperGetRequest *>(request_read.get())->path, "/path/value");
+    EXPECT_EQ(dynamic_cast<Coordination::ZooKeeperGetRequest &>(*request_read).path, "/path/value");
 }
 
 template <typename StateMachine>
@@ -474,19 +474,19 @@ namespace MySQLReplication
             }
             case MYSQL_TYPE_NEWDECIMAL:
             {
-                const auto & dispatch = [](const size_t & precision, const size_t & scale, const auto & function) -> Field
+                const auto & dispatch = [](size_t precision, size_t scale, const auto & function) -> Field
                 {
                     if (precision <= DecimalUtils::max_precision<Decimal32>)
                         return Field(function(precision, scale, Decimal32()));
-                    else if (precision <= DecimalUtils::max_precision<Decimal64>)
+                    else if (precision <= DecimalUtils::max_precision<Decimal64>) //-V547
                         return Field(function(precision, scale, Decimal64()));
-                    else if (precision <= DecimalUtils::max_precision<Decimal128>)
+                    else if (precision <= DecimalUtils::max_precision<Decimal128>) //-V547
                         return Field(function(precision, scale, Decimal128()));
 
                     return Field(function(precision, scale, Decimal256()));
                 };
 
-                const auto & read_decimal = [&](const size_t & precision, const size_t & scale, auto decimal)
+                const auto & read_decimal = [&](size_t precision, size_t scale, auto decimal)
                 {
                     using DecimalType = decltype(decimal);
                     static constexpr size_t digits_per_integer = 9;
@@ -543,7 +543,7 @@ namespace MySQLReplication
                         UInt32 val = 0;
                         size_t to_read = compressed_bytes_map[compressed_decimals];
 
-                        if (to_read)
+                        if (to_read) //-V547
                         {
                             readBigEndianStrict(payload, reinterpret_cast<char *>(&val), to_read);
                             res *= intExp10OfSize<DecimalType>(compressed_decimals);
@@ -257,6 +257,7 @@ class FirstMessage : public FrontMessage
 {
 public:
     Int32 payload_size;
+
     FirstMessage() = delete;
     FirstMessage(int payload_size_) : payload_size(payload_size_) {}
 };
@@ -264,8 +265,9 @@ public:
 class CancelRequest : public FirstMessage
 {
 public:
-    Int32 process_id;
-    Int32 secret_key;
+    Int32 process_id = 0;
+    Int32 secret_key = 0;
+
     CancelRequest(int payload_size_) : FirstMessage(payload_size_) {}
 
     void deserialize(ReadBuffer & in) override
@@ -371,7 +371,7 @@ class IColumn;
     M(Bool, allow_drop_detached, false, "Allow ALTER TABLE ... DROP DETACHED PART[ITION] ... queries", 0) \
     \
     M(UInt64, postgresql_connection_pool_size, 16, "Connection pool size for PostgreSQL table engine and database engine.", 0) \
-    M(Int64, postgresql_connection_pool_wait_timeout, -1, "Connection pool push/pop timeout on empty pool for PostgreSQL table engine and database engine. By default it will block on empty pool.", 0) \
+    M(UInt64, postgresql_connection_pool_wait_timeout, 5000, "Connection pool push/pop timeout on empty pool for PostgreSQL table engine and database engine. By default it will block on empty pool.", 0) \
     M(UInt64, glob_expansion_max_elements, 1000, "Maximum number of allowed addresses (For external storages, table functions, etc).", 0) \
     M(UInt64, odbc_bridge_connection_pool_size, 16, "Connection pool size for each connection settings string in ODBC bridge.", 0) \
     \
@@ -4,9 +4,7 @@
 #ifdef __linux__
 #include <linux/version.h>
-#endif
-
-#ifdef __linux__
 /// Detect does epoll_wait with nested epoll fds works correctly.
 /// Polling nested epoll fds from epoll_wait is required for async_socket_for_remote and use_hedged_requests.
 ///
@@ -16,21 +14,15 @@
 /// [2]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=0c54a6a44bf3
 bool nestedEpollWorks(Poco::Logger * log)
 {
-    bool nested_epoll_works =
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 13))
     /// the check is correct since there will be no more 5.5.x releases.
-        false
-#else
-        true
-#endif
-    ;
-
-    if (!nested_epoll_works)
-    {
-        if (log)
-            LOG_WARNING(log, "Nested epoll_wait has some issues on kernels [5.5.0, 5.6.13). You should upgrade it to avoid possible issues.");
-    }
-    return nested_epoll_works;
+    if (log)
+        LOG_WARNING(log, "Nested epoll_wait has some issues on kernels [5.5.0, 5.6.13). You should upgrade it to avoid possible issues.");
+    return false;
+#else
+    (void)log;
+    return true;
+#endif
 }
 #else
 bool nestedEpollWorks(Poco::Logger *) { return true; }
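After the rewrite the decision is made entirely by the preprocessor: KERNEL_VERSION(a, b, c) packs a version triple into a single comparable integer, so the affected range reduces to two comparisons. The same gate in isolation (Linux-only sketch; note that LINUX_VERSION_CODE describes the kernel headers used at build time, not the kernel the binary later runs on):

#include <linux/version.h>

bool kernelHeadersInBuggyNestedEpollRange()
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 13))
    return true;
#else
    return false;
#endif
}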
@@ -126,7 +126,7 @@ struct SortCursorImpl
     /// Prevent using pos instead of getRow()
 private:
-    size_t pos;
+    size_t pos = 0;
 };
 
 using SortCursorImpls = std::vector<SortCursorImpl>;
@@ -158,7 +158,7 @@ public:
     /** Set the approximate total number of rows to read.
       */
-    virtual void addTotalRowsApprox(size_t value) { total_rows_approx += value; }
+    void addTotalRowsApprox(size_t value) { total_rows_approx += value; }
 
     /** Ask to abort the receipt of data as soon as possible.
@@ -51,7 +51,7 @@ static void writeData(const IDataType & type, const ColumnPtr & column, WriteBuf
     ISerialization::SerializeBinaryBulkSettings settings;
     settings.getter = [&ostr](ISerialization::SubstreamPath) -> WriteBuffer * { return &ostr; };
     settings.position_independent_encoding = false;
-    settings.low_cardinality_max_dictionary_size = 0;
+    settings.low_cardinality_max_dictionary_size = 0; //-V1048
 
     auto serialization = type.getDefaultSerialization();
@@ -28,13 +28,13 @@ namespace ErrorCodes
 }
 
 PostgreSQLBlockInputStream::PostgreSQLBlockInputStream(
-    postgres::ConnectionHolderPtr connection_,
+    postgres::ConnectionHolderPtr connection_holder_,
     const std::string & query_str_,
     const Block & sample_block,
     const UInt64 max_block_size_)
     : query_str(query_str_)
     , max_block_size(max_block_size_)
-    , connection(std::move(connection_))
+    , connection_holder(std::move(connection_holder_))
 {
     description.init(sample_block);
     for (const auto idx : ext::range(0, description.sample_block.columns()))
@@ -48,7 +48,7 @@ PostgreSQLBlockInputStream::PostgreSQLBlockInputStream(
 void PostgreSQLBlockInputStream::readPrefix()
 {
-    tx = std::make_unique<pqxx::read_transaction>(connection->conn());
+    tx = std::make_unique<pqxx::read_transaction>(connection_holder->get());
     stream = std::make_unique<pqxx::stream_from>(*tx, pqxx::from_query, std::string_view(query_str));
 }
@@ -210,7 +210,8 @@ void PostgreSQLBlockInputStream::insertValue(IColumn & column, std::string_view
         {
             max_dimension = std::max(max_dimension, dimension);
 
-            if (--dimension == 0)
+            --dimension;
+            if (dimension == 0)
                 break;
 
             dimensions[dimension].emplace_back(Array(dimensions[dimension + 1].begin(), dimensions[dimension + 1].end()));
@@ -9,7 +9,7 @@
 #include <DataStreams/IBlockInputStream.h>
 #include <Core/ExternalResultDescription.h>
 #include <Core/Field.h>
-#include <Storages/PostgreSQL/PostgreSQLConnectionPool.h>
+#include <Storages/PostgreSQL/ConnectionHolder.h>
 
 namespace DB
@@ -19,7 +19,7 @@ class PostgreSQLBlockInputStream : public IBlockInputStream
 {
 public:
     PostgreSQLBlockInputStream(
-        postgres::ConnectionHolderPtr connection_,
+        postgres::ConnectionHolderPtr connection_holder_,
         const std::string & query_str,
         const Block & sample_block,
         const UInt64 max_block_size_);
@@ -46,7 +46,7 @@ private:
     const UInt64 max_block_size;
     ExternalResultDescription description;
 
-    postgres::ConnectionHolderPtr connection;
+    postgres::ConnectionHolderPtr connection_holder;
 
     std::unique_ptr<pqxx::read_transaction> tx;
     std::unique_ptr<pqxx::stream_from> stream;
@@ -63,7 +63,7 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream(
     // Do not deduplicate insertions into MV if the main insertion is Ok
     if (disable_deduplication_for_children)
-        insert_context->setSetting("insert_deduplicate", false);
+        insert_context->setSetting("insert_deduplicate", Field{false});
 
     // Separate min_insert_block_size_rows/min_insert_block_size_bytes for children
     if (insert_settings.min_insert_block_size_rows_for_materialized_views)
@@ -66,11 +66,22 @@ T EnumValues<T>::getValue(StringRef field_name, bool try_treat_as_id) const
             if (tmp_buf.eof() && value_to_name_map.find(x) != value_to_name_map.end())
                 return x;
         }
-        throw Exception{"Unknown element '" + field_name.toString() + "' for enum", ErrorCodes::BAD_ARGUMENTS};
+        auto hints = this->getHints(field_name.toString());
+        auto hints_string = !hints.empty() ? ", may be you meant: " + toString(hints) : "";
+        throw Exception{"Unknown element '" + field_name.toString() + "' for enum" + hints_string, ErrorCodes::BAD_ARGUMENTS};
     }
 
     return it->getMapped();
 }
 
+template <typename T>
+Names EnumValues<T>::getAllRegisteredNames() const
+{
+    Names result;
+
+    for (const auto & value : values)
+        result.emplace_back(value.first);
+
+    return result;
+}
+
 template class EnumValues<Int8>;
 template class EnumValues<Int16>;
@@ -1,7 +1,8 @@
 #pragma once
 
-#include <Common/HashTable/HashMap.h>
 #include <unordered_map>
+#include <Common/HashTable/HashMap.h>
+#include <Common/NamePrompter.h>
 
 namespace DB
 {
@@ -12,7 +13,7 @@ namespace ErrorCodes
 }
 
 template <typename T>
-class EnumValues
+class EnumValues : public IHints<1, EnumValues<T>>
 {
 public:
     using Value = std::pair<std::string, T>;
@@ -65,6 +66,8 @@ public:
         return std::all_of(rhs_values.begin(), rhs_values.end(), check);
     }
+
+    Names getAllRegisteredNames() const override;
 };
 
 }
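With EnumValues deriving from IHints and exposing getAllRegisteredNames(), an unknown enum literal can now be answered with "may be you meant" suggestions. A simplified, self-contained sketch of the matching (assumed behavior, not ClickHouse's NamePrompter; the threshold is chosen for illustration):

#include <algorithm>
#include <string>
#include <vector>

// Classic two-row Levenshtein distance.
size_t editDistance(const std::string & a, const std::string & b)
{
    std::vector<size_t> prev(b.size() + 1), cur(b.size() + 1);
    for (size_t j = 0; j <= b.size(); ++j)
        prev[j] = j;
    for (size_t i = 1; i <= a.size(); ++i)
    {
        cur[0] = i;
        for (size_t j = 1; j <= b.size(); ++j)
            cur[j] = std::min({prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (a[i - 1] != b[j - 1])});
        std::swap(prev, cur);
    }
    return prev[b.size()];
}

// Propose registered names within a small edit distance of the unknown one.
std::vector<std::string> getHintsSketch(const std::string & name, const std::vector<std::string> & registered)
{
    std::vector<std::string> hints;
    for (const auto & candidate : registered)
        if (editDistance(name, candidate) <= 1)
            hints.push_back(candidate);
    return hints;
}

For example, getHintsSketch("Prodution", {"Production", "Testing"}) returns {"Production"}.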
@@ -160,7 +160,7 @@ struct IndexesSerializationType
             return std::make_shared<DataTypeUInt16>();
         if (type == TUInt32)
             return std::make_shared<DataTypeUInt32>();
-        if (type == TUInt64)
+        if (type == TUInt64) //-V547
             return std::make_shared<DataTypeUInt64>();
 
         throw Exception("Can't create DataType from IndexesSerializationType.", ErrorCodes::LOGICAL_ERROR);
@@ -105,9 +105,9 @@ DataTypePtr convertMySQLDataType(MultiEnum<MySQLDataTypesSupport> type_support,
         {
             if (precision <= DecimalUtils::max_precision<Decimal32>)
                 res = std::make_shared<DataTypeDecimal<Decimal32>>(precision, scale);
-            else if (precision <= DecimalUtils::max_precision<Decimal64>)
+            else if (precision <= DecimalUtils::max_precision<Decimal64>) //-V547
                 res = std::make_shared<DataTypeDecimal<Decimal64>>(precision, scale);
-            else if (precision <= DecimalUtils::max_precision<Decimal128>)
+            else if (precision <= DecimalUtils::max_precision<Decimal128>) //-V547
                 res = std::make_shared<DataTypeDecimal<Decimal128>>(precision, scale);
         }
@@ -423,7 +423,7 @@ DataTypePtr getLeastSupertype(const DataTypes & types)
             size_t min_bit_width_of_integer = std::max(max_bits_of_signed_integer, max_bits_of_unsigned_integer);
 
             /// If unsigned is not covered by signed.
-            if (max_bits_of_signed_integer && max_bits_of_unsigned_integer >= max_bits_of_signed_integer)
+            if (max_bits_of_signed_integer && max_bits_of_unsigned_integer >= max_bits_of_signed_integer) //-V1051
             {
                 // Because 128 and 256 bit integers are significantly slower, we should not promote to them.
                 // But if we already have wide numbers, promotion is necessary.
@@ -15,8 +15,6 @@
 #include <Core/iostream_debug_helpers.h>
 
-namespace std
-{
 
 template <typename T>
 inline std::ostream& operator<<(std::ostream & ostr, const std::vector<T> & v)
@@ -29,8 +27,6 @@ inline std::ostream& operator<<(std::ostream & ostr, const std::vector<T> & v)
     return ostr << "] (" << v.size() << ") items";
 }
 
-}
-
 using namespace DB;
 
 struct ParseDataTypeTestCase
@@ -37,7 +37,7 @@ static auto typesFromString(const std::string & str)
 struct TypesTestCase
 {
-    const char * from_types;
+    const char * from_types = nullptr;
     const char * expected_type = nullptr;
 };
@@ -102,7 +102,7 @@ StoragePtr DatabaseAtomic::detachTable(const String & name)
     auto table = DatabaseOrdinary::detachTableUnlocked(name, lock);
     table_name_to_path.erase(name);
     detached_tables.emplace(table->getStorageID().uuid, table);
-    not_in_use = cleanupDetachedTables();
+    not_in_use = cleanupDetachedTables(); //-V1001
 
     return table;
 }
@@ -37,7 +37,7 @@
 
 #if USE_LIBPQXX
 #include <Databases/PostgreSQL/DatabasePostgreSQL.h> // Y_IGNORE
-#include <Storages/PostgreSQL/PostgreSQLConnectionPool.h>
+#include <Storages/PostgreSQL/PoolWithFailover.h>
 #endif
 
 namespace DB
@@ -3,17 +3,16 @@
 #include <common/types.h>
 #include <Parsers/IAST_fwd.h>
 #include <Storages/IStorage_fwd.h>
-#include <Storages/StorageInMemoryMetadata.h>
-#include <Dictionaries/IDictionary.h>
-#include <Databases/DictionaryAttachInfo.h>
+#include <Interpreters/Context_fwd.h>
 #include <Common/Exception.h>
+#include <Core/UUID.h>
 
-#include <boost/range/adaptor/map.hpp>
-#include <boost/range/algorithm/copy.hpp>
 
 #include <ctime>
 #include <functional>
 #include <memory>
-#include <mutex>
-#include <vector>
+#include <map>
 
 namespace DB
@@ -22,6 +21,8 @@ namespace DB
 struct Settings;
 struct ConstraintsDescription;
 struct IndicesDescription;
+struct StorageInMemoryMetadata;
+struct StorageID;
 class ASTCreateQuery;
 
 using DictionariesWithID = std::vector<std::pair<String, UUID>>;
@@ -6,6 +6,7 @@
 #include <mysqlxx/Pool.h>
 
 #include <Core/MultiEnum.h>
+#include <Core/NamesAndTypes.h>
 #include <Common/ThreadPool.h>
 #include <Databases/DatabasesCommon.h>
 #include <Databases/MySQL/ConnectionMySQLSettings.h>
@@ -16,7 +16,6 @@
 #include <Poco/File.h>
 #include <Databases/PostgreSQL/fetchPostgreSQLTableStructure.h>
 #include <Common/quoteString.h>
-#include <Storages/PostgreSQL/PostgreSQLConnectionPool.h>
 
 
 namespace DB
@@ -89,8 +88,8 @@ std::unordered_set<std::string> DatabasePostgreSQL::fetchTablesList() const
     std::unordered_set<std::string> tables;
     std::string query = "SELECT tablename FROM pg_catalog.pg_tables "
                         "WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema'";
-    auto connection = connection_pool->get();
-    pqxx::read_transaction tx(connection->conn());
+    auto connection_holder = connection_pool->get();
+    pqxx::read_transaction tx(connection_holder->get());
 
     for (auto table_name : tx.stream<std::string>(query))
         tables.insert(std::get<0>(table_name));
@@ -108,8 +107,8 @@ bool DatabasePostgreSQL::checkPostgresTable(const String & table_name) const
                         "PostgreSQL table name cannot contain single quote or backslash characters, passed {}", table_name);
     }
 
-    auto connection = connection_pool->get();
-    pqxx::nontransaction tx(connection->conn());
+    auto connection_holder = connection_pool->get();
+    pqxx::nontransaction tx(connection_holder->get());
 
     try
     {
@@ -170,7 +169,7 @@ StoragePtr DatabasePostgreSQL::fetchTable(const String & table_name, ContextPtr
         return StoragePtr{};
 
     auto storage = StoragePostgreSQL::create(
-        StorageID(database_name, table_name), *connection_pool, table_name,
+        StorageID(database_name, table_name), connection_pool, table_name,
         ColumnsDescription{*columns}, ConstraintsDescription{}, local_context);
 
     if (cache_tables)
@@ -9,7 +9,7 @@
 #include <Databases/DatabasesCommon.h>
 #include <Core/BackgroundSchedulePool.h>
 #include <Parsers/ASTCreateQuery.h>
-#include <Storages/PostgreSQL/PostgreSQLPoolWithFailover.h>
+#include <Storages/PostgreSQL/PoolWithFailover.h>
 
 namespace DB
@@ -25,7 +25,7 @@ namespace ErrorCodes
 }
 
-static DataTypePtr convertPostgreSQLDataType(std::string & type, bool is_nullable, uint16_t dimensions)
+static DataTypePtr convertPostgreSQLDataType(String & type, bool is_nullable, uint16_t dimensions)
 {
     DataTypePtr res;
 
@@ -67,11 +67,11 @@ static DataTypePtr convertPostgreSQLDataType(std::string & type, bool is_nullabl
         if (precision <= DecimalUtils::max_precision<Decimal32>)
             res = std::make_shared<DataTypeDecimal<Decimal32>>(precision, scale);
-        else if (precision <= DecimalUtils::max_precision<Decimal64>)
+        else if (precision <= DecimalUtils::max_precision<Decimal64>) //-V547
             res = std::make_shared<DataTypeDecimal<Decimal64>>(precision, scale);
-        else if (precision <= DecimalUtils::max_precision<Decimal128>)
+        else if (precision <= DecimalUtils::max_precision<Decimal128>) //-V547
             res = std::make_shared<DataTypeDecimal<Decimal128>>(precision, scale);
-        else if (precision <= DecimalUtils::max_precision<Decimal256>)
+        else if (precision <= DecimalUtils::max_precision<Decimal256>) //-V547
             res = std::make_shared<DataTypeDecimal<Decimal256>>(precision, scale);
         else
             throw Exception(ErrorCodes::BAD_ARGUMENTS, "Precision {} and scale {} are too big and not supported", precision, scale);
@@ -96,7 +96,7 @@ static DataTypePtr convertPostgreSQLDataType(std::string & type, bool is_nullabl
 
 std::shared_ptr<NamesAndTypesList> fetchPostgreSQLTableStructure(
-    postgres::ConnectionHolderPtr connection, const String & postgres_table_name, bool use_nulls)
+    postgres::ConnectionHolderPtr connection_holder, const String & postgres_table_name, bool use_nulls)
 {
     auto columns = NamesAndTypesList();
 
@@ -115,7 +115,7 @@ std::shared_ptr<NamesAndTypesList> fetchPostgreSQLTableStructure(
            "AND NOT attisdropped AND attnum > 0", postgres_table_name);
     try
     {
-        pqxx::read_transaction tx(connection->conn());
+        pqxx::read_transaction tx(connection_holder->get());
         pqxx::stream_from stream(tx, pqxx::from_query, std::string_view(query));
 
         std::tuple<std::string, std::string, std::string, uint16_t> row;
@@ -135,7 +135,7 @@ std::shared_ptr<NamesAndTypesList> fetchPostgreSQLTableStructure(
     {
         throw Exception(fmt::format(
                     "PostgreSQL table {}.{} does not exist",
-                    connection->conn().dbname(), postgres_table_name), ErrorCodes::UNKNOWN_TABLE);
+                    connection_holder->get().dbname(), postgres_table_name), ErrorCodes::UNKNOWN_TABLE);
     }
     catch (Exception & e)
     {
@@ -5,14 +5,15 @@
 #endif
 
 #if USE_LIBPQXX
-#include <Storages/StoragePostgreSQL.h>
+#include <Storages/PostgreSQL/ConnectionHolder.h>
+#include <Core/NamesAndTypes.h>
 
 
 namespace DB
 {
 
 std::shared_ptr<NamesAndTypesList> fetchPostgreSQLTableStructure(
-    postgres::ConnectionHolderPtr connection, const String & postgres_table_name, bool use_nulls);
+    postgres::ConnectionHolderPtr connection_holder, const String & postgres_table_name, bool use_nulls);
 
 }
@@ -176,8 +176,9 @@ Columns CacheDictionary<dictionary_key_type>::getColumns(
     ProfileEvents::increment(ProfileEvents::DictCacheKeysExpired, expired_keys_size);
     ProfileEvents::increment(ProfileEvents::DictCacheKeysNotFound, not_found_keys_size);
 
-    query_count.fetch_add(keys.size());
-    hit_count.fetch_add(found_keys_size);
+    query_count.fetch_add(keys.size(), std::memory_order_relaxed);
+    hit_count.fetch_add(found_keys_size, std::memory_order_relaxed);
+    found_count.fetch_add(found_keys_size, std::memory_order_relaxed);
 
     MutableColumns & fetched_columns_from_storage = result_of_fetch_from_storage.fetched_columns;
     const PaddedPODArray<KeyState> & key_index_to_state_from_storage = result_of_fetch_from_storage.key_index_to_state;
@@ -296,8 +297,9 @@ ColumnUInt8::Ptr CacheDictionary<dictionary_key_type>::hasKeys(const Columns & k
     ProfileEvents::increment(ProfileEvents::DictCacheKeysExpired, expired_keys_size);
     ProfileEvents::increment(ProfileEvents::DictCacheKeysNotFound, not_found_keys_size);
 
-    query_count.fetch_add(keys.size());
-    hit_count.fetch_add(found_keys_size);
+    query_count.fetch_add(keys.size(), std::memory_order_relaxed);
+    hit_count.fetch_add(found_keys_size, std::memory_order_relaxed);
+    found_count.fetch_add(found_keys_size, std::memory_order_relaxed);
 
     size_t keys_to_update_size = expired_keys_size + not_found_keys_size;
     auto update_unit = std::make_shared<CacheDictionaryUpdateUnit<dictionary_key_type>>(key_columns, result_of_fetch_from_storage.key_index_to_state, request, keys_to_update_size);
@@ -365,8 +367,10 @@ ColumnPtr CacheDictionary<dictionary_key_type>::getHierarchy(
 {
     if (dictionary_key_type == DictionaryKeyType::simple)
     {
-        auto result = getKeysHierarchyDefaultImplementation(this, key_column, key_type);
+        size_t keys_found;
+        auto result = getKeysHierarchyDefaultImplementation(this, key_column, key_type, keys_found);
         query_count.fetch_add(key_column->size(), std::memory_order_relaxed);
+        found_count.fetch_add(keys_found, std::memory_order_relaxed);
         return result;
     }
     else
@@ -381,8 +385,10 @@ ColumnUInt8::Ptr CacheDictionary<dictionary_key_type>::isInHierarchy(
 {
     if (dictionary_key_type == DictionaryKeyType::simple)
     {
-        auto result = getKeysIsInHierarchyDefaultImplementation(this, key_column, in_key_column, key_type);
+        size_t keys_found;
+        auto result = getKeysIsInHierarchyDefaultImplementation(this, key_column, in_key_column, key_type, keys_found);
         query_count.fetch_add(key_column->size(), std::memory_order_relaxed);
+        found_count.fetch_add(keys_found, std::memory_order_relaxed);
         return result;
     }
     else
@@ -520,8 +526,6 @@ void CacheDictionary<dictionary_key_type>::update(CacheDictionaryUpdateUnitPtr<d
       */
     CurrentMetrics::Increment metric_increment{CurrentMetrics::DictCacheRequests};
 
-    size_t found_keys_size = 0;
-
     Arena * complex_key_arena = update_unit_ptr->complex_keys_arena_holder.getComplexKeyArena();
     DictionaryKeysExtractor<dictionary_key_type> requested_keys_extractor(update_unit_ptr->key_columns, complex_key_arena);
     auto requested_keys = requested_keys_extractor.extractAllKeys();
@@ -610,9 +614,8 @@ void CacheDictionary<dictionary_key_type>::update(CacheDictionaryUpdateUnitPtr<d
                     auto fetched_key_from_source = keys_extracted_from_block[i];
 
                     not_found_keys.erase(fetched_key_from_source);
-                    update_unit_ptr->requested_keys_to_fetched_columns_during_update_index[fetched_key_from_source] = found_keys_size;
+                    update_unit_ptr->requested_keys_to_fetched_columns_during_update_index[fetched_key_from_source] = found_keys_in_source.size();
                     found_keys_in_source.emplace_back(fetched_key_from_source);
-                    ++found_keys_size;
                 }
             }
@@ -666,9 +669,13 @@ void CacheDictionary<dictionary_key_type>::update(CacheDictionaryUpdateUnitPtr<d
             }
         }
 
+        /// The underlying source can have duplicates, so count only unique keys this formula is used.
+        size_t found_keys_size = requested_keys_size - not_found_keys.size();
+
         ProfileEvents::increment(ProfileEvents::DictCacheKeysRequestedMiss, requested_keys_size - found_keys_size);
         ProfileEvents::increment(ProfileEvents::DictCacheKeysRequestedFound, found_keys_size);
         ProfileEvents::increment(ProfileEvents::DictCacheRequests);
+        found_count.fetch_add(found_keys_size, std::memory_order_relaxed);
     }
     else
     {
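The counting fix above deserves a note: the dictionary source may return the same key more than once, so incrementing a counter per fetched row overstates the hit count. Counting the requested keys minus the keys still missing counts each unique key exactly once. Standalone check:

#include <cassert>
#include <unordered_set>
#include <vector>

int main()
{
    std::vector<int> requested = {1, 2, 3, 4};
    std::vector<int> fetched = {1, 1, 2};   // key 1 arrives twice
    std::unordered_set<int> not_found(requested.begin(), requested.end());
    for (int key : fetched)
        not_found.erase(key);
    size_t found_keys_size = requested.size() - not_found.size();
    assert(found_keys_size == 2);           // 1 and 2, each counted once
}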
Some files were not shown because too many files have changed in this diff.