Merge remote-tracking branch 'remotes/clickhouse/master' into sevirov-DOCSUP-8940-edit_and_translate_to_russian_deltasumtimestamp

This commit is contained in:
Dmitriy 2021-05-10 23:16:58 +03:00
commit 10eaa83a48
270 changed files with 10523 additions and 1688 deletions


@ -169,8 +169,8 @@ endif ()
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)
if (OS_LINUX AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
# Only for Linux, x86_64.
if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
# Only for Linux, x86_64 or aarch64.
option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
elseif(GLIBC_COMPATIBILITY)
message (${RECONFIGURE_MESSAGE_LEVEL} "Glibc compatibility cannot be enabled in current configuration")


@ -102,7 +102,7 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)
auto * logger = &Poco::Logger::get("SentryWriter");
if (config.getBool("send_crash_reports.enabled", false))
{
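// Throughout this commit, "//-Vnnn" markers suppress individual PVS-Studio diagnostics on the
// annotated line (here V560: a part of the conditional expression is always true/false), while
// "//-V::GA" disables the whole General Analysis diagnostic group for a file.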
if (debug || (strlen(VERSION_OFFICIAL) > 0))
if (debug || (strlen(VERSION_OFFICIAL) > 0)) //-V560
{
enabled = true;
}


@ -15,7 +15,7 @@ if (GLIBC_COMPATIBILITY)
add_headers_and_sources(glibc_compatibility .)
add_headers_and_sources(glibc_compatibility musl)
if (ARCH_ARM)
if (ARCH_AARCH64)
list (APPEND glibc_compatibility_sources musl/aarch64/syscall.s musl/aarch64/longjmp.s)
set (musl_arch_include_dir musl/aarch64)
elseif (ARCH_AMD64)


@ -78,6 +78,9 @@
*
*/
// Disable warnings by PVS-Studio
//-V::GA
static const double
pi = 3.14159265358979311600e+00, /* 0x400921FB, 0x54442D18 */
a0 = 7.72156649015328655494e-02, /* 0x3FB3C467, 0xE37DB0C8 */


@ -85,6 +85,9 @@
*
*/
// Disable warnings by PVS-Studio
//-V::GA
#include <stdint.h>
#include <math.h>
#include "libm.h"


@ -155,7 +155,7 @@ static inline long double fp_barrierl(long double x)
static inline void fp_force_evalf(float x)
{
volatile float y;
y = x;
y = x; //-V1001
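// (V1001 flags a variable that is assigned but never read; here the store to a volatile is the
// point: it forces evaluation of x so that any pending floating-point exceptions are raised.)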
}
#endif
@ -164,7 +164,7 @@ static inline void fp_force_evalf(float x)
static inline void fp_force_eval(double x)
{
volatile double y;
y = x;
y = x; //-V1001
}
#endif
@ -173,7 +173,7 @@ static inline void fp_force_eval(double x)
static inline void fp_force_evall(long double x)
{
volatile long double y;
y = x;
y = x; //-V1001
}
#endif


@ -3,6 +3,9 @@
* SPDX-License-Identifier: MIT
*/
// Disable warnings by PVS-Studio
//-V::GA
#include <math.h>
#include <stdint.h>
#include "libm.h"


@ -40,7 +40,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
split->addTextLog(log, text_log_max_priority);
auto current_logger = config.getString("logger", "");
if (config_logger == current_logger)
if (config_logger == current_logger) //-V1051
return;
config_logger = current_logger;


@ -447,69 +447,6 @@ inline SrcIter uneven_copy(SrcIter src_first,
std::integral_constant<bool, DEST_IS_SMALLER>{});
}
/* generate_to, fill in a fixed-size array of integral type using a SeedSeq
* (actually works for any random-access iterator)
*/
template <size_t size, typename SeedSeq, typename DestIter>
inline void generate_to_impl(SeedSeq&& generator, DestIter dest,
std::true_type)
{
generator.generate(dest, dest+size);
}
template <size_t size, typename SeedSeq, typename DestIter>
void generate_to_impl(SeedSeq&& generator, DestIter dest,
std::false_type)
{
typedef typename std::iterator_traits<DestIter>::value_type dest_t;
constexpr auto DEST_SIZE = sizeof(dest_t);
constexpr auto GEN_SIZE = sizeof(uint32_t);
constexpr bool GEN_IS_SMALLER = GEN_SIZE < DEST_SIZE;
constexpr size_t FROM_ELEMS =
GEN_IS_SMALLER
? size * ((DEST_SIZE+GEN_SIZE-1) / GEN_SIZE)
: (size + (GEN_SIZE / DEST_SIZE) - 1)
/ ((GEN_SIZE / DEST_SIZE) + GEN_IS_SMALLER);
// this odd code ^^^^^^^^^^^^^^^^^ is work-around for
// a bug: http://llvm.org/bugs/show_bug.cgi?id=21287
if (FROM_ELEMS <= 1024) {
uint32_t buffer[FROM_ELEMS];
generator.generate(buffer, buffer+FROM_ELEMS);
uneven_copy(buffer, dest, dest+size);
} else {
uint32_t* buffer = static_cast<uint32_t*>(malloc(GEN_SIZE * FROM_ELEMS));
generator.generate(buffer, buffer+FROM_ELEMS);
uneven_copy(buffer, dest, dest+size);
free(static_cast<void*>(buffer));
}
}
template <size_t size, typename SeedSeq, typename DestIter>
inline void generate_to(SeedSeq&& generator, DestIter dest)
{
typedef typename std::iterator_traits<DestIter>::value_type dest_t;
constexpr bool IS_32BIT = sizeof(dest_t) == sizeof(uint32_t);
generate_to_impl<size>(std::forward<SeedSeq>(generator), dest,
std::integral_constant<bool, IS_32BIT>{});
}
/* generate_one, produce a value of integral type using a SeedSeq
* (optionally, we can have it produce more than one and pick which one
* we want)
*/
template <typename UInt, size_t i = 0UL, size_t N = i+1UL, typename SeedSeq>
inline UInt generate_one(SeedSeq&& generator)
{
UInt result[N];
generate_to<N>(std::forward<SeedSeq>(generator), result);
return result[i];
}
template <typename RngType>
auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound)
-> typename RngType::result_type
@ -517,7 +454,7 @@ auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound)
typedef typename RngType::result_type rtype;
rtype threshold = (RngType::max() - RngType::min() + rtype(1) - upper_bound)
% upper_bound;
for (;;) {
for (;;) { //-V1044
rtype r = rng() - RngType::min();
if (r >= threshold)
return r % upper_bound;
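For context, the loop above implements rejection sampling for unbiased bounded generation: `threshold` counts the raw values that would make `r % upper_bound` non-uniform, and any draw below it is discarded. A minimal self-contained sketch of the same idea, assuming a generator whose `min()` is 0 (such as `std::mt19937`) and `upper_bound > 0`; the function name is illustrative, not part of pcg_extras:

``` cpp
#include <cstdint>
#include <iostream>
#include <random>

// Unbiased bounded generation by rejection, mirroring bounded_rand above.
// Assumes rng.min() == 0, a full 32-bit output range, and upper_bound > 0.
uint32_t bounded_rand_sketch(std::mt19937 & rng, uint32_t upper_bound)
{
    // (2^32 - upper_bound) % upper_bound == 2^32 mod upper_bound: the number
    // of "leftover" values that would bias the final modulo.
    uint32_t threshold = (0u - upper_bound) % upper_bound;
    for (;;)
    {
        uint32_t r = rng();
        if (r >= threshold)
            return r % upper_bound;
        // r < threshold: redraw to keep the result uniform.
    }
}

int main()
{
    std::mt19937 rng(42);
    for (int i = 0; i < 5; ++i)
        std::cout << bounded_rand_sketch(rng, 6) << '\n';
}
```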


@ -928,7 +928,7 @@ struct rxs_m_xs_mixin {
constexpr bitcount_t shift = bits - xtypebits;
constexpr bitcount_t mask = (1 << opbits) - 1;
bitcount_t rshift =
opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0;
opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; //-V547
internal ^= internal >> (opbits + rshift);
internal *= mcg_multiplier<itype>::multiplier();
xtype result = internal >> shift;
@ -950,7 +950,7 @@ struct rxs_m_xs_mixin {
internal *= mcg_unmultiplier<itype>::unmultiplier();
bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0;
bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; //-V547
internal = unxorshift(internal, bits, opbits + rshift);
return internal;
@ -975,7 +975,7 @@ struct rxs_m_mixin {
: 2;
constexpr bitcount_t shift = bits - xtypebits;
constexpr bitcount_t mask = (1 << opbits) - 1;
bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0;
bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; //-V547
internal ^= internal >> (opbits + rshift);
internal *= mcg_multiplier<itype>::multiplier();
xtype result = internal >> shift;
@ -1366,7 +1366,7 @@ void extended<table_pow2,advance_pow2,baseclass,extvalclass,kdd>::selfinit()
// - any strange correlations would only be apparent if we
// were to backstep the generator so that the base generator
// was generating the same values again
result_type xdiff = baseclass::operator()() - baseclass::operator()();
result_type xdiff = baseclass::operator()() - baseclass::operator()(); //-V501
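// (V501 flags identical operands on both sides of an operator, but each operator() call
// advances the generator state, so the two draws are generally different.)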
for (size_t i = 0; i < table_size; ++i) {
data_[i] = baseclass::operator()() ^ xdiff;
}


@ -2,7 +2,7 @@ if (APPLE OR SPLIT_SHARED_LIBRARIES OR NOT ARCH_AMD64)
set (ENABLE_EMBEDDED_COMPILER OFF CACHE INTERNAL "")
endif()
option (ENABLE_EMBEDDED_COMPILER "Set to TRUE to enable support for 'compile_expressions' option for query execution" ${ENABLE_LIBRARIES})
option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ON)
# Broken in macos. TODO: update clang, re-test, enable on Apple
if (ENABLE_EMBEDDED_COMPILER AND NOT SPLIT_SHARED_LIBRARIES AND ARCH_AMD64 AND NOT (SANITIZE STREQUAL "undefined"))
option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library." ${NOT_UNBUNDLED})


@ -370,6 +370,10 @@ function run_tests
# Depends on AWS
01801_s3_cluster
# Depends on LLVM JIT
01852_jit_if
01865_jit_comparison_constant_result
)
(time clickhouse-test --hung-check -j 8 --order=random --use-skip-list --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" -- "$FASTTEST_FOCUS" 2>&1 ||:) | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt"


@ -14,11 +14,6 @@
<max_memory_usage>
<max>10G</max>
</max_memory_usage>
<!-- Not ready for production -->
<compile_expressions>
<readonly />
</compile_expressions>
</constraints>
</default>
</profiles>


@ -35,7 +35,7 @@ RUN apt-get update \
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN pip3 install urllib3 testflows==1.6.74 docker-compose docker dicttoxml kazoo tzlocal
RUN pip3 install urllib3 testflows==1.6.74 docker-compose docker dicttoxml kazoo tzlocal python-dateutil numpy
ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 17.09.1-ce
@ -74,4 +74,3 @@ VOLUME /var/lib/docker
EXPOSE 2375
ENTRYPOINT ["dockerd-entrypoint.sh"]
CMD ["sh", "-c", "python3 regression.py --no-color -o classic --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv"]


@ -140,6 +140,7 @@ The following settings can be specified in configuration file for given endpoint
- `endpoint` — Specifies the prefix of an endpoint. Mandatory.
- `access_key_id` and `secret_access_key` — Specify credentials to use with the given endpoint. Optional.
- `use_environment_credentials` — If set to `true`, the S3 client will try to obtain credentials from environment variables and [Amazon EC2](https://en.wikipedia.org/wiki/Amazon_Elastic_Compute_Cloud) metadata for the given endpoint. Optional, default value is `false`.
- `region` — Specifies the S3 region name. Optional.
- `use_insecure_imds_request` — If set to `true`, the S3 client will use an insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Optional, default value is `false`.
- `header` — Adds the specified HTTP header to requests to the given endpoint. Optional, can be specified multiple times.
- `server_side_encryption_customer_key_base64` — If specified, the headers required for accessing S3 objects with SSE-C encryption will be set. Optional.
@ -152,6 +153,7 @@ The following settings can be specified in configuration file for given endpoint
<endpoint>https://storage.yandexcloud.net/my-test-bucket-768/</endpoint>
<!-- <access_key_id>ACCESS_KEY_ID</access_key_id> -->
<!-- <secret_access_key>SECRET_ACCESS_KEY</secret_access_key> -->
<!-- <region>us-west-1</region> -->
<!-- <use_environment_credentials>false</use_environment_credentials> -->
<!-- <use_insecure_imds_request>false</use_insecure_imds_request> -->
<!-- <header>Authorization: Bearer SOME-TOKEN</header> -->


@ -739,6 +739,7 @@ Configuration markup:
<endpoint>https://storage.yandexcloud.net/my-bucket/root-path/</endpoint>
<access_key_id>your_access_key_id</access_key_id>
<secret_access_key>your_secret_access_key</secret_access_key>
<region></region>
<server_side_encryption_customer_key_base64>your_base64_encoded_customer_key</server_side_encryption_customer_key_base64>
<proxy>
<uri>http://proxy1</uri>
@ -764,6 +765,7 @@ Required parameters:
- `secret_access_key` — S3 secret access key.
Optional parameters:
- `region` — S3 region name.
- `use_environment_credentials` — Reads AWS credentials from the environment variables `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN` if they exist. Default value is `false`.
- `use_insecure_imds_request` — If set to `true`, the S3 client will use an insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Default value is `false`.
- `proxy` — Proxy configuration for S3 endpoint. Each `uri` element inside `proxy` block should contain a proxy URL.


@ -21,6 +21,7 @@ Columns:
- `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary.
- `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot.
- `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache.
- `found_rate` ([Float64](../../sql-reference/data-types/float.md)) — The percentage of uses for which the value was found.
- `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of items stored in the dictionary.
- `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table).
- `source` ([String](../../sql-reference/data-types/string.md)) — Text describing the [data source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) for the dictionary.
@ -60,4 +61,4 @@ SELECT * FROM system.dictionaries
└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/dictionaries) <!--hide-->


@ -12,6 +12,9 @@ The result depends on the order of running the query, and is nondeterministic.
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function.
!!! note "Note"
    Using `quantileTDigestWeighted` [is not recommended for tiny data sets](https://github.com/tdunning/t-digest/issues/167#issuecomment-828650275) and can lead to significant error. In this case, consider using [`quantileTDigest`](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md) instead.
**Syntax**
``` sql


@ -84,6 +84,7 @@ SELECT * FROM s3_engine_table LIMIT 2;
- `access_key_id` and `secret_access_key` — Specify credentials to use with the given endpoint.
- `use_environment_credentials` — If `true`, the S3 client will try to obtain credentials from environment variables and [Amazon EC2](https://ru.wikipedia.org/wiki/Amazon_EC2) metadata for the given endpoint. Default value is `false`.
- `use_insecure_imds_request` — Whether to use a less secure IMDS request when obtaining credentials from Amazon EC2 metadata. Default value is `false`.
- `region` — The S3 region name.
- `header` — Adds the specified HTTP header to a request to the given endpoint. Can be specified multiple times.
- `server_side_encryption_customer_key_base64` — Sets the headers required for accessing S3 objects with SSE-C encryption.
@ -95,6 +96,7 @@ SELECT * FROM s3_engine_table LIMIT 2;
<endpoint>https://storage.yandexcloud.net/my-test-bucket-768/</endpoint>
<!-- <access_key_id>ACCESS_KEY_ID</access_key_id> -->
<!-- <secret_access_key>SECRET_ACCESS_KEY</secret_access_key> -->
<!-- <region>us-west-1</region> -->
<!-- <use_environment_credentials>false</use_environment_credentials> -->
<!-- <use_insecure_imds_request>false</use_insecure_imds_request> -->
<!-- <header>Authorization: Bearer SOME-TOKEN</header> -->


@ -727,6 +727,7 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
<endpoint>https://storage.yandexcloud.net/my-bucket/root-path/</endpoint>
<access_key_id>your_access_key_id</access_key_id>
<secret_access_key>your_secret_access_key</secret_access_key>
<region></region>
<proxy>
<uri>http://proxy1</uri>
<uri>http://proxy2</uri>
@ -753,6 +754,7 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
Optional parameters:
- `region` — The S3 region name.
- `use_environment_credentials` — Whether to read AWS credentials from the environment, including the variables `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN` if they exist. Default value: `false`.
- `use_insecure_imds_request` — Whether to use a less secure IMDS request when obtaining credentials from Amazon EC2 metadata. Default value: `false`.
- `proxy` — Proxy configuration for the S3 endpoint. Each `uri` element inside the `proxy` block should contain a proxy URL.


@ -12,6 +12,9 @@ toc_priority: 208
The internal states of the `quantile*` functions are not combined when they are used in a single query. If you need to compute quantiles of several levels, use the [quantiles](#quantiles) function; this makes the query more efficient.
!!! note "Note"
    Using `quantileTDigestWeighted` [is not recommended for small data sets](https://github.com/tdunning/t-digest/issues/167#issuecomment-828650275) and can lead to significant error. Consider using [`quantileTDigest`](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md) instead in such cases.
**Syntax**
``` sql


@ -23,6 +23,8 @@ public:
SharedLibraryHandler(const SharedLibraryHandler & other);
SharedLibraryHandler & operator=(const SharedLibraryHandler & other) = delete;
~SharedLibraryHandler();
BlockInputStreamPtr loadAll();


@ -13,6 +13,7 @@
#include <Poco/Net/HTTPServer.h>
#include <Poco/Net/NetException.h>
#include <Poco/Util/HelpFormatter.h>
#include <Poco/Environment.h>
#include <ext/scope_guard.h>
#include <common/defines.h>
#include <common/logger_useful.h>
@ -385,6 +386,11 @@ void Server::initialize(Poco::Util::Application & self)
{
BaseDaemon::initialize(self);
logger().information("starting up");
LOG_INFO(&logger(), "OS Name = {}, OS Version = {}, OS Architecture = {}",
Poco::Environment::osName(),
Poco::Environment::osVersion(),
Poco::Environment::osArchitecture());
}
std::string Server::getDefaultCorePath() const
@ -879,7 +885,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
global_context->setMMappedFileCache(mmap_cache_size);
#if USE_EMBEDDED_COMPILER
size_t compiled_expression_cache_size = config().getUInt64("compiled_expression_cache_size", 500);
constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 1024;
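/// 1024 * 1024 * 1024 = 1073741824 bytes (1 GiB), matching the <compiled_expression_cache_size> default added to config.xml in this commit.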
size_t compiled_expression_cache_size = config().getUInt64("compiled_expression_cache_size", compiled_expression_cache_size_default);
CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_size);
#endif


@ -329,6 +329,8 @@
-->
<mmap_cache_size>1000</mmap_cache_size>
<!-- Cache size for compiled expressions.-->
<compiled_expression_cache_size>1073741824</compiled_expression_cache_size>
<!-- Path to data directory, with trailing slash. -->
<path>/var/lib/clickhouse/</path>


@ -355,8 +355,9 @@ String DiskAccessStorage::getStorageParamsJSON() const
std::lock_guard lock{mutex};
Poco::JSON::Object json;
json.set("path", directory_path);
if (readonly)
json.set("readonly", readonly.load());
bool readonly_loaded = readonly;
if (readonly_loaded)
json.set("readonly", Poco::Dynamic::Var{true});
std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
oss.exceptions(std::ios::failbit);
Poco::JSON::Stringifier::stringify(json, oss);


@ -77,7 +77,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str
if (enable_tls_lc_str == "starttls")
params.enable_tls = LDAPClient::Params::TLSEnable::YES_STARTTLS;
else if (config.getBool(ldap_server_config + ".enable_tls"))
params.enable_tls = LDAPClient::Params::TLSEnable::YES;
params.enable_tls = LDAPClient::Params::TLSEnable::YES; //-V1048
else
params.enable_tls = LDAPClient::Params::TLSEnable::NO;
}
@ -96,7 +96,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str
else if (tls_minimum_protocol_version_lc_str == "tls1.1")
params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_1;
else if (tls_minimum_protocol_version_lc_str == "tls1.2")
params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2;
params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2; //-V1048
else
throw Exception("Bad value for 'tls_minimum_protocol_version' entry, allowed values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'", ErrorCodes::BAD_ARGUMENTS);
}
@ -113,7 +113,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str
else if (tls_require_cert_lc_str == "try")
params.tls_require_cert = LDAPClient::Params::TLSRequireCert::TRY;
else if (tls_require_cert_lc_str == "demand")
params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND;
params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND; //-V1048
else
throw Exception("Bad value for 'tls_require_cert' entry, allowed values are: 'never', 'allow', 'try', 'demand'", ErrorCodes::BAD_ARGUMENTS);
}


@ -136,7 +136,7 @@ GrantedRoles::Elements GrantedRoles::getElements() const
boost::range::set_difference(roles, roles_with_admin_option, std::back_inserter(element.ids));
if (!element.empty())
{
element.admin_option = false;
element.admin_option = false; //-V1048
elements.emplace_back(std::move(element));
}


@ -45,7 +45,7 @@ struct Quota : public IAccessEntity
struct ResourceTypeInfo
{
const char * const raw_name;
const char * const raw_name = "";
const String name; /// Lowercased with underscores, e.g. "result_rows".
const String keyword; /// Uppercased with spaces, e.g. "RESULT ROWS".
const bool output_as_float = false;


@ -20,7 +20,7 @@ namespace ErrorCodes
/** Tracks the leftmost and rightmost (x, y) data points.
*/
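/// The "//-V730" below suppresses PVS-Studio's warning that not all class members are
/// initialized by the constructor, marking the deliberately uninitialized state as accepted.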
struct AggregateFunctionBoundingRatioData
struct AggregateFunctionBoundingRatioData //-V730
{
struct Point
{


@ -220,7 +220,7 @@ private:
}
public:
AggregateFunctionHistogramData()
AggregateFunctionHistogramData() //-V730
: size(0)
, lower_bound(std::numeric_limits<Mean>::max())
, upper_bound(std::numeric_limits<Mean>::lowest())


@ -181,7 +181,7 @@ public:
/** For strings. Short strings are stored in the object itself, and long strings are allocated separately.
* NOTE It could also be suitable for arrays of numbers.
*/
struct SingleValueDataString
struct SingleValueDataString //-V730
{
private:
using Self = SingleValueDataString;


@ -13,7 +13,7 @@
namespace DB
{
namespace
namespace detail
{
/// This function returns true if both values are large and comparable.
@ -72,7 +72,7 @@ public:
Float64 factor = static_cast<Float64>(count * source.count) / total_count;
Float64 delta = mean - source.mean;
if (areComparable(count, source.count))
if (detail::areComparable(count, source.count))
mean = (source.count * source.mean + count * mean) / total_count;
else
mean = source.mean + delta * (static_cast<Float64>(count) / total_count);
@ -302,7 +302,7 @@ public:
Float64 left_delta = left_mean - source.left_mean;
Float64 right_delta = right_mean - source.right_mean;
if (areComparable(count, source.count))
if (detail::areComparable(count, source.count))
{
left_mean = (source.count * source.left_mean + count * left_mean) / total_count;
right_mean = (source.count * source.right_mean + count * right_mean) / total_count;


@ -263,7 +263,7 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
{
// sort inputs in ascending order
std::sort(array.begin(), array.end());
size_t n = level < 1 ? level * array.size() : (array.size() - 1);
// if level is 0.5 then compute the "low" median of the sorted array
// by the method of rounding.
if (level == 0.5)
@ -278,10 +278,14 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
return array[static_cast<size_t>((floor(s / 2)) - 1)];
}
}
// else quantile is the nth index of the sorted array obtained by multiplying
// level and size of array. Example if level = 0.1 and size of array is 10,
// then return array[1].
return array[n];
else
{
// else quantile is the nth index of the sorted array obtained by multiplying
// level and size of array. Example if level = 0.1 and size of array is 10,
// then return array[1].
size_t n = level < 1 ? level * array.size() : (array.size() - 1);
return array[n];
}
}
return std::numeric_limits<Value>::quiet_NaN();
}
@ -295,7 +299,7 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
for (size_t i = 0; i < size; ++i)
{
auto level = levels[indices[i]];
size_t n = level < 1 ? level * array.size() : (array.size() - 1);
// if level is 0.5 then compute the "low" median of the sorted array
// by the method of rounding.
if (level == 0.5)
@ -310,9 +314,13 @@ struct QuantileExactLow : public QuantileExactBase<Value, QuantileExactLow<Value
result[indices[i]] = array[static_cast<size_t>(floor((s / 2) - 1))];
}
}
// else quantile is the nth index of the sorted array obtained by multiplying
// level and size of array. Example if level = 0.1 and size of array is 10.
result[indices[i]] = array[n];
else
{
// else quantile is the nth index of the sorted array obtained by multiplying
// level and size of array. Example if level = 0.1 and size of array is 10.
size_t n = level < 1 ? level * array.size() : (array.size() - 1);
result[indices[i]] = array[n];
}
}
}
else
@ -337,7 +345,7 @@ struct QuantileExactHigh : public QuantileExactBase<Value, QuantileExactHigh<Val
{
// sort inputs in ascending order
std::sort(array.begin(), array.end());
size_t n = level < 1 ? level * array.size() : (array.size() - 1);
// if level is 0.5 then compute the "high" median of the sorted array
// by the method of rounding.
if (level == 0.5)
@ -345,9 +353,13 @@ struct QuantileExactHigh : public QuantileExactBase<Value, QuantileExactHigh<Val
auto s = array.size();
return array[static_cast<size_t>(floor(s / 2))];
}
// else quantile is the nth index of the sorted array obtained by multiplying
// level and size of array. Example if level = 0.1 and size of array is 10.
return array[n];
else
{
// else quantile is the nth index of the sorted array obtained by multiplying
// level and size of array. Example if level = 0.1 and size of array is 10.
size_t n = level < 1 ? level * array.size() : (array.size() - 1);
return array[n];
}
}
return std::numeric_limits<Value>::quiet_NaN();
}
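To make the index rule restructured above concrete, here is a hedged standalone sketch (not the ClickHouse implementation): for `level == 0.5` the low variant returns element `s / 2 - 1` of the sorted array when the size `s` is even while the high variant returns element `s / 2`; for any other level both return element `floor(level * s)`.

``` cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Sketch of the QuantileExactLow/QuantileExactHigh selection rule.
double quantile_exact(std::vector<double> v, double level, bool high)
{
    std::sort(v.begin(), v.end());
    size_t s = v.size();
    if (level == 0.5)
    {
        // "Low" median rounds down on even sizes, "high" median rounds up.
        if (s % 2 == 1 || high)
            return v[s / 2];
        return v[s / 2 - 1];
    }
    // Otherwise both variants take the floor(level * s)-th element.
    size_t n = level < 1 ? static_cast<size_t>(level * s) : s - 1;
    return v[n];
}

int main()
{
    std::vector<double> data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
    std::printf("low median: %g\n", quantile_exact(data, 0.5, false));  // 5
    std::printf("high median: %g\n", quantile_exact(data, 0.5, true));  // 6
    std::printf("level 0.1: %g\n", quantile_exact(data, 0.1, false));   // 2 (index 1)
}
```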
@ -361,7 +373,7 @@ struct QuantileExactHigh : public QuantileExactBase<Value, QuantileExactHigh<Val
for (size_t i = 0; i < size; ++i)
{
auto level = levels[indices[i]];
size_t n = level < 1 ? level * array.size() : (array.size() - 1);
// if level is 0.5 then compute the "high" median of the sorted array
// by the method of rounding.
if (level == 0.5)
@ -369,9 +381,13 @@ struct QuantileExactHigh : public QuantileExactBase<Value, QuantileExactHigh<Val
auto s = array.size();
result[indices[i]] = array[static_cast<size_t>(floor(s / 2))];
}
// else quantile is the nth index of the sorted array obtained by multiplying
// level and size of array. Example if level = 0.1 and size of array is 10.
result[indices[i]] = array[n];
else
{
// else quantile is the nth index of the sorted array obtained by multiplying
// level and size of array. Example if level = 0.1 and size of array is 10.
size_t n = level < 1 ? level * array.size() : (array.size() - 1);
result[indices[i]] = array[n];
}
}
}
else


@ -184,6 +184,7 @@ add_object_library(clickhouse_disks Disks)
add_object_library(clickhouse_interpreters Interpreters)
add_object_library(clickhouse_interpreters_mysql Interpreters/MySQL)
add_object_library(clickhouse_interpreters_clusterproxy Interpreters/ClusterProxy)
add_object_library(clickhouse_interpreters_jit Interpreters/JIT)
add_object_library(clickhouse_columns Columns)
add_object_library(clickhouse_storages Storages)
add_object_library(clickhouse_storages_distributed Storages/Distributed)


@ -159,12 +159,12 @@ private:
/// The number of bytes read.
size_t read_count = 0;
/// The content in the current position.
UInt8 value_l;
UInt8 value_r;
UInt8 value_l = 0;
UInt8 value_r = 0;
///
bool is_eof = false;
/// Does the cell fully fit into one byte?
bool fits_in_byte;
bool fits_in_byte = false;
};
/** TODO This code looks very suboptimal.


@ -804,7 +804,7 @@ bool Dwarf::findLocation(
findSubProgramDieForAddress(cu, die, address, base_addr_cu, subprogram);
// Subprogram is the DIE of caller function.
if (check_inline && subprogram.abbr.has_children)
if (/*check_inline &&*/ subprogram.abbr.has_children)
{
// Use an extra location and get its call file and call line, so that
// they can be used for the second last location when we don't have
@ -832,7 +832,7 @@ bool Dwarf::findLocation(
// file+line of the non-inlined outer function making the call.
// locationInfo.name is already set by the caller by looking up the
// non-inlined function @address belongs to.
info.has_file_and_line = true;
info.has_file_and_line = true; //-V1048
info.file = call_locations[0].file;
info.line = call_locations[0].line;


@ -121,7 +121,7 @@ public:
struct CallLocation
{
Path file = {};
uint64_t line;
uint64_t line = 0;
std::string_view name;
};
@ -202,8 +202,8 @@ private:
// Abbreviation for a Debugging Information Entry.
struct DIEAbbreviation
{
uint64_t code;
uint64_t tag;
uint64_t code = 0;
uint64_t tag = 0;
bool has_children = false;
std::string_view attributes;


@ -29,7 +29,7 @@ namespace ErrorCodes
struct Error
{
/// Number of times Exception with this ErrorCode had been throw.
Value count;
Value count = 0;
/// Time of the last error.
UInt64 error_time_ms = 0;
/// Message for the last error.


@ -44,7 +44,7 @@ struct ClearableHashTableCell : public BaseCell
/// Do I need to store the zero key separately (that is, can a zero key be inserted into the hash table).
static constexpr bool need_zero_value_storage = false;
ClearableHashTableCell() {}
ClearableHashTableCell() {} //-V730
ClearableHashTableCell(const Key & key_, const State & state) : BaseCell(key_, state), version(state.version) {}
};


@ -13,7 +13,7 @@ struct FixedClearableHashTableCell
using mapped_type = VoidMapped;
UInt32 version;
FixedClearableHashTableCell() {}
FixedClearableHashTableCell() {} //-V730
FixedClearableHashTableCell(const Key &, const State & state) : version(state.version) {}
const VoidKey getKey() const { return {}; }


@ -16,7 +16,7 @@ struct FixedHashMapCell
bool full;
Mapped mapped;
FixedHashMapCell() {}
FixedHashMapCell() {} //-V730
FixedHashMapCell(const Key &, const State &) : full(true) {}
FixedHashMapCell(const value_type & value_, const State &) : full(true), mapped(value_.second) {}
@ -31,7 +31,7 @@ struct FixedHashMapCell
/// Note that we have to assemble a continuous layout for the value_type on each call of getValue().
struct CellExt
{
CellExt() {}
CellExt() {} //-V730
CellExt(Key && key_, const FixedHashMapCell * ptr_) : key(key_), ptr(const_cast<FixedHashMapCell *>(ptr_)) {}
void update(Key && key_, const FixedHashMapCell * ptr_)
{
@ -76,7 +76,7 @@ struct FixedHashMapImplicitZeroCell
/// Note that we have to assemble a continuous layout for the value_type on each call of getValue().
struct CellExt
{
CellExt() {}
CellExt() {} //-V730
CellExt(Key && key_, const FixedHashMapImplicitZeroCell * ptr_) : key(key_), ptr(const_cast<FixedHashMapImplicitZeroCell *>(ptr_)) {}
void update(Key && key_, const FixedHashMapImplicitZeroCell * ptr_)
{


@ -19,7 +19,7 @@ struct FixedHashTableCell
using mapped_type = VoidMapped;
bool full;
FixedHashTableCell() {}
FixedHashTableCell() {} //-V730
FixedHashTableCell(const Key &, const State &) : full(true) {}
const VoidKey getKey() const { return {}; }
@ -267,7 +267,7 @@ public:
DB::ReadBuffer & in;
Cell cell;
size_t read_count = 0;
size_t size;
size_t size = 0;
bool is_eof = false;
bool is_initialized = false;
};


@ -73,8 +73,8 @@ struct HashSetCellWithSavedHash : public HashTableCell<Key, Hash, TState>
size_t saved_hash;
HashSetCellWithSavedHash() : Base() {}
HashSetCellWithSavedHash(const Key & key_, const typename Base::State & state) : Base(key_, state) {}
HashSetCellWithSavedHash() : Base() {} //-V730
HashSetCellWithSavedHash(const Key & key_, const typename Base::State & state) : Base(key_, state) {} //-V730
bool keyEquals(const Key & key_) const { return bitEquals(this->key, key_); }
bool keyEquals(const Key & key_, size_t hash_) const { return saved_hash == hash_ && bitEquals(this->key, key_); }


@ -305,7 +305,7 @@ template <bool need_zero_value_storage, typename Cell>
struct ZeroValueStorage;
template <typename Cell>
struct ZeroValueStorage<true, Cell>
struct ZeroValueStorage<true, Cell> //-V730
{
private:
bool has_zero = false;


@ -80,7 +80,7 @@ public:
{
public:
Reader(DB::ReadBuffer & in_)
: in(in_)
: in(in_)
{
}
@ -124,15 +124,15 @@ public:
DB::ReadBuffer & in;
Cell cell;
size_t read_count = 0;
size_t size;
size_t size = 0;
bool is_eof = false;
bool is_initialized = false;
};
class iterator
{
Self * container;
Cell * ptr;
Self * container = nullptr;
Cell * ptr = nullptr;
friend class SmallTable;
@ -158,8 +158,8 @@ public:
class const_iterator
{
const Self * container;
const Cell * ptr;
const Self * container = nullptr;
const Cell * ptr = nullptr;
friend class SmallTable;
@ -184,16 +184,16 @@ public:
};
const_iterator begin() const { return iteratorTo(buf); }
iterator begin() { return iteratorTo(buf); }
const_iterator begin() const { return iteratorTo(buf); }
iterator begin() { return iteratorTo(buf); }
const_iterator end() const { return iteratorTo(buf + m_size); }
iterator end() { return iteratorTo(buf + m_size); }
const_iterator end() const { return iteratorTo(buf + m_size); }
iterator end() { return iteratorTo(buf + m_size); }
protected:
const_iterator iteratorTo(const Cell * ptr) const { return const_iterator(this, ptr); }
iterator iteratorTo(Cell * ptr) { return iterator(this, ptr); }
const_iterator iteratorTo(const Cell * ptr) const { return const_iterator(this, ptr); }
iterator iteratorTo(Cell * ptr) { return iterator(this, ptr); }
public:


@ -79,7 +79,7 @@ struct StringHashTableHash
};
template <typename Cell>
struct StringHashTableEmpty
struct StringHashTableEmpty //-V730
{
using Self = StringHashTableEmpty;


@ -119,9 +119,9 @@ public:
class iterator
{
Self * container;
size_t bucket;
typename Impl::iterator current_it;
Self * container{};
size_t bucket{};
typename Impl::iterator current_it{};
friend class TwoLevelHashTable;
@ -156,9 +156,9 @@ public:
class const_iterator
{
Self * container;
size_t bucket;
typename Impl::const_iterator current_it;
Self * container{};
size_t bucket{};
typename Impl::const_iterator current_it{};
friend class TwoLevelHashTable;


@ -80,10 +80,7 @@ template <UInt64 MaxValue> struct MinCounterType
/// Denominator of expression for HyperLogLog algorithm.
template <UInt8 precision, int max_rank, typename HashValueType, typename DenominatorType,
DenominatorMode denominator_mode, typename Enable = void>
class __attribute__ ((packed)) Denominator;
namespace
{
class Denominator;
/// Returns true if rank storage is big.
constexpr bool isBigRankStore(UInt8 precision)
@ -91,8 +88,6 @@ constexpr bool isBigRankStore(UInt8 precision)
return precision >= 12;
}
}
/// Used to deduce denominator type depending on options provided.
template <typename HashValueType, typename DenominatorType, DenominatorMode denominator_mode, typename Enable = void>
struct IntermediateDenominator;
@ -120,7 +115,7 @@ struct IntermediateDenominator<HashValueType, DenominatorType, DenominatorMode::
/// Satisfiable when rank storage is small enough.
template <UInt8 precision, int max_rank, typename HashValueType, typename DenominatorType,
DenominatorMode denominator_mode>
class __attribute__ ((packed)) Denominator<precision, max_rank, HashValueType, DenominatorType,
class __attribute__((__packed__)) Denominator<precision, max_rank, HashValueType, DenominatorType,
denominator_mode,
std::enable_if_t<!details::isBigRankStore(precision) || !(denominator_mode == DenominatorMode::StableIfBig)>>
{
@ -164,7 +159,7 @@ private:
/// Used when rank storage is big.
template <UInt8 precision, int max_rank, typename HashValueType, typename DenominatorType,
DenominatorMode denominator_mode>
class __attribute__ ((packed)) Denominator<precision, max_rank, HashValueType, DenominatorType,
class __attribute__((__packed__)) Denominator<precision, max_rank, HashValueType, DenominatorType,
denominator_mode,
std::enable_if_t<details::isBigRankStore(precision) && denominator_mode == DenominatorMode::StableIfBig>>
{
@ -252,6 +247,7 @@ struct RankWidth<UInt64>
}
/// Sets behavior of HyperLogLog class.
enum class HyperLogLogMode
{


@ -145,6 +145,11 @@ public:
return cells.size();
}
size_t maxSize() const
{
return max_size;
}
void reset()
{
std::lock_guard lock(mutex);


@ -208,7 +208,7 @@ public:
static bool isBlocked(VariableContext current_level, bool fault_injection)
{
return counter > 0 && current_level >= level && (!fault_injection || (fault_injection && block_fault_injections));
return counter > 0 && current_level >= level && (!fault_injection || block_fault_injections);
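// (Simplified by absorption: !a || (a && b) == !a || b.)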
}
};
};


@ -388,7 +388,7 @@ void PoolWithFailoverBase<TNestedPool>::updateErrorCounts(PoolWithFailoverBase<T
{
time_t current_time = time(nullptr);
if (last_decrease_time)
if (last_decrease_time) //-V1051
{
time_t delta = current_time - last_decrease_time;


@ -44,7 +44,7 @@ using SharedBlockPtr = boost::intrusive_ptr<detail::SharedBlock>;
struct SharedBlockRowRef
{
ColumnRawPtrs * columns = nullptr;
size_t row_num;
size_t row_num = 0;
SharedBlockPtr shared_block;
void swap(SharedBlockRowRef & other)


@ -71,7 +71,7 @@ public:
LogsLevel client_logs_level = LogsLevel::none;
String query;
UInt64 normalized_query_hash;
UInt64 normalized_query_hash = 0;
};
using ThreadGroupStatusPtr = std::shared_ptr<ThreadGroupStatus>;


@ -303,7 +303,7 @@ namespace VolnitskyTraits
{
/// ngram for Ul
chars.c0 = c0u;
chars.c1 = c1l;
chars.c1 = c1l; //-V1048
putNGramBase(n, offset);
}


@ -198,7 +198,7 @@ std::pair<ResponsePtr, Undo> TestKeeperCreateRequest::process(TestKeeper::Contai
else
{
TestKeeper::Node created_node;
created_node.seq_num = 0;
created_node.seq_num = 0; //-V1048
created_node.stat.czxid = zxid;
created_node.stat.mzxid = zxid;
created_node.stat.ctime = std::chrono::system_clock::now().time_since_epoch() / std::chrono::milliseconds(1);
@ -271,7 +271,7 @@ std::pair<ResponsePtr, Undo> TestKeeperRemoveRequest::process(TestKeeper::Contai
auto & parent = container.at(parentPath(path));
--parent.stat.numChildren;
++parent.stat.cversion;
response.error = Error::ZOK;
response.error = Error::ZOK; //-V1048
undo = [prev_node, &container, path = path]
{
@ -293,7 +293,7 @@ std::pair<ResponsePtr, Undo> TestKeeperExistsRequest::process(TestKeeper::Contai
if (it != container.end())
{
response.stat = it->second.stat;
response.error = Error::ZOK;
response.error = Error::ZOK; //-V1048
}
else
{
@ -316,7 +316,7 @@ std::pair<ResponsePtr, Undo> TestKeeperGetRequest::process(TestKeeper::Container
{
response.stat = it->second.stat;
response.data = it->second.data;
response.error = Error::ZOK;
response.error = Error::ZOK; //-V1048
}
return { std::make_shared<GetResponse>(response), {} };
@ -343,7 +343,7 @@ std::pair<ResponsePtr, Undo> TestKeeperSetRequest::process(TestKeeper::Container
it->second.data = data;
++container.at(parentPath(path)).stat.cversion;
response.stat = it->second.stat;
response.error = Error::ZOK;
response.error = Error::ZOK; //-V1048
undo = [prev_node, &container, path = path]
{
@ -387,7 +387,7 @@ std::pair<ResponsePtr, Undo> TestKeeperListRequest::process(TestKeeper::Containe
}
response.stat = it->second.stat;
response.error = Error::ZOK;
response.error = Error::ZOK; //-V1048
}
return { std::make_shared<ListResponse>(response), {} };
@ -407,7 +407,7 @@ std::pair<ResponsePtr, Undo> TestKeeperCheckRequest::process(TestKeeper::Contain
}
else
{
response.error = Error::ZOK;
response.error = Error::ZOK; //-V1048
}
return { std::make_shared<CheckResponse>(response), {} };
@ -422,7 +422,7 @@ std::pair<ResponsePtr, Undo> TestKeeperMultiRequest::process(TestKeeper::Contain
try
{
auto request_it = requests.begin();
response.error = Error::ZOK;
response.error = Error::ZOK; //-V1048
while (request_it != requests.end())
{
const TestKeeperRequest & concrete_request = dynamic_cast<const TestKeeperRequest &>(**request_it);


@ -25,7 +25,7 @@ namespace Coordination
struct ZooKeeperResponse : virtual Response
{
XID xid = 0;
int64_t zxid;
int64_t zxid = 0;
virtual ~ZooKeeperResponse() override = default;
virtual void readImpl(ReadBuffer &) = 0;


@ -79,7 +79,6 @@ std::filesystem::path getMountPoint(std::filesystem::path absolute_path)
if (device_id != parent_device_id)
return absolute_path;
absolute_path = parent;
device_id = parent_device_id;
}
return absolute_path;


@ -35,29 +35,6 @@
using namespace DB;
namespace std
{
template <typename T>
std::ostream & operator<<(std::ostream & ostr, const std::optional<T> & opt)
{
if (!opt)
{
return ostr << "<empty optional>";
}
return ostr << *opt;
}
template <typename T>
std::vector<T> operator+(std::vector<T> && left, std::vector<T> && right)
{
std::vector<T> result(std::move(left));
std::move(std::begin(right), std::end(right), std::back_inserter(result));
return result;
}
}
namespace
{
@ -337,6 +314,14 @@ CodecTestSequence operator+(CodecTestSequence && left, const CodecTestSequence &
return left.append(right);
}
std::vector<CodecTestSequence> operator+(const std::vector<CodecTestSequence> & left, const std::vector<CodecTestSequence> & right)
{
std::vector<CodecTestSequence> result(std::move(left));
std::move(std::begin(right), std::end(right), std::back_inserter(result));
return result;
}
template <typename T>
CodecTestSequence operator*(CodecTestSequence && left, T times)
{
@ -362,7 +347,7 @@ std::ostream & operator<<(std::ostream & ostr, const Codec & codec)
{
return ostr << "Codec{"
<< "name: " << codec.codec_statement
<< ", expected_compression_ratio: " << codec.expected_compression_ratio
<< ", expected_compression_ratio: " << *codec.expected_compression_ratio
<< "}";
}
@ -775,15 +760,13 @@ auto FFand0Generator = []()
return [step = 0](auto i) mutable
{
decltype(i) result;
if (step++ % 2 == 0)
{
memset(&result, 0, sizeof(result));
}
else
{
memset(&result, 0xFF, sizeof(result));
}
if (step % 2 == 0)
memset(&result, 0, sizeof(result));
else
memset(&result, 0xFF, sizeof(result));
++step;
return result;
};
};
@ -1129,7 +1112,7 @@ template <typename ValueType>
auto DDCompatibilityTestSequence()
{
// Generates sequences with double delta in given range.
auto dd_generator = [prev_delta = static_cast<Int64>(0), prev = static_cast<Int64>(0)](auto dd) mutable
auto dd_generator = [prev_delta = static_cast<Int64>(0), prev = static_cast<Int64>(0)](auto dd) mutable //-V788
{
const auto curr = dd + prev + prev_delta;
prev = curr;


@ -30,10 +30,10 @@ static constexpr auto CURRENT_CHANGELOG_VERSION = ChangelogVersion::V0;
struct ChangelogRecordHeader
{
ChangelogVersion version = CURRENT_CHANGELOG_VERSION;
uint64_t index; /// entry log number
uint64_t term;
nuraft::log_val_type value_type;
uint64_t blob_size;
uint64_t index = 0; /// entry log number
uint64_t term = 0;
nuraft::log_val_type value_type{};
uint64_t blob_size = 0;
};
/// Changelog record on disk


@ -103,7 +103,8 @@ struct KeeperStorageSyncRequest final : public KeeperStorageRequest
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage::Container & /* container */, KeeperStorage::Ephemerals & /* ephemerals */, int64_t /* zxid */, int64_t /* session_id */) const override
{
auto response = zk_request->makeResponse();
dynamic_cast<Coordination::ZooKeeperSyncResponse *>(response.get())->path = dynamic_cast<Coordination::ZooKeeperSyncRequest *>(zk_request.get())->path;
dynamic_cast<Coordination::ZooKeeperSyncResponse &>(*response).path
= dynamic_cast<Coordination::ZooKeeperSyncRequest &>(*zk_request).path;
return {response, {}};
}
};


@ -64,7 +64,7 @@ TEST(CoordinationTest, BufferSerde)
{
Coordination::ZooKeeperRequestPtr request = Coordination::ZooKeeperRequestFactory::instance().get(Coordination::OpNum::Get);
request->xid = 3;
dynamic_cast<Coordination::ZooKeeperGetRequest *>(request.get())->path = "/path/value";
dynamic_cast<Coordination::ZooKeeperGetRequest &>(*request).path = "/path/value";
DB::WriteBufferFromNuraftBuffer wbuf;
request->write(wbuf);
@ -90,7 +90,7 @@ TEST(CoordinationTest, BufferSerde)
EXPECT_EQ(request_read->getOpNum(), Coordination::OpNum::Get);
EXPECT_EQ(request_read->xid, 3);
EXPECT_EQ(dynamic_cast<Coordination::ZooKeeperGetRequest *>(request_read.get())->path, "/path/value");
EXPECT_EQ(dynamic_cast<Coordination::ZooKeeperGetRequest &>(*request_read).path, "/path/value");
}
template <typename StateMachine>


@ -474,19 +474,19 @@ namespace MySQLReplication
}
case MYSQL_TYPE_NEWDECIMAL:
{
const auto & dispatch = [](const size_t & precision, const size_t & scale, const auto & function) -> Field
const auto & dispatch = [](size_t precision, size_t scale, const auto & function) -> Field
{
if (precision <= DecimalUtils::max_precision<Decimal32>)
return Field(function(precision, scale, Decimal32()));
else if (precision <= DecimalUtils::max_precision<Decimal64>)
else if (precision <= DecimalUtils::max_precision<Decimal64>) //-V547
return Field(function(precision, scale, Decimal64()));
else if (precision <= DecimalUtils::max_precision<Decimal128>)
else if (precision <= DecimalUtils::max_precision<Decimal128>) //-V547
return Field(function(precision, scale, Decimal128()));
return Field(function(precision, scale, Decimal256()));
};
const auto & read_decimal = [&](const size_t & precision, const size_t & scale, auto decimal)
const auto & read_decimal = [&](size_t precision, size_t scale, auto decimal)
{
using DecimalType = decltype(decimal);
static constexpr size_t digits_per_integer = 9;
@ -543,7 +543,7 @@ namespace MySQLReplication
UInt32 val = 0;
size_t to_read = compressed_bytes_map[compressed_decimals];
if (to_read)
if (to_read) //-V547
{
readBigEndianStrict(payload, reinterpret_cast<char *>(&val), to_read);
res *= intExp10OfSize<DecimalType>(compressed_decimals);


@ -257,6 +257,7 @@ class FirstMessage : public FrontMessage
{
public:
Int32 payload_size;
FirstMessage() = delete;
FirstMessage(int payload_size_) : payload_size(payload_size_) {}
};
@ -264,8 +265,9 @@ public:
class CancelRequest : public FirstMessage
{
public:
Int32 process_id;
Int32 secret_key;
Int32 process_id = 0;
Int32 secret_key = 0;
CancelRequest(int payload_size_) : FirstMessage(payload_size_) {}
void deserialize(ReadBuffer & in) override


@ -101,7 +101,7 @@ class IColumn;
M(Float, totals_auto_threshold, 0.5, "The threshold for totals_mode = 'auto'.", 0) \
\
M(Bool, allow_suspicious_low_cardinality_types, false, "In CREATE TABLE statement allows specifying LowCardinality modifier for types of small fixed size (8 or less). Enabling this may increase merge times and memory consumption.", 0) \
M(Bool, compile_expressions, false, "Compile some scalar functions and operators to native code.", 0) \
M(Bool, compile_expressions, true, "Compile some scalar functions and operators to native code.", 0) \
M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \
M(UInt64, group_by_two_level_threshold, 100000, "From what number of keys, a two-level aggregation starts. 0 - the threshold is not set.", 0) \
M(UInt64, group_by_two_level_threshold_bytes, 100000000, "From what size of the aggregation state in bytes, a two-level aggregation begins to be used. 0 - the threshold is not set. Two-level aggregation is used when at least one of the thresholds is triggered.", 0) \


@ -4,9 +4,7 @@
#ifdef __linux__
#include <linux/version.h>
#endif
#ifdef __linux__
/// Detect whether epoll_wait works correctly with nested epoll fds.
/// Polling nested epoll fds from epoll_wait is required for async_socket_for_remote and use_hedged_requests.
///
@ -16,21 +14,15 @@
/// [2]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=0c54a6a44bf3
bool nestedEpollWorks(Poco::Logger * log)
{
bool nested_epoll_works =
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 13))
/// the check is correct since there will be no more 5.5.x releases.
false
#else
true
#endif
;
if (!nested_epoll_works)
{
if (log)
LOG_WARNING(log, "Nested epoll_wait has some issues on kernels [5.5.0, 5.6.13). You should upgrade it to avoid possible issues.");
}
return nested_epoll_works;
return false;
#else
(void)log;
return true;
#endif
}
#else
bool nestedEpollWorks(Poco::Logger *) { return true; }


@ -126,7 +126,7 @@ struct SortCursorImpl
/// Prevent using pos instead of getRow()
private:
size_t pos;
size_t pos = 0;
};
using SortCursorImpls = std::vector<SortCursorImpl>;


@ -158,7 +158,7 @@ public:
/** Set the approximate total number of rows to read.
*/
virtual void addTotalRowsApprox(size_t value) { total_rows_approx += value; }
void addTotalRowsApprox(size_t value) { total_rows_approx += value; }
/** Ask to abort the receipt of data as soon as possible.


@ -51,7 +51,7 @@ static void writeData(const IDataType & type, const ColumnPtr & column, WriteBuf
ISerialization::SerializeBinaryBulkSettings settings;
settings.getter = [&ostr](ISerialization::SubstreamPath) -> WriteBuffer * { return &ostr; };
settings.position_independent_encoding = false;
settings.low_cardinality_max_dictionary_size = 0;
settings.low_cardinality_max_dictionary_size = 0; //-V1048
auto serialization = type.getDefaultSerialization();


@ -210,7 +210,8 @@ void PostgreSQLBlockInputStream::insertValue(IColumn & column, std::string_view
{
max_dimension = std::max(max_dimension, dimension);
if (--dimension == 0)
--dimension;
if (dimension == 0)
break;
dimensions[dimension].emplace_back(Array(dimensions[dimension + 1].begin(), dimensions[dimension + 1].end()));


@ -63,7 +63,7 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream(
// Do not deduplicate insertions into MV if the main insertion is Ok
if (disable_deduplication_for_children)
insert_context->setSetting("insert_deduplicate", false);
insert_context->setSetting("insert_deduplicate", Field{false});
// Separate min_insert_block_size_rows/min_insert_block_size_bytes for children
if (insert_settings.min_insert_block_size_rows_for_materialized_views)


@ -5,15 +5,13 @@
#endif
#if USE_EMBEDDED_COMPILER
# include <DataTypes/DataTypeDate.h>
# include <DataTypes/DataTypeDateTime.h>
# include <DataTypes/DataTypeFixedString.h>
# include <DataTypes/DataTypeInterval.h>
# include <DataTypes/DataTypeNullable.h>
# include <DataTypes/DataTypeUUID.h>
# include <DataTypes/DataTypesNumber.h>
# include <Common/typeid_cast.h>
# include <Common/Exception.h>
# include <DataTypes/IDataType.h>
# include <DataTypes/DataTypeNullable.h>
# include <DataTypes/DataTypeFixedString.h>
# include <Columns/ColumnConst.h>
# include <Columns/ColumnNullable.h>
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wunused-parameter"
@ -29,60 +27,56 @@ namespace ErrorCodes
extern const int NOT_IMPLEMENTED;
}
template <typename... Ts>
static inline bool typeIsEither(const IDataType & type)
{
return (typeid_cast<const Ts *>(&type) || ...);
}
static inline bool typeIsSigned(const IDataType & type)
{
return typeIsEither<
DataTypeInt8, DataTypeInt16, DataTypeInt32, DataTypeInt64,
DataTypeFloat32, DataTypeFloat64, DataTypeInterval
>(type);
WhichDataType data_type(type);
return data_type.isNativeInt() || data_type.isFloat();
}
static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const IDataType & type)
{
if (auto * nullable = typeid_cast<const DataTypeNullable *>(&type))
WhichDataType data_type(type);
if (data_type.isNullable())
{
auto * wrapped = toNativeType(builder, *nullable->getNestedType());
const auto & data_type_nullable = static_cast<const DataTypeNullable&>(type);
auto * wrapped = toNativeType(builder, *data_type_nullable.getNestedType());
return wrapped ? llvm::StructType::get(wrapped, /* is null = */ builder.getInt1Ty()) : nullptr;
}
/// LLVM doesn't have unsigned types, it has unsigned instructions.
if (typeIsEither<DataTypeInt8, DataTypeUInt8>(type))
if (data_type.isInt8() || data_type.isUInt8())
return builder.getInt8Ty();
if (typeIsEither<DataTypeInt16, DataTypeUInt16, DataTypeDate>(type))
else if (data_type.isInt16() || data_type.isUInt16() || data_type.isDate())
return builder.getInt16Ty();
if (typeIsEither<DataTypeInt32, DataTypeUInt32, DataTypeDateTime>(type))
else if (data_type.isInt32() || data_type.isUInt32() || data_type.isDateTime())
return builder.getInt32Ty();
if (typeIsEither<DataTypeInt64, DataTypeUInt64, DataTypeInterval>(type))
else if (data_type.isInt64() || data_type.isUInt64())
return builder.getInt64Ty();
if (typeIsEither<DataTypeUUID>(type))
return builder.getInt128Ty();
if (typeIsEither<DataTypeFloat32>(type))
else if (data_type.isFloat32())
return builder.getFloatTy();
if (typeIsEither<DataTypeFloat64>(type))
else if (data_type.isFloat64())
return builder.getDoubleTy();
if (auto * fixed_string = typeid_cast<const DataTypeFixedString *>(&type))
return llvm::VectorType::get(builder.getInt8Ty(), fixed_string->getN());
else if (data_type.isFixedString())
{
const auto & data_type_fixed_string = static_cast<const DataTypeFixedString &>(type);
return llvm::VectorType::get(builder.getInt8Ty(), data_type_fixed_string.getN());
}
return nullptr;
}
static inline bool canBeNativeType(const IDataType & type)
{
if (auto * nullable = typeid_cast<const DataTypeNullable *>(&type))
return canBeNativeType(*nullable->getNestedType());
WhichDataType data_type(type);
return typeIsEither<DataTypeInt8, DataTypeUInt8>(type)
|| typeIsEither<DataTypeInt16, DataTypeUInt16, DataTypeDate>(type)
|| typeIsEither<DataTypeInt32, DataTypeUInt32, DataTypeDateTime>(type)
|| typeIsEither<DataTypeInt64, DataTypeUInt64, DataTypeInterval>(type)
|| typeIsEither<DataTypeUUID>(type)
|| typeIsEither<DataTypeFloat32>(type)
|| typeIsEither<DataTypeFloat64>(type)
|| typeid_cast<const DataTypeFixedString *>(&type);
if (data_type.isNullable())
{
const auto & data_type_nullable = static_cast<const DataTypeNullable&>(type);
return canBeNativeType(*data_type_nullable.getNestedType());
}
return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isFloat() || data_type.isFixedString() || data_type.isDate();
}
static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const DataTypePtr & type)
@ -102,45 +96,98 @@ static inline llvm::Value * nativeBoolCast(llvm::IRBuilder<> & b, const DataType
return b.CreateICmpNE(value, zero);
if (value->getType()->isFloatingPointTy())
return b.CreateFCmpONE(value, zero); /// QNaN is false
throw Exception("Cannot cast non-number " + from->getName() + " to bool", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cast non-number {} to bool", from->getName());
}
static inline llvm::Value * nativeCast(llvm::IRBuilder<> & b, const DataTypePtr & from, llvm::Value * value, llvm::Type * to)
{
auto * n_from = value->getType();
if (n_from == to)
return value;
if (n_from->isIntegerTy() && to->isFloatingPointTy())
else if (n_from->isIntegerTy() && to->isFloatingPointTy())
return typeIsSigned(*from) ? b.CreateSIToFP(value, to) : b.CreateUIToFP(value, to);
if (n_from->isFloatingPointTy() && to->isIntegerTy())
else if (n_from->isFloatingPointTy() && to->isIntegerTy())
return typeIsSigned(*from) ? b.CreateFPToSI(value, to) : b.CreateFPToUI(value, to);
if (n_from->isIntegerTy() && to->isIntegerTy())
else if (n_from->isIntegerTy() && to->isIntegerTy())
return b.CreateIntCast(value, to, typeIsSigned(*from));
if (n_from->isFloatingPointTy() && to->isFloatingPointTy())
else if (n_from->isFloatingPointTy() && to->isFloatingPointTy())
return b.CreateFPCast(value, to);
throw Exception("Cannot cast " + from->getName() + " to requested type", ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cast {} to requested type", from->getName());
}
static inline llvm::Value * nativeCast(llvm::IRBuilder<> & b, const DataTypePtr & from, llvm::Value * value, const DataTypePtr & to)
{
auto * n_to = toNativeType(b, to);
if (value->getType() == n_to)
{
return value;
if (from->isNullable() && to->isNullable())
}
else if (from->isNullable() && to->isNullable())
{
auto * inner = nativeCast(b, removeNullable(from), b.CreateExtractValue(value, {0}), to);
return b.CreateInsertValue(inner, b.CreateExtractValue(value, {1}), {1});
}
if (from->isNullable())
else if (from->isNullable())
{
return nativeCast(b, removeNullable(from), b.CreateExtractValue(value, {0}), to);
if (to->isNullable())
}
else if (to->isNullable())
{
auto * inner = nativeCast(b, from, value, removeNullable(to));
return b.CreateInsertValue(llvm::Constant::getNullValue(n_to), inner, {0});
}
return nativeCast(b, from, value, n_to);
}
static inline llvm::Constant * getColumnNativeValue(llvm::IRBuilderBase & builder, const DataTypePtr & column_type, const IColumn & column, size_t index)
{
if (const auto * constant = typeid_cast<const ColumnConst *>(&column))
{
return getColumnNativeValue(builder, column_type, constant->getDataColumn(), 0);
}
WhichDataType column_data_type(column_type);
auto * type = toNativeType(builder, column_type);
if (!type || column.size() <= index)
return nullptr;
if (column_data_type.isNullable())
{
const auto & nullable_data_type = assert_cast<const DataTypeNullable &>(*column_type);
const auto & nullable_column = assert_cast<const ColumnNullable &>(column);
auto * value = getColumnNativeValue(builder, nullable_data_type.getNestedType(), nullable_column.getNestedColumn(), index);
auto * is_null = llvm::ConstantInt::get(type->getContainedType(1), nullable_column.isNullAt(index));
return value ? llvm::ConstantStruct::get(static_cast<llvm::StructType *>(type), value, is_null) : nullptr;
}
else if (column_data_type.isFloat32())
{
return llvm::ConstantFP::get(type, assert_cast<const ColumnVector<Float32> &>(column).getElement(index));
}
else if (column_data_type.isFloat64())
{
return llvm::ConstantFP::get(type, assert_cast<const ColumnVector<Float64> &>(column).getElement(index));
}
else if (column_data_type.isNativeUInt() || column_data_type.isDateOrDateTime())
{
return llvm::ConstantInt::get(type, column.getUInt(index));
}
else if (column_data_type.isNativeInt())
{
return llvm::ConstantInt::get(type, column.getInt(index));
}
return nullptr;
}
}
#endif
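
A condensed sketch of the WhichDataType-based type mapping introduced above; the enum and function name are illustrative, and llvm::VectorType::get is assumed to take (element type, element count), as in the LLVM version this file targets:

#include <llvm/IR/IRBuilder.h>

enum class Which { Int8, Int16, Int32, Int64, Float32, Float64, FixedString };

static llvm::Type * toNativeTypeSketch(llvm::IRBuilderBase & b, Which which, size_t fixed_string_n)
{
    switch (which)
    {
        case Which::Int8:        return b.getInt8Ty();
        case Which::Int16:       return b.getInt16Ty();
        case Which::Int32:       return b.getInt32Ty();
        case Which::Int64:       return b.getInt64Ty();
        case Which::Float32:     return b.getFloatTy();
        case Which::Float64:     return b.getDoubleTy();
        case Which::FixedString: return llvm::VectorType::get(b.getInt8Ty(), fixed_string_n);
    }
    return nullptr; /// anything else is not representable as a native value
}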

View File

@ -160,7 +160,7 @@ struct IndexesSerializationType
return std::make_shared<DataTypeUInt16>();
if (type == TUInt32)
return std::make_shared<DataTypeUInt32>();
if (type == TUInt64)
if (type == TUInt64) //-V547
return std::make_shared<DataTypeUInt64>();
throw Exception("Can't create DataType from IndexesSerializationType.", ErrorCodes::LOGICAL_ERROR);

View File

@ -105,9 +105,9 @@ DataTypePtr convertMySQLDataType(MultiEnum<MySQLDataTypesSupport> type_support,
{
if (precision <= DecimalUtils::max_precision<Decimal32>)
res = std::make_shared<DataTypeDecimal<Decimal32>>(precision, scale);
else if (precision <= DecimalUtils::max_precision<Decimal64>)
else if (precision <= DecimalUtils::max_precision<Decimal64>) //-V547
res = std::make_shared<DataTypeDecimal<Decimal64>>(precision, scale);
else if (precision <= DecimalUtils::max_precision<Decimal128>)
else if (precision <= DecimalUtils::max_precision<Decimal128>) //-V547
res = std::make_shared<DataTypeDecimal<Decimal128>>(precision, scale);
}

View File

@ -423,7 +423,7 @@ DataTypePtr getLeastSupertype(const DataTypes & types)
size_t min_bit_width_of_integer = std::max(max_bits_of_signed_integer, max_bits_of_unsigned_integer);
/// If unsigned is not covered by signed.
if (max_bits_of_signed_integer && max_bits_of_unsigned_integer >= max_bits_of_signed_integer)
if (max_bits_of_signed_integer && max_bits_of_unsigned_integer >= max_bits_of_signed_integer) //-V1051
{
// Because 128 and 256 bit integers are significantly slower, we should not promote to them.
// But if we already have wide numbers, promotion is necessary.
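
A worked example of this promotion rule, as a hedged sketch (minSignedBitWidthSketch is an illustrative name; the real code additionally avoids promoting past 64 bits unless wide integers are already involved, per the comment above):

#include <algorithm>
#include <cstddef>

static size_t minSignedBitWidthSketch(size_t max_bits_of_signed_integer, size_t max_bits_of_unsigned_integer)
{
    /// Start from the widest integer seen among the types.
    size_t min_bit_width_of_integer = std::max(max_bits_of_signed_integer, max_bits_of_unsigned_integer);

    /// If the widest unsigned type is not covered by the widest signed type,
    /// a signed type of twice the unsigned width is required:
    /// e.g. Int32 + UInt32 -> 64 bits -> Int64.
    if (max_bits_of_signed_integer && max_bits_of_unsigned_integer >= max_bits_of_signed_integer)
        min_bit_width_of_integer = std::max(min_bit_width_of_integer, max_bits_of_unsigned_integer * 2);

    return min_bit_width_of_integer;
}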

View File

@ -15,8 +15,6 @@
#include <Core/iostream_debug_helpers.h>
namespace std
{
template <typename T>
inline std::ostream& operator<<(std::ostream & ostr, const std::vector<T> & v)
@ -29,8 +27,6 @@ inline std::ostream& operator<<(std::ostream & ostr, const std::vector<T> & v)
return ostr << "] (" << v.size() << ") items";
}
}
using namespace DB;
struct ParseDataTypeTestCase

View File

@ -37,7 +37,7 @@ static auto typesFromString(const std::string & str)
struct TypesTestCase
{
const char * from_types;
const char * from_types = nullptr;
const char * expected_type = nullptr;
};

View File

@ -102,7 +102,7 @@ StoragePtr DatabaseAtomic::detachTable(const String & name)
auto table = DatabaseOrdinary::detachTableUnlocked(name, lock);
table_name_to_path.erase(name);
detached_tables.emplace(table->getStorageID().uuid, table);
not_in_use = cleanupDetachedTables();
not_in_use = cleanupDetachedTables(); //-V1001
return table;
}

View File

@ -67,11 +67,11 @@ static DataTypePtr convertPostgreSQLDataType(String & type, bool is_nullable, ui
if (precision <= DecimalUtils::max_precision<Decimal32>)
res = std::make_shared<DataTypeDecimal<Decimal32>>(precision, scale);
else if (precision <= DecimalUtils::max_precision<Decimal64>)
else if (precision <= DecimalUtils::max_precision<Decimal64>) //-V547
res = std::make_shared<DataTypeDecimal<Decimal64>>(precision, scale);
else if (precision <= DecimalUtils::max_precision<Decimal128>)
else if (precision <= DecimalUtils::max_precision<Decimal128>) //-V547
res = std::make_shared<DataTypeDecimal<Decimal128>>(precision, scale);
else if (precision <= DecimalUtils::max_precision<Decimal256>)
else if (precision <= DecimalUtils::max_precision<Decimal256>) //-V547
res = std::make_shared<DataTypeDecimal<Decimal256>>(precision, scale);
else
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Precision {} and scale {} are too big and not supported", precision, scale);
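
For context, the max_precision thresholds tested above are the standard decimal capacities (Decimal32: 9 digits, Decimal64: 18, Decimal128: 38, Decimal256: 76), so e.g. a PostgreSQL numeric(20, 4) maps to DataTypeDecimal<Decimal128>. A minimal sketch with the capacities hard-coded:

#include <cstddef>

static const char * decimalTypeForPrecision(size_t precision)
{
    if (precision <= 9)
        return "Decimal32";
    if (precision <= 18)
        return "Decimal64";
    if (precision <= 38)
        return "Decimal128";
    if (precision <= 76)
        return "Decimal256";
    return nullptr; /// too big: rejected with BAD_ARGUMENTS above
}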

View File

@ -176,8 +176,9 @@ Columns CacheDictionary<dictionary_key_type>::getColumns(
ProfileEvents::increment(ProfileEvents::DictCacheKeysExpired, expired_keys_size);
ProfileEvents::increment(ProfileEvents::DictCacheKeysNotFound, not_found_keys_size);
query_count.fetch_add(keys.size());
hit_count.fetch_add(found_keys_size);
query_count.fetch_add(keys.size(), std::memory_order_relaxed);
hit_count.fetch_add(found_keys_size, std::memory_order_relaxed);
found_count.fetch_add(found_keys_size, std::memory_order_relaxed);
MutableColumns & fetched_columns_from_storage = result_of_fetch_from_storage.fetched_columns;
const PaddedPODArray<KeyState> & key_index_to_state_from_storage = result_of_fetch_from_storage.key_index_to_state;
@ -296,8 +297,9 @@ ColumnUInt8::Ptr CacheDictionary<dictionary_key_type>::hasKeys(const Columns & k
ProfileEvents::increment(ProfileEvents::DictCacheKeysExpired, expired_keys_size);
ProfileEvents::increment(ProfileEvents::DictCacheKeysNotFound, not_found_keys_size);
query_count.fetch_add(keys.size());
hit_count.fetch_add(found_keys_size);
query_count.fetch_add(keys.size(), std::memory_order_relaxed);
hit_count.fetch_add(found_keys_size, std::memory_order_relaxed);
found_count.fetch_add(found_keys_size, std::memory_order_relaxed);
size_t keys_to_update_size = expired_keys_size + not_found_keys_size;
auto update_unit = std::make_shared<CacheDictionaryUpdateUnit<dictionary_key_type>>(key_columns, result_of_fetch_from_storage.key_index_to_state, request, keys_to_update_size);
@ -365,8 +367,10 @@ ColumnPtr CacheDictionary<dictionary_key_type>::getHierarchy(
{
if (dictionary_key_type == DictionaryKeyType::simple)
{
auto result = getKeysHierarchyDefaultImplementation(this, key_column, key_type);
size_t keys_found;
auto result = getKeysHierarchyDefaultImplementation(this, key_column, key_type, keys_found);
query_count.fetch_add(key_column->size(), std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}
else
@ -381,8 +385,10 @@ ColumnUInt8::Ptr CacheDictionary<dictionary_key_type>::isInHierarchy(
{
if (dictionary_key_type == DictionaryKeyType::simple)
{
auto result = getKeysIsInHierarchyDefaultImplementation(this, key_column, in_key_column, key_type);
size_t keys_found;
auto result = getKeysIsInHierarchyDefaultImplementation(this, key_column, in_key_column, key_type, keys_found);
query_count.fetch_add(key_column->size(), std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}
else
@ -668,6 +674,8 @@ void CacheDictionary<dictionary_key_type>::update(CacheDictionaryUpdateUnitPtr<d
ProfileEvents::increment(ProfileEvents::DictCacheKeysRequestedMiss, requested_keys_size - found_keys_size);
ProfileEvents::increment(ProfileEvents::DictCacheKeysRequestedFound, found_keys_size);
ProfileEvents::increment(ProfileEvents::DictCacheRequests);
found_count.fetch_add(found_keys_size, std::memory_order_relaxed);
}
else
{

View File

@ -75,9 +75,20 @@ public:
size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
double getFoundRate() const override
{
size_t queries = query_count.load(std::memory_order_relaxed);
if (!queries)
return 0;
return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
}
double getHitRate() const override
{
return static_cast<double>(hit_count.load(std::memory_order_acquire)) / query_count.load(std::memory_order_relaxed);
size_t queries = query_count.load(std::memory_order_relaxed);
if (!queries)
return 0;
return static_cast<double>(hit_count.load(std::memory_order_acquire)) / queries;
}
bool supportUpdates() const override { return false; }
@ -204,6 +215,7 @@ private:
mutable std::atomic<size_t> hit_count{0};
mutable std::atomic<size_t> query_count{0};
mutable std::atomic<size_t> found_count{0};
};
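
The same counter pattern recurs in every dictionary touched by this diff. A self-contained sketch of it (DictionaryStats is an illustrative name): the statistics counters need atomicity but no ordering guarantees, so memory_order_relaxed suffices, and the rate getters must guard against division by zero before any query has run.

#include <atomic>
#include <cstddef>

struct DictionaryStats
{
    std::atomic<size_t> query_count{0};
    std::atomic<size_t> found_count{0};

    void onQuery(size_t keys_requested, size_t keys_found)
    {
        query_count.fetch_add(keys_requested, std::memory_order_relaxed);
        found_count.fetch_add(keys_found, std::memory_order_relaxed);
    }

    double foundRate() const
    {
        size_t queries = query_count.load(std::memory_order_relaxed);
        if (!queries)
            return 0; /// avoid 0/0 before the first query
        return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
    }
};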

View File

@ -106,6 +106,8 @@ Columns DirectDictionary<dictionary_key_type>::getColumns(
auto result_columns = request.makeAttributesResultColumns();
size_t keys_found = 0;
for (size_t attribute_index = 0; attribute_index < result_columns.size(); ++attribute_index)
{
if (!request.shouldFillResultColumnWithIndex(attribute_index))
@ -124,7 +126,10 @@ Columns DirectDictionary<dictionary_key_type>::getColumns(
const auto * it = key_to_fetched_index.find(requested_key);
if (it)
{
fetched_column_from_storage->get(it->getMapped(), value_to_insert);
++keys_found;
}
else
value_to_insert = default_value_provider.getDefaultValue(requested_key_index);
@ -133,6 +138,7 @@ Columns DirectDictionary<dictionary_key_type>::getColumns(
}
query_count.fetch_add(requested_keys_size, std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return request.filterRequestedColumns(result_columns);
}
@ -181,6 +187,8 @@ ColumnUInt8::Ptr DirectDictionary<dictionary_key_type>::hasKeys(
stream->readPrefix();
size_t keys_found = 0;
while (const auto block = stream->read())
{
/// Split into keys columns and attribute columns
@ -198,6 +206,8 @@ ColumnUInt8::Ptr DirectDictionary<dictionary_key_type>::hasKeys(
assert(it);
size_t result_data_found_index = it->getMapped();
/// block_keys_size cannot be used because of duplicates.
keys_found += !result_data[result_data_found_index];
result_data[result_data_found_index] = true;
block_keys_extractor.rollbackCurrentKey();
@ -209,6 +219,7 @@ ColumnUInt8::Ptr DirectDictionary<dictionary_key_type>::hasKeys(
stream->readSuffix();
query_count.fetch_add(requested_keys_size, std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}
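
A small sketch of the duplicate-safe counting above (markFound is an illustrative name): because requested keys may repeat across blocks, a key is counted only on its transition from not-found to found.

#include <cstddef>
#include <vector>

static size_t markFound(std::vector<bool> & result_data, const std::vector<size_t> & found_indices)
{
    size_t keys_found = 0;
    for (size_t index : found_indices)
    {
        keys_found += !result_data[index]; /// count each distinct requested key once
        result_data[index] = true;
    }
    return keys_found;
}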
@ -220,8 +231,10 @@ ColumnPtr DirectDictionary<dictionary_key_type>::getHierarchy(
{
if (dictionary_key_type == DictionaryKeyType::simple)
{
auto result = getKeysHierarchyDefaultImplementation(this, key_column, key_type);
size_t keys_found;
auto result = getKeysHierarchyDefaultImplementation(this, key_column, key_type, keys_found);
query_count.fetch_add(key_column->size(), std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}
else
@ -236,8 +249,10 @@ ColumnUInt8::Ptr DirectDictionary<dictionary_key_type>::isInHierarchy(
{
if (dictionary_key_type == DictionaryKeyType::simple)
{
auto result = getKeysIsInHierarchyDefaultImplementation(this, key_column, in_key_column, key_type);
size_t keys_found = 0;
auto result = getKeysIsInHierarchyDefaultImplementation(this, key_column, in_key_column, key_type, keys_found);
query_count.fetch_add(key_column->size(), std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}
else

View File

@ -42,6 +42,14 @@ public:
size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
double getFoundRate() const override
{
size_t queries = query_count.load(std::memory_order_relaxed);
if (!queries)
return 0;
return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
}
double getHitRate() const override { return 1.0; }
size_t getElementCount() const override { return 0; }
@ -101,6 +109,7 @@ private:
const DictionaryLifetime dict_lifetime;
mutable std::atomic<size_t> query_count{0};
mutable std::atomic<size_t> found_count{0};
};
extern template class DirectDictionary<DictionaryKeyType::simple>;

View File

@ -84,7 +84,7 @@ public:
{
size_t language_id = static_cast<size_t>(language);
if (region_id >= names_refs[language_id].size())
if (region_id >= names_refs[language_id].size()) //-V1051
return StringRef("", 0);
StringRef ref = names_refs[language_id][region_id];

View File

@ -130,13 +130,17 @@ ColumnUInt8::Ptr FlatDictionary::hasKeys(const Columns & key_columns, const Data
auto result = ColumnUInt8::create(keys_size);
auto & out = result->getData();
size_t keys_found = 0;
for (size_t key_index = 0; key_index < keys_size; ++key_index)
{
const auto key = keys[key_index];
out[key_index] = key < loaded_keys.size() && loaded_keys[key];
keys_found += out[key_index];
}
query_count.fetch_add(keys_size, std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}
@ -154,16 +158,20 @@ ColumnPtr FlatDictionary::getHierarchy(ColumnPtr key_column, const DataTypePtr &
auto is_key_valid_func = [&, this](auto & key) { return key < loaded_keys.size() && loaded_keys[key]; };
size_t keys_found = 0;
auto get_parent_key_func = [&, this](auto & hierarchy_key)
{
bool is_key_valid = hierarchy_key < loaded_keys.size() && loaded_keys[hierarchy_key];
std::optional<UInt64> result = is_key_valid ? std::make_optional(parent_keys[hierarchy_key]) : std::nullopt;
keys_found += result.has_value();
return result;
};
auto dictionary_hierarchy_array = getKeysHierarchyArray(keys, null_value, is_key_valid_func, get_parent_key_func);
query_count.fetch_add(keys.size(), std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return dictionary_hierarchy_array;
}
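
A standalone sketch of the callback-driven hierarchy walk above, under simplified assumptions: parent links live in a plain vector indexed by key, and keys_found counts every lookup that resolved, as in the get_parent_key_func lambda. hierarchyChainSketch is an illustrative name.

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

static std::vector<uint64_t> hierarchyChainSketch(
    uint64_t key,
    uint64_t null_value,
    const std::vector<uint64_t> & parent_keys, /// parent_keys[key] = parent of key
    size_t & keys_found)
{
    auto get_parent_key = [&](uint64_t k) -> std::optional<uint64_t>
    {
        bool is_key_valid = k < parent_keys.size();
        std::optional<uint64_t> result = is_key_valid ? std::make_optional(parent_keys[k]) : std::nullopt;
        keys_found += result.has_value();
        return result;
    };

    std::vector<uint64_t> chain;
    while (true)
    {
        auto parent = get_parent_key(key);
        if (!parent)
            break; /// key is not in the dictionary at all
        chain.push_back(key);
        if (*parent == null_value)
            break; /// reached the hierarchy root
        key = *parent;
    }
    return chain;
}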
@ -187,16 +195,20 @@ ColumnUInt8::Ptr FlatDictionary::isInHierarchy(
auto is_key_valid_func = [&, this](auto & key) { return key < loaded_keys.size() && loaded_keys[key]; };
size_t keys_found = 0;
auto get_parent_key_func = [&, this](auto & hierarchy_key)
{
bool is_key_valid = hierarchy_key < loaded_keys.size() && loaded_keys[hierarchy_key];
std::optional<UInt64> result = is_key_valid ? std::make_optional(parent_keys[hierarchy_key]) : std::nullopt;
keys_found += result.has_value();
return result;
};
auto result = getKeysIsInHierarchyColumn(keys, keys_in, null_value, is_key_valid_func, get_parent_key_func);
query_count.fetch_add(keys.size(), std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}
@ -223,9 +235,11 @@ ColumnPtr FlatDictionary::getDescendants(
parent_to_child[parent_key].emplace_back(static_cast<UInt64>(i));
}
auto result = getKeysDescendantsArray(keys, parent_to_child, level);
size_t keys_found;
auto result = getKeysDescendantsArray(keys, parent_to_child, level, keys_found);
query_count.fetch_add(keys.size(), std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}
@ -392,17 +406,23 @@ void FlatDictionary::getItemsImpl(
const auto & container = std::get<ContainerType<AttributeType>>(attribute.container);
const auto rows = keys.size();
size_t keys_found = 0;
for (size_t row = 0; row < rows; ++row)
{
const auto key = keys[row];
if (key < loaded_keys.size() && loaded_keys[key])
{
set_value(row, static_cast<OutputType>(container[key]));
++keys_found;
}
else
set_value(row, default_value_extractor[row]);
}
query_count.fetch_add(rows, std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
}
template <typename T>

View File

@ -47,6 +47,14 @@ public:
size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
double getFoundRate() const override
{
size_t queries = query_count.load(std::memory_order_relaxed);
if (!queries)
return 0;
return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
}
double getHitRate() const override { return 1.0; }
size_t getElementCount() const override { return element_count; }
@ -183,6 +191,7 @@ private:
size_t element_count = 0;
size_t bucket_count = 0;
mutable std::atomic<size_t> query_count{0};
mutable std::atomic<size_t> found_count{0};
BlockPtr update_field_loaded_block;
};

View File

@ -42,7 +42,6 @@ HTTPDictionarySource::HTTPDictionarySource(
, context(context_)
, timeouts(ConnectionTimeouts::getHTTPTimeouts(context))
{
if (check_config)
context->getRemoteHostFilter().checkURL(Poco::URI(url));
@ -87,6 +86,16 @@ HTTPDictionarySource::HTTPDictionarySource(const HTTPDictionarySource & other)
credentials.setPassword(other.credentials.getPassword());
}
BlockInputStreamPtr HTTPDictionarySource::createWrappedBuffer(std::unique_ptr<ReadWriteBufferFromHTTP> http_buffer_ptr)
{
Poco::URI uri(url);
String http_request_compression_method_str = http_buffer_ptr->getCompressMethod();
auto in_ptr_wrapped
= wrapReadBufferWithCompressionMethod(std::move(http_buffer_ptr), chooseCompressionMethod(uri.getPath(), http_request_compression_method_str));
auto input_stream = context->getInputFormat(format, *in_ptr_wrapped, sample_block, max_block_size);
return std::make_shared<OwningBlockInputStream<ReadBuffer>>(input_stream, std::move(in_ptr_wrapped));
}
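
The wrapper above relies on ClickHouse's chooseCompressionMethod/wrapReadBufferWithCompressionMethod helpers. As a rough illustration of the selection step only (the mapping below is simplified and the enum name is illustrative), the decompressor is picked from the response's Content-Encoding:

#include <string>

enum class CompressionMethodSketch { None, Gzip, Zstd, Brotli };

static CompressionMethodSketch chooseByContentEncoding(const std::string & content_encoding)
{
    if (content_encoding == "gzip" || content_encoding == "deflate")
        return CompressionMethodSketch::Gzip;
    if (content_encoding == "zstd")
        return CompressionMethodSketch::Zstd;
    if (content_encoding == "br")
        return CompressionMethodSketch::Brotli;
    return CompressionMethodSketch::None; /// pass the stream through unchanged
}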
void HTTPDictionarySource::getUpdateFieldAndDate(Poco::URI & uri)
{
if (update_time != std::chrono::system_clock::from_time_t(0))
@ -109,10 +118,15 @@ BlockInputStreamPtr HTTPDictionarySource::loadAll()
LOG_TRACE(log, "loadAll {}", toString());
Poco::URI uri(url);
auto in_ptr = std::make_unique<ReadWriteBufferFromHTTP>(
uri, Poco::Net::HTTPRequest::HTTP_GET, ReadWriteBufferFromHTTP::OutStreamCallback(), timeouts,
0, credentials, DBMS_DEFAULT_BUFFER_SIZE, header_entries);
auto input_stream = context->getInputFormat(format, *in_ptr, sample_block, max_block_size);
return std::make_shared<OwningBlockInputStream<ReadWriteBufferFromHTTP>>(input_stream, std::move(in_ptr));
uri,
Poco::Net::HTTPRequest::HTTP_GET,
ReadWriteBufferFromHTTP::OutStreamCallback(),
timeouts,
0,
credentials,
DBMS_DEFAULT_BUFFER_SIZE,
header_entries);
return createWrappedBuffer(std::move(in_ptr));
}
BlockInputStreamPtr HTTPDictionarySource::loadUpdatedAll()
@ -121,10 +135,15 @@ BlockInputStreamPtr HTTPDictionarySource::loadUpdatedAll()
getUpdateFieldAndDate(uri);
LOG_TRACE(log, "loadUpdatedAll {}", uri.toString());
auto in_ptr = std::make_unique<ReadWriteBufferFromHTTP>(
uri, Poco::Net::HTTPRequest::HTTP_GET, ReadWriteBufferFromHTTP::OutStreamCallback(), timeouts,
0, credentials, DBMS_DEFAULT_BUFFER_SIZE, header_entries);
auto input_stream = context->getInputFormat(format, *in_ptr, sample_block, max_block_size);
return std::make_shared<OwningBlockInputStream<ReadWriteBufferFromHTTP>>(input_stream, std::move(in_ptr));
uri,
Poco::Net::HTTPRequest::HTTP_GET,
ReadWriteBufferFromHTTP::OutStreamCallback(),
timeouts,
0,
credentials,
DBMS_DEFAULT_BUFFER_SIZE,
header_entries);
return createWrappedBuffer(std::move(in_ptr));
}
BlockInputStreamPtr HTTPDictionarySource::loadIds(const std::vector<UInt64> & ids)
@ -142,10 +161,15 @@ BlockInputStreamPtr HTTPDictionarySource::loadIds(const std::vector<UInt64> & id
Poco::URI uri(url);
auto in_ptr = std::make_unique<ReadWriteBufferFromHTTP>(
uri, Poco::Net::HTTPRequest::HTTP_POST, out_stream_callback, timeouts,
0, credentials, DBMS_DEFAULT_BUFFER_SIZE, header_entries);
auto input_stream = context->getInputFormat(format, *in_ptr, sample_block, max_block_size);
return std::make_shared<OwningBlockInputStream<ReadWriteBufferFromHTTP>>(input_stream, std::move(in_ptr));
uri,
Poco::Net::HTTPRequest::HTTP_POST,
out_stream_callback,
timeouts,
0,
credentials,
DBMS_DEFAULT_BUFFER_SIZE,
header_entries);
return createWrappedBuffer(std::move(in_ptr));
}
BlockInputStreamPtr HTTPDictionarySource::loadKeys(const Columns & key_columns, const std::vector<size_t> & requested_rows)
@ -163,10 +187,15 @@ BlockInputStreamPtr HTTPDictionarySource::loadKeys(const Columns & key_columns,
Poco::URI uri(url);
auto in_ptr = std::make_unique<ReadWriteBufferFromHTTP>(
uri, Poco::Net::HTTPRequest::HTTP_POST, out_stream_callback, timeouts,
0, credentials, DBMS_DEFAULT_BUFFER_SIZE, header_entries);
auto input_stream = context->getInputFormat(format, *in_ptr, sample_block, max_block_size);
return std::make_shared<OwningBlockInputStream<ReadWriteBufferFromHTTP>>(input_stream, std::move(in_ptr));
uri,
Poco::Net::HTTPRequest::HTTP_POST,
out_stream_callback,
timeouts,
0,
credentials,
DBMS_DEFAULT_BUFFER_SIZE,
header_entries);
return createWrappedBuffer(std::move(in_ptr));
}
bool HTTPDictionarySource::isModified() const
@ -198,21 +227,19 @@ std::string HTTPDictionarySource::toString() const
void registerDictionarySourceHTTP(DictionarySourceFactory & factory)
{
auto create_table_source = [=](const DictionaryStructure & dict_struct,
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
Block & sample_block,
ContextPtr context,
const std::string & /* default_database */,
bool check_config) -> DictionarySourcePtr
{
const Poco::Util::AbstractConfiguration & config,
const std::string & config_prefix,
Block & sample_block,
ContextPtr context,
const std::string & /* default_database */,
bool check_config) -> DictionarySourcePtr {
if (dict_struct.has_expressions)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Dictionary source of type `http` does not support attribute expressions");
auto context_local_copy = copyContextAndApplySettings(config_prefix, context, config);
return std::make_unique<HTTPDictionarySource>(
dict_struct, config, config_prefix + ".http",
sample_block, context_local_copy, check_config);
dict_struct, config, config_prefix + ".http", sample_block, context_local_copy, check_config);
};
factory.registerSource("http", create_table_source);
}

View File

@ -8,6 +8,7 @@
#include "DictionaryStructure.h"
#include "IDictionarySource.h"
#include <Interpreters/Context.h>
#include <IO/CompressionMethod.h>
namespace Poco
{
@ -53,6 +54,9 @@ public:
private:
void getUpdateFieldAndDate(Poco::URI & uri);
// wrap buffer using the compression encoding from the received response
BlockInputStreamPtr createWrappedBuffer(std::unique_ptr<ReadWriteBufferFromHTTP> http_buffer);
Poco::Logger * log;
LocalDateTime getLastModification() const;
@ -70,3 +74,4 @@ private:
};
}

View File

@ -162,6 +162,7 @@ ColumnUInt8::Ptr HashedDictionary<dictionary_key_type, sparse>::hasKeys(const Co
const auto & attribute = attributes.front();
bool is_attribute_nullable = attribute.is_nullable_set.has_value();
size_t keys_found = 0;
getAttributeContainer(0, [&](const auto & container)
{
@ -171,6 +172,8 @@ ColumnUInt8::Ptr HashedDictionary<dictionary_key_type, sparse>::hasKeys(const Co
out[requested_key_index] = container.find(requested_key) != container.end();
keys_found += out[requested_key_index];
if (is_attribute_nullable && !out[requested_key_index])
out[requested_key_index] = attribute.is_nullable_set->find(requested_key) != nullptr;
@ -179,6 +182,7 @@ ColumnUInt8::Ptr HashedDictionary<dictionary_key_type, sparse>::hasKeys(const Co
});
query_count.fetch_add(keys_size, std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}
@ -201,6 +205,8 @@ ColumnPtr HashedDictionary<dictionary_key_type, sparse>::getHierarchy(ColumnPtr
auto is_key_valid_func = [&](auto & key) { return parent_keys_map.find(key) != parent_keys_map.end(); };
size_t keys_found = 0;
auto get_parent_func = [&](auto & hierarchy_key)
{
std::optional<UInt64> result;
@ -210,12 +216,15 @@ ColumnPtr HashedDictionary<dictionary_key_type, sparse>::getHierarchy(ColumnPtr
if (it != parent_keys_map.end())
result = getValueFromCell(it);
keys_found += result.has_value();
return result;
};
auto dictionary_hierarchy_array = getKeysHierarchyArray(keys, null_value, is_key_valid_func, get_parent_func);
query_count.fetch_add(keys.size(), std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return dictionary_hierarchy_array;
}
@ -247,6 +256,8 @@ ColumnUInt8::Ptr HashedDictionary<dictionary_key_type, sparse>::isInHierarchy(
auto is_key_valid_func = [&](auto & key) { return parent_keys_map.find(key) != parent_keys_map.end(); };
size_t keys_found = 0;
auto get_parent_func = [&](auto & hierarchy_key)
{
std::optional<UInt64> result;
@ -256,12 +267,15 @@ ColumnUInt8::Ptr HashedDictionary<dictionary_key_type, sparse>::isInHierarchy(
if (it != parent_keys_map.end())
result = getValueFromCell(it);
keys_found += result.has_value();
return result;
};
auto result = getKeysIsInHierarchyColumn(keys, keys_in, null_value, is_key_valid_func, get_parent_func);
query_count.fetch_add(keys.size(), std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}
@ -290,9 +304,11 @@ ColumnPtr HashedDictionary<dictionary_key_type, sparse>::getDescendants(
for (const auto & [key, value] : parent_keys)
parent_to_child[value].emplace_back(key);
auto result = getKeysDescendantsArray(keys, parent_to_child, level);
size_t keys_found;
auto result = getKeysDescendantsArray(keys, parent_to_child, level, keys_found);
query_count.fetch_add(keys.size(), std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}
@ -493,6 +509,8 @@ void HashedDictionary<dictionary_key_type, sparse>::getItemsImpl(
bool is_attribute_nullable = attribute.is_nullable_set.has_value();
size_t keys_found = 0;
for (size_t key_index = 0; key_index < keys_size; ++key_index)
{
auto key = keys_extractor.extractCurrentKey();
@ -500,7 +518,10 @@ void HashedDictionary<dictionary_key_type, sparse>::getItemsImpl(
const auto it = attribute_container.find(key);
if (it != attribute_container.end())
{
set_value(key_index, getValueFromCell(it));
++keys_found;
}
else
{
if (is_attribute_nullable && attribute.is_nullable_set->find(key) != nullptr)
@ -513,6 +534,7 @@ void HashedDictionary<dictionary_key_type, sparse>::getItemsImpl(
}
query_count.fetch_add(keys_size, std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
}
template <DictionaryKeyType dictionary_key_type, bool sparse>

View File

@ -59,6 +59,14 @@ public:
size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
double getFoundRate() const override
{
size_t queries = query_count.load(std::memory_order_relaxed);
if (!queries)
return 0;
return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
}
double getHitRate() const override { return 1.0; }
size_t getElementCount() const override { return element_count; }
@ -219,6 +227,7 @@ private:
size_t element_count = 0;
size_t bucket_count = 0;
mutable std::atomic<size_t> query_count{0};
mutable std::atomic<size_t> found_count{0};
BlockPtr update_field_loaded_block;
Arena complex_key_arena;

View File

@ -84,8 +84,14 @@ namespace
}
}
ColumnPtr getKeysHierarchyDefaultImplementation(const IDictionary * dictionary, ColumnPtr key_column, const DataTypePtr & key_type)
ColumnPtr getKeysHierarchyDefaultImplementation(
const IDictionary * dictionary,
ColumnPtr key_column,
const DataTypePtr & key_type,
size_t & valid_keys)
{
valid_keys = 0;
key_column = key_column->convertToFullColumnIfConst();
const auto * key_column_typed = checkAndGetColumn<ColumnVector<UInt64>>(*key_column);
if (!key_column_typed)
@ -104,6 +110,7 @@ ColumnPtr getKeysHierarchyDefaultImplementation(const IDictionary * dictionary,
{
auto it = key_to_parent_key.find(key);
std::optional<UInt64> result = (it != nullptr ? std::make_optional(it->getMapped()) : std::nullopt);
valid_keys += result.has_value();
return result;
};
@ -117,8 +124,11 @@ ColumnUInt8::Ptr getKeysIsInHierarchyDefaultImplementation(
const IDictionary * dictionary,
ColumnPtr key_column,
ColumnPtr in_key_column,
const DataTypePtr & key_type)
const DataTypePtr & key_type,
size_t & valid_keys)
{
valid_keys = 0;
key_column = key_column->convertToFullColumnIfConst();
in_key_column = in_key_column->convertToFullColumnIfConst();
@ -143,6 +153,7 @@ ColumnUInt8::Ptr getKeysIsInHierarchyDefaultImplementation(
{
auto it = key_to_parent_key.find(key);
std::optional<UInt64> result = (it != nullptr ? std::make_optional(it->getMapped()) : std::nullopt);
valid_keys += result.has_value();
return result;
};

View File

@ -196,6 +196,8 @@ namespace detail
* The hierarchy result is an ElementsAndOffsets structure: for each element there is a descendants array
* of size offset[element_index] - (element_index > 0 ? offset[element_index - 1] : 0).
*
* @param valid_keys - number of keys that are valid in parent_to_child map
*
* Example:
* id parent_id
* 1 0
@ -218,11 +220,13 @@ namespace detail
ElementsAndOffsets<KeyType> getDescendants(
const PaddedPODArray<KeyType> & keys,
const HashMap<KeyType, PaddedPODArray<KeyType>> & parent_to_child,
Strategy strategy)
Strategy strategy,
size_t & valid_keys)
{
/// If strategy is GetAllDescendantsStrategy we try to cache and later reuse previously calculated descendants.
/// If strategy is GetDescendantsAtSpecificLevelStrategy we do not use caching.
size_t keys_size = keys.size();
valid_keys = 0;
PaddedPODArray<KeyType> descendants;
descendants.reserve(keys_size);
@ -265,6 +269,7 @@ namespace detail
descendants_offsets.emplace_back(descendants.size());
continue;
}
++valid_keys;
next_keys_to_process_stack.emplace_back(KeyAndDepth{requested_key, 0});
@ -425,43 +430,52 @@ ColumnUInt8::Ptr getKeysIsInHierarchyColumn(
}
/// Returns descendants array column for keys
///
/// @param valid_keys - number of keys that are valid in parent_to_child map
template <typename KeyType>
ColumnPtr getKeysDescendantsArray(
const PaddedPODArray<KeyType> & requested_keys,
const HashMap<KeyType, PaddedPODArray<KeyType>> & parent_to_child,
size_t level)
size_t level,
size_t & valid_keys)
{
if (level == 0)
{
detail::GetAllDescendantsStrategy strategy { .level = level };
auto elements_and_offsets = detail::getDescendants(requested_keys, parent_to_child, strategy);
auto elements_and_offsets = detail::getDescendants(requested_keys, parent_to_child, strategy, valid_keys);
return detail::convertElementsAndOffsetsIntoArray(std::move(elements_and_offsets));
}
else
{
detail::GetDescendantsAtSpecificLevelStrategy strategy { .level = level };
auto elements_and_offsets = detail::getDescendants(requested_keys, parent_to_child, strategy);
auto elements_and_offsets = detail::getDescendants(requested_keys, parent_to_child, strategy, valid_keys);
return detail::convertElementsAndOffsetsIntoArray(std::move(elements_and_offsets));
}
}
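
A compact sketch of the descendants traversal with the new valid_keys out-parameter, under simplified assumptions (std::unordered_map instead of HashMap, no level strategy, no cycle guard; allDescendantsSketch is an illustrative name): a requested key is valid when it is present in parent_to_child.

#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

static std::vector<uint64_t> allDescendantsSketch(
    const std::vector<uint64_t> & keys,
    const std::unordered_map<uint64_t, std::vector<uint64_t>> & parent_to_child,
    size_t & valid_keys)
{
    valid_keys = 0;
    std::vector<uint64_t> descendants;
    std::vector<uint64_t> stack;

    for (uint64_t key : keys)
    {
        if (parent_to_child.find(key) == parent_to_child.end())
            continue; /// unknown key: contributes an empty descendants array
        ++valid_keys;

        stack.assign(1, key);
        while (!stack.empty())
        {
            uint64_t current = stack.back();
            stack.pop_back();
            auto it = parent_to_child.find(current);
            if (it == parent_to_child.end())
                continue;
            for (uint64_t child : it->second)
            {
                descendants.push_back(child);
                stack.push_back(child);
            }
        }
    }
    return descendants;
}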
/** Default getHierarchy implementation for dictionaries that do not have a structure with child-to-parent representation.
* The implementation builds such a structure with getColumn calls, and then computes getHierarchy over it.
* Returns ColumnArray with hierarchy arrays for keys from key_column.
*
* @param valid_keys - number of keys (from @key_column) for which information about parent exists.
* @return ColumnArray with hierarchy arrays for keys from key_column.
*/
ColumnPtr getKeysHierarchyDefaultImplementation(
const IDictionary * dictionary,
ColumnPtr key_column,
const DataTypePtr & key_type);
const DataTypePtr & key_type,
size_t & valid_keys);
/** Default isInHierarchy implementation for dictionaries that do not have a structure with child-to-parent representation.
* The implementation builds such a structure with getColumn calls, and then computes isInHierarchy over it.
* Returns UInt8 column if key from in_key_column is in key hierarchy from key_column.
*
* @param valid_keys - number of keys (from @key_column) for which information about parent exists.
* @return UInt8 column if key from in_key_column is in key hierarchy from key_column.
*/
ColumnUInt8::Ptr getKeysIsInHierarchyDefaultImplementation(
const IDictionary * dictionary,
ColumnPtr key_column,
ColumnPtr in_key_column,
const DataTypePtr & key_type);
const DataTypePtr & key_type,
size_t & valid_keys);
}

View File

@ -90,6 +90,8 @@ struct IDictionary : public IExternalLoadable
virtual size_t getQueryCount() const = 0;
virtual double getFoundRate() const = 0;
virtual double getHitRate() const = 0;
virtual size_t getElementCount() const = 0;

View File

@ -275,7 +275,9 @@ ColumnUInt8::Ptr IPAddressDictionary::hasKeys(const Columns & key_columns, const
const auto rows = first_column->size();
auto result = ColumnUInt8::create(rows);
auto& out = result->getData();
auto & out = result->getData();
size_t keys_found = 0;
if (first_column->isNumeric())
{
@ -285,6 +287,7 @@ ColumnUInt8::Ptr IPAddressDictionary::hasKeys(const Columns & key_columns, const
auto addrv4 = UInt32(first_column->get64(i));
auto found = tryLookupIPv4(addrv4, addrv6_buf);
out[i] = (found != ipNotFound());
keys_found += out[i];
}
}
else
@ -297,10 +300,12 @@ ColumnUInt8::Ptr IPAddressDictionary::hasKeys(const Columns & key_columns, const
auto found = tryLookupIPv6(reinterpret_cast<const uint8_t *>(addr.data));
out[i] = (found != ipNotFound());
keys_found += out[i];
}
}
query_count.fetch_add(rows, std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}
@ -680,6 +685,8 @@ void IPAddressDictionary::getItemsImpl(
auto & vec = std::get<ContainerType<AttributeType>>(attribute.maps);
size_t keys_found = 0;
if (first_column->isNumeric())
{
uint8_t addrv6_buf[IPV6_BINARY_LENGTH];
@ -689,7 +696,10 @@ void IPAddressDictionary::getItemsImpl(
auto addrv4 = UInt32(first_column->get64(i));
auto found = tryLookupIPv4(addrv4, addrv6_buf);
if (found != ipNotFound())
{
set_value(i, static_cast<OutputType>(vec[*found]));
++keys_found;
}
else
set_value(i, default_value_extractor[i]);
}
@ -704,13 +714,17 @@ void IPAddressDictionary::getItemsImpl(
auto found = tryLookupIPv6(reinterpret_cast<const uint8_t *>(addr.data));
if (found != ipNotFound())
{
set_value(i, static_cast<OutputType>(vec[*found]));
++keys_found;
}
else
set_value(i, default_value_extractor[i]);
}
}
query_count.fetch_add(rows, std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
}
template <typename T>

View File

@ -38,6 +38,14 @@ public:
size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
double getFoundRate() const override
{
size_t queries = query_count.load(std::memory_order_relaxed);
if (!queries)
return 0;
return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
}
double getHitRate() const override { return 1.0; }
size_t getElementCount() const override { return element_count; }
@ -202,6 +210,7 @@ private:
size_t element_count = 0;
size_t bucket_count = 0;
mutable std::atomic<size_t> query_count{0};
mutable std::atomic<size_t> found_count{0};
Poco::Logger * logger;
};

View File

@ -164,7 +164,7 @@ bool MySQLDictionarySource::isModified() const
if (!invalidate_query.empty())
{
auto response = doInvalidateQuery(invalidate_query);
if (response == invalidate_query_response)
if (response == invalidate_query_response) //-V1051
return false;
invalidate_query_response = response;
return true;

View File

@ -64,6 +64,8 @@ ColumnPtr IPolygonDictionary::getColumn(
Field row_value_to_insert;
size_t polygon_index = 0;
size_t keys_found = 0;
if (unlikely(complex_attribute))
{
for (size_t requested_key_index = 0; requested_key_index < requested_key_points.size(); ++requested_key_index)
@ -74,6 +76,7 @@ ColumnPtr IPolygonDictionary::getColumn(
{
size_t attribute_values_index = polygon_index_to_attribute_value_index[polygon_index];
attribute_values_column->get(attribute_values_index, row_value_to_insert);
++keys_found;
}
else
row_value_to_insert = default_value_provider.getDefaultValue(requested_key_index);
@ -110,6 +113,7 @@ ColumnPtr IPolygonDictionary::getColumn(
size_t attribute_values_index = polygon_index_to_attribute_value_index[polygon_index];
auto data_to_insert = attribute_values_column->getDataAt(attribute_values_index);
result_column_typed.insertData(data_to_insert.data, data_to_insert.size);
++keys_found;
}
else
result_column_typed.insert(default_value_provider.getDefaultValue(requested_key_index));
@ -129,6 +133,7 @@ ColumnPtr IPolygonDictionary::getColumn(
size_t attribute_values_index = polygon_index_to_attribute_value_index[polygon_index];
auto & item = attribute_data[attribute_values_index];
result_data.emplace_back(item);
++keys_found;
}
else
{
@ -143,6 +148,7 @@ ColumnPtr IPolygonDictionary::getColumn(
}
query_count.fetch_add(requested_key_points.size(), std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}
@ -285,16 +291,20 @@ ColumnUInt8::Ptr IPolygonDictionary::hasKeys(const Columns & key_columns, const
std::vector<IPolygonDictionary::Point> points = extractPoints(key_columns);
auto result = ColumnUInt8::create(points.size());
auto& out = result->getData();
auto & out = result->getData();
size_t keys_found = 0;
for (size_t i = 0; i < points.size(); ++i)
{
size_t unused_find_result = 0;
auto & point = points[i];
out[i] = find(point, unused_find_result);
keys_found += out[i];
}
query_count.fetch_add(points.size(), std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}

View File

@ -63,6 +63,14 @@ public:
size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
double getFoundRate() const override
{
size_t queries = query_count.load(std::memory_order_relaxed);
if (!queries)
return 0;
return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
}
double getHitRate() const override { return 1.0; }
size_t getElementCount() const override { return attributes.empty() ? 0 : attributes.front()->size(); }
@ -141,6 +149,7 @@ private:
size_t bytes_allocated = 0;
mutable std::atomic<size_t> query_count{0};
mutable std::atomic<size_t> found_count{0};
/** Since the original data may have been in the form of multi-polygons, an id is stored for each single polygon
* corresponding to the row in which any other attributes for this entry are located.

View File

@ -103,7 +103,7 @@ bool PostgreSQLDictionarySource::isModified() const
if (!invalidate_query.empty())
{
auto response = doInvalidateQuery(invalidate_query);
if (response == invalidate_query_response)
if (response == invalidate_query_response) //-V1051
return false;
invalidate_query_response = response;
}

View File

@ -195,17 +195,20 @@ ColumnUInt8::Ptr RangeHashedDictionary::hasKeys(const Columns & key_columns, con
ColumnUInt8::Ptr result;
size_t keys_found = 0;
auto type_call = [&](const auto & dictionary_attribute_type)
{
using Type = std::decay_t<decltype(dictionary_attribute_type)>;
using AttributeType = typename Type::AttributeType;
using ValueType = DictionaryValueType<AttributeType>;
result = hasKeysImpl<ValueType>(attribute, ids, dates);
result = hasKeysImpl<ValueType>(attribute, ids, dates, keys_found);
};
callOnDictionaryAttributeType(attribute.type, type_call);
query_count.fetch_add(ids.size(), std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
return result;
}
@ -214,13 +217,16 @@ template <typename AttributeType>
ColumnUInt8::Ptr RangeHashedDictionary::hasKeysImpl(
const Attribute & attribute,
const PaddedPODArray<UInt64> & ids,
const PaddedPODArray<RangeStorageType> & dates) const
const PaddedPODArray<RangeStorageType> & dates,
size_t & keys_found) const
{
auto result = ColumnUInt8::create(ids.size());
auto& out = result->getData();
const auto & attr = *std::get<Ptr<AttributeType>>(attribute.maps);
keys_found = 0;
for (const auto row : ext::range(0, ids.size()))
{
const auto it = attr.find(ids[row]);
@ -237,10 +243,8 @@ ColumnUInt8::Ptr RangeHashedDictionary::hasKeysImpl(
return v.range.contains(date);
});
if (val_it != std::end(ranges_and_values))
out[row] = true;
else
out[row] = false;
out[row] = val_it != std::end(ranges_and_values);
keys_found += out[row];
}
else
out[row] = false;
@ -396,6 +400,8 @@ void RangeHashedDictionary::getItemsImpl(
const auto & attr = *std::get<Ptr<AttributeType>>(attribute.maps);
size_t keys_found = 0;
for (const auto row : ext::range(0, ids.size()))
{
const auto it = attr.find(ids[row]);
@ -413,7 +419,8 @@ void RangeHashedDictionary::getItemsImpl(
if (val_it != std::end(ranges_and_values))
{
auto& value = val_it->value;
++keys_found;
auto & value = val_it->value;
if (value)
set_value(row, static_cast<OutputType>(*value), false); // NOLINT
@ -432,6 +439,7 @@ void RangeHashedDictionary::getItemsImpl(
}
query_count.fetch_add(ids.size(), std::memory_order_relaxed);
found_count.fetch_add(keys_found, std::memory_order_relaxed);
}
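
The lookup above amounts to a linear std::find_if scan over the (range, value) pairs stored for an id. A self-contained sketch under simplified types (RangeSketch and findValueForDate are illustrative names):

#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

struct RangeSketch
{
    int64_t start;
    int64_t end;
    bool contains(int64_t date) const { return start <= date && date <= end; }
};

template <typename Value>
struct RangeAndValue
{
    RangeSketch range;
    Value value;
};

/// Return the value whose range contains the date, if any, mirroring the
/// std::find_if probe in hasKeysImpl/getItemsImpl above.
template <typename Value>
std::optional<Value> findValueForDate(const std::vector<RangeAndValue<Value>> & ranges_and_values, int64_t date)
{
    auto it = std::find_if(ranges_and_values.begin(), ranges_and_values.end(),
        [date](const auto & v) { return v.range.contains(date); });
    if (it == ranges_and_values.end())
        return std::nullopt;
    return it->value;
}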

View File

@ -32,6 +32,14 @@ public:
size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
double getFoundRate() const override
{
size_t queries = query_count.load(std::memory_order_relaxed);
if (!queries)
return 0;
return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
}
double getHitRate() const override { return 1.0; }
size_t getElementCount() const override { return element_count; }
@ -163,7 +171,8 @@ private:
ColumnUInt8::Ptr hasKeysImpl(
const Attribute & attribute,
const PaddedPODArray<UInt64> & ids,
const PaddedPODArray<RangeStorageType> & dates) const;
const PaddedPODArray<RangeStorageType> & dates,
size_t & keys_found) const;
template <typename T>
static void setAttributeValueImpl(Attribute & attribute, const UInt64 id, const Range & range, const Field & value);
@ -201,6 +210,7 @@ private:
size_t element_count = 0;
size_t bucket_count = 0;
mutable std::atomic<size_t> query_count{0};
mutable std::atomic<size_t> found_count{0};
};
}

Some files were not shown because too many files have changed in this diff.