Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-25 00:52:02 +00:00)

Commit d8cda3dbb8 (parent 60d517c49c): Remove PVS-Studio
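
This commit removes the PVS-Studio static-analyzer integration: file-level suppressions (//-V::GA), inline warning markers such as //-V730, //-V547 and //-V1048, the //-V:IMPLEMENT_SETTINGS:501 macro suppression, and the #if !defined(PVS_STUDIO) workaround around the UInt8 typedef. The typical change drops a trailing marker and leaves the statement untouched; a minimal sketch of the pattern, using a line that appears in the hunks below:

    // Before: the statement carries a PVS-Studio suppression marker.
    response.error = Error::ZOK; //-V1048
    // After: the marker is gone, the statement itself is unchanged.
    response.error = Error::ZOK;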
@@ -13,11 +13,7 @@ using char8_t = unsigned char;
 #endif
 
 /// This is needed for more strict aliasing. https://godbolt.org/z/xpJBSb https://stackoverflow.com/a/57453713
-#if !defined(PVS_STUDIO) /// But PVS-Studio does not treat it correctly.
 using UInt8 = char8_t;
-#else
-using UInt8 = uint8_t;
-#endif
 
 using UInt16 = uint16_t;
 using UInt32 = uint32_t;
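
The strict-aliasing comment in this hunk is why UInt8 stays char8_t rather than uint8_t: uint8_t is an alias of unsigned char, which may legally alias any object type, while char8_t has no such privilege, so the compiler may assume UInt8 pointers do not alias unrelated data. A minimal sketch of the effect (hypothetical example, not part of the diff):

    #include <cstddef>

    using UInt8 = char8_t; /// as in the hunk above

    /// With UInt8 = uint8_t (an alias of unsigned char), stores through `out`
    /// could alias *size, forcing a reload of *size on every iteration.
    /// With char8_t such aliasing is impossible, so *size can stay in a register.
    void zero(UInt8 * out, const std::size_t * size)
    {
        for (std::size_t i = 0; i < *size; ++i)
            out[i] = UInt8{0};
    }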
@@ -78,9 +78,6 @@
 *
 */
 
-// Disable warnings by PVS-Studio
-//-V::GA
-
 static const double
 pi = 3.14159265358979311600e+00, /* 0x400921FB, 0x54442D18 */
 a0 = 7.72156649015328655494e-02, /* 0x3FB3C467, 0xE37DB0C8 */
@@ -85,9 +85,6 @@
 *
 */
 
-// Disable warnings by PVS-Studio
-//-V::GA
-
 #include <stdint.h>
 #include <math.h>
 #include "libm.h"
@@ -155,7 +155,7 @@ static inline long double fp_barrierl(long double x)
 static inline void fp_force_evalf(float x)
 {
     volatile float y;
-    y = x; //-V1001
+    y = x;
 }
 #endif
@@ -164,7 +164,7 @@ static inline void fp_force_evalf(float x)
 static inline void fp_force_eval(double x)
 {
     volatile double y;
-    y = x; //-V1001
+    y = x;
 }
 #endif
@@ -173,7 +173,7 @@ static inline void fp_force_eval(double x)
 static inline void fp_force_evall(long double x)
 {
     volatile long double y;
-    y = x; //-V1001
+    y = x;
 }
 #endif
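
The volatile store in the three fp_force_eval* helpers above keeps the computation of x observable: without it the compiler could delete an otherwise unused floating-point expression together with its side effects, such as raising FE_OVERFLOW or FE_INEXACT. A minimal usage sketch (hypothetical, not part of the diff):

    #include <cfenv>

    static inline void fp_force_evalf(float x)
    {
        volatile float y;
        y = x; /// a store through a volatile cannot be optimized away
    }

    void raise_overflow()
    {
        float huge = 3.0e38f;
        fp_force_evalf(huge * huge); /// forces the multiply, raising FE_OVERFLOW
    }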
@@ -3,9 +3,6 @@
 * SPDX-License-Identifier: MIT
 */
 
-// Disable warnings by PVS-Studio
-//-V::GA
-
 #include <math.h>
 #include <stdint.h>
 #include "libm.h"
@@ -455,7 +455,7 @@ auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound)
     typedef typename RngType::result_type rtype;
     rtype threshold = (RngType::max() - RngType::min() + rtype(1) - upper_bound)
                     % upper_bound;
-    for (;;) { //-V1044
+    for (;;) {
         rtype r = rng() - RngType::min();
         if (r >= threshold)
             return r % upper_bound;
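
The threshold test in bounded_rand above is rejection sampling: threshold equals (max − min + 1) mod upper_bound, the count of draws that would make r % upper_bound biased toward small results, so values below it are redrawn. A minimal sketch of the same idea with std::mt19937 (hypothetical usage, not part of the diff):

    #include <cstdint>
    #include <random>

    /// Unbiased integer in [0, upper_bound); requires upper_bound > 0.
    uint32_t bounded(std::mt19937 & rng, uint32_t upper_bound)
    {
        /// In 32-bit arithmetic (0 - upper_bound) % upper_bound == 2^32 mod upper_bound.
        uint32_t threshold = (0u - upper_bound) % upper_bound;
        for (;;)
        {
            auto r = static_cast<uint32_t>(rng());
            if (r >= threshold) /// reject the biased low range and redraw
                return r % upper_bound;
        }
    }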
@@ -930,7 +930,7 @@ struct rxs_m_xs_mixin {
         constexpr bitcount_t shift = bits - xtypebits;
         constexpr bitcount_t mask = (1 << opbits) - 1;
         bitcount_t rshift =
-            opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; //-V547
+            opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0;
         internal ^= internal >> (opbits + rshift);
         internal *= mcg_multiplier<itype>::multiplier();
         xtype result = internal >> shift;
@@ -952,7 +952,7 @@ struct rxs_m_xs_mixin {
 
         internal *= mcg_unmultiplier<itype>::unmultiplier();
 
-        bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; //-V547
+        bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0;
         internal = unxorshift(internal, bits, opbits + rshift);
 
         return internal;
@@ -977,7 +977,7 @@ struct rxs_m_mixin {
                                : 2;
         constexpr bitcount_t shift = bits - xtypebits;
         constexpr bitcount_t mask = (1 << opbits) - 1;
-        bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; //-V547
+        bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0;
         internal ^= internal >> (opbits + rshift);
         internal *= mcg_multiplier<itype>::multiplier();
         xtype result = internal >> shift;
@@ -1368,7 +1368,7 @@ void extended<table_pow2,advance_pow2,baseclass,extvalclass,kdd>::selfinit()
     // - any strange correlations would only be apparent if we
     //   were to backstep the generator so that the base generator
     //   was generating the same values again
-    result_type xdiff = baseclass::operator()() - baseclass::operator()(); //-V501
+    result_type xdiff = baseclass::operator()() - baseclass::operator()();
     for (size_t i = 0; i < table_size; ++i) {
         data_[i] = baseclass::operator()() ^ xdiff;
     }
@@ -113,7 +113,7 @@ public:
         }
 
         std::size_t diff = curSize - _size;
-        Iterator it = --_keys.end(); //--keys can never be invoked on an empty list due to the minSize==1 requirement of LRU
+        Iterator it = --_keys.end(); /// --keys can never be invoked on an empty list due to the minSize==1 requirement of LRU
        std::size_t i = 0;
 
        while (i++ < diff)
@@ -121,7 +121,7 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
     if (enable_tls_lc_str == "starttls")
         params.enable_tls = LDAPClient::Params::TLSEnable::YES_STARTTLS;
     else if (config.getBool(ldap_server_config + ".enable_tls"))
-        params.enable_tls = LDAPClient::Params::TLSEnable::YES; //-V1048
+        params.enable_tls = LDAPClient::Params::TLSEnable::YES;
     else
         params.enable_tls = LDAPClient::Params::TLSEnable::NO;
 }
@@ -140,7 +140,7 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
     else if (tls_minimum_protocol_version_lc_str == "tls1.1")
         params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_1;
     else if (tls_minimum_protocol_version_lc_str == "tls1.2")
-        params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2; //-V1048
+        params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2;
     else
         throw Exception(ErrorCodes::BAD_ARGUMENTS,
             "Bad value for 'tls_minimum_protocol_version' entry, allowed values are: "
@@ -159,7 +159,7 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
     else if (tls_require_cert_lc_str == "try")
         params.tls_require_cert = LDAPClient::Params::TLSRequireCert::TRY;
     else if (tls_require_cert_lc_str == "demand")
-        params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND; //-V1048
+        params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND;
     else
         throw Exception(ErrorCodes::BAD_ARGUMENTS,
             "Bad value for 'tls_require_cert' entry, allowed values are: "
@@ -137,7 +137,7 @@ GrantedRoles::Elements GrantedRoles::getElements() const
     boost::range::set_difference(roles, roles_with_admin_option, std::back_inserter(element.ids));
     if (!element.empty())
     {
-        element.admin_option = false; //-V1048
+        element.admin_option = false;
         elements.emplace_back(std::move(element));
     }
@@ -20,7 +20,7 @@ namespace ErrorCodes
 
 /** Tracks the leftmost and rightmost (x, y) data points.
   */
-struct AggregateFunctionBoundingRatioData //-V730
+struct AggregateFunctionBoundingRatioData
 {
     struct Point
     {
@@ -221,7 +221,7 @@ private:
     }
 
 public:
-    AggregateFunctionHistogramData() //-V730
+    AggregateFunctionHistogramData()
         : size(0)
         , lower_bound(std::numeric_limits<Mean>::max())
         , upper_bound(std::numeric_limits<Mean>::lowest())
@@ -481,7 +481,7 @@ struct Compatibility
 /** For strings. Short strings are stored in the object itself, and long strings are allocated separately.
   * NOTE It could also be suitable for arrays of numbers.
   */
-struct SingleValueDataString //-V730
+struct SingleValueDataString
 {
 private:
     using Self = SingleValueDataString;
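
The comment in this hunk describes a small-string optimization: SingleValueDataString keeps short strings inline in the aggregate state and allocates long ones separately. A minimal sketch of such a layout (hypothetical, not ClickHouse's actual definition):

    #include <cstdint>

    struct InlineOrHeapString
    {
        static constexpr uint32_t MAX_SMALL = 48;

        uint32_t size = 0;
        char * large = nullptr;   /// used only when size > MAX_SMALL
        char small[MAX_SMALL]{};  /// short strings live inside the object itself

        const char * data() const { return size <= MAX_SMALL ? small : large; }
    };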
@@ -182,7 +182,7 @@ TEST(WeakHash32, ColumnVectorI32)
     for (int idx [[maybe_unused]] : {1, 2})
     {
         for (int32_t i = -32768; i < 32768; ++i)
-            data.push_back(i << 16); //-V610
+            data.push_back(i << 16);
     }
 
     WeakHash32 hash(col->size());
@@ -216,7 +216,7 @@ TEST(WeakHash32, ColumnVectorI64)
     for (int idx [[maybe_unused]] : {1, 2})
     {
         for (int64_t i = -32768; i < 32768; ++i)
-            data.push_back(i << 32); //-V610
+            data.push_back(i << 32);
     }
 
     WeakHash32 hash(col->size());
@@ -258,7 +258,7 @@ TEST(WeakHash32, ColumnVectorI128)
     for (int idx [[maybe_unused]] : {1, 2})
     {
         for (int64_t i = -32768; i < 32768; ++i)
-            data.push_back(i << 32); //-V610
+            data.push_back(i << 32);
     }
 
     WeakHash32 hash(col->size());
@@ -275,7 +275,7 @@ TEST(WeakHash32, ColumnDecimal32)
     for (int idx [[maybe_unused]] : {1, 2})
     {
         for (int32_t i = -32768; i < 32768; ++i)
-            data.push_back(i << 16); //-V610
+            data.push_back(i << 16);
     }
 
     WeakHash32 hash(col->size());
@@ -292,7 +292,7 @@ TEST(WeakHash32, ColumnDecimal64)
     for (int idx [[maybe_unused]] : {1, 2})
     {
         for (int64_t i = -32768; i < 32768; ++i)
-            data.push_back(i << 32); //-V610
+            data.push_back(i << 32);
     }
 
     WeakHash32 hash(col->size());
@@ -309,7 +309,7 @@ TEST(WeakHash32, ColumnDecimal128)
     for (int idx [[maybe_unused]] : {1, 2})
     {
         for (int64_t i = -32768; i < 32768; ++i)
-            data.push_back(i << 32); //-V610
+            data.push_back(i << 32);
     }
 
     WeakHash32 hash(col->size());
@@ -17,7 +17,7 @@ class DateLUT : private boost::noncopyable
 {
 public:
     /// Return singleton DateLUTImpl instance for the default time zone.
-    static ALWAYS_INLINE const DateLUTImpl & instance() // -V1071
+    static ALWAYS_INLINE const DateLUTImpl & instance()
     {
         const auto & date_lut = getInstance();
         return *date_lut.default_impl.load(std::memory_order_acquire);
@@ -1083,7 +1083,7 @@ bool Dwarf::findLocation(
     // file+line of the non-inlined outer function making the call.
     // locationInfo.name is already set by the caller by looking up the
     // non-inlined function @address belongs to.
-    info.has_file_and_line = true; //-V1048
+    info.has_file_and_line = true;
     info.file = call_locations[0].file;
     info.line = call_locations[0].line;
@@ -1783,7 +1783,7 @@ void Dwarf::LineNumberVM::init()
     lineRange_ = read<uint8_t>(header);
     opcodeBase_ = read<uint8_t>(header);
     SAFE_CHECK(opcodeBase_ != 0, "invalid opcode base");
-    standardOpcodeLengths_ = reinterpret_cast<const uint8_t *>(header.data()); //-V506
+    standardOpcodeLengths_ = reinterpret_cast<const uint8_t *>(header.data());
     header.remove_prefix(opcodeBase_ - 1);
 
     if (version_ <= 4)
@@ -44,7 +44,7 @@ struct ClearableHashTableCell : public BaseCell
     /// Do I need to store the zero key separately (that is, can a zero key be inserted into the hash table).
     static constexpr bool need_zero_value_storage = false;
 
-    ClearableHashTableCell() {} //-V730 /// NOLINT
+    ClearableHashTableCell() {} /// NOLINT
     ClearableHashTableCell(const Key & key_, const State & state) : BaseCell(key_, state), version(state.version) {}
 };
@@ -68,7 +68,7 @@ struct ClearableHashTableCell<StringRef, StringRefBaseCell> : public StringRefBa
     /// Do I need to store the zero key separately (that is, can a zero key be inserted into the hash table).
     static constexpr bool need_zero_value_storage = true;
 
-    ClearableHashTableCell() { } //-V730 /// NOLINT
+    ClearableHashTableCell() { } /// NOLINT
     ClearableHashTableCell(const StringRef & key_, const State & state) : StringRefBaseCell(key_, state), version(state.version) { }
 };
@@ -13,7 +13,7 @@ struct FixedClearableHashTableCell
     using mapped_type = VoidMapped;
     UInt32 version;
 
-    FixedClearableHashTableCell() {} //-V730 /// NOLINT
+    FixedClearableHashTableCell() {} /// NOLINT
     FixedClearableHashTableCell(const Key &, const State & state) : version(state.version) {}
 
     const VoidKey getKey() const { return {}; } /// NOLINT
@@ -16,7 +16,7 @@ struct FixedHashMapCell
     bool full;
     Mapped mapped;
 
-    FixedHashMapCell() {} //-V730 /// NOLINT
+    FixedHashMapCell() {} /// NOLINT
     FixedHashMapCell(const Key &, const State &) : full(true) {}
     FixedHashMapCell(const value_type & value_, const State &) : full(true), mapped(value_.second) {}
@@ -31,7 +31,7 @@ struct FixedHashMapCell
     /// Note that we have to assemble a continuous layout for the value_type on each call of getValue().
     struct CellExt
     {
-        CellExt() {} //-V730 /// NOLINT
+        CellExt() {} /// NOLINT
         CellExt(Key && key_, const FixedHashMapCell * ptr_) : key(key_), ptr(const_cast<FixedHashMapCell *>(ptr_)) {}
         void update(Key && key_, const FixedHashMapCell * ptr_)
         {
@@ -76,7 +76,7 @@ struct FixedHashMapImplicitZeroCell
     /// Note that we have to assemble a continuous layout for the value_type on each call of getValue().
     struct CellExt
     {
-        CellExt() {} //-V730 /// NOLINT
+        CellExt() {} /// NOLINT
         CellExt(Key && key_, const FixedHashMapImplicitZeroCell * ptr_) : key(key_), ptr(const_cast<FixedHashMapImplicitZeroCell *>(ptr_)) {}
         void update(Key && key_, const FixedHashMapImplicitZeroCell * ptr_)
         {
@@ -19,7 +19,7 @@ struct FixedHashTableCell
     using mapped_type = VoidMapped;
     bool full;
 
-    FixedHashTableCell() {} //-V730 /// NOLINT
+    FixedHashTableCell() {} /// NOLINT
     FixedHashTableCell(const Key &, const State &) : full(true) {}
 
     const VoidKey getKey() const { return {}; } /// NOLINT
@@ -121,8 +121,8 @@ struct HashSetCellWithSavedHash : public HashTableCell<Key, Hash, TState>
 
     size_t saved_hash;
 
-    HashSetCellWithSavedHash() : Base() {} //-V730
-    HashSetCellWithSavedHash(const Key & key_, const typename Base::State & state) : Base(key_, state) {} //-V730
+    HashSetCellWithSavedHash() : Base() {}
+    HashSetCellWithSavedHash(const Key & key_, const typename Base::State & state) : Base(key_, state) {}
 
     bool keyEquals(const Key & key_) const { return bitEquals(this->key, key_); }
     bool keyEquals(const Key & key_, size_t hash_) const { return saved_hash == hash_ && bitEquals(this->key, key_); }
@@ -369,7 +369,7 @@ template <bool need_zero_value_storage, typename Cell>
 struct ZeroValueStorage;
 
 template <typename Cell>
-struct ZeroValueStorage<true, Cell> //-V730
+struct ZeroValueStorage<true, Cell>
 {
 private:
     bool has_zero = false;
@@ -92,7 +92,7 @@ struct StringHashTableHash
 };
 
 template <typename Cell>
-struct StringHashTableEmpty //-V730
+struct StringHashTableEmpty
 {
     using Self = StringHashTableEmpty;
 
@@ -95,7 +95,7 @@ class JSONMap : public IItem
     };
 
 public:
-    void add(std::string key, ItemPtr value) { values.emplace_back(Pair{.key = std::move(key), .value = std::move(value)}); } //-V1030
+    void add(std::string key, ItemPtr value) { values.emplace_back(Pair{.key = std::move(key), .value = std::move(value)}); }
     void add(std::string key, std::string value) { add(std::move(key), std::make_unique<JSONString>(std::move(value))); }
     void add(std::string key, const char * value) { add(std::move(key), std::make_unique<JSONString>(value)); }
     void add(std::string key, std::string_view value) { add(std::move(key), std::make_unique<JSONString>(value)); }
@@ -320,8 +320,8 @@ TracingContextHolder::TracingContextHolder(
         while (_parent_trace_context.trace_id == UUID())
         {
             // Make sure the random generated trace_id is not 0 which is an invalid id.
-            _parent_trace_context.trace_id.toUnderType().items[0] = thread_local_rng(); //-V656
-            _parent_trace_context.trace_id.toUnderType().items[1] = thread_local_rng(); //-V656
+            _parent_trace_context.trace_id.toUnderType().items[0] = thread_local_rng();
+            _parent_trace_context.trace_id.toUnderType().items[1] = thread_local_rng();
         }
         _parent_trace_context.span_id = 0;
     }
@@ -381,7 +381,7 @@ void PoolWithFailoverBase<TNestedPool>::updateErrorCounts(PoolWithFailoverBase<T
 {
     time_t current_time = time(nullptr);
 
-    if (last_decrease_time) //-V1051
+    if (last_decrease_time)
     {
         time_t delta = current_time - last_decrease_time;
 
@@ -86,7 +86,7 @@ public:
 
     struct Counter
     {
-        Counter() = default; //-V730
+        Counter() = default;
 
         explicit Counter(const TKey & k, UInt64 c = 0, UInt64 e = 0, size_t h = 0)
             : key(k), slot(0), hash(h), count(c), error(e) {}
@@ -204,7 +204,7 @@ public:
     void updateCounters(ProfileEvents::Counters & profile_events);
 
 private:
-    ::taskstats stats; //-V730_NOINIT
+    ::taskstats stats;
     std::function<::taskstats()> stats_getter;
 
     explicit TasksStatsCounters(UInt64 tid, MetricsProvider provider);
@@ -193,8 +193,8 @@ namespace VolnitskyTraits
                     chars.c1 = seq_l[seq_ngram_offset + 1];
                     putNGramBase(n, offset);
 
-                    chars.c0 = seq_r[seq_ngram_offset]; //-V519
-                    chars.c1 = seq_r[seq_ngram_offset + 1]; //-V519
+                    chars.c0 = seq_r[seq_ngram_offset];
+                    chars.c1 = seq_r[seq_ngram_offset + 1];
                     putNGramBase(n, offset);
 
                 }
@@ -317,7 +317,7 @@ namespace VolnitskyTraits
                 {
                     /// ngram for Ul
                     chars.c0 = c0u;
-                    chars.c1 = c1l; //-V1048
+                    chars.c1 = c1l;
                     putNGramBase(n, offset);
                 }
 
@@ -212,7 +212,7 @@ std::pair<ResponsePtr, Undo> TestKeeperCreateRequest::process(TestKeeper::Contai
     else
     {
         TestKeeper::Node created_node;
-        created_node.seq_num = 0; //-V1048
+        created_node.seq_num = 0;
         created_node.stat.czxid = zxid;
         created_node.stat.mzxid = zxid;
         created_node.stat.ctime = std::chrono::system_clock::now().time_since_epoch() / std::chrono::milliseconds(1);
@@ -286,7 +286,7 @@ std::pair<ResponsePtr, Undo> TestKeeperRemoveRequest::process(TestKeeper::Contai
         auto & parent = container.at(parentPath(path));
         --parent.stat.numChildren;
         ++parent.stat.cversion;
-        response.error = Error::ZOK; //-V1048
+        response.error = Error::ZOK;
 
         undo = [prev_node, &container, path = path]
         {
@@ -308,7 +308,7 @@ std::pair<ResponsePtr, Undo> TestKeeperExistsRequest::process(TestKeeper::Contai
     if (it != container.end())
     {
         response.stat = it->second.stat;
-        response.error = Error::ZOK; //-V1048
+        response.error = Error::ZOK;
     }
     else
     {
@@ -331,7 +331,7 @@ std::pair<ResponsePtr, Undo> TestKeeperGetRequest::process(TestKeeper::Container
     {
         response.stat = it->second.stat;
         response.data = it->second.data;
-        response.error = Error::ZOK; //-V1048
+        response.error = Error::ZOK;
     }
 
     return { std::make_shared<GetResponse>(response), {} };
@@ -358,7 +358,7 @@ std::pair<ResponsePtr, Undo> TestKeeperSetRequest::process(TestKeeper::Container
         it->second.data = data;
         ++container.at(parentPath(path)).stat.cversion;
         response.stat = it->second.stat;
-        response.error = Error::ZOK; //-V1048
+        response.error = Error::ZOK;
 
         undo = [prev_node, &container, path = path]
         {
@@ -412,7 +412,7 @@ std::pair<ResponsePtr, Undo> TestKeeperListRequest::process(TestKeeper::Containe
         }
 
         response.stat = it->second.stat;
-        response.error = Error::ZOK; //-V1048
+        response.error = Error::ZOK;
     }
 
     return { std::make_shared<ListResponse>(response), {} };
@@ -432,7 +432,7 @@ std::pair<ResponsePtr, Undo> TestKeeperCheckRequest::process(TestKeeper::Contain
     }
     else
     {
-        response.error = Error::ZOK; //-V1048
+        response.error = Error::ZOK;
     }
 
     return { std::make_shared<CheckResponse>(response), {} };
@@ -455,7 +455,7 @@ std::pair<ResponsePtr, Undo> TestKeeperMultiRequest::process(TestKeeper::Contain
     try
     {
         auto request_it = requests.begin();
-        response.error = Error::ZOK; //-V1048
+        response.error = Error::ZOK;
         while (request_it != requests.end())
         {
             const TestKeeperRequest & concrete_request = dynamic_cast<const TestKeeperRequest &>(**request_it);
@@ -826,7 +826,7 @@ void ZooKeeper::receiveEvent()
         if (length != actual_length)
             throw Exception(Error::ZMARSHALLINGERROR, "Response length doesn't match. Expected: {}, actual: {}", length, actual_length);
 
-        logOperationIfNeeded(request_info.request, response, /* finalize= */ false, elapsed_ms); //-V614
+        logOperationIfNeeded(request_info.request, response, /* finalize= */ false, elapsed_ms);
     }
     catch (...)
     {
@@ -32,17 +32,16 @@ static void dummyFunctionForInterposing() __attribute__((used));
 static void dummyFunctionForInterposing()
 {
     void* dummy;
-    /// Suppression for PVS-Studio and clang-tidy.
-    free(nullptr); // -V575 NOLINT
-    ignore(malloc(0)); // -V575 NOLINT
-    ignore(calloc(0, 0)); // -V575 NOLINT
-    ignore(realloc(nullptr, 0)); // -V575 NOLINT
-    ignore(posix_memalign(&dummy, 0, 0)); // -V575 NOLINT
-    ignore(aligned_alloc(1, 0)); // -V575 NOLINT
-    ignore(valloc(0)); // -V575 NOLINT
-    ignore(memalign(0, 0)); // -V575 NOLINT
+    free(nullptr); // NOLINT
+    ignore(malloc(0)); // NOLINT
+    ignore(calloc(0, 0)); // NOLINT
+    ignore(realloc(nullptr, 0)); // NOLINT
+    ignore(posix_memalign(&dummy, 0, 0)); // NOLINT
+    ignore(aligned_alloc(1, 0)); // NOLINT
+    ignore(valloc(0)); // NOLINT
+    ignore(memalign(0, 0)); // NOLINT
 #if !defined(USE_MUSL)
-    ignore(pvalloc(0)); // -V575 NOLINT
+    ignore(pvalloc(0)); // NOLINT
 #endif
 }
 #endif
@@ -25,7 +25,7 @@ TEST(ThreadPool, GlobalFull1)
     std::atomic<size_t> counter = 0;
     static constexpr size_t num_jobs = capacity + 1;
 
-    auto func = [&] { ++counter; while (counter != num_jobs) {} }; //-V776
+    auto func = [&] { ++counter; while (counter != num_jobs) {} };
 
     ThreadPool pool(num_jobs);
 
@@ -63,7 +63,7 @@ TEST(ThreadPool, GlobalFull2)
     global_pool.wait();
 
     std::atomic<size_t> counter = 0;
-    auto func = [&] { ++counter; while (counter != capacity + 1) {} }; //-V776
+    auto func = [&] { ++counter; while (counter != capacity + 1) {} };
 
     ThreadPool pool(capacity, 0, capacity);
     for (size_t i = 0; i < capacity; ++i)
@@ -1130,7 +1130,7 @@ template <typename ValueType>
 auto DDCompatibilityTestSequence()
 {
     // Generates sequences with double delta in given range.
-    auto dd_generator = [prev_delta = static_cast<Int64>(0), prev = static_cast<Int64>(0)](auto dd) mutable //-V788
+    auto dd_generator = [prev_delta = static_cast<Int64>(0), prev = static_cast<Int64>(0)](auto dd) mutable
     {
         const auto curr = dd + prev + prev_delta;
         prev = curr;
@@ -465,7 +465,7 @@ bool hasErrorsInMultiRequest(Coordination::ZooKeeperRequestPtr request)
     if (request == nullptr)
         return true;
 
-    for (const auto & subrequest : dynamic_cast<Coordination::ZooKeeperMultiRequest *>(request.get())->requests) // -V522
+    for (const auto & subrequest : dynamic_cast<Coordination::ZooKeeperMultiRequest *>(request.get())->requests)
         if (subrequest == nullptr)
             return true;
     return false;
@@ -1018,7 +1018,6 @@ struct DefineAliases
     \
     template class BaseSettings<SETTINGS_TRAITS_NAME>;
 
-//-V:IMPLEMENT_SETTINGS:501
 /// NOLINTNEXTLINE
 #define IMPLEMENT_SETTINGS_TRAITS_(TYPE, NAME, DEFAULT, DESCRIPTION, FLAGS) \
     res.field_infos.emplace_back( \
@@ -895,7 +895,7 @@ auto & Field::safeGet()
 
 
 template <typename T>
-Field::Field(T && rhs, enable_if_not_field_or_bool_or_stringlike_t<T>) //-V730
+Field::Field(T && rhs, enable_if_not_field_or_bool_or_stringlike_t<T>)
 {
     auto && val = castToNearestFieldType(std::forward<T>(rhs));
     createConcrete(std::forward<decltype(val)>(val));
@@ -581,9 +581,9 @@ namespace MySQLReplication
     {
         if (precision <= DecimalUtils::max_precision<Decimal32>)
             return Field(function(precision, scale, Decimal32()));
-        else if (precision <= DecimalUtils::max_precision<Decimal64>) //-V547
+        else if (precision <= DecimalUtils::max_precision<Decimal64>)
             return Field(function(precision, scale, Decimal64()));
-        else if (precision <= DecimalUtils::max_precision<Decimal128>) //-V547
+        else if (precision <= DecimalUtils::max_precision<Decimal128>)
             return Field(function(precision, scale, Decimal128()));
 
         return Field(function(precision, scale, Decimal256()));
@@ -649,7 +649,7 @@ namespace MySQLReplication
                 UInt32 val = 0;
                 size_t to_read = compressed_bytes_map[compressed_decimals];
 
-                if (to_read) //-V547
+                if (to_read)
                 {
                     readBigEndianStrict(payload, reinterpret_cast<char *>(&val), to_read);
                     res *= intExp10OfSize<typename DecimalType::NativeType>(static_cast<int>(compressed_decimals));
@@ -74,7 +74,7 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)
 
     if (config.getBool("send_crash_reports.enabled", false))
     {
-        if (debug || (strlen(VERSION_OFFICIAL) > 0)) //-V560
+        if (debug || (strlen(VERSION_OFFICIAL) > 0))
            enabled = true;
     }
 
@@ -184,7 +184,7 @@ struct IndexesSerializationType
             return std::make_shared<DataTypeUInt16>();
         if (type == TUInt32)
             return std::make_shared<DataTypeUInt32>();
-        if (type == TUInt64) //-V547
+        if (type == TUInt64)
             return std::make_shared<DataTypeUInt64>();
 
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't create DataType from IndexesSerializationType.");
@@ -111,9 +111,9 @@ DataTypePtr convertMySQLDataType(MultiEnum<MySQLDataTypesSupport> type_support,
     {
         if (precision <= DecimalUtils::max_precision<Decimal32>)
             res = std::make_shared<DataTypeDecimal<Decimal32>>(precision, scale);
-        else if (precision <= DecimalUtils::max_precision<Decimal64>) //-V547
+        else if (precision <= DecimalUtils::max_precision<Decimal64>)
             res = std::make_shared<DataTypeDecimal<Decimal64>>(precision, scale);
-        else if (precision <= DecimalUtils::max_precision<Decimal128>) //-V547
+        else if (precision <= DecimalUtils::max_precision<Decimal128>)
             res = std::make_shared<DataTypeDecimal<Decimal128>>(precision, scale);
     }
 
@@ -129,7 +129,7 @@ DataTypePtr getNumericType(const TypeIndexSet & types)
     size_t min_bit_width_of_integer = std::max(max_bits_of_signed_integer, max_bits_of_unsigned_integer);
 
     /// If unsigned is not covered by signed.
-    if (max_bits_of_signed_integer && max_bits_of_unsigned_integer >= max_bits_of_signed_integer) //-V1051
+    if (max_bits_of_signed_integer && max_bits_of_unsigned_integer >= max_bits_of_signed_integer)
     {
         // Because 128 and 256 bit integers are significantly slower, we should not promote to them.
         // But if we already have wide numbers, promotion is necessary.
@@ -106,7 +106,7 @@ StoragePtr DatabaseAtomic::detachTable(ContextPtr /* context */, const String &
     auto table = DatabaseOrdinary::detachTableUnlocked(name);
     table_name_to_path.erase(name);
     detached_tables.emplace(table->getStorageID().uuid, table);
-    not_in_use = cleanupDetachedTables(); //-V1001
+    not_in_use = cleanupDetachedTables();
     return table;
 }
 
@@ -115,11 +115,11 @@ static DataTypePtr convertPostgreSQLDataType(String & type, Fn<void()> auto && r
 
         if (precision <= DecimalUtils::max_precision<Decimal32>)
             res = std::make_shared<DataTypeDecimal<Decimal32>>(precision, scale);
-        else if (precision <= DecimalUtils::max_precision<Decimal64>) //-V547
+        else if (precision <= DecimalUtils::max_precision<Decimal64>)
             res = std::make_shared<DataTypeDecimal<Decimal64>>(precision, scale);
-        else if (precision <= DecimalUtils::max_precision<Decimal128>) //-V547
+        else if (precision <= DecimalUtils::max_precision<Decimal128>)
             res = std::make_shared<DataTypeDecimal<Decimal128>>(precision, scale);
-        else if (precision <= DecimalUtils::max_precision<Decimal256>) //-V547
+        else if (precision <= DecimalUtils::max_precision<Decimal256>)
             res = std::make_shared<DataTypeDecimal<Decimal256>>(precision, scale);
         else
             throw Exception(ErrorCodes::BAD_ARGUMENTS, "Precision {} and scale {} are too big and not supported", precision, scale);
@@ -84,7 +84,7 @@ public:
     {
         size_t language_id = static_cast<size_t>(language);
 
-        if (region_id >= names_refs[language_id].size()) //-V1051
+        if (region_id >= names_refs[language_id].size())
             return StringRef("", 0);
 
         StringRef ref = names_refs[language_id][region_id];
@@ -293,7 +293,7 @@ void registerDictionarySourceHTTP(DictionarySourceFactory & factory)
         .format = format,
         .update_field = config.getString(settings_config_prefix + ".update_field", ""),
         .update_lag = config.getUInt64(settings_config_prefix + ".update_lag", 1),
-        .header_entries = std::move(header_entries) //-V1030
+        .header_entries = std::move(header_entries)
     };
 
     auto context = copyContextAndApplySettingsFromDictionaryConfig(global_context, config, config_prefix);
@@ -233,7 +233,7 @@ bool MySQLDictionarySource::isModified() const
     if (!configuration.invalidate_query.empty())
     {
         auto response = doInvalidateQuery(configuration.invalidate_query);
-        if (response == invalidate_query_response) //-V1051
+        if (response == invalidate_query_response)
             return false;
 
         invalidate_query_response = response;
@@ -117,7 +117,7 @@ bool PostgreSQLDictionarySource::isModified() const
     if (!configuration.invalidate_query.empty())
     {
         auto response = doInvalidateQuery(configuration.invalidate_query);
-        if (response == invalidate_query_response) //-V1051
+        if (response == invalidate_query_response)
             return false;
         invalidate_query_response = response;
     }
@@ -178,7 +178,7 @@ bool XDBCDictionarySource::isModified() const
     if (!configuration.invalidate_query.empty())
     {
         auto response = doInvalidateQuery(configuration.invalidate_query);
-        if (invalidate_query_response == response) //-V1051
+        if (invalidate_query_response == response)
             return false;
         invalidate_query_response = response;
     }
@@ -55,7 +55,7 @@ static void writeData(const ISerialization & serialization, const ColumnPtr & co
     ISerialization::SerializeBinaryBulkSettings settings;
     settings.getter = [&ostr](ISerialization::SubstreamPath) -> WriteBuffer * { return &ostr; };
     settings.position_independent_encoding = false;
-    settings.low_cardinality_max_dictionary_size = 0; //-V1048
+    settings.low_cardinality_max_dictionary_size = 0;
 
     ISerialization::SerializeBinaryBulkStatePtr state;
     serialization.serializeBinaryBulkStatePrefix(*full_column, settings, state);
@@ -264,7 +264,7 @@ ColumnPtr wrapInNullable(const ColumnPtr & src, const ColumnsWithTypeAndName & a
         if (const auto * nullable = checkAndGetColumn<ColumnNullable>(*elem.column))
         {
             const ColumnPtr & null_map_column = nullable->getNullMapColumnPtr();
-            if (!result_null_map_column) //-V1051
+            if (!result_null_map_column)
             {
                 result_null_map_column = null_map_column;
             }
@@ -51,7 +51,7 @@ struct HexImpl
             UInt8 byte = x >> offset;
 
             /// Skip leading zeros
-            if (byte == 0 && !was_nonzero && offset && skip_leading_zero) //-V560
+            if (byte == 0 && !was_nonzero && offset && skip_leading_zero)
                 continue;
 
             was_nonzero = true;
@@ -144,7 +144,7 @@ struct BinImpl
             UInt8 byte = x >> offset;
 
             /// Skip leading zeros
-            if (byte == 0 && !was_nonzero && offset && skip_leading_zero) //-V560
+            if (byte == 0 && !was_nonzero && offset && skip_leading_zero)
                 continue;
 
             was_nonzero = true;
@@ -243,7 +243,7 @@ public:
 
         capture = std::make_shared<Capture>(Capture{
             .captured_names = captured_names_,
-            .captured_types = std::move(captured_types), //-V1030
+            .captured_types = std::move(captured_types),
             .lambda_arguments = lambda_arguments_,
             .return_name = expression_return_name_,
             .return_type = function_return_type_,
@@ -79,7 +79,7 @@ public:
         arguments[1],
     };
 
-    auto func_if = FunctionFactory::instance().get("if", context)->build(if_columns); //-V557
+    auto func_if = FunctionFactory::instance().get("if", context)->build(if_columns);
     return func_if->execute(if_columns, result_type, input_rows_count);
 }
 
@@ -87,7 +87,7 @@ public:
 
     auto result_column = ColumnUInt8::create();
 
-    auto call = [&](const auto & types) -> bool //-V657
+    auto call = [&](const auto & types) -> bool
     {
         using Types = std::decay_t<decltype(types)>;
         using Type = typename Types::RightType;
@@ -13,8 +13,7 @@ struct IsNaNImpl
     template <typename T>
     static bool execute(const T t)
    {
-        /// Suppression for PVS-Studio.
-        return t != t; //-V501
+        return t != t;
     }
 };
 
@@ -203,7 +203,7 @@ SOFTWARE.
         len -= 16;
     };
 
-    while (len >= 16) // NOLINT //-V1044
+    while (len >= 16) // NOLINT
         check_packed(_mm_loadu_si128(reinterpret_cast<const __m128i *>(data)));
 
    /// 0 <= len <= 15 for now. Reading data from data - 1 because of right padding of 15 and left padding
@@ -125,7 +125,7 @@ public:
         }
 
         agg_func.create(place.data()); /// This function can throw.
-        state_created = true; //-V519
+        state_created = true;
     }
 
     agg_func.merge(place.data(), state_to_add, arena.get());
@@ -174,7 +174,7 @@ void assertNotEOF(ReadBuffer & buf);
 
 [[noreturn]] void throwAtAssertionFailed(const char * s, ReadBuffer & buf);
 
-inline bool checkChar(char c, ReadBuffer & buf) // -V1071
+inline bool checkChar(char c, ReadBuffer & buf)
 {
     char a;
     if (!buf.peek(a) || a != c)
@@ -458,7 +458,7 @@ void readIntText(T & x, ReadBuffer & buf)
 }
 
 template <ReadIntTextCheckOverflow check_overflow = ReadIntTextCheckOverflow::CHECK_OVERFLOW, typename T>
-bool tryReadIntText(T & x, ReadBuffer & buf) // -V1071
+bool tryReadIntText(T & x, ReadBuffer & buf)
 {
     return readIntTextImpl<T, bool, check_overflow>(x, buf);
 }
@@ -840,7 +840,7 @@ inline void writeDateTimeUnixTimestamp(DateTime64 datetime64, UInt32 scale, Writ
     auto components = DecimalUtils::split(datetime64, scale);
     writeIntText(components.whole, buf);
 
-    if (scale > 0) //-V547
+    if (scale > 0)
     {
         buf.write('.');
         writeDateTime64FractionalText<DateTime64>(components.fractional, scale, buf);
@@ -287,7 +287,7 @@ ReturnType parseDateTimeBestEffortImpl(
                 UInt8 hour_or_day_of_month_or_month = 0;
                 if (num_digits == 2)
                     readDecimalNumber<2>(hour_or_day_of_month_or_month, digits);
-                else if (num_digits == 1) //-V547
+                else if (num_digits == 1)
                     readDecimalNumber<1>(hour_or_day_of_month_or_month, digits);
                 else
                     return on_error(ErrorCodes::LOGICAL_ERROR, "Cannot read DateTime: logical error, unexpected branch in code");
@@ -145,7 +145,7 @@ template <typename T, typename ReturnType>
 ReturnType readFloatTextPreciseImpl(T & x, ReadBuffer & buf)
 {
     static_assert(std::is_same_v<T, double> || std::is_same_v<T, float>, "Argument for readFloatTextPreciseImpl must be float or double");
-    static_assert('a' > '.' && 'A' > '.' && '\n' < '.' && '\t' < '.' && '\'' < '.' && '"' < '.', "Layout of char is not like ASCII"); //-V590
+    static_assert('a' > '.' && 'A' > '.' && '\n' < '.' && '\t' < '.' && '\'' < '.' && '"' < '.', "Layout of char is not like ASCII");
 
     static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
 
@@ -317,7 +317,7 @@ template <typename T, typename ReturnType>
 ReturnType readFloatTextFastImpl(T & x, ReadBuffer & in)
 {
     static_assert(std::is_same_v<T, double> || std::is_same_v<T, float>, "Argument for readFloatTextImpl must be float or double");
-    static_assert('a' > '.' && 'A' > '.' && '\n' < '.' && '\t' < '.' && '\'' < '.' && '"' < '.', "Layout of char is not like ASCII"); //-V590
+    static_assert('a' > '.' && 'A' > '.' && '\n' < '.' && '\t' < '.' && '\'' < '.' && '"' < '.', "Layout of char is not like ASCII");
 
     static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
 
@@ -275,7 +275,7 @@ static Block createBlockFromAST(const ASTPtr & node, const DataTypes & types, Co
 
             assert(tuple || func);
 
-            size_t tuple_size = tuple ? tuple->size() : func->arguments->children.size(); //-V1004
+            size_t tuple_size = tuple ? tuple->size() : func->arguments->children.size();
             if (tuple_size != num_columns)
                 throw Exception(ErrorCodes::INCORRECT_ELEMENT_OF_SET, "Incorrect size of tuple in set: {} instead of {}",
                     tuple_size, num_columns);
@@ -1650,8 +1650,8 @@ void Context::setCurrentQueryId(const String & query_id)
         UUID uuid{};
     } random;
 
-    random.words.a = thread_local_rng(); //-V656
-    random.words.b = thread_local_rng(); //-V656
+    random.words.a = thread_local_rng();
+    random.words.b = thread_local_rng();
 
 
     String query_id_to_set = query_id;
@@ -99,7 +99,7 @@ void DuplicateOrderByData::visit(ASTSelectQuery & select_query, ASTPtr &)
             bool is_stateful = false;
             ASTFunctionStatefulVisitor::Data data{context, is_stateful};
             ASTFunctionStatefulVisitor(data).visit(elem);
-            if (is_stateful) //-V547
+            if (is_stateful)
                 return;
         }
     }
@@ -119,4 +119,3 @@ void DuplicateOrderByData::visit(ASTSelectQuery & select_query, ASTPtr &)
 }
 
 }
-
@@ -254,7 +254,7 @@ struct ExpressionActionsChain : WithContext
         steps.clear();
     }
 
-    ActionsDAGPtr getLastActions(bool allow_empty = false) // -V1071
+    ActionsDAGPtr getLastActions(bool allow_empty = false)
     {
         if (steps.empty())
         {
@@ -95,7 +95,7 @@ public:
         KeepFunctionVisitor::Data keep_data{data.key_names_to_keep, keep_key};
         KeepFunctionVisitor(keep_data).visit(function_node->arguments);
 
-        if (!keep_key) //-V547
+        if (!keep_key)
             (data.key_names_to_keep).erase(function_node->getColumnName());
     }
 
@@ -179,7 +179,7 @@ private:
             std::vector<ASTPtr> renamed;
             NonGlobalTableVisitor::Data table_data(data.getContext(), data.checker, renamed, &node, nullptr);
             NonGlobalTableVisitor(table_data).visit(subquery);
-            if (!renamed.empty()) //-V547
+            if (!renamed.empty())
                 data.renamed_tables.emplace_back(subquery, std::move(renamed));
         }
     }
@@ -199,7 +199,7 @@ private:
             std::vector<ASTPtr> renamed;
             NonGlobalTableVisitor::Data table_data(data.getContext(), data.checker, renamed, nullptr, table_join);
             NonGlobalTableVisitor(table_data).visit(subquery);
-            if (!renamed.empty()) //-V547
+            if (!renamed.empty())
                 data.renamed_tables.emplace_back(subquery, std::move(renamed));
         }
         else if (table->database_and_table_name)
@@ -208,7 +208,7 @@ private:
             std::vector<ASTPtr> renamed;
             NonGlobalTableVisitor::Data table_data{data.getContext(), data.checker, renamed, nullptr, table_join};
             NonGlobalTableVisitor(table_data).visit(tb);
-            if (!renamed.empty()) //-V547
+            if (!renamed.empty())
                 data.renamed_tables.emplace_back(tb, std::move(renamed));
         }
     }
@@ -312,7 +312,7 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
     {
 
         /// We use global context here, because storages lifetime is bigger than query context lifetime
-        TablesLoader loader{getContext()->getGlobalContext(), {{database_name, database}}, mode}; //-V560
+        TablesLoader loader{getContext()->getGlobalContext(), {{database_name, database}}, mode};
         loader.loadTables();
         loader.startupTables();
     }
@@ -41,7 +41,7 @@ BlockIO InterpreterWatchQuery::execute()
     const Settings & settings = getContext()->getSettingsRef();
 
     StreamLocalLimits limits;
-    limits.mode = LimitsMode::LIMITS_CURRENT; //-V1048
+    limits.mode = LimitsMode::LIMITS_CURRENT;
     limits.size_limits.max_rows = settings.max_result_rows;
     limits.size_limits.max_bytes = settings.max_result_bytes;
     limits.size_limits.overflow_mode = settings.result_overflow_mode;
@@ -486,7 +486,7 @@ void createMissedColumns(Block & block)
     for (size_t i = 0; i < block.columns(); ++i)
     {
         auto & column = block.getByPosition(i);
-        if (!column.column) //-V1051
+        if (!column.column)
            column.column = column.type->createColumn();
     }
 }
@@ -197,7 +197,7 @@ bool PartLog::addNewParts(
         if (!query_id.empty())
             elem.query_id.insert(0, query_id.data(), query_id.size());
 
-        elem.event_type = PartLogElement::NEW_PART; //-V1048
+        elem.event_type = PartLogElement::NEW_PART;
 
         // construct event_time and event_time_microseconds using the same time point
         // so that the two times will always be equal up to a precision of a second.
@@ -691,7 +691,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
 
             if (!interpreter->ignoreLimits())
             {
-                limits.mode = LimitsMode::LIMITS_CURRENT; //-V1048
+                limits.mode = LimitsMode::LIMITS_CURRENT;
                 limits.size_limits = SizeLimits(settings.max_result_rows, settings.max_result_bytes, settings.result_overflow_mode);
             }
 
@@ -795,7 +795,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
         {
             QueryLogElement elem;
 
-            elem.type = QueryLogElementType::QUERY_START; //-V1048
+            elem.type = QueryLogElementType::QUERY_START;
 
             elem.event_time = timeInSeconds(query_start_time);
             elem.event_time_microseconds = timeInMicroseconds(query_start_time);
@@ -51,7 +51,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
 #endif
 
     auto current_logger = config.getString("logger", "");
-    if (config_logger == current_logger) //-V1051
+    if (config_logger == current_logger)
         return;
 
     config_logger = current_logger;
@@ -365,7 +365,7 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
         FormatStateStacked frame_nested = frame;
         columns_list->formatImpl(settings, state, frame_nested);
         settings.ostr << (settings.one_line ? ")" : "\n)");
-        frame.expression_list_always_start_on_new_line = false; //-V519
+        frame.expression_list_always_start_on_new_line = false;
     }
 
     settings.ostr << (settings.hilite ? hilite_keyword : "") << " AS " << (settings.hilite ? hilite_none : "");
@@ -393,7 +393,7 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
         settings.ostr << (settings.one_line ? ")" : "\n)");
     }
 
-    frame.expression_list_always_start_on_new_line = false; //-V519
+    frame.expression_list_always_start_on_new_line = false;
 
     if (inner_storage)
     {
@@ -95,13 +95,13 @@ public:
       */
     virtual bool parse(Pos & pos, ASTPtr & node, Expected & expected) = 0;
 
-    bool ignore(Pos & pos, Expected & expected) // -V1071
+    bool ignore(Pos & pos, Expected & expected)
     {
         ASTPtr ignore_node;
         return parse(pos, ignore_node, expected);
     }
 
-    bool ignore(Pos & pos) // -V1071
+    bool ignore(Pos & pos)
     {
         Expected expected;
         return ignore(pos, expected);
@@ -35,7 +35,7 @@ public:
         return res;
     }
 
-    bool parse(Pos & pos, ASTPtr & node, Expected & expected) override; // -V1071
+    bool parse(Pos & pos, ASTPtr & node, Expected & expected) override;
 
 protected:
     virtual bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) = 0;
@@ -9,7 +9,7 @@
 using namespace DB;
 using namespace DB::MySQLParser;
 
-static inline ASTPtr tryParserQuery(IParser & parser, const String & query) // -V1071
+static inline ASTPtr tryParserQuery(IParser & parser, const String & query)
 {
     return parseQuery(parser, query.data(), query.data() + query.size(), "", 0, 0);
 }
@@ -261,4 +261,3 @@ TEST(ParserAlterCommand, AlterOptionsCommand)
     EXPECT_THROW(tryParserQuery(alter_p, "FORCE ALGORITHM DEFAULT"), Exception);
     EXPECT_THROW(tryParserQuery(alter_p, "ALGORITHM DEFAULT AUTO_INCREMENT 1"), Exception);
 }
-
@@ -39,9 +39,9 @@ bool ParserExplainQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
     else if (s_pipeline.ignore(pos, expected))
         kind = ASTExplainQuery::ExplainKind::QueryPipeline;
     else if (s_plan.ignore(pos, expected))
-        kind = ASTExplainQuery::ExplainKind::QueryPlan; //-V1048
+        kind = ASTExplainQuery::ExplainKind::QueryPlan;
     else if (s_estimates.ignore(pos, expected))
-        kind = ASTExplainQuery::ExplainKind::QueryEstimates; //-V1048
+        kind = ASTExplainQuery::ExplainKind::QueryEstimates;
     else if (s_table_override.ignore(pos, expected))
         kind = ASTExplainQuery::ExplainKind::TableOverride;
     else if (s_current_transaction.ignore(pos, expected))
@@ -86,7 +86,7 @@ void IRowOutputFormat::writeMinExtreme(const DB::Columns & columns, size_t row_n
     write(columns, row_num);
 }
 
-void IRowOutputFormat::writeMaxExtreme(const DB::Columns & columns, size_t row_num) //-V524
+void IRowOutputFormat::writeMaxExtreme(const DB::Columns & columns, size_t row_num)
 {
     write(columns, row_num);
 }
@@ -177,7 +177,7 @@ QueryPipelineBuilderPtr QueryPlan::buildQueryPipeline(
         if (last_pipeline)
         {
             frame.pipelines.emplace_back(std::move(last_pipeline));
-            last_pipeline = nullptr; //-V1048
+            last_pipeline = nullptr;
         }
 
         size_t next_child = frame.pipelines.size();
@@ -238,7 +238,7 @@ void SortingStep::fullSortStreams(
         });
 
         StreamLocalLimits limits;
-        limits.mode = LimitsMode::LIMITS_CURRENT; //-V1048
+        limits.mode = LimitsMode::LIMITS_CURRENT;
         limits.size_limits = sort_settings.size_limits;
 
         pipeline.addSimpleTransform(
@@ -224,12 +224,12 @@ IProcessor::Status GroupingAggregatedTransform::prepare()
 
         /// Sanity check. If new bucket was read, we should be able to push it.
         /// This is always false, but we still keep this condition in case the code will be changed.
-        if (!all_inputs_finished) // -V547
+        if (!all_inputs_finished)
             throw Exception(ErrorCodes::LOGICAL_ERROR, "GroupingAggregatedTransform has read new two-level bucket, but couldn't push it.");
     }
     else
     {
-        if (!all_inputs_finished) // -V547
+        if (!all_inputs_finished)
             throw Exception(ErrorCodes::LOGICAL_ERROR,
                 "GroupingAggregatedTransform should have read all chunks for single level aggregation, "
                 "but not all of the inputs are finished.");
@@ -368,7 +368,7 @@ Chain buildPushingToViewsChain(
             out = buildPushingToViewsChain(
                 view, view_metadata_snapshot, insert_context, ASTPtr(), false, thread_status_holder, view_counter_ms);
 
-        views_data->views.emplace_back(ViewRuntimeData{ //-V614
+        views_data->views.emplace_back(ViewRuntimeData{
             std::move(query),
             out.getInputHeader(),
             view_id,
@@ -162,7 +162,7 @@ public:
     {
         auto num_threads = pipe.maxParallelStreams();
 
-        if (max_threads) //-V1051
+        if (max_threads)
             num_threads = std::min(num_threads, max_threads);
 
         return std::max<size_t>(1, num_threads);
@@ -895,7 +895,7 @@ void TCPHandler::processTablesStatusRequest()
             status.absolute_delay = static_cast<UInt32>(replicated_table->getAbsoluteDelay());
         }
         else
-            status.is_replicated = false; //-V1048
+            status.is_replicated = false;
 
         response.table_states_by_id.emplace(table_name, std::move(status));
     }
@@ -98,7 +98,7 @@ public:
     /// Storage metadata can be set separately in setInMemoryMetadata method
     explicit IStorage(StorageID storage_id_)
         : storage_id(std::move(storage_id_))
-        , metadata(std::make_unique<StorageInMemoryMetadata>()) {} //-V730
+        , metadata(std::make_unique<StorageInMemoryMetadata>()) {}
 
     IStorage(const IStorage &) = delete;
     IStorage & operator=(const IStorage &) = delete;
@@ -814,7 +814,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk(
     }
 
     /// We will remove directory if it's already exists. Make precautions.
-    if (tmp_prefix.empty() //-V560
+    if (tmp_prefix.empty()
         || part_name.empty()
         || std::string::npos != tmp_prefix.find_first_of("/.")
         || std::string::npos != part_name.find_first_of("/."))
@@ -750,7 +750,7 @@ void IMergeTreeDataPart::loadIndex()
         for (size_t j = 0; j < key_size; ++j)
             key_serializations[j] = primary_key.data_types[j]->getDefaultSerialization();
 
-        for (size_t i = 0; i < marks_count; ++i) //-V756
+        for (size_t i = 0; i < marks_count; ++i)
             for (size_t j = 0; j < key_size; ++j)
                 key_serializations[j]->deserializeBinary(*loaded_index[j], *index_file, {});
 
@@ -124,7 +124,7 @@ private:
     /// By default this context is uninitialed, but some variables has to be set after construction,
     /// some variables are used in a process of execution
     /// Proper initialization is responsibility of the author
-    struct GlobalRuntimeContext : public IStageRuntimeContext //-V730
+    struct GlobalRuntimeContext : public IStageRuntimeContext
     {
         MergeList::Entry * merge_entry{nullptr};
         /// If not null, use this instead of the global MergeList::Entry. This is for merging projections.
@@ -186,7 +186,7 @@ private:
     /// By default this context is uninitialed, but some variables has to be set after construction,
     /// some variables are used in a process of execution
     /// Proper initialization is responsibility of the author
-    struct ExecuteAndFinalizeHorizontalPartRuntimeContext : public IStageRuntimeContext //-V730
+    struct ExecuteAndFinalizeHorizontalPartRuntimeContext : public IStageRuntimeContext
     {
         /// Dependencies
         String suffix;
@@ -259,7 +259,7 @@ private:
     /// By default this context is uninitialed, but some variables has to be set after construction,
     /// some variables are used in a process of execution
     /// Proper initialization is responsibility of the author
-    struct VerticalMergeRuntimeContext : public IStageRuntimeContext //-V730
+    struct VerticalMergeRuntimeContext : public IStageRuntimeContext
     {
         /// Begin dependencies from previous stage
         std::unique_ptr<PocoTemporaryFile> rows_sources_file;
@@ -331,7 +331,7 @@ private:
     /// By default this context is uninitialed, but some variables has to be set after construction,
     /// some variables are used in a process of execution
     /// Proper initialization is responsibility of the author
-    struct MergeProjectionsRuntimeContext : public IStageRuntimeContext //-V730
+    struct MergeProjectionsRuntimeContext : public IStageRuntimeContext
     {
         /// Only one dependency
         bool need_sync{false};
@@ -2175,7 +2175,7 @@ void MergeTreeData::removePartsFinally(const MergeTreeData::DataPartsVector & pa
         part_log_elem.event_time = timeInSeconds(time_now);
         part_log_elem.event_time_microseconds = timeInMicroseconds(time_now);
 
-        part_log_elem.duration_ms = 0; //-V1048
+        part_log_elem.duration_ms = 0;
 
         part_log_elem.database_name = table_id.database_name;
         part_log_elem.table_name = table_id.table_name;
@@ -3808,7 +3808,7 @@ MergeTreeData::PartsToRemoveFromZooKeeper MergeTreeData::removePartsInRangeFromW
 
 void MergeTreeData::restoreAndActivatePart(const DataPartPtr & part, DataPartsLock * acquired_lock)
 {
-    auto lock = (acquired_lock) ? DataPartsLock() : lockParts(); //-V1018
+    auto lock = (acquired_lock) ? DataPartsLock() : lockParts();
     if (part->getState() == DataPartState::Active)
         return;
     addPartContributionToColumnAndSecondaryIndexSizes(part);
@@ -5294,7 +5294,7 @@ MergeTreeData::DataPartsVector MergeTreeData::getDataPartsVectorForInternalUsage
         auto range = getDataPartsStateRange(state);
         std::swap(buf, res);
         res.clear();
-        std::merge(range.begin(), range.end(), buf.begin(), buf.end(), std::back_inserter(res), LessDataPart()); //-V783
+        std::merge(range.begin(), range.end(), buf.begin(), buf.end(), std::back_inserter(res), LessDataPart());
     }
 
     if (out_states != nullptr)
@@ -139,8 +139,8 @@ void writeColumnSingleGranule(
     ISerialization::SerializeBinaryBulkSettings serialize_settings;
 
     serialize_settings.getter = stream_getter;
-    serialize_settings.position_independent_encoding = true; //-V1048
-    serialize_settings.low_cardinality_max_dictionary_size = 0; //-V1048
+    serialize_settings.position_independent_encoding = true;
+    serialize_settings.low_cardinality_max_dictionary_size = 0;
 
     serialization->serializeBinaryBulkStatePrefix(*column.column, serialize_settings, state);
     serialization->serializeBinaryBulkWithMultipleStreams(*column.column, from_row, number_of_rows, serialize_settings, state);
@@ -236,7 +236,7 @@ void MergeTreeDataPartWriterCompact::writeDataBlock(const Block & block, const G
                 stream_getter, granule.start_row, granule.rows_to_write);
 
             /// Each type always have at least one substream
-            prev_stream->hashing_buf.next(); //-V522
+            prev_stream->hashing_buf.next();
         }
 
         writeIntBinary(granule.rows_to_write, marks_out);
@@ -1169,7 +1169,7 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd
         index_stats.emplace_back(ReadFromMergeTree::IndexStat{
             .type = ReadFromMergeTree::IndexType::Skip,
             .name = index_name,
-            .description = std::move(description), //-V1030
+            .description = std::move(description),
             .num_parts_after = index_and_condition.stat.total_parts - index_and_condition.stat.parts_dropped,
             .num_granules_after = index_and_condition.stat.total_granules - index_and_condition.stat.granules_dropped});
     }
@@ -1186,7 +1186,7 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd
         index_stats.emplace_back(ReadFromMergeTree::IndexStat{
             .type = ReadFromMergeTree::IndexType::Skip,
             .name = index_name,
-            .description = std::move(description), //-V1030
+            .description = std::move(description),
             .num_parts_after = index_and_condition.stat.total_parts - index_and_condition.stat.parts_dropped,
             .num_granules_after = index_and_condition.stat.total_granules - index_and_condition.stat.granules_dropped});
     }
@@ -73,7 +73,7 @@ void MergeTreeIndexGranuleSet::serializeBinary(WriteBuffer & ostr) const
     ISerialization::SerializeBinaryBulkSettings settings;
     settings.getter = [&ostr](ISerialization::SubstreamPath) -> WriteBuffer * { return &ostr; };
     settings.position_independent_encoding = false;
-    settings.low_cardinality_max_dictionary_size = 0; //-V1048
+    settings.low_cardinality_max_dictionary_size = 0;
 
     auto serialization = type->getDefaultSerialization();
     ISerialization::SerializeBinaryBulkStatePtr state;
@@ -121,7 +121,7 @@ struct MergeTreePartInfo
     /// Simple sanity check for partition ID. Checking that it's not too long or too short, doesn't contain a lot of '_'.
     static void validatePartitionID(const String & partition_id, MergeTreeDataFormatVersion format_version);
 
-    static MergeTreePartInfo fromPartName(const String & part_name, MergeTreeDataFormatVersion format_version); // -V1071
+    static MergeTreePartInfo fromPartName(const String & part_name, MergeTreeDataFormatVersion format_version);
 
     static std::optional<MergeTreePartInfo> tryParsePartName(
         std::string_view part_name, MergeTreeDataFormatVersion format_version);
@@ -173,7 +173,7 @@ void selectWithinPartition(
     for (size_t end = begin + 2; end <= parts_count; ++end)
     {
         assert(end > begin);
-        if (settings.max_parts_to_merge_at_once && end - begin > settings.max_parts_to_merge_at_once) //-V658
+        if (settings.max_parts_to_merge_at_once && end - begin > settings.max_parts_to_merge_at_once)
             break;
 
         if (!parts[end - 1].shall_participate_in_merges)
@@ -879,7 +879,7 @@ bool StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds, bool loc
 
     buffer.data.swap(block_to_write);
 
-    if (!buffer.first_write_time) // -V547
+    if (!buffer.first_write_time)
         buffer.first_write_time = current_time;
 
     /// After a while, the next write attempt will happen.
@@ -223,7 +223,7 @@ StoragePtr StorageFactory::get(
     assert(arguments.getContext() == arguments.getContext()->getGlobalContext());
 
     auto res = storages.at(name).creator_fn(arguments);
-    if (!empty_engine_args.empty()) //-V547
+    if (!empty_engine_args.empty())
     {
         /// Storage creator modified empty arguments list, so we should modify the query
         assert(storage_def && storage_def->engine && !storage_def->engine->arguments);
@@ -552,7 +552,7 @@ private:
 
         if (!position)
             position = decltype(position)(
-                static_cast<void *>(new typename Map::const_iterator(map.begin())), //-V572
+                static_cast<void *>(new typename Map::const_iterator(map.begin())),
                 [](void * ptr) { delete reinterpret_cast<typename Map::const_iterator *>(ptr); });
 
         auto & it = *reinterpret_cast<typename Map::const_iterator *>(position.get());
@@ -204,7 +204,7 @@ void LogSource::readData(const NameAndTypePair & name_and_type, ColumnPtr & colu
 
     auto create_stream_getter = [&](bool stream_for_prefix)
     {
-        return [&, stream_for_prefix] (const ISerialization::SubstreamPath & path) -> ReadBuffer * //-V1047
+        return [&, stream_for_prefix] (const ISerialization::SubstreamPath & path) -> ReadBuffer *
         {
            if (cache.contains(ISerialization::getSubcolumnNameForStream(path)))
                return nullptr;
Some files were not shown because too many files have changed in this diff.