Merge branch 'master' of github.com:ClickHouse/ClickHouse into wyhash

olevino999 2022-04-21 19:36:25 +03:00
commit 6594ae8e44
106 changed files with 505 additions and 318 deletions

View File

@ -1,9 +1,8 @@
Checks: '-*,
misc-throw-by-value-catch-by-reference,
misc-misplaced-const,
misc-unconventional-assign-operator,
misc-redundant-expression,
misc-static-assert,
misc-throw-by-value-catch-by-reference,
misc-unconventional-assign-operator,
misc-uniqueptr-reset-release,
misc-unused-alias-decls,
@ -18,22 +17,22 @@ Checks: '-*,
modernize-redundant-void-arg,
modernize-replace-random-shuffle,
modernize-use-bool-literals,
modernize-use-nullptr,
modernize-use-using,
modernize-use-equals-default,
modernize-use-equals-delete,
modernize-use-nullptr,
modernize-use-using,
performance-faster-string-find,
performance-for-range-copy,
performance-implicit-conversion-in-loop,
performance-inefficient-algorithm,
performance-inefficient-vector-operation,
performance-move-const-arg,
performance-move-constructor-init,
performance-no-automatic-move,
performance-noexcept-move-constructor,
performance-trivially-destructible,
performance-unnecessary-copy-initialization,
performance-noexcept-move-constructor,
performance-move-const-arg,
readability-avoid-const-params-in-decls,
readability-const-return-type,
@ -42,6 +41,8 @@ Checks: '-*,
readability-convert-member-functions-to-static,
readability-delete-null-pointer,
readability-deleted-default,
readability-identifier-naming,
readability-inconsistent-declaration-parameter-name,
readability-make-member-function-const,
readability-misplaced-array-index,
readability-non-const-parameter,
@ -49,26 +50,23 @@ Checks: '-*,
readability-redundant-access-specifiers,
readability-redundant-control-flow,
readability-redundant-function-ptr-dereference,
readability-redundant-member-init,
readability-redundant-smartptr-get,
readability-redundant-string-cstr,
readability-redundant-string-init,
readability-simplify-boolean-expr,
readability-simplify-subscript-expr,
readability-static-definition-in-anonymous-namespace,
readability-string-compare,
readability-uniqueptr-delete-release,
readability-redundant-member-init,
readability-simplify-subscript-expr,
readability-simplify-boolean-expr,
readability-inconsistent-declaration-parameter-name,
readability-identifier-naming,
bugprone-undelegated-constructor,
bugprone-argument-comment,
bugprone-bad-signal-to-kill-thread,
bugprone-bool-pointer-implicit-conversion,
bugprone-copy-constructor-init,
bugprone-dangling-handle,
bugprone-forward-declaration-namespace,
bugprone-fold-init-type,
bugprone-forward-declaration-namespace,
bugprone-inaccurate-erase,
bugprone-incorrect-roundings,
bugprone-infinite-loop,
@ -99,6 +97,7 @@ Checks: '-*,
bugprone-throw-keyword-missing,
bugprone-too-small-loop-variable,
bugprone-undefined-memory-manipulation,
bugprone-undelegated-constructor,
bugprone-unhandled-self-assignment,
bugprone-unused-raii,
bugprone-unused-return-value,
@ -119,8 +118,8 @@ Checks: '-*,
google-build-namespaces,
google-default-arguments,
google-explicit-constructor,
google-readability-casting,
google-readability-avoid-underscore-in-googletest-name,
google-readability-casting,
google-runtime-int,
google-runtime-operator,
@ -139,12 +138,12 @@ Checks: '-*,
clang-analyzer-core.uninitialized.CapturedBlockVariable,
clang-analyzer-core.uninitialized.UndefReturn,
clang-analyzer-cplusplus.InnerPointer,
clang-analyzer-cplusplus.Move,
clang-analyzer-cplusplus.NewDelete,
clang-analyzer-cplusplus.NewDeleteLeaks,
clang-analyzer-cplusplus.PlacementNewChecker,
clang-analyzer-cplusplus.SelfAssignment,
clang-analyzer-deadcode.DeadStores,
clang-analyzer-cplusplus.Move,
clang-analyzer-optin.cplusplus.UninitializedObject,
clang-analyzer-optin.cplusplus.VirtualCall,
clang-analyzer-security.insecureAPI.UncheckedReturn,
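
Most of the C++ changes below exist to satisfy checks from this list, which this hunk also deduplicates and re-sorts alphabetically. As an illustrative sketch (not part of the commit), here is code that two of the enabled checks would flag:

#include <memory>

struct Widget
{
    int value = 0;

    // readability-redundant-member-init: `value(0)` would repeat the
    // default member initializer above and be flagged.
    // Widget() : value(0) {}

    void reset(std::unique_ptr<int> & p)
    {
        // readability-uniqueptr-delete-release: prefer `p = nullptr;`.
        delete p.release();
    }
};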

View File

@ -73,7 +73,7 @@ replxx::Replxx::completions_t LineReader::Suggest::getCompletions(const String &
if (std::string::npos == last_word_pos)
last_word = prefix;
else
last_word = std::string_view(prefix).substr(last_word_pos + 1, std::string::npos);
last_word = std::string_view{prefix}.substr(last_word_pos + 1, std::string::npos);
/// last_word can be empty.
std::pair<Words::const_iterator, Words::const_iterator> range;

View File

@ -61,6 +61,7 @@
#if defined(OS_DARWIN)
# pragma GCC diagnostic ignored "-Wunused-macros"
// NOLINTNEXTLINE(bugprone-reserved-identifier)
# define _XOPEN_SOURCE 700 // ucontext is not available without _XOPEN_SOURCE
#endif
#include <ucontext.h>
@ -132,7 +133,7 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
DB::writePODBinary(*info, out);
DB::writePODBinary(signal_context, out);
DB::writePODBinary(stack_trace, out);
DB::writeBinary(UInt32(getThreadId()), out);
DB::writeBinary(static_cast<UInt32>(getThreadId()), out);
DB::writePODBinary(DB::current_thread, out);
out.next();
@ -435,7 +436,7 @@ static void sanitizerDeathCallback()
DB::WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], buf_size, buf);
DB::writeBinary(static_cast<int>(SignalListener::StdTerminate), out);
DB::writeBinary(UInt32(getThreadId()), out);
DB::writeBinary(static_cast<UInt32>(getThreadId()), out);
DB::writeBinary(log_message, out);
out.next();
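
The bulk of this commit replaces functional-style casts such as UInt32(getThreadId()) with static_cast. Behavior is identical; the benefit is that static_cast is greppable and performs only well-defined conversions, whereas a one-argument functional cast is equivalent to a C-style cast and can silently degrade to reinterpret_cast or const_cast. A minimal sketch (names hypothetical, not from the commit):

#include <cstdint>

uint64_t currentThreadId() { return 0x1234567890ABCDEFULL; }  // hypothetical stub

void writeThreadId()
{
    uint64_t tid = currentThreadId();

    // Functional cast: looks like a constructor call and, for pointer
    // types, could silently become a reinterpret_cast.
    uint32_t a = uint32_t(tid);

    // static_cast: the same truncation to the low 32 bits, but explicit,
    // searchable, and limited to well-defined conversions.
    uint32_t b = static_cast<uint32_t>(tid);

    (void)a;
    (void)b;
}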

View File

@ -103,7 +103,7 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
columns[i++]->insert(DNSResolver::instance().getHostName());
columns[i++]->insert(msg_ext.query_id);
columns[i++]->insert(msg_ext.thread_id);
columns[i++]->insert(Int64(msg.getPriority()));
columns[i++]->insert(static_cast<Int64>(msg.getPriority()));
columns[i++]->insert(msg.getSource());
columns[i++]->insert(msg.getText());

View File

@ -2,11 +2,11 @@
# NOTE: has nothing in common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54461)
SET(VERSION_REVISION 54462)
SET(VERSION_MAJOR 22)
SET(VERSION_MINOR 4)
SET(VERSION_MINOR 5)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 92ab33f560e638d1989c5ca543021ab53d110f5c)
SET(VERSION_DESCRIBE v22.4.1.1-testing)
SET(VERSION_STRING 22.4.1.1)
SET(VERSION_GITHASH 77a82cc090dd5dba2d995946e82a12a2cadaaff3)
SET(VERSION_DESCRIBE v22.5.1.1-testing)
SET(VERSION_STRING 22.5.1.1)
# end of autochange

View File

@ -239,12 +239,12 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
uint32_t path_length = 0;
_NSGetExecutablePath(nullptr, &path_length);
if (path_length <= 1)
Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary");
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary");
std::string path(path_length, std::string::value_type());
auto res = _NSGetExecutablePath(&path[0], &path_length);
if (res != 0)
Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary");
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary");
if (path.back() == '\0')
path.pop_back();
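
The two changed lines above fix a genuine bug: writing Exception(...) without throw constructs a temporary exception object and immediately destroys it, so the error path silently did nothing. clang-tidy's bugprone-throw-keyword-missing, enabled in the check list at the top of this commit, catches exactly this pattern. A minimal reproduction (not ClickHouse code):

#include <stdexcept>

void mustBePositive(int x)
{
    // BUG: constructs and destroys a temporary; control falls through.
    // if (x <= 0)
    //     std::runtime_error("x must be positive");

    // Fixed: the throw keyword is what actually raises the exception.
    if (x <= 0)
        throw std::runtime_error("x must be positive");
}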

View File

@ -549,7 +549,7 @@ private:
CodePoint sample(UInt64 random, double end_multiplier) const
{
UInt64 range = total + UInt64(count_end * end_multiplier);
UInt64 range = total + static_cast<UInt64>(count_end * end_multiplier);
if (range == 0)
return END;
@ -728,7 +728,7 @@ public:
if (!histogram.total)
continue;
double average = double(histogram.total) / histogram.buckets.size();
double average = static_cast<double>(histogram.total) / histogram.buckets.size();
UInt64 new_total = 0;
for (auto & bucket : histogram.buckets)

View File

@ -225,7 +225,7 @@ public:
throw Exception("Logical error: single argument is passed to AggregateFunctionIfNullVariadic", ErrorCodes::LOGICAL_ERROR);
if (number_of_arguments > MAX_ARGS)
throw Exception("Maximum number of arguments for aggregate function with Nullable types is " + toString(size_t(MAX_ARGS)),
throw Exception("Maximum number of arguments for aggregate function with Nullable types is " + toString(MAX_ARGS),
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
for (size_t i = 0; i < number_of_arguments; ++i)
@ -359,7 +359,7 @@ private:
using Base = AggregateFunctionNullBase<result_is_nullable, serialize_flag,
AggregateFunctionIfNullVariadic<result_is_nullable, serialize_flag, null_is_skipped>>;
enum { MAX_ARGS = 8 };
static constexpr size_t MAX_ARGS = 8;
size_t number_of_arguments = 0;
std::array<char, MAX_ARGS> is_nullable; /// Plain array is better than std::vector due to one indirection less.
};
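
Replacing the enum { MAX_ARGS = 8 } idiom with static constexpr size_t gives the constant a real type, which is also why the toString(size_t(MAX_ARGS)) cast above becomes unnecessary. A sketch of the difference (illustrative, not from the commit):

#include <array>
#include <cstddef>

struct OldStyle
{
    enum { MAX_ARGS = 8 };             // enum hack: unnamed type, needs casts in typed contexts
    std::array<char, MAX_ARGS> flags;
};

struct NewStyle
{
    static constexpr std::size_t MAX_ARGS = 8;  // typed constant, no cast needed
    std::array<char, MAX_ARGS> flags;
};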

View File

@ -56,8 +56,8 @@ namespace
/// These default parameters were picked because they performed well in some tests,
/// though fitting the parameters is still required to achieve better results
auto learning_rate = Float64(1.0);
auto l2_reg_coef = Float64(0.5);
auto learning_rate = static_cast<Float64>(1.0);
auto l2_reg_coef = static_cast<Float64>(0.5);
UInt64 batch_size = 15;
std::string weights_updater_name = "Adam";

View File

@ -607,7 +607,7 @@ MutableColumns ColumnAggregateFunction::scatter(IColumn::ColumnIndex num_columns
size_t num_rows = size();
{
size_t reserve_size = double(num_rows) / num_columns * 1.1; /// 1.1 is just a guess. Better to use n-sigma rule.
size_t reserve_size = static_cast<double>(num_rows) / num_columns * 1.1; /// 1.1 is just a guess. Better to use n-sigma rule.
if (reserve_size > 1)
for (auto & column : columns)

View File

@ -81,7 +81,7 @@ namespace
if (max_val > size)
return mapUniqueIndexImplRef(index);
auto map_size = UInt64(max_val) + 1;
auto map_size = static_cast<UInt64>(max_val) + 1;
PaddedPODArray<T> map(map_size, 0);
T zero_pos_value = index[0];
index[0] = 0;
@ -98,7 +98,7 @@ namespace
index[i] = map[val];
}
auto res_col = ColumnVector<T>::create(UInt64(cur_pos) + 1);
auto res_col = ColumnVector<T>::create(static_cast<UInt64>(cur_pos) + 1);
auto & data = res_col->getData();
data[0] = zero_pos_value;
for (size_t i = 0; i < map_size; ++i)

View File

@ -228,7 +228,7 @@ void ColumnVector<T>::getPermutation(IColumn::PermutationSortDirection direction
if (s >= 256 && s <= std::numeric_limits<UInt32>::max() && use_radix_sort)
{
PaddedPODArray<ValueWithIndex<T>> pairs(s);
for (UInt32 i = 0; i < UInt32(s); ++i)
for (UInt32 i = 0; i < static_cast<UInt32>(s); ++i)
pairs[i] = {data[i], i};
RadixSort<RadixSortTraits<T>>::executeLSD(pairs.data(), s, reverse, res.data());

View File

@ -82,7 +82,7 @@ void FieldVisitorWriteBinary::operator() (const Object & x, WriteBuffer & buf) c
void FieldVisitorWriteBinary::operator()(const bool & x, WriteBuffer & buf) const
{
writeBinary(UInt8(x), buf);
writeBinary(static_cast<UInt8>(x), buf);
}
}

View File

@ -207,15 +207,15 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(
/// Replace the file descriptors with the ends of our pipes.
if (STDIN_FILENO != dup2(pipe_stdin.fds_rw[0], STDIN_FILENO))
_exit(int(ReturnCodes::CANNOT_DUP_STDIN));
_exit(static_cast<int>(ReturnCodes::CANNOT_DUP_STDIN));
if (!config.pipe_stdin_only)
{
if (STDOUT_FILENO != dup2(pipe_stdout.fds_rw[1], STDOUT_FILENO))
_exit(int(ReturnCodes::CANNOT_DUP_STDOUT));
_exit(static_cast<int>(ReturnCodes::CANNOT_DUP_STDOUT));
if (STDERR_FILENO != dup2(pipe_stderr.fds_rw[1], STDERR_FILENO))
_exit(int(ReturnCodes::CANNOT_DUP_STDERR));
_exit(static_cast<int>(ReturnCodes::CANNOT_DUP_STDERR));
}
for (size_t i = 0; i < config.read_fds.size(); ++i)
@ -224,7 +224,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(
auto fd = config.read_fds[i];
if (fd != dup2(fds.fds_rw[1], fd))
_exit(int(ReturnCodes::CANNOT_DUP_READ_DESCRIPTOR));
_exit(static_cast<int>(ReturnCodes::CANNOT_DUP_READ_DESCRIPTOR));
}
for (size_t i = 0; i < config.write_fds.size(); ++i)
@ -233,7 +233,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(
auto fd = config.write_fds[i];
if (fd != dup2(fds.fds_rw[0], fd))
_exit(int(ReturnCodes::CANNOT_DUP_WRITE_DESCRIPTOR));
_exit(static_cast<int>(ReturnCodes::CANNOT_DUP_WRITE_DESCRIPTOR));
}
// Reset the signal mask: it may be non-empty and will be inherited
@ -246,7 +246,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(
execv(filename, argv);
/// If the process is running, then `execv` does not return here.
_exit(int(ReturnCodes::CANNOT_EXEC));
_exit(static_cast<int>(ReturnCodes::CANNOT_EXEC));
}
std::unique_ptr<ShellCommand> res(new ShellCommand(
@ -356,17 +356,17 @@ void ShellCommand::wait()
{
switch (retcode)
{
case int(ReturnCodes::CANNOT_DUP_STDIN):
case static_cast<int>(ReturnCodes::CANNOT_DUP_STDIN):
throw Exception("Cannot dup2 stdin of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
case int(ReturnCodes::CANNOT_DUP_STDOUT):
case static_cast<int>(ReturnCodes::CANNOT_DUP_STDOUT):
throw Exception("Cannot dup2 stdout of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
case int(ReturnCodes::CANNOT_DUP_STDERR):
case static_cast<int>(ReturnCodes::CANNOT_DUP_STDERR):
throw Exception("Cannot dup2 stderr of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
case int(ReturnCodes::CANNOT_EXEC):
case static_cast<int>(ReturnCodes::CANNOT_EXEC):
throw Exception("Cannot execv in child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
case int(ReturnCodes::CANNOT_DUP_READ_DESCRIPTOR):
case static_cast<int>(ReturnCodes::CANNOT_DUP_READ_DESCRIPTOR):
throw Exception("Cannot dup2 read descriptor of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
case int(ReturnCodes::CANNOT_DUP_WRITE_DESCRIPTOR):
case static_cast<int>(ReturnCodes::CANNOT_DUP_WRITE_DESCRIPTOR):
throw Exception("Cannot dup2 write descriptor of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
default:
throw Exception("Child process was exited with return code " + toString(retcode), ErrorCodes::CHILD_WAS_NOT_EXITED_NORMALLY);

View File

@ -154,7 +154,7 @@ ReturnType ThreadPoolImpl<Thread>::scheduleImpl(Job job, int priority, std::opti
new_job_or_shutdown.notify_one();
}
return ReturnType(true);
return static_cast<ReturnType>(true);
}
template <typename Thread>

View File

@ -64,7 +64,7 @@ void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Int
size_t stack_trace_size = stack_trace.getSize();
size_t stack_trace_offset = stack_trace.getOffset();
writeIntBinary(UInt8(stack_trace_size - stack_trace_offset), out);
writeIntBinary(static_cast<UInt8>(stack_trace_size - stack_trace_offset), out);
for (size_t i = stack_trace_offset; i < stack_trace_size; ++i)
writePODBinary(stack_trace.getFramePointers()[i], out);

View File

@ -514,7 +514,7 @@ void TestKeeper::processingThread()
{
RequestInfo info;
UInt64 max_wait = UInt64(operation_timeout.totalMilliseconds());
UInt64 max_wait = static_cast<UInt64>(operation_timeout.totalMilliseconds());
if (requests_queue.tryPop(info, max_wait))
{
if (expired)

View File

@ -76,7 +76,7 @@ void ZooKeeper::init(const std::string & implementation_, const Strings & hosts_
auto & host_string = host.host;
try
{
bool secure = bool(startsWith(host_string, "secure://"));
bool secure = startsWith(host_string, "secure://");
if (secure)
host_string.erase(0, strlen("secure://"));
@ -801,7 +801,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition &
auto callback = [state](const Coordination::GetResponse & response)
{
state->code = int32_t(response.error);
state->code = static_cast<int32_t>(response.error);
if (state->code)
state->event.set();
};
@ -810,7 +810,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition &
{
if (!state->code)
{
state->code = int32_t(response.error);
state->code = static_cast<int32_t>(response.error);
if (!state->code)
state->event_type = response.type;
state->event.set();
@ -828,7 +828,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition &
if (!state->event.tryWait(1000))
continue;
if (state->code == int32_t(Coordination::Error::ZNONODE))
if (state->code == static_cast<int32_t>(Coordination::Error::ZNONODE))
return true;
if (state->code)

View File

@ -40,7 +40,7 @@ void write(bool x, WriteBuffer & out)
void write(const std::string & s, WriteBuffer & out)
{
write(int32_t(s.size()), out);
write(static_cast<int32_t>(s.size()), out);
out.write(s.data(), s.size());
}

View File

@ -539,7 +539,7 @@ void ZooKeeper::sendAuth(const String & scheme, const String & data)
Error::ZMARSHALLINGERROR);
if (err != Error::ZOK)
throw Exception("Error received in reply to auth request. Code: " + DB::toString(int32_t(err)) + ". Message: " + String(errorMessage(err)),
throw Exception("Error received in reply to auth request. Code: " + DB::toString(static_cast<int32_t>(err)) + ". Message: " + String(errorMessage(err)),
Error::ZMARSHALLINGERROR);
}
@ -563,8 +563,8 @@ void ZooKeeper::sendThread()
{
/// Wait for the next request in queue. No more than operation timeout. No more than until next heartbeat time.
UInt64 max_wait = std::min(
UInt64(std::chrono::duration_cast<std::chrono::milliseconds>(next_heartbeat_time - now).count()),
UInt64(operation_timeout.totalMilliseconds()));
static_cast<UInt64>(std::chrono::duration_cast<std::chrono::milliseconds>(next_heartbeat_time - now).count()),
static_cast<UInt64>(operation_timeout.totalMilliseconds()));
RequestInfo info;
if (requests_queue.tryPop(info, max_wait))

View File

@ -153,7 +153,7 @@ void formatIPv6(const unsigned char * src, char *& dst, uint8_t zeroed_tail_byte
}
/// Was it a trailing run of 0x00's?
if (best.base != -1 && size_t(best.base) + size_t(best.len) == words.size())
if (best.base != -1 && static_cast<size_t>(best.base) + static_cast<size_t>(best.len) == words.size())
*dst++ = ':';
*dst++ = '\0';

View File

@ -143,7 +143,7 @@ void CompressionCodecDelta::doDecompressData(const char * source, UInt32 source_
UInt8 bytes_to_skip = uncompressed_size % bytes_size;
UInt32 output_size = uncompressed_size - bytes_to_skip;
if (UInt32(2 + bytes_to_skip) > source_size)
if (static_cast<UInt32>(2 + bytes_to_skip) > source_size)
throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);
memcpy(dest, &source[2], bytes_to_skip);
@ -186,7 +186,7 @@ UInt8 getDeltaBytesSize(const IDataType * column_type)
void registerCodecDelta(CompressionCodecFactory & factory)
{
UInt8 method_code = UInt8(CompressionMethodByte::Delta);
UInt8 method_code = static_cast<UInt8>(CompressionMethodByte::Delta);
factory.registerCompressionCodecWithType("Delta", method_code, [&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr
{
UInt8 delta_bytes_size = 0;

View File

@ -520,7 +520,7 @@ void CompressionCodecDoubleDelta::doDecompressData(const char * source, UInt32 s
UInt8 bytes_to_skip = uncompressed_size % bytes_size;
UInt32 output_size = uncompressed_size - bytes_to_skip;
if (UInt32(2 + bytes_to_skip) > source_size)
if (static_cast<UInt32>(2 + bytes_to_skip) > source_size)
throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);
memcpy(dest, &source[2], bytes_to_skip);
@ -544,7 +544,7 @@ void CompressionCodecDoubleDelta::doDecompressData(const char * source, UInt32 s
void registerCodecDoubleDelta(CompressionCodecFactory & factory)
{
UInt8 method_code = UInt8(CompressionMethodByte::DoubleDelta);
UInt8 method_code = static_cast<UInt8>(CompressionMethodByte::DoubleDelta);
factory.registerCompressionCodecWithType("DoubleDelta", method_code,
[&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr
{

View File

@ -50,11 +50,11 @@ uint8_t getMethodCode(EncryptionMethod Method)
{
if (Method == AES_128_GCM_SIV)
{
return uint8_t(CompressionMethodByte::AES_128_GCM_SIV);
return static_cast<uint8_t>(CompressionMethodByte::AES_128_GCM_SIV);
}
else if (Method == AES_256_GCM_SIV)
{
return uint8_t(CompressionMethodByte::AES_256_GCM_SIV);
return static_cast<uint8_t>(CompressionMethodByte::AES_256_GCM_SIV);
}
else
{

View File

@ -419,7 +419,7 @@ void CompressionCodecGorilla::doDecompressData(const char * source, UInt32 sourc
UInt8 bytes_to_skip = uncompressed_size % bytes_size;
if (UInt32(2 + bytes_to_skip) > source_size)
if (static_cast<UInt32>(2 + bytes_to_skip) > source_size)
throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);
memcpy(dest, &source[2], bytes_to_skip);
@ -443,7 +443,7 @@ void CompressionCodecGorilla::doDecompressData(const char * source, UInt32 sourc
void registerCodecGorilla(CompressionCodecFactory & factory)
{
UInt8 method_code = UInt8(CompressionMethodByte::Gorilla);
UInt8 method_code = static_cast<UInt8>(CompressionMethodByte::Gorilla);
factory.registerCompressionCodecWithType("Gorilla", method_code,
[&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr
{

View File

@ -112,7 +112,7 @@ MagicNumber serializeTypeId(TypeIndex type_id)
break;
}
throw Exception("Type is not supported by T64 codec: " + toString(UInt32(type_id)), ErrorCodes::LOGICAL_ERROR);
throw Exception("Type is not supported by T64 codec: " + toString(static_cast<UInt32>(type_id)), ErrorCodes::LOGICAL_ERROR);
}
TypeIndex deserializeTypeId(uint8_t serialized_type_id)
@ -137,7 +137,7 @@ TypeIndex deserializeTypeId(uint8_t serialized_type_id)
case MagicNumber::Decimal64: return TypeIndex::Decimal64;
}
throw Exception("Bad magic number in T64 codec: " + toString(UInt32(serialized_type_id)), ErrorCodes::LOGICAL_ERROR);
throw Exception("Bad magic number in T64 codec: " + toString(static_cast<UInt32>(serialized_type_id)), ErrorCodes::LOGICAL_ERROR);
}
@ -284,22 +284,22 @@ void reverseTransposeBytes(const UInt64 * matrix, UInt32 col, T & value)
if constexpr (sizeof(T) > 4)
{
value |= UInt64(matrix8[64 * 7 + col]) << (8 * 7);
value |= UInt64(matrix8[64 * 6 + col]) << (8 * 6);
value |= UInt64(matrix8[64 * 5 + col]) << (8 * 5);
value |= UInt64(matrix8[64 * 4 + col]) << (8 * 4);
value |= static_cast<UInt64>(matrix8[64 * 7 + col]) << (8 * 7);
value |= static_cast<UInt64>(matrix8[64 * 6 + col]) << (8 * 6);
value |= static_cast<UInt64>(matrix8[64 * 5 + col]) << (8 * 5);
value |= static_cast<UInt64>(matrix8[64 * 4 + col]) << (8 * 4);
}
if constexpr (sizeof(T) > 2)
{
value |= UInt32(matrix8[64 * 3 + col]) << (8 * 3);
value |= UInt32(matrix8[64 * 2 + col]) << (8 * 2);
value |= static_cast<UInt32>(matrix8[64 * 3 + col]) << (8 * 3);
value |= static_cast<UInt32>(matrix8[64 * 2 + col]) << (8 * 2);
}
if constexpr (sizeof(T) > 1)
value |= UInt32(matrix8[64 * 1 + col]) << (8 * 1);
value |= static_cast<UInt32>(matrix8[64 * 1 + col]) << (8 * 1);
value |= UInt32(matrix8[col]);
value |= static_cast<UInt32>(matrix8[col]);
}
@ -422,12 +422,12 @@ UInt32 getValuableBitsNumber(Int64 min, Int64 max)
if (min < 0 && max >= 0)
{
if (min + max >= 0)
return getValuableBitsNumber(0ull, UInt64(max)) + 1;
return getValuableBitsNumber(0ull, static_cast<UInt64>(max)) + 1;
else
return getValuableBitsNumber(0ull, UInt64(~min)) + 1;
return getValuableBitsNumber(0ull, static_cast<UInt64>(~min)) + 1;
}
else
return getValuableBitsNumber(UInt64(min), UInt64(max));
return getValuableBitsNumber(static_cast<UInt64>(min), static_cast<UInt64>(max));
}
@ -559,14 +559,14 @@ void decompressData(const char * src, UInt32 bytes_size, char * dst, UInt32 unco
T upper_max [[maybe_unused]] = 0;
T sign_bit [[maybe_unused]] = 0;
if (num_bits < 64)
upper_min = UInt64(min) >> num_bits << num_bits;
upper_min = static_cast<UInt64>(min) >> num_bits << num_bits;
if constexpr (is_signed_v<T>)
{
if (min < 0 && max >= 0 && num_bits < 64)
{
sign_bit = 1ull << (num_bits - 1);
upper_max = UInt64(max) >> num_bits << num_bits;
upper_max = static_cast<UInt64>(max) >> num_bits << num_bits;
}
}
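
In the reverseTransposeBytes hunk above, each byte is widened to UInt64 before the shift. The cast is not cosmetic: a uint8_t operand is promoted to int, and shifting an int by 32 or more bits is undefined behavior. A self-contained sketch of the pattern:

#include <cstdint>

// Reassemble a 64-bit value from 8 bytes, lowest byte first. Widening
// each byte *before* the shift keeps the shift within the 64-bit type.
uint64_t fromBytes(const uint8_t bytes[8])
{
    uint64_t value = 0;
    for (int i = 0; i < 8; ++i)
        value |= static_cast<uint64_t>(bytes[i]) << (8 * i);
    return value;
}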

View File

@ -109,7 +109,7 @@ CompressionCodecZSTD::CompressionCodecZSTD(int level_) : level(level_), enable_l
void registerCodecZSTD(CompressionCodecFactory & factory)
{
UInt8 method_code = UInt8(CompressionMethodByte::ZSTD);
UInt8 method_code = static_cast<UInt8>(CompressionMethodByte::ZSTD);
factory.registerCompressionCodec("ZSTD", method_code, [&](const ASTPtr & arguments) -> CompressionCodecPtr {
int level = CompressionCodecZSTD::ZSTD_DEFAULT_LEVEL;
if (arguments && !arguments->children.empty())

View File

@ -628,12 +628,12 @@ void StreamStatistics::print() const
{
std::cerr
<< "Num tokens: " << num_tokens
<< ", Avg literal length: " << double(sum_literal_lengths) / num_tokens
<< ", Avg match length: " << double(sum_match_lengths) / num_tokens
<< ", Avg match offset: " << double(sum_match_offsets) / num_tokens
<< ", Offset < 8 ratio: " << double(count_match_offset_less_8) / num_tokens
<< ", Offset < 16 ratio: " << double(count_match_offset_less_16) / num_tokens
<< ", Match replicate itself: " << double(count_match_replicate_itself) / num_tokens
<< ", Avg literal length: " << static_cast<double>(sum_literal_lengths) / num_tokens
<< ", Avg match length: " << static_cast<double>(sum_match_lengths) / num_tokens
<< ", Avg match offset: " << static_cast<double>(sum_match_offsets) / num_tokens
<< ", Offset < 8 ratio: " << static_cast<double>(count_match_offset_less_8) / num_tokens
<< ", Offset < 16 ratio: " << static_cast<double>(count_match_offset_less_16) / num_tokens
<< ", Match replicate itself: " << static_cast<double>(count_match_replicate_itself) / num_tokens
<< "\n";
}

View File

@ -99,20 +99,20 @@ void KeeperConfigurationAndSettings::dump(WriteBufferFromOwnString & buf) const
writeText("max_requests_batch_size=", buf);
write_int(coordination_settings->max_requests_batch_size);
writeText("min_session_timeout_ms=", buf);
write_int(uint64_t(coordination_settings->min_session_timeout_ms));
write_int(static_cast<uint64_t>(coordination_settings->min_session_timeout_ms));
writeText("session_timeout_ms=", buf);
write_int(uint64_t(coordination_settings->session_timeout_ms));
write_int(static_cast<uint64_t>(coordination_settings->session_timeout_ms));
writeText("operation_timeout_ms=", buf);
write_int(uint64_t(coordination_settings->operation_timeout_ms));
write_int(static_cast<uint64_t>(coordination_settings->operation_timeout_ms));
writeText("dead_session_check_period_ms=", buf);
write_int(uint64_t(coordination_settings->dead_session_check_period_ms));
write_int(static_cast<uint64_t>(coordination_settings->dead_session_check_period_ms));
writeText("heart_beat_interval_ms=", buf);
write_int(uint64_t(coordination_settings->heart_beat_interval_ms));
write_int(static_cast<uint64_t>(coordination_settings->heart_beat_interval_ms));
writeText("election_timeout_lower_bound_ms=", buf);
write_int(uint64_t(coordination_settings->election_timeout_lower_bound_ms));
write_int(static_cast<uint64_t>(coordination_settings->election_timeout_lower_bound_ms));
writeText("election_timeout_upper_bound_ms=", buf);
write_int(uint64_t(coordination_settings->election_timeout_upper_bound_ms));
write_int(static_cast<uint64_t>(coordination_settings->election_timeout_upper_bound_ms));
writeText("reserved_log_items=", buf);
write_int(coordination_settings->reserved_log_items);
@ -122,9 +122,9 @@ void KeeperConfigurationAndSettings::dump(WriteBufferFromOwnString & buf) const
writeText("auto_forwarding=", buf);
write_bool(coordination_settings->auto_forwarding);
writeText("shutdown_timeout=", buf);
write_int(uint64_t(coordination_settings->shutdown_timeout));
write_int(static_cast<uint64_t>(coordination_settings->shutdown_timeout));
writeText("startup_timeout=", buf);
write_int(uint64_t(coordination_settings->startup_timeout));
write_int(static_cast<uint64_t>(coordination_settings->startup_timeout));
writeText("raft_logs_level=", buf);
writeText(coordination_settings->raft_logs_level.toString(), buf);

View File

@ -221,7 +221,7 @@ namespace MySQLReplication
case MYSQL_TYPE_BLOB:
case MYSQL_TYPE_GEOMETRY:
{
column_meta.emplace_back(UInt16(meta[pos]));
column_meta.emplace_back(static_cast<UInt16>(meta[pos]));
pos += 1;
break;
}
@ -229,9 +229,9 @@ namespace MySQLReplication
case MYSQL_TYPE_STRING:
{
/// Big-Endian
auto b0 = UInt16(meta[pos] << 8);
auto b1 = UInt8(meta[pos + 1]);
column_meta.emplace_back(UInt16(b0 + b1));
auto b0 = static_cast<UInt16>(meta[pos] << 8);
auto b1 = static_cast<UInt8>(meta[pos + 1]);
column_meta.emplace_back(static_cast<UInt16>(b0 + b1));
pos += 2;
break;
}
@ -239,9 +239,9 @@ namespace MySQLReplication
case MYSQL_TYPE_VARCHAR:
case MYSQL_TYPE_VAR_STRING: {
/// Little-Endian
auto b0 = UInt8(meta[pos]);
auto b1 = UInt16(meta[pos + 1] << 8);
column_meta.emplace_back(UInt16(b0 + b1));
auto b0 = static_cast<UInt8>(meta[pos]);
auto b1 = static_cast<UInt16>(meta[pos + 1] << 8);
column_meta.emplace_back(static_cast<UInt16>(b0 + b1));
pos += 2;
break;
}
@ -543,7 +543,7 @@ namespace MySQLReplication
);
if (!meta)
row.push_back(Field{UInt32(date_time)});
row.push_back(Field{static_cast<UInt32>(date_time)});
else
{
DB::DecimalUtils::DecimalComponents<DateTime64> components{
@ -603,7 +603,7 @@ namespace MySQLReplication
throw Exception("Attempt to read after EOF.", ErrorCodes::ATTEMPT_TO_READ_AFTER_EOF);
if ((*payload.position() & 0x80) == 0)
mask = UInt32(-1);
mask = static_cast<UInt32>(-1);
*payload.position() ^= 0x80;

View File

@ -519,7 +519,7 @@ class IColumn;
M(Bool, database_replicated_always_detach_permanently, false, "Execute DETACH TABLE as DETACH TABLE PERMANENTLY if database engine is Replicated", 0) \
M(Bool, database_replicated_allow_only_replicated_engine, false, "Allow to create only Replicated tables in database with engine Replicated", 0) \
M(DistributedDDLOutputMode, distributed_ddl_output_mode, DistributedDDLOutputMode::THROW, "Format of distributed DDL query result", 0) \
M(UInt64, distributed_ddl_entry_format_version, 1, "Version of DDL entry to write into ZooKeeper", 0) \
M(UInt64, distributed_ddl_entry_format_version, 2, "Version of DDL entry to write into ZooKeeper", 0) \
\
M(UInt64, external_storage_max_read_rows, 0, "Limit maximum number of rows when table with external engine should flush history data. Now supported only for MySQL table engine, database engine, dictionary and MaterializedMySQL. If equal to 0, this setting is disabled", 0) \
M(UInt64, external_storage_max_read_bytes, 0, "Limit maximum number of bytes when table with external engine should flush history data. Now supported only for MySQL table engine, database engine, dictionary and MaterializedMySQL. If equal to 0, this setting is disabled", 0) \

View File

@ -142,23 +142,30 @@ void SerializationMap::deserializeTextImpl(IColumn & column, ReadBuffer & istr,
break;
reader(istr, key, key_column);
++size;
skipWhitespaceIfAny(istr);
assertChar(':', istr);
++size;
skipWhitespaceIfAny(istr);
reader(istr, value, value_column);
skipWhitespaceIfAny(istr);
}
offsets.push_back(offsets.back() + size);
assertChar('}', istr);
}
catch (...)
{
if (size)
{
nested_tuple.getColumnPtr(0) = key_column.cut(0, offsets.back());
nested_tuple.getColumnPtr(1) = value_column.cut(0, offsets.back());
}
throw;
}
offsets.push_back(offsets.back() + size);
}
void SerializationMap::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
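
The restructured deserializeTextImpl above commits the new offset only after the whole map parses, and on failure cuts the key and value columns back to the last committed offset. The same rollback pattern in a self-contained form (standard containers stand in for ClickHouse columns):

#include <stdexcept>
#include <string>
#include <vector>

// Parse "key:value" into dest; on any failure, restore dest to its
// previous size so a half-parsed entry never leaks out.
void parseEntry(std::vector<std::string> & dest, const std::string & raw)
{
    const size_t committed = dest.size();
    try
    {
        const size_t sep = raw.find(':');
        if (sep == std::string::npos)
            throw std::runtime_error("missing ':' separator");
        dest.push_back(raw.substr(0, sep));   // key
        dest.push_back(raw.substr(sep + 1));  // value
    }
    catch (...)
    {
        dest.resize(committed);  // roll back the partial insert
        throw;
    }
}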

View File

@ -301,7 +301,7 @@ ColumnUInt8::Ptr IPAddressDictionary::hasKeys(const Columns & key_columns, const
uint8_t addrv6_buf[IPV6_BINARY_LENGTH];
for (const auto i : collections::range(0, rows))
{
auto addrv4 = UInt32(first_column->get64(i));
auto addrv4 = static_cast<UInt32>(first_column->get64(i));
auto found = tryLookupIPv4(addrv4, addrv6_buf);
out[i] = (found != ipNotFound());
keys_found += out[i];
@ -387,7 +387,7 @@ void IPAddressDictionary::loadData()
setAttributeValue(attribute, attribute_column[row]);
}
const auto [addr, prefix] = parseIPFromString(std::string_view(key_column_ptr->getDataAt(row)));
const auto [addr, prefix] = parseIPFromString(std::string_view{key_column_ptr->getDataAt(row)});
has_ipv6 = has_ipv6 || (addr.family() == Poco::Net::IPAddress::IPv6);
size_t row_number = ip_records.size();
@ -716,7 +716,7 @@ void IPAddressDictionary::getItemsImpl(
for (const auto i : collections::range(0, rows))
{
// addrv4 has native endianness
auto addrv4 = UInt32(first_column->get64(i));
auto addrv4 = static_cast<UInt32>(first_column->get64(i));
auto found = tryLookupIPv4(addrv4, addrv6_buf);
if (found != ipNotFound())
{

View File

@ -181,7 +181,7 @@ Pipe MongoDBDictionarySource::loadIds(const std::vector<UInt64> & ids)
Poco::MongoDB::Array::Ptr ids_array(new Poco::MongoDB::Array);
for (const UInt64 id : ids)
ids_array->add(DB::toString(id), Int32(id));
ids_array->add(DB::toString(id), static_cast<Int32>(id));
cursor->query().selector().addNewDocument(dict_struct.id->name).add("$in", ids_array);
@ -218,7 +218,7 @@ Pipe MongoDBDictionarySource::loadKeys(const Columns & key_columns, const std::v
case AttributeUnderlyingType::Int32:
case AttributeUnderlyingType::Int64:
{
key.add(key_attribute.name, Int32(key_columns[attribute_index]->get64(row_idx)));
key.add(key_attribute.name, static_cast<Int32>(key_columns[attribute_index]->get64(row_idx)));
break;
}
case AttributeUnderlyingType::Float32:

View File

@ -93,7 +93,7 @@ private:
if (!first)
writeChar(',', out);
first = false;
writeIntText(T(bit), out);
writeIntText(static_cast<T>(bit), out);
}
}

View File

@ -651,18 +651,18 @@ struct ParseMACImpl
*/
static UInt64 parse(const char * pos)
{
return (UInt64(unhex(pos[0])) << 44)
| (UInt64(unhex(pos[1])) << 40)
| (UInt64(unhex(pos[3])) << 36)
| (UInt64(unhex(pos[4])) << 32)
| (UInt64(unhex(pos[6])) << 28)
| (UInt64(unhex(pos[7])) << 24)
| (UInt64(unhex(pos[9])) << 20)
| (UInt64(unhex(pos[10])) << 16)
| (UInt64(unhex(pos[12])) << 12)
| (UInt64(unhex(pos[13])) << 8)
| (UInt64(unhex(pos[15])) << 4)
| (UInt64(unhex(pos[16])));
return (static_cast<UInt64>(unhex(pos[0])) << 44)
| (static_cast<UInt64>(unhex(pos[1])) << 40)
| (static_cast<UInt64>(unhex(pos[3])) << 36)
| (static_cast<UInt64>(unhex(pos[4])) << 32)
| (static_cast<UInt64>(unhex(pos[6])) << 28)
| (static_cast<UInt64>(unhex(pos[7])) << 24)
| (static_cast<UInt64>(unhex(pos[9])) << 20)
| (static_cast<UInt64>(unhex(pos[10])) << 16)
| (static_cast<UInt64>(unhex(pos[12])) << 12)
| (static_cast<UInt64>(unhex(pos[13])) << 8)
| (static_cast<UInt64>(unhex(pos[15])) << 4)
| (static_cast<UInt64>(unhex(pos[16])));
}
static constexpr auto name = "MACStringToNum";
@ -678,12 +678,12 @@ struct ParseOUIImpl
*/
static UInt64 parse(const char * pos)
{
return (UInt64(unhex(pos[0])) << 20)
| (UInt64(unhex(pos[1])) << 16)
| (UInt64(unhex(pos[3])) << 12)
| (UInt64(unhex(pos[4])) << 8)
| (UInt64(unhex(pos[6])) << 4)
| (UInt64(unhex(pos[7])));
return (static_cast<UInt64>(unhex(pos[0])) << 20)
| (static_cast<UInt64>(unhex(pos[1])) << 16)
| (static_cast<UInt64>(unhex(pos[3])) << 12)
| (static_cast<UInt64>(unhex(pos[4])) << 8)
| (static_cast<UInt64>(unhex(pos[6])) << 4)
| (static_cast<UInt64>(unhex(pos[7])));
}
static constexpr auto name = "MACStringToOUI";
@ -895,9 +895,9 @@ private:
if (bits_to_keep >= 8 * sizeof(UInt32))
return { src, src };
if (bits_to_keep == 0)
return { UInt32(0), UInt32(-1) };
return { static_cast<UInt32>(0), static_cast<UInt32>(-1) };
UInt32 mask = UInt32(-1) << (8 * sizeof(UInt32) - bits_to_keep);
UInt32 mask = static_cast<UInt32>(-1) << (8 * sizeof(UInt32) - bits_to_keep);
UInt32 lower = src & mask;
UInt32 upper = lower | ~mask;
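
static_cast<UInt32>(-1) above is the idiomatic all-ones mask (0xFFFFFFFF), and the bits_to_keep == 0 branch exists because shifting a 32-bit value by 32 is undefined behavior. A sketch of the same mask construction:

#include <cstdint>

// Build a mask with the top `bits` bits set, as in the CIDR helper above.
uint32_t makeMask(unsigned bits)
{
    if (bits == 0)
        return 0;                          // << 32 would be UB, so special-case
    if (bits >= 32)
        return static_cast<uint32_t>(-1);  // all ones
    return static_cast<uint32_t>(-1) << (32 - bits);
}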

View File

@ -565,7 +565,7 @@ ColumnPtr FunctionAnyArityLogical<Impl, Name>::executeShortCircuit(ColumnsWithTy
/// The result is !mask_n.
bool inverted = Name::name != NameAnd::name;
UInt8 null_value = UInt8(Name::name == NameAnd::name);
UInt8 null_value = static_cast<UInt8>(Name::name == NameAnd::name);
IColumn::Filter mask(arguments[0].column->size(), 1);
/// If result is nullable, we need to create null bytemap of the resulting column.

View File

@ -271,9 +271,9 @@ struct NgramDistanceImpl
size_t first_size = dispatchSearcher(calculateHaystackStatsAndMetric<false>, data.data(), data_size, common_stats.get(), distance, nullptr);
/// For !symmetric version we should not use first_size.
if constexpr (symmetric)
res = distance * 1.f / std::max(first_size + second_size, size_t(1));
res = distance * 1.f / std::max(first_size + second_size, static_cast<size_t>(1));
else
res = 1.f - distance * 1.f / std::max(second_size, size_t(1));
res = 1.f - distance * 1.f / std::max(second_size, static_cast<size_t>(1));
}
else
{
@ -339,9 +339,9 @@ struct NgramDistanceImpl
/// For !symmetric version we should not use haystack_stats_size.
if constexpr (symmetric)
res[i] = distance * 1.f / std::max(haystack_stats_size + needle_stats_size, size_t(1));
res[i] = distance * 1.f / std::max(haystack_stats_size + needle_stats_size, static_cast<size_t>(1));
else
res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, size_t(1));
res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, static_cast<size_t>(1));
}
else
{
@ -410,7 +410,7 @@ struct NgramDistanceImpl
for (size_t j = 0; j < needle_stats_size; ++j)
--common_stats[needle_ngram_storage[j]];
res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, size_t(1));
res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, static_cast<size_t>(1));
}
else
{
@ -457,9 +457,9 @@ struct NgramDistanceImpl
ngram_storage.get());
/// For !symmetric version we should not use haystack_stats_size.
if constexpr (symmetric)
res[i] = distance * 1.f / std::max(haystack_stats_size + needle_stats_size, size_t(1));
res[i] = distance * 1.f / std::max(haystack_stats_size + needle_stats_size, static_cast<size_t>(1));
else
res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, size_t(1));
res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, static_cast<size_t>(1));
}
else
{

View File

@ -23,7 +23,7 @@ public:
if (txn)
res = {txn->tid.start_csn, txn->tid.local_tid, txn->tid.host_id};
else
res = {UInt64(0), UInt64(0), UUIDHelpers::Nil};
res = {static_cast<UInt64>(0), static_cast<UInt64>(0), UUIDHelpers::Nil};
return res;
}

View File

@ -306,7 +306,7 @@ GeohashesInBoxPreparedArgs geohashesInBoxPrepare(
return GeohashesInBoxPreparedArgs
{
std::max<UInt64>(1, UInt64(lon_items) * lat_items),
std::max<UInt64>(1, static_cast<UInt64>(lon_items) * lat_items),
lon_items,
lat_items,
lon_min,

View File

@ -113,7 +113,7 @@ private:
return default_port;
port = (port * 10) + (*p - '0');
if (port < 0 || port > UInt16(-1))
if (port < 0 || port > static_cast<UInt16>(-1))
return default_port;
++p;
}

View File

@ -94,7 +94,7 @@ private:
src_offset = src_offsets[i];
dst_offset += src_length;
if (src_length > 1 && dst_data[dst_offset - 2] != UInt8(trailing_char_str.front()))
if (src_length > 1 && dst_data[dst_offset - 2] != static_cast<UInt8>(trailing_char_str.front()))
{
dst_data[dst_offset - 1] = trailing_char_str.front();
dst_data[dst_offset] = 0;

View File

@ -132,7 +132,7 @@ public:
if (count_positive == 0 || count_positive == size)
return std::numeric_limits<ResultType>::quiet_NaN();
return ResultType(area) / count_positive / (size - count_positive);
return static_cast<ResultType>(area) / count_positive / (size - count_positive);
}
};

View File

@ -183,7 +183,7 @@ struct ArrayAggregateImpl
{
size_t array_size = offsets[i] - pos;
/// Just multiply the value by array size.
res[i] = x * ResultType(array_size);
res[i] = x * static_cast<ResultType>(array_size);
}
else if constexpr (aggregate_operation == AggregateOperation::min ||
aggregate_operation == AggregateOperation::max)

View File

@ -152,7 +152,7 @@ public:
void update()
{
sink_null_map[index] = bool(src_null_map);
sink_null_map[index] = static_cast<bool>(src_null_map);
++index;
}
@ -492,7 +492,7 @@ ColumnPtr FunctionArrayElement::executeNumberConst(
/// arr[-2] is the element at offset 1 from the last and so on.
ArrayElementNumImpl<DataType>::template vectorConst<true>(
col_nested->getData(), col_array->getOffsets(), -(UInt64(safeGet<Int64>(index)) + 1), col_res->getData(), builder);
col_nested->getData(), col_array->getOffsets(), -(static_cast<UInt64>(safeGet<Int64>(index)) + 1), col_res->getData(), builder);
}
else
throw Exception("Illegal type of array index", ErrorCodes::LOGICAL_ERROR);
@ -605,7 +605,7 @@ ColumnPtr FunctionArrayElement::executeGenericConst(
col_nested, col_array->getOffsets(), safeGet<UInt64>(index) - 1, *col_res, builder);
else if (index.getType() == Field::Types::Int64)
ArrayElementGenericImpl::vectorConst<true>(
col_nested, col_array->getOffsets(), -(UInt64(safeGet<Int64>(index) + 1)), *col_res, builder);
col_nested, col_array->getOffsets(), -(static_cast<UInt64>(safeGet<Int64>(index) + 1)), *col_res, builder);
else
throw Exception("Illegal type of array index", ErrorCodes::LOGICAL_ERROR);

View File

@ -112,7 +112,7 @@ bool FunctionArrayReverse::executeGeneric(const IColumn & src_data, const Column
{
ssize_t src_index = src_array_offsets[i] - 1;
while (src_index >= ssize_t(src_prev_offset))
while (src_index >= static_cast<ssize_t>(src_prev_offset))
{
res_data.insertFrom(src_data, src_index);
--src_index;
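
The cast in the loop condition above matters: in a mixed comparison the signed ssize_t operand would be converted to unsigned, so a negative src_index would wrap to a huge value and the loop would keep running. Illustrative sketch:

#include <cstddef>

bool atOrAfter(std::ptrdiff_t index, std::size_t lower_bound)
{
    // Wrong: if index == -1 it converts to SIZE_MAX, so this returns true.
    // return index >= lower_bound;

    // Right: compare in the signed domain (safe while lower_bound fits
    // in ptrdiff_t, which holds for realistic buffer offsets).
    return index >= static_cast<std::ptrdiff_t>(lower_bound);
}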

View File

@ -72,7 +72,7 @@ struct BitShiftLeftImpl
if (shift_left_bits)
{
/// The high b bits of the byte to the right are moved into the low b bits of this byte
*out = UInt8(UInt8(*(op_pointer) >> (8 - shift_left_bits)) | previous);
*out = static_cast<UInt8>(static_cast<UInt8>(*(op_pointer) >> (8 - shift_left_bits)) | previous);
previous = *op_pointer << shift_left_bits;
}
else
@ -131,7 +131,7 @@ struct BitShiftLeftImpl
if (op_pointer + 1 < end)
{
/// The high b bits of the byte to the right are moved into the low b bits of this byte
*out = UInt8(UInt8(*(op_pointer + 1) >> (8 - shift_left_bits)) | *out);
*out = static_cast<UInt8>(static_cast<UInt8>(*(op_pointer + 1) >> (8 - shift_left_bits)) | *out);
}
op_pointer++;
out++;

View File

@ -41,7 +41,7 @@ struct BitShiftRightImpl
if (op_pointer - 1 >= begin)
{
/// The low b bits of the byte to the left are moved into the high b bits of this byte
*out = UInt8(UInt8(*(op_pointer - 1) << (8 - shift_right_bits)) | *out);
*out = static_cast<UInt8>(static_cast<UInt8>(*(op_pointer - 1) << (8 - shift_right_bits)) | *out);
}
}
}

View File

@ -235,8 +235,8 @@ private:
template <typename TransformX, typename TransformY, typename T1, typename T2>
Int64 calculate(const TransformX & transform_x, const TransformY & transform_y, T1 x, T2 y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y) const
{
return Int64(transform_y.execute(y, timezone_y))
- Int64(transform_x.execute(x, timezone_x));
return static_cast<Int64>(transform_y.execute(y, timezone_y))
- static_cast<Int64>(transform_x.execute(x, timezone_x));
}
template <typename T>

View File

@ -150,7 +150,7 @@ ColumnPtr FunctionHasColumnInTable::executeImpl(const ColumnsWithTypeAndName & a
has_column = remote_columns.hasPhysical(column_name);
}
return DataTypeUInt8().createColumnConst(input_rows_count, Field{UInt64(has_column)});
return DataTypeUInt8().createColumnConst(input_rows_count, Field{static_cast<UInt64>(has_column)});
}
}

View File

@ -15,7 +15,7 @@ inline int32_t JumpConsistentHash(uint64_t key, int32_t num_buckets)
{
b = j;
key = key * 2862933555777941757ULL + 1;
j = static_cast<int64_t>((b + 1) * (double(1LL << 31) / double((key >> 33) + 1)));
j = static_cast<int64_t>((b + 1) * (static_cast<double>(1LL << 31) / static_cast<double>((key >> 33) + 1)));
}
return static_cast<int32_t>(b);
}
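
For context, this is Lamping & Veach's jump consistent hash: it maps a 64-bit key to one of num_buckets so that growing the bucket count relocates only about 1/num_buckets of the keys, and the double casts keep the scale-factor computation in floating point. A runnable usage sketch:

#include <cstdint>
#include <cstdio>

// Self-contained copy of the loop above, for illustration only.
int32_t jumpHash(uint64_t key, int32_t num_buckets)
{
    int64_t b = -1;
    int64_t j = 0;
    while (j < num_buckets)
    {
        b = j;
        key = key * 2862933555777941757ULL + 1;
        j = static_cast<int64_t>(
            (b + 1) * (static_cast<double>(1LL << 31) / static_cast<double>((key >> 33) + 1)));
    }
    return static_cast<int32_t>(b);
}

int main()
{
    // Most keys keep their bucket when the cluster grows from 10 to 11.
    for (uint64_t key = 1; key <= 5; ++key)
        std::printf("key %llu: %d -> %d\n",
                    static_cast<unsigned long long>(key), jumpHash(key, 10), jumpHash(key, 11));
    return 0;
}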

View File

@ -135,7 +135,7 @@ public:
}
if (size <= 0)
return;
if (size > Int64(input_rows_count))
if (size > static_cast<Int64>(input_rows_count))
size = input_rows_count;
if (!src)
@ -163,14 +163,14 @@ public:
}
else if (offset > 0)
{
insert_range_from(source_is_constant, source_column_casted, offset, Int64(input_rows_count) - offset);
insert_range_from(default_is_constant, default_column_casted, Int64(input_rows_count) - offset, offset);
insert_range_from(source_is_constant, source_column_casted, offset, static_cast<Int64>(input_rows_count) - offset);
insert_range_from(default_is_constant, default_column_casted, static_cast<Int64>(input_rows_count) - offset, offset);
return result_column;
}
else
{
insert_range_from(default_is_constant, default_column_casted, 0, -offset);
insert_range_from(source_is_constant, source_column_casted, 0, Int64(input_rows_count) + offset);
insert_range_from(source_is_constant, source_column_casted, 0, static_cast<Int64>(input_rows_count) + offset);
return result_column;
}
}
@ -188,7 +188,7 @@ public:
Int64 src_idx = row + offset;
if (src_idx >= 0 && src_idx < Int64(input_rows_count))
if (src_idx >= 0 && src_idx < static_cast<Int64>(input_rows_count))
result_column->insertFrom(*source_column_casted, source_is_constant ? 0 : src_idx);
else if (has_defaults)
result_column->insertFrom(*default_column_casted, default_is_constant ? 0 : row);

View File

@ -30,7 +30,7 @@ using FunctionSigmoid = FunctionMathUnary<Impl>;
#else
static double sigmoid(double x)
double sigmoid(double x)
{
return 1.0 / (1.0 + exp(-x));
}
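
Dropping static here is presumably driven by readability-static-definition-in-anonymous-namespace from the check list above: inside an unnamed namespace a definition already has internal linkage, so static is redundant. Whether this sigmoid actually sits in an unnamed namespace is not visible in this hunk; the sketch below assumes it does.

#include <cmath>

namespace
{
    // Internal linkage comes from the unnamed namespace itself;
    // `static` would add nothing, and clang-tidy flags it.
    double sigmoid(double x)  // was: static double sigmoid(double x)
    {
        return 1.0 / (1.0 + std::exp(-x));
    }
}

double sigmoidAtZero() { return sigmoid(0.0); }  // == 0.5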

View File

@ -26,7 +26,7 @@ inline bool HadoopSnappyDecoder::checkBufferLength(int max) const
inline bool HadoopSnappyDecoder::checkAvailIn(size_t avail_in, int min)
{
return avail_in >= size_t(min);
return avail_in >= static_cast<size_t>(min);
}
inline void HadoopSnappyDecoder::copyToBuffer(size_t * avail_in, const char ** next_in)

View File

@ -245,7 +245,7 @@ void PeekableReadBuffer::resizeOwnMemoryIfNecessary(size_t bytes_to_append)
/// Stack memory is not enough, allocate larger buffer.
use_stack_memory = false;
memory.resize(std::max(size_t(DBMS_DEFAULT_BUFFER_SIZE), new_size));
memory.resize(std::max(static_cast<size_t>(DBMS_DEFAULT_BUFFER_SIZE), new_size));
memcpy(memory.data(), stack_memory, sizeof(stack_memory));
if (need_update_checkpoint)
checkpoint.emplace(memory.data() + offset);

View File

@ -16,13 +16,13 @@ off_t ReadBufferFromMemory::seek(off_t offset, int whence)
{
pos = internal_buffer.begin() + offset;
working_buffer = internal_buffer; /// We need to restore `working_buffer` in case the position was at EOF before this seek().
return size_t(pos - internal_buffer.begin());
return static_cast<size_t>(pos - internal_buffer.begin());
}
else
throw Exception(
"Seek position is out of bounds. "
"Offset: "
+ std::to_string(offset) + ", Max: " + std::to_string(size_t(internal_buffer.end() - internal_buffer.begin())),
+ std::to_string(offset) + ", Max: " + std::to_string(static_cast<size_t>(internal_buffer.end() - internal_buffer.begin())),
ErrorCodes::SEEK_POSITION_OUT_OF_BOUND);
}
else if (whence == SEEK_CUR)
@ -32,13 +32,13 @@ off_t ReadBufferFromMemory::seek(off_t offset, int whence)
{
pos = new_pos;
working_buffer = internal_buffer; /// We need to restore `working_buffer` in case the position was at EOF before this seek().
return size_t(pos - internal_buffer.begin());
return static_cast<size_t>(pos - internal_buffer.begin());
}
else
throw Exception(
"Seek position is out of bounds. "
"Offset: "
+ std::to_string(offset) + ", Max: " + std::to_string(size_t(internal_buffer.end() - internal_buffer.begin())),
+ std::to_string(offset) + ", Max: " + std::to_string(static_cast<size_t>(internal_buffer.end() - internal_buffer.begin())),
ErrorCodes::SEEK_POSITION_OUT_OF_BOUND);
}
else

View File

@ -175,7 +175,7 @@ off_t ReadBufferFromS3::seek(off_t offset_, int whence)
if (!restricted_seek)
{
if (!working_buffer.empty()
&& size_t(offset_) >= offset - working_buffer.size()
&& static_cast<size_t>(offset_) >= offset - working_buffer.size()
&& offset_ < offset)
{
pos = working_buffer.end() - (offset - offset_);

View File

@ -368,7 +368,7 @@ void WriteBufferFromS3::completeMultipartUpload()
void WriteBufferFromS3::makeSinglepartUpload()
{
auto size = temporary_buffer->tellp();
bool with_pool = bool(schedule);
bool with_pool = static_cast<bool>(schedule);
LOG_TRACE(log, "Making single part upload. Bucket: {}, Key: {}, Size: {}, WithPool: {}", bucket, key, size, with_pool);
@ -456,7 +456,7 @@ void WriteBufferFromS3::fillPutRequest(Aws::S3::Model::PutObjectRequest & req)
void WriteBufferFromS3::processPutRequest(PutObjectTask & task)
{
auto outcome = client_ptr->PutObject(task.req);
bool with_pool = bool(schedule);
bool with_pool = static_cast<bool>(schedule);
if (outcome.IsSuccess())
LOG_TRACE(log, "Single part upload has completed. Bucket: {}, Key: {}, Object size: {}, WithPool: {}", bucket, key, task.req.GetContentLength(), with_pool);

View File

@ -59,13 +59,13 @@ Field zeroField(const Field & value)
{
switch (value.getType())
{
case Field::Types::UInt64: return UInt64(0);
case Field::Types::Int64: return Int64(0);
case Field::Types::Float64: return Float64(0);
case Field::Types::UInt128: return UInt128(0);
case Field::Types::Int128: return Int128(0);
case Field::Types::UInt256: return UInt256(0);
case Field::Types::Int256: return Int256(0);
case Field::Types::UInt64: return static_cast<UInt64>(0);
case Field::Types::Int64: return static_cast<Int64>(0);
case Field::Types::Float64: return static_cast<Float64>(0);
case Field::Types::UInt128: return static_cast<UInt128>(0);
case Field::Types::Int128: return static_cast<Int128>(0);
case Field::Types::UInt256: return static_cast<UInt256>(0);
case Field::Types::Int256: return static_cast<Int256>(0);
default:
break;
}

View File

@ -24,7 +24,7 @@ void ClientInfo::write(WriteBuffer & out, UInt64 server_protocol_revision) const
if (server_protocol_revision < DBMS_MIN_REVISION_WITH_CLIENT_INFO)
throw Exception("Logical error: method ClientInfo::write is called for unsupported server revision", ErrorCodes::LOGICAL_ERROR);
writeBinary(UInt8(query_kind), out);
writeBinary(static_cast<UInt8>(query_kind), out);
if (empty())
return;
@ -35,7 +35,7 @@ void ClientInfo::write(WriteBuffer & out, UInt64 server_protocol_revision) const
if (server_protocol_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME)
writeBinary(initial_query_start_time_microseconds, out);
writeBinary(UInt8(interface), out);
writeBinary(static_cast<UInt8>(interface), out);
if (interface == Interface::TCP)
{
@ -48,7 +48,7 @@ void ClientInfo::write(WriteBuffer & out, UInt64 server_protocol_revision) const
}
else if (interface == Interface::HTTP)
{
writeBinary(UInt8(http_method), out);
writeBinary(static_cast<UInt8>(http_method), out);
writeBinary(http_user_agent, out);
if (server_protocol_revision >= DBMS_MIN_REVISION_WITH_X_FORWARDED_FOR_IN_CLIENT_INFO)
@ -86,7 +86,7 @@ void ClientInfo::write(WriteBuffer & out, UInt64 server_protocol_revision) const
else
{
// Don't have OpenTelemetry header.
writeBinary(uint8_t(0), out);
writeBinary(static_cast<UInt8>(0), out);
}
}

View File

@ -82,7 +82,7 @@ void collectCrashLog(Int32 signal, UInt64 thread_id, const String & query_id, co
stack_trace.toStringEveryLine([&trace_full](const std::string & line) { trace_full.push_back(line); });
CrashLogElement element{time_t(time / 1000000000), time, signal, thread_id, query_id, trace, trace_full};
CrashLogElement element{static_cast<time_t>(time / 1000000000), time, signal, thread_id, query_id, trace, trace_full};
crash_log_owned->add(element);
}
}

View File

@ -39,7 +39,7 @@ void DNSCacheUpdater::run()
* - automatically throttle when DNS requests take longer;
* - add natural randomization on huge clusters to avoid sending all requests at the same moment from different servers.
*/
task_handle->scheduleAfter(size_t(update_period_seconds) * 1000);
task_handle->scheduleAfter(static_cast<size_t>(update_period_seconds) * 1000);
}
void DNSCacheUpdater::start()

View File

@ -1398,15 +1398,17 @@ bool SelectQueryExpressionAnalyzer::appendLimitBy(ExpressionActionsChain & chain
if (!select_query->limitBy())
return false;
ExpressionActionsChain::Step & step = chain.lastStep(aggregated_columns);
/// Use columns for ORDER BY.
/// They could be required to do ORDER BY on the initiator in case of distributed queries.
ExpressionActionsChain::Step & step = chain.lastStep(chain.getLastStep().getRequiredColumns());
getRootActions(select_query->limitBy(), only_types, step.actions());
NameSet aggregated_names;
for (const auto & column : aggregated_columns)
NameSet existing_column_names;
for (const auto & column : chain.getLastStep().getRequiredColumns())
{
step.addRequiredOutput(column.name);
aggregated_names.insert(column.name);
existing_column_names.insert(column.name);
}
auto & children = select_query->limitBy()->children;
@ -1416,7 +1418,7 @@ bool SelectQueryExpressionAnalyzer::appendLimitBy(ExpressionActionsChain & chain
replaceForPositionalArguments(child, select_query, ASTSelectQuery::Expression::LIMIT_BY);
auto child_name = child->getColumnName();
if (!aggregated_names.contains(child_name))
if (!existing_column_names.contains(child_name))
step.addRequiredOutput(child_name);
}

View File

@ -325,7 +325,6 @@ public:
bool hasConstAggregationKeys() const { return has_const_aggregation_keys; }
const AggregateDescriptions & aggregates() const { return aggregate_descriptions; }
const PreparedSets & getPreparedSets() const { return prepared_sets; }
std::unique_ptr<QueryPlan> getJoinedPlan();
/// Tables that will need to be sent to remote servers for distributed query processing.

View File

@ -48,7 +48,7 @@ BlockIO InterpreterCheckQuery::execute()
{
bool result = std::all_of(check_results.begin(), check_results.end(), [] (const CheckResult & res) { return res.success; });
auto column = ColumnUInt8::create();
column->insertValue(UInt64(result));
column->insertValue(static_cast<UInt64>(result));
block = Block{{std::move(column), std::make_shared<DataTypeUInt8>(), "result"}};
}
else

View File

@ -380,7 +380,7 @@ BlockIO InterpreterInsertQuery::execute()
pipeline.dropTotalsAndExtremes();
if (table->supportsParallelInsert() && settings.max_insert_threads > 1)
out_streams_size = std::min(size_t(settings.max_insert_threads), pipeline.getNumStreams());
out_streams_size = std::min(static_cast<size_t>(settings.max_insert_threads), pipeline.getNumStreams());
pipeline.resize(out_streams_size);

View File

@ -160,17 +160,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
const SelectQueryOptions & options_,
const Names & required_result_column_names_)
: InterpreterSelectQuery(query_ptr_, context_, std::nullopt, nullptr, options_, required_result_column_names_)
{
}
InterpreterSelectQuery::InterpreterSelectQuery(
const ASTPtr & query_ptr_,
ContextPtr context_,
const SelectQueryOptions & options_,
PreparedSets prepared_sets_)
: InterpreterSelectQuery(query_ptr_, context_, std::nullopt, nullptr, options_, {}, {}, std::move(prepared_sets_))
{
}
{}
InterpreterSelectQuery::InterpreterSelectQuery(
const ASTPtr & query_ptr_,
@ -189,6 +179,16 @@ InterpreterSelectQuery::InterpreterSelectQuery(
: InterpreterSelectQuery(query_ptr_, context_, std::nullopt, storage_, options_.copy().noSubquery(), {}, metadata_snapshot_)
{}
InterpreterSelectQuery::InterpreterSelectQuery(
const ASTPtr & query_ptr_,
ContextPtr context_,
const SelectQueryOptions & options_,
SubqueriesForSets subquery_for_sets_,
PreparedSets prepared_sets_)
: InterpreterSelectQuery(
query_ptr_, context_, std::nullopt, nullptr, options_, {}, {}, std::move(subquery_for_sets_), std::move(prepared_sets_))
{}
InterpreterSelectQuery::~InterpreterSelectQuery() = default;
@ -275,6 +275,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
const SelectQueryOptions & options_,
const Names & required_result_column_names,
const StorageMetadataPtr & metadata_snapshot_,
SubqueriesForSets subquery_for_sets_,
PreparedSets prepared_sets_)
/// NOTE: the query almost always should be cloned because it will be modified during analysis.
: IInterpreterUnionOrSelectQuery(options_.modify_inplace ? query_ptr_ : query_ptr_->clone(), context_, options_)
@ -282,6 +283,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
, input_pipe(std::move(input_pipe_))
, log(&Poco::Logger::get("InterpreterSelectQuery"))
, metadata_snapshot(metadata_snapshot_)
, subquery_for_sets(std::move(subquery_for_sets_))
, prepared_sets(std::move(prepared_sets_))
{
checkStackSize();
@ -404,9 +406,6 @@ InterpreterSelectQuery::InterpreterSelectQuery(
if (storage)
view = dynamic_cast<StorageView *>(storage.get());
/// Reuse already built sets for multiple passes of analysis
SubqueriesForSets subquery_for_sets;
auto analyze = [&] (bool try_move_to_prewhere)
{
/// Allow push down and other optimizations for VIEW: replace with subquery and rewrite it.
@ -570,7 +569,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
/// Reuse already built sets for multiple passes of analysis
subquery_for_sets = std::move(query_analyzer->getSubqueriesForSets());
prepared_sets = query_info.sets.empty() ? query_analyzer->getPreparedSets() : query_info.sets;
prepared_sets = std::move(query_analyzer->getPreparedSets());
/// Do not try to move conditions to PREWHERE a second time.
/// Otherwise, we won't be able to fall back from an inefficient PREWHERE to WHERE later.
@ -654,9 +653,14 @@ Block InterpreterSelectQuery::getSampleBlockImpl()
auto & query = getSelectQuery();
query_analyzer->makeSetsForIndex(query.where());
query_analyzer->makeSetsForIndex(query.prewhere());
query_info.sets = query_analyzer->getPreparedSets();
query_info.sets = std::move(query_analyzer->getPreparedSets());
query_info.subquery_for_sets = std::move(query_analyzer->getSubqueriesForSets());
from_stage = storage->getQueryProcessingStage(context, options.to_stage, storage_snapshot, query_info);
/// query_info.sets is used for further set index analysis. Use copy instead of move.
query_analyzer->getPreparedSets() = query_info.sets;
query_analyzer->getSubqueriesForSets() = std::move(query_info.subquery_for_sets);
}
/// Do I need to perform the first part of the pipeline?

View File

@ -67,11 +67,13 @@ public:
const StorageMetadataPtr & metadata_snapshot_ = nullptr,
const SelectQueryOptions & = {});
/// Read data not from the table specified in the query, but from the specified `storage_`.
/// Reuse existing subqueries_for_sets and prepared_sets for another pass of analysis. It's used for projection.
/// TODO: Find a general way of sharing sets among different interpreters, such as subqueries.
InterpreterSelectQuery(
const ASTPtr & query_ptr_,
ContextPtr context_,
const SelectQueryOptions &,
SubqueriesForSets subquery_for_sets_,
PreparedSets prepared_sets_);
~InterpreterSelectQuery() override;
@ -115,6 +117,7 @@ private:
const SelectQueryOptions &,
const Names & required_result_column_names = {},
const StorageMetadataPtr & metadata_snapshot_ = nullptr,
SubqueriesForSets subquery_for_sets_ = {},
PreparedSets prepared_sets_ = {});
ASTSelectQuery & getSelectQuery() { return query_ptr->as<ASTSelectQuery &>(); }
@ -207,6 +210,7 @@ private:
StorageSnapshotPtr storage_snapshot;
/// Reuse already built sets for multiple passes of analysis, possibly across interpreters.
SubqueriesForSets subquery_for_sets;
PreparedSets prepared_sets;
};
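
The constructor chain above threads movable analysis state (subquery_for_sets, prepared_sets) through delegating constructors with defaulted parameters, so a later pass (the projection analysis) can hand back the sets a previous pass built. A minimal standalone sketch of that idiom, with illustrative names rather than the real ClickHouse types:

    #include <map>
    #include <string>
    #include <utility>

    using PreparedSets = std::map<std::string, int>;  // stand-in for the real type

    struct Interpreter
    {
        /// Convenience constructor: start with no pre-built state.
        explicit Interpreter(std::string query) : Interpreter(std::move(query), {}) {}

        /// Reuse state built by a previous analysis pass (e.g. for projections).
        Interpreter(std::string query, PreparedSets sets)
            : query_(std::move(query)), prepared_sets_(std::move(sets)) {}

        std::string query_;
        PreparedSets prepared_sets_;  // moved in and moved out, never copied
    };

The defaulted SubqueriesForSets parameter in the real header plays the same role: existing call sites stay unchanged while the projection path passes its state explicitly.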

View File

@ -104,7 +104,7 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
}
else if (settings.offset)
{
ASTPtr new_limit_offset_ast = std::make_shared<ASTLiteral>(Field(UInt64(settings.offset)));
ASTPtr new_limit_offset_ast = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(settings.offset)));
select_query->setExpression(ASTSelectQuery::Expression::LIMIT_OFFSET, std::move(new_limit_offset_ast));
}
@ -115,15 +115,15 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
UInt64 new_limit_length = 0;
if (settings.offset == 0)
new_limit_length = std::min(limit_length, UInt64(settings.limit));
new_limit_length = std::min(limit_length, static_cast<UInt64>(settings.limit));
else if (settings.offset < limit_length)
new_limit_length = settings.limit ? std::min(UInt64(settings.limit), limit_length - settings.offset) : (limit_length - settings.offset);
new_limit_length = settings.limit ? std::min(static_cast<UInt64>(settings.limit), limit_length - settings.offset) : (limit_length - settings.offset);
limit_length_ast->as<ASTLiteral &>().value = Field(new_limit_length);
}
else if (settings.limit)
{
ASTPtr new_limit_length_ast = std::make_shared<ASTLiteral>(Field(UInt64(settings.limit)));
ASTPtr new_limit_length_ast = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(settings.limit)));
select_query->setExpression(ASTSelectQuery::Expression::LIMIT_LENGTH, std::move(new_limit_length_ast));
}
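
Most hunks in this commit are the same mechanical change: functional-style casts such as UInt64(x) become static_cast<UInt64>(x). A functional cast with a single type name behaves exactly like a C cast, so it can silently reinterpret pointers or strip qualifiers; static_cast permits only well-defined value conversions and is easy to grep for. A small self-contained illustration (not code from this commit):

    #include <cstdint>

    int main()
    {
        const char * p = "x";
        long long big = -1;

        auto a = uint64_t(big);              // functional cast == C cast
        auto b = uint64_t(p);                // compiles: silently reinterprets the pointer
        auto c = static_cast<uint64_t>(big); // fine: a genuine numeric conversion
        // auto d = static_cast<uint64_t>(p); // rejected: would need reinterpret_cast
        return (a == c && b != 0) ? 0 : 1;
    }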

View File

@ -588,7 +588,7 @@ void InterpreterSystemQuery::restartReplicas(ContextMutablePtr system_context)
for (auto & guard : guards)
guard.second = catalog.getDDLGuard(guard.first.database_name, guard.first.table_name);
ThreadPool pool(std::min(size_t(getNumberOfPhysicalCPUCores()), replica_names.size()));
ThreadPool pool(std::min(static_cast<size_t>(getNumberOfPhysicalCPUCores()), replica_names.size()));
for (auto & replica : replica_names)
{

View File

@ -325,7 +325,7 @@ static ASTPtr getPartitionPolicy(const NamesAndTypesList & primary_keys)
return std::make_shared<ASTIdentifier>(column_name);
return makeASTFunction("intDiv", std::make_shared<ASTIdentifier>(column_name),
std::make_shared<ASTLiteral>(UInt64(type_max_size / 1000)));
std::make_shared<ASTLiteral>(static_cast<UInt64>(type_max_size / 1000)));
};
ASTPtr best_partition;
@ -493,7 +493,7 @@ ASTs InterpreterCreateImpl::getRewrittenQueries(
String sign_column_name = getUniqueColumnName(columns_name_and_type, "_sign");
String version_column_name = getUniqueColumnName(columns_name_and_type, "_version");
columns->set(columns->columns, InterpreterCreateQuery::formatColumns(columns_description));
columns->columns->children.emplace_back(create_materialized_column_declaration(sign_column_name, "Int8", UInt64(1)));
columns->columns->children.emplace_back(create_materialized_column_declaration(sign_column_name, "Int8", static_cast<UInt64>(1)));
columns->columns->children.emplace_back(create_materialized_column_declaration(version_column_name, "UInt64", UInt64(1)));
/// Add minmax skipping index for _version column.

View File

@ -71,7 +71,7 @@ static void dumpProfileEvents(ProfileEventsSnapshot const & snapshot, DB::Mutabl
{
size_t i = 0;
columns[i++]->insertData(host_name.data(), host_name.size());
columns[i++]->insert(UInt64(snapshot.current_time));
columns[i++]->insert(static_cast<UInt64>(snapshot.current_time));
columns[i++]->insert(UInt64{snapshot.thread_id});
columns[i++]->insert(Type::INCREMENT);
}
@ -81,8 +81,8 @@ static void dumpMemoryTracker(ProfileEventsSnapshot const & snapshot, DB::Mutabl
{
size_t i = 0;
columns[i++]->insertData(host_name.data(), host_name.size());
columns[i++]->insert(UInt64(snapshot.current_time));
columns[i++]->insert(UInt64{snapshot.thread_id});
columns[i++]->insert(static_cast<UInt64>(snapshot.current_time));
columns[i++]->insert(static_cast<UInt64>(snapshot.thread_id));
columns[i++]->insert(Type::GAUGE);
columns[i++]->insertData(MemoryTracker::USAGE_EVENT_NAME, strlen(MemoryTracker::USAGE_EVENT_NAME));

View File

@ -279,7 +279,7 @@ void QueryLogElement::appendClientInfo(const ClientInfo & client_info, MutableCo
columns[i++]->insert(client_info.initial_query_start_time);
columns[i++]->insert(client_info.initial_query_start_time_microseconds);
columns[i++]->insert(UInt64(client_info.interface));
columns[i++]->insert(static_cast<UInt64>(client_info.interface));
columns[i++]->insert(static_cast<UInt64>(client_info.is_secure));
columns[i++]->insert(client_info.os_user);
@ -290,7 +290,7 @@ void QueryLogElement::appendClientInfo(const ClientInfo & client_info, MutableCo
columns[i++]->insert(client_info.client_version_minor);
columns[i++]->insert(client_info.client_version_patch);
columns[i++]->insert(UInt64(client_info.http_method));
columns[i++]->insert(static_cast<UInt64>(client_info.http_method));
columns[i++]->insert(client_info.http_user_agent);
columns[i++]->insert(client_info.http_referer);
columns[i++]->insert(client_info.forwarded_for);

View File

@ -49,7 +49,7 @@ void ThreadStatus::applyQuerySettings()
initQueryProfiler();
untracked_memory_limit = settings.max_untracked_memory;
if (settings.memory_profiler_step && settings.memory_profiler_step < UInt64(untracked_memory_limit))
if (settings.memory_profiler_step && settings.memory_profiler_step < static_cast<UInt64>(untracked_memory_limit))
untracked_memory_limit = settings.memory_profiler_step;
#if defined(OS_LINUX)

View File

@ -84,7 +84,7 @@ void TraceCollector::run()
{
uintptr_t addr = 0;
readPODBinary(addr, in);
trace.emplace_back(UInt64(addr));
trace.emplace_back(static_cast<UInt64>(addr));
}
TraceType trace_type;
@ -103,8 +103,8 @@ void TraceCollector::run()
struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
UInt64 time = UInt64(ts.tv_sec * 1000000000LL + ts.tv_nsec);
UInt64 time_in_microseconds = UInt64((ts.tv_sec * 1000000LL) + (ts.tv_nsec / 1000));
UInt64 time = static_cast<UInt64>(ts.tv_sec * 1000000000LL + ts.tv_nsec);
UInt64 time_in_microseconds = static_cast<UInt64>((ts.tv_sec * 1000000LL) + (ts.tv_nsec / 1000));
TraceLogElement element{time_t(time / 1000000000), time_in_microseconds, time, trace_type, thread_id, query_id, trace, size};
trace_log->add(element);
}
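
The two lines above fold a timespec into single 64-bit nanosecond and microsecond counters. A standalone sketch of the same arithmetic (the multiplication is done in long long before the cast, and tv_sec * 1e9 stays within 64 bits until roughly the year 2262):

    #include <cstdint>
    #include <ctime>

    int main()
    {
        struct timespec ts;
        clock_gettime(CLOCK_REALTIME, &ts);
        auto ns = static_cast<uint64_t>(ts.tv_sec * 1000000000LL + ts.tv_nsec);
        auto us = static_cast<uint64_t>(ts.tv_sec * 1000000LL + ts.tv_nsec / 1000);
        return ns >= us ? 0 : 1;  // trivially true for positive timestamps
    }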

View File

@ -76,7 +76,7 @@ void appendUnusedGroupByColumn(ASTSelectQuery * select_query)
/// Also start unused_column integer must not intersect with ([1, source_columns.size()])
/// might be in positional GROUP BY.
select_query->setExpression(ASTSelectQuery::Expression::GROUP_BY, std::make_shared<ASTExpressionList>());
select_query->groupBy()->children.emplace_back(std::make_shared<ASTLiteral>(Int64(-1)));
select_query->groupBy()->children.emplace_back(std::make_shared<ASTLiteral>(static_cast<Int64>(-1)));
}
/// Eliminates injective function calls and constant expressions from group by statement.

View File

@ -294,7 +294,7 @@ struct ExistsExpressionData
select_query->setExpression(ASTSelectQuery::Expression::SELECT, select_expr_list);
select_query->setExpression(ASTSelectQuery::Expression::TABLES, tables_in_select);
ASTPtr limit_length_ast = std::make_shared<ASTLiteral>(Field(UInt64(1)));
ASTPtr limit_length_ast = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(1)));
select_query->setExpression(ASTSelectQuery::Expression::LIMIT_LENGTH, std::move(limit_length_ast));
auto select_with_union_query = std::make_shared<ASTSelectWithUnionQuery>();
@ -347,7 +347,7 @@ void replaceWithSumCount(String column_name, ASTFunction & func)
/// Rewrite "avg" to sumCount().1 / sumCount().2
auto new_arg1 = makeASTFunction("tupleElement", func_base, std::make_shared<ASTLiteral>(UInt8(1)));
auto new_arg2 = makeASTFunction("CAST",
makeASTFunction("tupleElement", func_base, std::make_shared<ASTLiteral>(UInt8(2))),
makeASTFunction("tupleElement", func_base, std::make_shared<ASTLiteral>(static_cast<UInt8>(2))),
std::make_shared<ASTLiteral>("Float64"));
func.name = "divide";

View File

@ -892,7 +892,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
ReadableSize(elem.read_bytes / elapsed_seconds));
}
if (log_queries && elem.type >= log_queries_min_type && Int64(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
if (log_queries && elem.type >= log_queries_min_type && static_cast<Int64>(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
{
if (auto query_log = context->getQueryLog())
query_log->add(elem);
@ -1009,7 +1009,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
logException(context, elem);
/// In case of exception we log internal queries also
if (log_queries && elem.type >= log_queries_min_type && Int64(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
if (log_queries && elem.type >= log_queries_min_type && static_cast<Int64>(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
{
if (auto query_log = context->getQueryLog())
query_log->add(elem);

View File

@ -85,13 +85,13 @@ ASTPtr ASTDeclareOptions::clone() const
bool ParserAlwaysTrue::parseImpl(IParser::Pos & /*pos*/, ASTPtr & node, Expected & /*expected*/)
{
node = std::make_shared<ASTLiteral>(Field(UInt64(1)));
node = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(1)));
return true;
}
bool ParserAlwaysFalse::parseImpl(IParser::Pos & /*pos*/, ASTPtr & node, Expected & /*expected*/)
{
node = std::make_shared<ASTLiteral>(Field(UInt64(0)));
node = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(0)));
return true;
}

View File

@ -391,7 +391,7 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
/// Transform `DISTINCT ON expr` to `LIMIT 1 BY expr`
limit_by_expression_list = distinct_on_expression_list;
limit_by_length = std::make_shared<ASTLiteral>(Field{UInt8(1)});
limit_by_length = std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(1)});
distinct_on_expression_list = nullptr;
}

View File

@ -30,9 +30,9 @@ bool ParserSetQuery::parseNameValuePair(SettingChange & change, IParser::Pos & p
return false;
if (ParserKeyword("TRUE").ignore(pos, expected))
value = std::make_shared<ASTLiteral>(Field(UInt64(1)));
value = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(1)));
else if (ParserKeyword("FALSE").ignore(pos, expected))
value = std::make_shared<ASTLiteral>(Field(UInt64(0)));
value = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(0)));
else if (!value_p.parse(pos, value, expected))
return false;

View File

@ -22,9 +22,9 @@ ASTPtr makeASTForLogicalAnd(ASTs && arguments)
});
if (!partial_result)
return std::make_shared<ASTLiteral>(Field{UInt8(0)});
return std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(0)});
if (arguments.empty())
return std::make_shared<ASTLiteral>(Field{UInt8(1)});
return std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(1)});
if (arguments.size() == 1)
return arguments[0];
@ -51,9 +51,9 @@ ASTPtr makeASTForLogicalOr(ASTs && arguments)
});
if (partial_result)
return std::make_shared<ASTLiteral>(Field{UInt8(1)});
return std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(1)});
if (arguments.empty())
return std::make_shared<ASTLiteral>(Field{UInt8(0)});
return std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(0)});
if (arguments.size() == 1)
return arguments[0];
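
makeASTForLogicalAnd folds constant operands before building a function node: one constant false collapses the whole conjunction to the literal 0, and if only constant trues were present nothing remains and the result is the literal 1 (the identity element); makeASTForLogicalOr is the mirror image. A hedged sketch of the same folding over plain values, with the AST replaced by optional<bool> (nullopt standing in for a non-constant argument):

    #include <optional>
    #include <utility>
    #include <vector>

    /// Returns the folded constant, or nullopt when a function node must be
    /// built over the surviving non-constant operands left in args.
    std::optional<bool> foldAnd(std::vector<std::optional<bool>> & args)
    {
        std::vector<std::optional<bool>> rest;
        for (const auto & a : args)
        {
            if (a && !*a)
                return false;      // any constant false decides the AND
            if (!a)
                rest.push_back(a); // keep non-constant operands
            // a constant true is AND's identity element: drop it
        }
        args = std::move(rest);
        return args.empty() ? std::optional<bool>(true) : std::nullopt;
    }

    int main()
    {
        std::vector<std::optional<bool>> v{true, std::nullopt, true};
        return (foldAnd(v) == std::nullopt && v.size() == 1) ? 0 : 1;
    }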

View File

@ -14,10 +14,12 @@ namespace ErrorCodes
extern const int CANNOT_PARSE_QUOTED_STRING;
extern const int CANNOT_PARSE_DATE;
extern const int CANNOT_PARSE_DATETIME;
extern const int CANNOT_READ_ARRAY_FROM_TEXT;
extern const int CANNOT_READ_ALL_DATA;
extern const int CANNOT_PARSE_NUMBER;
extern const int CANNOT_PARSE_BOOL;
extern const int CANNOT_PARSE_UUID;
extern const int CANNOT_READ_ARRAY_FROM_TEXT;
extern const int CANNOT_READ_MAP_FROM_TEXT;
extern const int CANNOT_READ_ALL_DATA;
extern const int TOO_LARGE_STRING_SIZE;
extern const int INCORRECT_NUMBER_OF_COLUMNS;
extern const int ARGUMENT_OUT_OF_BOUND;
@ -32,9 +34,11 @@ bool isParseError(int code)
|| code == ErrorCodes::CANNOT_PARSE_QUOTED_STRING
|| code == ErrorCodes::CANNOT_PARSE_DATE
|| code == ErrorCodes::CANNOT_PARSE_DATETIME
|| code == ErrorCodes::CANNOT_READ_ARRAY_FROM_TEXT
|| code == ErrorCodes::CANNOT_PARSE_NUMBER
|| code == ErrorCodes::CANNOT_PARSE_UUID
|| code == ErrorCodes::CANNOT_PARSE_BOOL
|| code == ErrorCodes::CANNOT_READ_ARRAY_FROM_TEXT
|| code == ErrorCodes::CANNOT_READ_MAP_FROM_TEXT
|| code == ErrorCodes::CANNOT_READ_ALL_DATA
|| code == ErrorCodes::TOO_LARGE_STRING_SIZE
|| code == ErrorCodes::ARGUMENT_OUT_OF_BOUND /// For Decimals

View File

@ -338,7 +338,7 @@ void registerFileSegmentationEngineTabSeparated(FormatFactory & factory)
{
auto register_func = [&](const String & format_name, bool with_names, bool with_types)
{
size_t min_rows = 1 + int(with_names) + int(with_types);
size_t min_rows = 1 + static_cast<int>(with_names) + static_cast<int>(with_types);
factory.registerFileSegmentationEngine(format_name, [is_raw, min_rows](ReadBuffer & in, DB::Memory<> & memory, size_t min_chunk_size)
{
return fileSegmentationEngineTabSeparatedImpl(in, memory, min_chunk_size, is_raw, min_rows);

View File

@ -45,7 +45,7 @@ Chunk CubeTransform::generate()
consumed_chunks.clear();
auto num_rows = cube_chunk.getNumRows();
mask = (UInt64(1) << keys.size()) - 1;
mask = (static_cast<UInt64>(1) << keys.size()) - 1;
current_columns = cube_chunk.getColumns();
current_zero_columns.clear();
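
The mask above, (1 << keys.size()) - 1, is the all-keys bitmask that CubeTransform counts down from; each counter value selects one subset of the grouping keys, which is exactly the 2^n grouping sets CUBE has to produce. A minimal enumeration of those subsets (a simplification, not the real transform):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const size_t num_keys = 3;
        const uint64_t mask = (static_cast<uint64_t>(1) << num_keys) - 1;  // 0b111
        for (uint64_t m = mask; ; --m)  // 8 grouping sets for 3 keys
        {
            for (size_t k = 0; k < num_keys; ++k)
                std::printf("%c", (m & (1ull << k)) ? 'K' : '.');
            std::printf("\n");
            if (m == 0)
                break;
        }
        return 0;
    }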

View File

@ -86,7 +86,7 @@ void PostgreSQLSource<T>::onStart()
}
}
stream = std::make_unique<pqxx::stream_from>(*tx, pqxx::from_query, std::string_view(query_str));
stream = std::make_unique<pqxx::stream_from>(*tx, pqxx::from_query, std::string_view{query_str});
}
template<typename T>

View File

@ -33,7 +33,7 @@ static void limitProgressingSpeed(size_t total_progress_size, size_t max_speed_i
/// Never sleep more than one second (it should be enough to limit speed for a reasonable amount,
/// and otherwise it's too easy to make query hang).
sleep_microseconds = std::min(UInt64(1000000), sleep_microseconds);
sleep_microseconds = std::min(static_cast<UInt64>(1000000), sleep_microseconds);
sleepForMicroseconds(sleep_microseconds);

View File

@ -340,7 +340,7 @@ namespace
uint64_t doubleToUInt64(double d)
{
if (d >= double(std::numeric_limits<uint64_t>::max()))
if (d >= static_cast<double>(std::numeric_limits<uint64_t>::max()))
return std::numeric_limits<uint64_t>::max();
return static_cast<uint64_t>(d);
}
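
The guard in doubleToUInt64 matters because converting a double that is out of range for uint64_t is undefined behaviour. Note the subtlety: numeric_limits<uint64_t>::max() is 2^64 - 1, which is not exactly representable as a double and rounds up to 2^64, so the >= comparison still catches every value the cast could not represent. A self-contained version (assuming, as the original apparently does, that d is non-negative):

    #include <cstdint>
    #include <limits>

    uint64_t doubleToUInt64(double d)
    {
        // max() rounds up to 2^64 as a double, so this saturates all
        // out-of-range inputs instead of hitting undefined behaviour.
        if (d >= static_cast<double>(std::numeric_limits<uint64_t>::max()))
            return std::numeric_limits<uint64_t>::max();
        return static_cast<uint64_t>(d);
    }

    int main()
    {
        return doubleToUInt64(1e300) == std::numeric_limits<uint64_t>::max() ? 0 : 1;
    }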

View File

@ -267,7 +267,7 @@ Chunk MergeTreeBaseSelectProcessor::readFromPartImpl()
};
UInt64 recommended_rows = estimate_num_rows(*task, task->range_reader);
UInt64 rows_to_read = std::max(UInt64(1), std::min(current_max_block_size_rows, recommended_rows));
UInt64 rows_to_read = std::max(static_cast<UInt64>(1), std::min(current_max_block_size_rows, recommended_rows));
auto read_result = task->range_reader.read(rows_to_read, task->mark_ranges);
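
The max/min pair above clamps the recommended row count into [1, current_max_block_size_rows]. Since C++17 the same intent can be written as std::clamp, provided the upper bound is at least 1 (a sketch, not a suggested change to the code):

    #include <algorithm>
    #include <cstdint>

    int main()
    {
        uint64_t recommended = 0, max_rows = 65536;
        // Equivalent to std::max<uint64_t>(1, std::min(max_rows, recommended))
        // whenever max_rows >= 1.
        uint64_t rows_to_read = std::clamp<uint64_t>(recommended, 1, max_rows);
        return rows_to_read == 1 ? 0 : 1;
    }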

View File

@ -943,7 +943,7 @@ void MergeTreeData::loadDataPartsFromDisk(
const MergeTreeSettingsPtr & settings)
{
/// Parallel loading of data parts.
pool.setMaxThreads(std::min(size_t(settings->max_part_loading_threads), num_parts));
pool.setMaxThreads(std::min(static_cast<size_t>(settings->max_part_loading_threads), num_parts));
size_t num_threads = pool.getMaxThreads();
std::vector<size_t> parts_per_thread(num_threads, num_parts / num_threads);
for (size_t i = 0ul; i < num_parts % num_threads; ++i)
@ -3250,7 +3250,7 @@ void MergeTreeData::delayInsertOrThrowIfNeeded(Poco::Event * until) const
"Too many inactive parts ({}). Parts cleaning are processing significantly slower than inserts",
inactive_parts_count_in_partition);
}
k_inactive = ssize_t(inactive_parts_count_in_partition) - ssize_t(settings->inactive_parts_to_delay_insert);
k_inactive = static_cast<ssize_t>(inactive_parts_count_in_partition) - static_cast<ssize_t>(settings->inactive_parts_to_delay_insert);
}
if (parts_count_in_partition >= settings->parts_to_throw_insert)
@ -5229,9 +5229,12 @@ std::optional<ProjectionCandidate> MergeTreeData::getQueryProcessingStageWithAgg
query_ptr,
query_context,
query_options,
/* prepared_sets_= */ query_info.sets);
std::move(query_info.subquery_for_sets),
std::move(query_info.sets));
const auto & analysis_result = select.getAnalysisResult();
query_info.sets = select.getQueryAnalyzer()->getPreparedSets();
query_info.sets = std::move(select.getQueryAnalyzer()->getPreparedSets());
query_info.subquery_for_sets = std::move(select.getQueryAnalyzer()->getSubqueriesForSets());
bool can_use_aggregate_projection = true;
/// If the first stage of the query pipeline is more complex than Aggregating - Expression - Filter - ReadFromStorage,
@ -5631,8 +5634,6 @@ std::optional<ProjectionCandidate> MergeTreeData::getQueryProcessingStageWithAgg
{
selected_candidate->aggregation_keys = select.getQueryAnalyzer()->aggregationKeys();
selected_candidate->aggregate_descriptions = select.getQueryAnalyzer()->aggregates();
selected_candidate->subqueries_for_sets
= std::make_shared<SubqueriesForSets>(std::move(select.getQueryAnalyzer()->getSubqueriesForSets()));
}
return *selected_candidate;

View File

@ -206,7 +206,7 @@ void MergeTreeDataPartWriterCompact::writeDataBlock(const Block & block, const G
writeIntBinary(plain_hashing.count(), marks);
writeIntBinary(UInt64(0), marks);
writeIntBinary(static_cast<UInt64>(0), marks);
writeColumnSingleGranule(
block.getByName(name_and_type->name), data_part->getSerialization(*name_and_type),
@ -246,9 +246,9 @@ void MergeTreeDataPartWriterCompact::fillDataChecksums(IMergeTreeDataPart::Check
for (size_t i = 0; i < columns_list.size(); ++i)
{
writeIntBinary(plain_hashing.count(), marks);
writeIntBinary(UInt64(0), marks);
writeIntBinary(static_cast<UInt64>(0), marks);
}
writeIntBinary(UInt64(0), marks);
writeIntBinary(static_cast<UInt64>(0), marks);
}
plain_file->next();

View File

@ -374,12 +374,6 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read(
std::move(pipe),
fmt::format("MergeTree(with {} projection {})", query_info.projection->desc->type, query_info.projection->desc->name));
plan->addStep(std::move(step));
if (query_info.projection->subqueries_for_sets && !query_info.projection->subqueries_for_sets->empty())
{
SizeLimits limits(settings.max_rows_to_transfer, settings.max_bytes_to_transfer, settings.transfer_overflow_mode);
addCreatingSetsStep(*plan, std::move(*query_info.projection->subqueries_for_sets), limits, context);
}
return plan;
}
@ -1095,7 +1089,7 @@ std::shared_ptr<QueryIdHolder> MergeTreeDataSelectExecutor::checkLimits(
std::set<String> partitions;
for (const auto & part_with_ranges : result.parts_with_ranges)
partitions.insert(part_with_ranges.data_part->info.partition_id);
if (partitions.size() > size_t(max_partitions_to_read))
if (partitions.size() > static_cast<size_t>(max_partitions_to_read))
throw Exception(
ErrorCodes::TOO_MANY_PARTITIONS,
"Too many partitions to read. Current {}, max {}",

View File

@ -178,7 +178,7 @@ T MaterializedPostgreSQLConsumer::unhexN(const char * message, size_t pos, size_
for (size_t i = 0; i < n; ++i)
{
if (i) result <<= 8;
result |= UInt32(unhex2(message + pos + 2 * i));
result |= static_cast<UInt32>(unhex2(message + pos + 2 * i));
}
return result;
}
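
unhexN decodes n bytes of big-endian hex text into an integer: each pair of hex digits becomes one byte, shifted in from the most significant end. A standalone equivalent, with a hypothetical unhex2 in place of ClickHouse's helper of the same name:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for ClickHouse's unhex2: decode two hex digits.
    static uint8_t unhex2(const char * p)
    {
        auto nib = [](char c) -> uint8_t {
            return c <= '9' ? c - '0' : (c | 0x20) - 'a' + 10;
        };
        return static_cast<uint8_t>(nib(p[0]) << 4 | nib(p[1]));
    }

    static uint32_t unhexN(const char * message, size_t pos, size_t n)
    {
        uint32_t result = 0;
        for (size_t i = 0; i < n; ++i)
        {
            if (i)
                result <<= 8;  // earlier pairs end up in higher bytes
            result |= static_cast<uint32_t>(unhex2(message + pos + 2 * i));
        }
        return result;
    }

    int main()
    {
        return unhexN("deadbeef", 0, 4) == 0xdeadbeefu ? 0 : 1;
    }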
@ -276,14 +276,14 @@ void MaterializedPostgreSQLConsumer::readTupleData(
{
case PostgreSQLQuery::INSERT:
{
buffer.columns[num_columns]->insert(Int8(1));
buffer.columns[num_columns]->insert(static_cast<Int8>(1));
buffer.columns[num_columns + 1]->insert(lsn_value);
break;
}
case PostgreSQLQuery::DELETE:
{
buffer.columns[num_columns]->insert(Int8(-1));
buffer.columns[num_columns]->insert(static_cast<Int8>(-1));
buffer.columns[num_columns + 1]->insert(lsn_value);
break;
@ -292,9 +292,9 @@ void MaterializedPostgreSQLConsumer::readTupleData(
{
/// Process old value in case changed value is a primary key.
if (old_value)
buffer.columns[num_columns]->insert(Int8(-1));
buffer.columns[num_columns]->insert(static_cast<Int8>(-1));
else
buffer.columns[num_columns]->insert(Int8(1));
buffer.columns[num_columns]->insert(static_cast<Int8>(1));
buffer.columns[num_columns + 1]->insert(lsn_value);

View File

@ -352,7 +352,7 @@ ASTPtr StorageMaterializedPostgreSQL::getColumnDeclaration(const DataTypePtr & d
ast_expression->name = "DateTime64";
ast_expression->arguments = std::make_shared<ASTExpressionList>();
ast_expression->arguments->children.emplace_back(std::make_shared<ASTLiteral>(UInt32(6)));
ast_expression->arguments->children.emplace_back(std::make_shared<ASTLiteral>(static_cast<UInt32>(6)));
return ast_expression;
}

View File

@ -48,7 +48,7 @@ Pipe readFinalFromNestedStorage(
require_columns_name.emplace_back(sign_column.name);
const auto & sign_column_name = std::make_shared<ASTIdentifier>(sign_column.name);
const auto & fetch_sign_value = std::make_shared<ASTLiteral>(Field(Int8(1)));
const auto & fetch_sign_value = std::make_shared<ASTLiteral>(Field(static_cast<Int8>(1)));
expressions->children.emplace_back(makeASTFunction("equals", sign_column_name, fetch_sign_value));
filter_column_name = expressions->children.back()->getColumnName();

View File

@ -1,6 +1,7 @@
#pragma once
#include <Interpreters/PreparedSets.h>
#include <Interpreters/SubqueryForSet.h>
#include <Interpreters/DatabaseAndTableWithAlias.h>
#include <Core/SortDescription.h>
#include <Core/Names.h>
@ -128,7 +129,6 @@ struct ProjectionCandidate
InputOrderInfoPtr input_order_info;
ManyExpressionActions group_by_elements_actions;
SortDescription group_by_elements_order_descr;
std::shared_ptr<SubqueriesForSets> subqueries_for_sets;
MergeTreeDataSelectAnalysisResultPtr merge_tree_projection_select_result_ptr;
MergeTreeDataSelectAnalysisResultPtr merge_tree_normal_select_result_ptr;
};
@ -137,7 +137,7 @@ struct ProjectionCandidate
* that can be used during query processing
* inside storage engines.
*/
struct SelectQueryInfo
struct SelectQueryInfoBase
{
ASTPtr query;
ASTPtr view_query; /// Optimized VIEW query
@ -178,4 +178,14 @@ struct SelectQueryInfo
MergeTreeDataSelectAnalysisResultPtr merge_tree_select_result_ptr;
};
/// Contains non-copyable stuff
struct SelectQueryInfo : SelectQueryInfoBase
{
SelectQueryInfo() = default;
SelectQueryInfo(const SelectQueryInfo & other) : SelectQueryInfoBase(other) {}
/// Make subquery_for_sets reusable across different interpreters.
SubqueriesForSets subquery_for_sets;
};
}
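
The base/derived split above is a copy-control idiom: everything copyable stays in SelectQueryInfoBase, and the derived struct's copy constructor copies only the base, so the non-copyable SubqueriesForSets member is default-constructed in every copy rather than poisoning the copyability of the whole struct. A minimal sketch with illustrative types:

    #include <memory>
    #include <string>

    struct InfoBase
    {
        std::string query;  // copyable state lives here
    };

    struct NonCopyableSets  // stand-in for SubqueriesForSets
    {
        NonCopyableSets() = default;
        NonCopyableSets(const NonCopyableSets &) = delete;
        NonCopyableSets(NonCopyableSets &&) = default;
        NonCopyableSets & operator=(NonCopyableSets &&) = default;
        std::unique_ptr<int> data;
    };

    struct Info : InfoBase
    {
        Info() = default;
        /// Copy the base only; the sets start fresh in each copy.
        Info(const Info & other) : InfoBase(other) {}
        NonCopyableSets subquery_for_sets;
    };

    int main()
    {
        Info a;
        a.query = "SELECT 1";
        Info b(a);  // compiles even though subquery_for_sets is non-copyable
        return b.query == a.query ? 0 : 1;
    }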

View File

@ -1376,7 +1376,7 @@ void StorageDistributed::delayInsertOrThrowIfNeeded() const
{
/// Step is 5% of the delay and minimal one second.
/// NOTE: max_delay_to_insert is in seconds, and step is in ms.
const size_t step_ms = std::min<double>(1., double(distributed_settings.max_delay_to_insert) * 1'000 * 0.05);
const size_t step_ms = std::min<double>(1., static_cast<double>(distributed_settings.max_delay_to_insert) * 1'000 * 0.05);
UInt64 delayed_ms = 0;
do {

View File

@ -296,7 +296,7 @@ Pipe StorageMerge::read(
size_t tables_count = selected_tables.size();
Float64 num_streams_multiplier
= std::min(unsigned(tables_count), std::max(1U, unsigned(local_context->getSettingsRef().max_streams_multiplier_for_merge_tables)));
= std::min(static_cast<unsigned>(tables_count), std::max(1U, static_cast<unsigned>(local_context->getSettingsRef().max_streams_multiplier_for_merge_tables)));
num_streams *= num_streams_multiplier;
size_t remaining_streams = num_streams;
@ -327,7 +327,7 @@ Pipe StorageMerge::read(
size_t current_need_streams = tables_count >= num_streams ? 1 : (num_streams / tables_count);
size_t current_streams = std::min(current_need_streams, remaining_streams);
remaining_streams -= current_streams;
current_streams = std::max(size_t(1), current_streams);
current_streams = std::max(static_cast<size_t>(1), current_streams);
const auto & storage = std::get<1>(table);

View File

@ -2,6 +2,7 @@
const char * auto_contributors[] {
"0xflotus",
"13DaGGeR",
"1lann",
"20018712",
"243f6a88 85a308d3",
"243f6a8885a308d313198a2e037",
@ -112,8 +113,10 @@ const char * auto_contributors[] {
"Andrey Urusov",
"Andrey Z",
"Andrii Buriachevskyi",
"Andrii R",
"Andy Liang",
"Andy Yang",
"Anish Bhanwala",
"Anmol Arora",
"Anna",
"Anna Shakhova",
@ -121,6 +124,7 @@ const char * auto_contributors[] {
"Anthony N. Simon",
"Anton Ivashkin",
"Anton Kobzev",
"Anton Kozlov",
"Anton Kvasha",
"Anton Okhitin",
"Anton Okulov",
@ -176,7 +180,9 @@ const char * auto_contributors[] {
"Boris Kuschel",
"Bowen Masco",
"Braulio Valdivielso",
"Brendan Cox",
"Brett Hoerner",
"Brian Hunter",
"Bulat Gaifullin",
"Carbyn",
"Caspian",
@ -199,6 +205,7 @@ const char * auto_contributors[] {
"CurtizJ",
"DF5HSE",
"DIAOZHAFENG",
"Dan Roscigno",
"Daniel Bershatsky",
"Daniel Dao",
"Daniel Qin",
@ -255,6 +262,7 @@ const char * auto_contributors[] {
"Eric Daniel",
"Erixonich",
"Ernest Poletaev",
"Eugene Galkin",
"Eugene Klimov",
"Eugene Konkov",
"Evgenia Sudarikova",
@ -283,10 +291,12 @@ const char * auto_contributors[] {
"Francisco Barón",
"Frank Chen",
"Frank Zhao",
"François Violette",
"Fruit of Eden",
"Fu Zhe",
"Fullstop000",
"Fuwang Hu",
"G5.Qin",
"Gagan Arneja",
"Gao Qiang",
"Gary Dotzler",
@ -308,6 +318,7 @@ const char * auto_contributors[] {
"Guo Wei (William)",
"Haavard Kvaalen",
"Habibullah Oladepo",
"HaiBo Li",
"Hamoon",
"Harry-Lee",
"HarryLeeIBM",
@ -316,6 +327,7 @@ const char * auto_contributors[] {
"Heena Bansal",
"HeenaBansal2009",
"Hiroaki Nakamura",
"Hongbin",
"HuFuwang",
"Hui Wang",
"ILya Limarenko",
@ -371,12 +383,14 @@ const char * auto_contributors[] {
"Jeffrey Dang",
"Jiading Guo",
"Jiang Tao",
"Jianmei Zhang",
"Jochen Schalanda",
"John",
"John Hummel",
"John Skopis",
"Jonatas Freitas",
"João Figueiredo",
"Julian Gilyadov",
"Julian Zhou",
"Justin Hilliard",
"Kang Liu",
@ -384,8 +398,10 @@ const char * auto_contributors[] {
"Keiji Yoshida",
"Ken Chen",
"Ken MacInnis",
"Kerry Clendinning",
"Kevin Chiang",
"Kevin Michel",
"KinderRiven",
"Kiran",
"Kirill Danshin",
"Kirill Ershov",
@ -406,7 +422,11 @@ const char * auto_contributors[] {
"Kruglov Pavel",
"Kseniia Sumarokova",
"Ky Li",
"LAL2211",
"LB",
"LIJINGBO",
"Larry Luo",
"Lars Eidnes",
"Latysheva Alexandra",
"Lemore",
"Leonardo Cecchi",
@ -466,6 +486,7 @@ const char * auto_contributors[] {
"Maxim Ulanovskiy",
"MaximAL",
"Mc.Spring",
"Meena Renganathan",
"Meena-Renganathan",
"MeiK",
"Memo",
@ -473,12 +494,14 @@ const char * auto_contributors[] {
"Metikov Vadim",
"Michael Furmur",
"Michael Kolupaev",
"Michael Lex",
"Michael Monashev",
"Michael Razuvaev",
"Michael Smitasin",
"Michail Safronov",
"Michal Lisowski",
"MicrochipQ",
"Miel Donkers",
"Miguel Fernández",
"Mihail Fandyushin",
"Mikahil Nacharov",
@ -528,14 +551,17 @@ const char * auto_contributors[] {
"Nik",
"Nikhil Nadig",
"Nikhil Raman",
"Nikifor Seriakov",
"Nikita",
"Nikita Lapkov",
"Nikita Mikhailov",
"Nikita Mikhalev",
"Nikita Mikhaylov",
"Nikita Orlov",
"Nikita Taranov",
"Nikita Tikhomirov",
"Nikita Vasilev",
"NikitaEvs",
"Nikolai Kochetov",
"Nikolai Sorokin",
"Nikolay",
@ -545,6 +571,7 @@ const char * auto_contributors[] {
"Nikolay Shcheglov",
"Nikolay Vasiliev",
"Nikolay Volosatov",
"Nir Peled",
"Niu Zhaojie",
"Odin Hultgren Van Der Horst",
"Okada Haruki",
@ -554,6 +581,7 @@ const char * auto_contributors[] {
"Oleg Matrokhin",
"Oleg Obleukhov",
"Oleg Strokachuk",
"Oleg Taizov",
"Olga Khvostikova",
"Olga Revyakina",
"OmarBazaraa",
@ -564,6 +592,7 @@ const char * auto_contributors[] {
"Oskar Wojciski",
"OuO",
"PHO",
"Pablo Alegre",
"Paramtamtam",
"Patrick Zippenfenig",
"Pavel",
@ -586,10 +615,12 @@ const char * auto_contributors[] {
"Philippe Ombredanne",
"Potya",
"Pradeep Chhetri",
"Pxl",
"Pysaoke",
"Quid37",
"Rafael David Tinoco",
"Rajkumar",
"Rajkumar Varada",
"Ramazan Polat",
"Ravengg",
"Raúl Marín",
@ -601,6 +632,7 @@ const char * auto_contributors[] {
"Ri",
"Rich Raposa",
"Robert Hodges",
"Robert Schulze",
"RogerYK",
"Rohit Agarwal",
"Romain Neutron",
@ -609,10 +641,12 @@ const char * auto_contributors[] {
"Roman Chyrva",
"Roman Lipovsky",
"Roman Nikolaev",
"Roman Nikonov",
"Roman Nozdrin",
"Roman Peshkurov",
"Roman Tsisyk",
"Roman Zhukov",
"Roy Bellingan",
"Ruslan",
"Ruslan Savchenko",
"Russ Frank",
@ -647,10 +681,12 @@ const char * auto_contributors[] {
"Sergey Zaikin",
"Sergi Almacellas Abellana",
"Sergi Vladykin",
"Sergio Tulentsev",
"SevaCode",
"Seyed Mehrshad Hosseini",
"Sherry Wang",
"Shoh Jahon",
"SiderZhang",
"Silviu Caragea",
"Simeon Emanuilov",
"Simon Liu",
@ -702,6 +738,7 @@ const char * auto_contributors[] {
"Tomáš Hromada",
"Tsarkova Anastasia",
"TszkitLo40",
"Tyler Hannan",
"Ubuntu",
"Ubus",
"UnamedRus",
@ -714,6 +751,7 @@ const char * auto_contributors[] {
"VadimPE",
"Val",
"Valera Ryaboshapko",
"Varinara",
"Vasily Kozhukhovskiy",
"Vasily Morozov",
"Vasily Nemkov",
@ -769,7 +807,9 @@ const char * auto_contributors[] {
"Xianda Ke",
"Xiang Zhou",
"Xin Wang",
"Xudong Zhang",
"Y Lu",
"Yakov Olkhovskiy",
"Yangkuan Liu",
"Yatian Xu",
"Yatsishin Ilya",
@ -780,6 +820,7 @@ const char * auto_contributors[] {
"Yingfan Chen",
"Yiğit Konur",
"Yohann Jardin",
"Yong Wang",
"Youenn Lebras",
"Yuntao Wu",
"Yuri Dyachenko",
@ -836,6 +877,7 @@ const char * auto_contributors[] {
"avasiliev",
"avogar",
"avsharapov",
"awakeljw",
"awesomeleo",
"bbkas",
"benamazing",
@ -857,6 +899,7 @@ const char * auto_contributors[] {
"chang.chen",
"changvvb",
"chasingegg",
"chen9t",
"chengy8934",
"chenjian",
"chenqi",
@ -872,6 +915,7 @@ const char * auto_contributors[] {
"comunodi",
"congbaoyangrou",
"coraxster",
"cwkyaoyao",
"d.v.semenov",
"dalei2019",
"damozhaeva",
@ -903,6 +947,7 @@ const char * auto_contributors[] {
"emakarov",
"emhlbmc",
"emironyuk",
"erikbaan",
"ermaotech",
"evtan",
"exprmntr",
@ -946,6 +991,8 @@ const char * auto_contributors[] {
"hao.he",
"hchen9",
"hcz",
"heleihelei",
"helifu",
"heng zhao",
"hermano",
"hexiaoting",
@ -957,6 +1004,7 @@ const char * auto_contributors[] {
"ianton-ru",
"ice1x",
"idfer",
"ifinik",
"igomac",
"igor",
"igor.lapko",
@ -976,11 +1024,13 @@ const char * auto_contributors[] {
"javi santana",
"jennyma",
"jetgm",
"jewisliu",
"jianmei zhang",
"jkuklis",
"jus1096",
"jyz0309",
"karnevil13",
"kashwy",
"keenwolf",
"kevin wan",
"khamadiev",
@ -995,6 +1045,7 @@ const char * auto_contributors[] {
"l",
"l1tsolaiki",
"lalex",
"larryluogit",
"laurieliyang",
"lehasm",
"leosunli",
@ -1011,6 +1062,7 @@ const char * auto_contributors[] {
"libenwang",
"lichengxiang",
"linceyou",
"lincion",
"listar",
"litao91",
"liu-bov",
@ -1032,6 +1084,7 @@ const char * auto_contributors[] {
"malkfilipp",
"manmitya",
"maqroll",
"martincholuj",
"mastertheknife",
"maxim",
"maxim-babenko",
@ -1043,6 +1096,7 @@ const char * auto_contributors[] {
"meo",
"meoww-bot",
"mergify[bot]",
"metahys",
"mf5137",
"mfridental",
"michael1589",
@ -1084,7 +1138,9 @@ const char * auto_contributors[] {
"orantius",
"p0ny",
"palasonicq",
"palegre-tiny",
"pawelsz-rb",
"pdai",
"pdv-ru",
"peshkurov",
"peter279k",
@ -1103,6 +1159,7 @@ const char * auto_contributors[] {
"r1j1k",
"rainbowsysu",
"redclusive",
"rfraposa",
"ritaank",
"robert",
"robot-clickhouse",
@ -1120,12 +1177,15 @@ const char * auto_contributors[] {
"save-my-heart",
"sdk2",
"serebrserg",
"serxa",
"sev7e0",
"sevirov",
"sfod",
"shangshujie",
"shedx",
"shuchaome",
"simon-says",
"snyk-bot",
"songenjie",
"spff",
"spongedc",
@ -1142,8 +1202,10 @@ const char * auto_contributors[] {
"tai",
"taichong",
"taiyang-li",
"tangjiangling",
"tao jiang",
"tavplubix",
"tcoyvwac",
"tekeri",
"templarzq",
"terrylin",
@ -1178,6 +1240,8 @@ const char * auto_contributors[] {
"wzl",
"xPoSx",
"xiedeyantu",
"xinhuitian",
"yakov-olkhovskiy",
"yandd",
"yang",
"yangshuai",
@ -1186,12 +1250,15 @@ const char * auto_contributors[] {
"yhgcn",
"yiguolei",
"yingjinghan",
"yjant",
"ylchou",
"yonesko",
"youenn lebras",
"yuchuansun",
"yuefoo",
"yulu86",
"yuluxu",
"yuuch",
"ywill3",
"zamulla",
"zhang2014",
@ -1200,6 +1267,8 @@ const char * auto_contributors[] {
"zhangshengyu",
"zhangxiao018",
"zhangxiao871",
"zhangyifan27",
"zhangyuli1",
"zhen ni",
"zhifeng",
"zhongyuankai",
@ -1211,6 +1280,7 @@ const char * auto_contributors[] {
"zvrr",
"zvvr",
"zxc111",
"zxealous",
"zzsmdfj",
"Šimon Podlipský",
"Артем Стрельцов",
@ -1223,6 +1293,7 @@ const char * auto_contributors[] {
"Смитюх Вячеслав",
"Сундуков Алексей",
"万康",
"何李夫",
"凌涛",
"吴健",
"小路",

View File

@ -26,27 +26,34 @@ DO_NOT_TEST_LABEL = "do not test"
FORCE_TESTS_LABEL = "force tests"
SUBMODULE_CHANGED_LABEL = "submodule changed"
MAP_CATEGORY_TO_LABEL = {
"New Feature": "pr-feature",
"Bug Fix": "pr-bugfix",
"Bug Fix (user-visible misbehaviour in official "
"stable or prestable release)": "pr-bugfix",
"Improvement": "pr-improvement",
"Performance Improvement": "pr-performance",
"Backward Incompatible Change": "pr-backward-incompatible",
"Build/Testing/Packaging Improvement": "pr-build",
"Build Improvement": "pr-build",
"Build/Testing Improvement": "pr-build",
"Build": "pr-build",
"Packaging Improvement": "pr-build",
"Not for changelog (changelog entry is not required)": "pr-not-for-changelog",
"Not for changelog": "pr-not-for-changelog",
"Documentation (changelog entry is not required)": "pr-documentation",
"Documentation": "pr-documentation",
# 'Other': doesn't match anything
LABELS = {
"pr-backward-incompatible": ["Backward Incompatible Change"],
"pr-bugfix": [
"Bug Fix",
"Bug Fix (user-visible misbehaviour in official stable or prestable release)",
],
"pr-build": [
"Build/Testing/Packaging Improvement",
"Build Improvement",
"Build/Testing Improvement",
"Build",
"Packaging Improvement",
],
"pr-documentation": [
"Documentation (changelog entry is not required)",
"Documentation",
],
"pr-feature": ["New Feature"],
"pr-improvement": ["Improvement"],
"pr-not-for-changelog": [
"Not for changelog (changelog entry is not required)",
"Not for changelog",
],
"pr-performance": ["Performance Improvement"],
}
CATEGORY_TO_LABEL = {c: lb for lb, categories in LABELS.items() for c in categories}
def pr_is_by_trusted_user(pr_user_login, pr_user_orgs):
if pr_user_login.lower() in TRUSTED_CONTRIBUTORS:
@ -95,14 +102,20 @@ def should_run_checks_for_pr(pr_info: PRInfo) -> Tuple[bool, str, str]:
return True, "No special conditions apply", "pending"
def check_pr_description(pr_info):
description = pr_info.body
def check_pr_description(pr_info) -> Tuple[str, str]:
lines = list(
map(lambda x: x.strip(), description.split("\n") if description else [])
map(lambda x: x.strip(), pr_info.body.split("\n") if pr_info.body else [])
)
lines = [re.sub(r"\s+", " ", line) for line in lines]
# Check if body contains "Reverts ClickHouse/ClickHouse#36337"
if [
True
for line in lines
if re.match(rf"\AReverts {GITHUB_REPOSITORY}#[\d]+\Z", line)
]:
return "", LABELS["pr-not-for-changelog"][0]
category = ""
entry = ""
@ -179,20 +192,20 @@ if __name__ == "__main__":
gh = Github(get_best_robot_token())
commit = get_commit(gh, pr_info.sha)
description_report, category = check_pr_description(pr_info)
description_error, category = check_pr_description(pr_info)
pr_labels_to_add = []
pr_labels_to_remove = []
if (
category in MAP_CATEGORY_TO_LABEL
and MAP_CATEGORY_TO_LABEL[category] not in pr_info.labels
category in CATEGORY_TO_LABEL
and CATEGORY_TO_LABEL[category] not in pr_info.labels
):
pr_labels_to_add.append(MAP_CATEGORY_TO_LABEL[category])
pr_labels_to_add.append(CATEGORY_TO_LABEL[category])
for label in pr_info.labels:
if (
label in MAP_CATEGORY_TO_LABEL.values()
and category in MAP_CATEGORY_TO_LABEL
and label != MAP_CATEGORY_TO_LABEL[category]
label in CATEGORY_TO_LABEL.values()
and category in CATEGORY_TO_LABEL
and label != CATEGORY_TO_LABEL[category]
):
pr_labels_to_remove.append(label)
@ -208,15 +221,15 @@ if __name__ == "__main__":
if pr_labels_to_remove:
remove_labels(gh, pr_info, pr_labels_to_remove)
if description_report:
if description_error:
print(
"::error ::Cannot run, PR description does not match the template: "
f"{description_report}"
f"{description_error}"
)
logging.info(
"PR body doesn't match the template: (start)\n%s\n(end)\n" "Reason: %s",
pr_info.body,
description_report,
description_error,
)
url = (
f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/"
@ -224,7 +237,7 @@ if __name__ == "__main__":
)
commit.create_status(
context=NAME,
description=description_report[:139],
description=description_error[:139],
state="failure",
target_url=url,
)

Some files were not shown because too many files have changed in this diff.