Fix various clang-tidy warnings
When I tried to add cool new clang-tidy 14 warnings, I noticed that the current clang-tidy settings already produce a ton of warnings. This commit addresses many of them. Almost all are non-critical, e.g. replacing C-style casts with C++-style casts.
Parent: fd094185e6
Commit: b24ca8de52
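Nearly every hunk below follows the same mechanical pattern: a C-style or functional-style cast such as `UInt32(x)` is spelled out as `static_cast<UInt32>(x)`. A minimal before/after sketch of the pattern (this is illustrative only: `thread_id()` is a stand-in, not a function from the patch, and `google-readability-casting` is the clang-tidy check that typically flags this — the commit does not name a specific check):

#include <cstdint>

static uint64_t thread_id() { return 12345; }  // illustrative stand-in for something like getThreadId()

int main()
{
    // Before: functional-style cast; clang-tidy flags it (e.g. google-readability-casting)
    // because C-style/functional casts can silently degrade into reinterpret_cast
    // or const_cast when the involved types change.
    uint32_t before = uint32_t(thread_id());

    // After: the conversion is explicit, greppable, and limited to what
    // static_cast is allowed to do.
    uint32_t after = static_cast<uint32_t>(thread_id());

    return before == after ? 0 : 1;
}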
@@ -73,7 +73,7 @@ replxx::Replxx::completions_t LineReader::Suggest::getCompletions(const String &
     if (std::string::npos == last_word_pos)
         last_word = prefix;
     else
-        last_word = std::string_view(prefix).substr(last_word_pos + 1, std::string::npos);
+        last_word = std::string_view{prefix}.substr(last_word_pos + 1, std::string::npos);
     /// last_word can be empty.

     std::pair<Words::const_iterator, Words::const_iterator> range;

@@ -61,6 +61,7 @@

 #if defined(OS_DARWIN)
 #    pragma GCC diagnostic ignored "-Wunused-macros"
+// NOLINTNEXTLINE(bugprone-reserved-identifier)
 #    define _XOPEN_SOURCE 700 // ucontext is not available without _XOPEN_SOURCE
 #endif
 #include <ucontext.h>

@@ -132,7 +133,7 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
     DB::writePODBinary(*info, out);
     DB::writePODBinary(signal_context, out);
     DB::writePODBinary(stack_trace, out);
-    DB::writeBinary(UInt32(getThreadId()), out);
+    DB::writeBinary(static_cast<UInt32>(getThreadId()), out);
     DB::writePODBinary(DB::current_thread, out);

     out.next();

@@ -435,7 +436,7 @@ static void sanitizerDeathCallback()
     DB::WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], buf_size, buf);

     DB::writeBinary(static_cast<int>(SignalListener::StdTerminate), out);
-    DB::writeBinary(UInt32(getThreadId()), out);
+    DB::writeBinary(static_cast<UInt32>(getThreadId()), out);
     DB::writeBinary(log_message, out);
     out.next();

@@ -103,7 +103,7 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
     columns[i++]->insert(DNSResolver::instance().getHostName());
     columns[i++]->insert(msg_ext.query_id);
     columns[i++]->insert(msg_ext.thread_id);
-    columns[i++]->insert(Int64(msg.getPriority()));
+    columns[i++]->insert(static_cast<Int64>(msg.getPriority()));
     columns[i++]->insert(msg.getSource());
     columns[i++]->insert(msg.getText());

@@ -239,12 +239,12 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
     uint32_t path_length = 0;
     _NSGetExecutablePath(nullptr, &path_length);
     if (path_length <= 1)
-        Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary");
+        throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary");

     std::string path(path_length, std::string::value_type());
     auto res = _NSGetExecutablePath(&path[0], &path_length);
     if (res != 0)
-        Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary");
+        throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary");

     if (path.back() == '\0')
         path.pop_back();

@@ -549,7 +549,7 @@ private:

     CodePoint sample(UInt64 random, double end_multiplier) const
     {
-        UInt64 range = total + UInt64(count_end * end_multiplier);
+        UInt64 range = total + static_cast<UInt64>(count_end * end_multiplier);
         if (range == 0)
             return END;

@@ -728,7 +728,7 @@ public:
         if (!histogram.total)
             continue;

-        double average = double(histogram.total) / histogram.buckets.size();
+        double average = static_cast<double>(histogram.total) / histogram.buckets.size();

         UInt64 new_total = 0;
         for (auto & bucket : histogram.buckets)

@@ -225,7 +225,7 @@ public:
             throw Exception("Logical error: single argument is passed to AggregateFunctionIfNullVariadic", ErrorCodes::LOGICAL_ERROR);

         if (number_of_arguments > MAX_ARGS)
-            throw Exception("Maximum number of arguments for aggregate function with Nullable types is " + toString(size_t(MAX_ARGS)),
+            throw Exception("Maximum number of arguments for aggregate function with Nullable types is " + toString(MAX_ARGS),
                 ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

         for (size_t i = 0; i < number_of_arguments; ++i)

@@ -359,7 +359,7 @@ private:
     using Base = AggregateFunctionNullBase<result_is_nullable, serialize_flag,
         AggregateFunctionIfNullVariadic<result_is_nullable, serialize_flag, null_is_skipped>>;

-    enum { MAX_ARGS = 8 };
+    static constexpr size_t MAX_ARGS = 8;
     size_t number_of_arguments = 0;
     std::array<char, MAX_ARGS> is_nullable; /// Plain array is better than std::vector due to one indirection less.
 };
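The hunk above also retires the old "enum hack" for class-level constants. A sketch of the difference, using only standard C++ (`MAX_ARGS` is the name from the hunk; the wrapper structs are illustrative):

#include <cstddef>

struct OldStyle
{
    enum { MAX_ARGS = 8 };  // constant of an unnamed enum type, not size_t
};

struct NewStyle
{
    static constexpr std::size_t MAX_ARGS = 8;  // typed constant
};

static_assert(OldStyle::MAX_ARGS == NewStyle::MAX_ARGS, "same value, different type");

Because the constant is now a real `size_t`, the earlier hunk in the same file can drop the `toString(size_t(MAX_ARGS))` cast and write `toString(MAX_ARGS)` directly.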
@@ -56,8 +56,8 @@ namespace

     /// Such default parameters were picked because they did good on some tests,
     /// though it still requires to fit parameters to achieve better result
-    auto learning_rate = Float64(1.0);
-    auto l2_reg_coef = Float64(0.5);
+    auto learning_rate = static_cast<Float64>(1.0);
+    auto l2_reg_coef = static_cast<Float64>(0.5);
     UInt64 batch_size = 15;

     std::string weights_updater_name = "Adam";

@@ -607,7 +607,7 @@ MutableColumns ColumnAggregateFunction::scatter(IColumn::ColumnIndex num_columns
     size_t num_rows = size();

     {
-        size_t reserve_size = double(num_rows) / num_columns * 1.1; /// 1.1 is just a guess. Better to use n-sigma rule.
+        size_t reserve_size = static_cast<double>(num_rows) / num_columns * 1.1; /// 1.1 is just a guess. Better to use n-sigma rule.

         if (reserve_size > 1)
             for (auto & column : columns)

@@ -81,7 +81,7 @@ namespace
     if (max_val > size)
         return mapUniqueIndexImplRef(index);

-    auto map_size = UInt64(max_val) + 1;
+    auto map_size = static_cast<UInt64>(max_val) + 1;
     PaddedPODArray<T> map(map_size, 0);
     T zero_pos_value = index[0];
     index[0] = 0;

@@ -98,7 +98,7 @@ namespace
         index[i] = map[val];
     }

-    auto res_col = ColumnVector<T>::create(UInt64(cur_pos) + 1);
+    auto res_col = ColumnVector<T>::create(static_cast<UInt64>(cur_pos) + 1);
     auto & data = res_col->getData();
     data[0] = zero_pos_value;
     for (size_t i = 0; i < map_size; ++i)

@@ -228,7 +228,7 @@ void ColumnVector<T>::getPermutation(IColumn::PermutationSortDirection direction
     if (s >= 256 && s <= std::numeric_limits<UInt32>::max() && use_radix_sort)
     {
         PaddedPODArray<ValueWithIndex<T>> pairs(s);
-        for (UInt32 i = 0; i < UInt32(s); ++i)
+        for (UInt32 i = 0; i < static_cast<UInt32>(s); ++i)
             pairs[i] = {data[i], i};

         RadixSort<RadixSortTraits<T>>::executeLSD(pairs.data(), s, reverse, res.data());

@@ -82,7 +82,7 @@ void FieldVisitorWriteBinary::operator() (const Object & x, WriteBuffer & buf) c

 void FieldVisitorWriteBinary::operator()(const bool & x, WriteBuffer & buf) const
 {
-    writeBinary(UInt8(x), buf);
+    writeBinary(static_cast<UInt8>(x), buf);
 }

 }

@@ -207,15 +207,15 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(

         /// Replace the file descriptors with the ends of our pipes.
         if (STDIN_FILENO != dup2(pipe_stdin.fds_rw[0], STDIN_FILENO))
-            _exit(int(ReturnCodes::CANNOT_DUP_STDIN));
+            _exit(static_cast<int>(ReturnCodes::CANNOT_DUP_STDIN));

         if (!config.pipe_stdin_only)
         {
             if (STDOUT_FILENO != dup2(pipe_stdout.fds_rw[1], STDOUT_FILENO))
-                _exit(int(ReturnCodes::CANNOT_DUP_STDOUT));
+                _exit(static_cast<int>(ReturnCodes::CANNOT_DUP_STDOUT));

             if (STDERR_FILENO != dup2(pipe_stderr.fds_rw[1], STDERR_FILENO))
-                _exit(int(ReturnCodes::CANNOT_DUP_STDERR));
+                _exit(static_cast<int>(ReturnCodes::CANNOT_DUP_STDERR));
         }

         for (size_t i = 0; i < config.read_fds.size(); ++i)

@@ -224,7 +224,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(
             auto fd = config.read_fds[i];

             if (fd != dup2(fds.fds_rw[1], fd))
-                _exit(int(ReturnCodes::CANNOT_DUP_READ_DESCRIPTOR));
+                _exit(static_cast<int>(ReturnCodes::CANNOT_DUP_READ_DESCRIPTOR));
         }

         for (size_t i = 0; i < config.write_fds.size(); ++i)

@@ -233,7 +233,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(
             auto fd = config.write_fds[i];

             if (fd != dup2(fds.fds_rw[0], fd))
-                _exit(int(ReturnCodes::CANNOT_DUP_WRITE_DESCRIPTOR));
+                _exit(static_cast<int>(ReturnCodes::CANNOT_DUP_WRITE_DESCRIPTOR));
         }

         // Reset the signal mask: it may be non-empty and will be inherited

@@ -246,7 +246,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeImpl(
         execv(filename, argv);
         /// If the process is running, then `execv` does not return here.

-        _exit(int(ReturnCodes::CANNOT_EXEC));
+        _exit(static_cast<int>(ReturnCodes::CANNOT_EXEC));
     }

     std::unique_ptr<ShellCommand> res(new ShellCommand(

@@ -356,17 +356,17 @@ void ShellCommand::wait()
     {
         switch (retcode)
         {
-            case int(ReturnCodes::CANNOT_DUP_STDIN):
+            case static_cast<int>(ReturnCodes::CANNOT_DUP_STDIN):
                 throw Exception("Cannot dup2 stdin of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
-            case int(ReturnCodes::CANNOT_DUP_STDOUT):
+            case static_cast<int>(ReturnCodes::CANNOT_DUP_STDOUT):
                 throw Exception("Cannot dup2 stdout of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
-            case int(ReturnCodes::CANNOT_DUP_STDERR):
+            case static_cast<int>(ReturnCodes::CANNOT_DUP_STDERR):
                 throw Exception("Cannot dup2 stderr of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
-            case int(ReturnCodes::CANNOT_EXEC):
+            case static_cast<int>(ReturnCodes::CANNOT_EXEC):
                 throw Exception("Cannot execv in child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
-            case int(ReturnCodes::CANNOT_DUP_READ_DESCRIPTOR):
+            case static_cast<int>(ReturnCodes::CANNOT_DUP_READ_DESCRIPTOR):
                 throw Exception("Cannot dup2 read descriptor of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
-            case int(ReturnCodes::CANNOT_DUP_WRITE_DESCRIPTOR):
+            case static_cast<int>(ReturnCodes::CANNOT_DUP_WRITE_DESCRIPTOR):
                 throw Exception("Cannot dup2 write descriptor of child process", ErrorCodes::CANNOT_CREATE_CHILD_PROCESS);
             default:
                 throw Exception("Child process was exited with return code " + toString(retcode), ErrorCodes::CHILD_WAS_NOT_EXITED_NORMALLY);
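The `ReturnCodes` changes are the scoped-enum flavor of the same cleanup. Assuming `ReturnCodes` is a scoped `enum class` (which the mandatory conversion suggests; the enum value below is illustrative only, not taken from the patch), a sketch:

#include <unistd.h>

enum class ReturnCodes : int
{
    CANNOT_DUP_STDIN = 42,  // illustrative value only
};

// What a forked child process would call on failure; a scoped enum never
// converts to int implicitly, so a cast is required either way, and
// static_cast<int>(...) states the intent more clearly than int(...).
[[noreturn]] static void fail_child()
{
    _exit(static_cast<int>(ReturnCodes::CANNOT_DUP_STDIN));
}

int main() { return 0; }  // fail_child() is only for the child branch after fork()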
@@ -154,7 +154,7 @@ ReturnType ThreadPoolImpl<Thread>::scheduleImpl(Job job, int priority, std::opti
         new_job_or_shutdown.notify_one();
     }

-    return ReturnType(true);
+    return static_cast<ReturnType>(true);
 }

 template <typename Thread>

@@ -64,7 +64,7 @@ void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Int

     size_t stack_trace_size = stack_trace.getSize();
     size_t stack_trace_offset = stack_trace.getOffset();
-    writeIntBinary(UInt8(stack_trace_size - stack_trace_offset), out);
+    writeIntBinary(static_cast<UInt8>(stack_trace_size - stack_trace_offset), out);
     for (size_t i = stack_trace_offset; i < stack_trace_size; ++i)
         writePODBinary(stack_trace.getFramePointers()[i], out);

@@ -514,7 +514,7 @@ void TestKeeper::processingThread()
     {
         RequestInfo info;

-        UInt64 max_wait = UInt64(operation_timeout.totalMilliseconds());
+        UInt64 max_wait = static_cast<UInt64>(operation_timeout.totalMilliseconds());
         if (requests_queue.tryPop(info, max_wait))
         {
             if (expired)

@@ -76,7 +76,7 @@ void ZooKeeper::init(const std::string & implementation_, const Strings & hosts_
         auto & host_string = host.host;
         try
         {
-            bool secure = bool(startsWith(host_string, "secure://"));
+            bool secure = startsWith(host_string, "secure://");

             if (secure)
                 host_string.erase(0, strlen("secure://"));

@@ -801,7 +801,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition &

     auto callback = [state](const Coordination::GetResponse & response)
     {
-        state->code = int32_t(response.error);
+        state->code = static_cast<int32_t>(response.error);
         if (state->code)
             state->event.set();
     };

@@ -810,7 +810,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition &
     {
         if (!state->code)
         {
-            state->code = int32_t(response.error);
+            state->code = static_cast<int32_t>(response.error);
             if (!state->code)
                 state->event_type = response.type;
             state->event.set();

@@ -828,7 +828,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition &
         if (!state->event.tryWait(1000))
             continue;

-        if (state->code == int32_t(Coordination::Error::ZNONODE))
+        if (state->code == static_cast<int32_t>(Coordination::Error::ZNONODE))
             return true;

         if (state->code)

@@ -40,7 +40,7 @@ void write(bool x, WriteBuffer & out)

 void write(const std::string & s, WriteBuffer & out)
 {
-    write(int32_t(s.size()), out);
+    write(static_cast<int32_t>(s.size()), out);
     out.write(s.data(), s.size());
 }

@@ -539,7 +539,7 @@ void ZooKeeper::sendAuth(const String & scheme, const String & data)
             Error::ZMARSHALLINGERROR);

     if (err != Error::ZOK)
-        throw Exception("Error received in reply to auth request. Code: " + DB::toString(int32_t(err)) + ". Message: " + String(errorMessage(err)),
+        throw Exception("Error received in reply to auth request. Code: " + DB::toString(static_cast<int32_t>(err)) + ". Message: " + String(errorMessage(err)),
             Error::ZMARSHALLINGERROR);
 }

@@ -563,8 +563,8 @@ void ZooKeeper::sendThread()
         {
             /// Wait for the next request in queue. No more than operation timeout. No more than until next heartbeat time.
             UInt64 max_wait = std::min(
-                UInt64(std::chrono::duration_cast<std::chrono::milliseconds>(next_heartbeat_time - now).count()),
-                UInt64(operation_timeout.totalMilliseconds()));
+                static_cast<UInt64>(std::chrono::duration_cast<std::chrono::milliseconds>(next_heartbeat_time - now).count()),
+                static_cast<UInt64>(operation_timeout.totalMilliseconds()));

             RequestInfo info;
             if (requests_queue.tryPop(info, max_wait))

@@ -153,7 +153,7 @@ void formatIPv6(const unsigned char * src, char *& dst, uint8_t zeroed_tail_byte
     }

     /// Was it a trailing run of 0x00's?
-    if (best.base != -1 && size_t(best.base) + size_t(best.len) == words.size())
+    if (best.base != -1 && static_cast<size_t>(best.base) + static_cast<size_t>(best.len) == words.size())
         *dst++ = ':';

     *dst++ = '\0';

@@ -143,7 +143,7 @@ void CompressionCodecDelta::doDecompressData(const char * source, UInt32 source_
     UInt8 bytes_to_skip = uncompressed_size % bytes_size;
     UInt32 output_size = uncompressed_size - bytes_to_skip;

-    if (UInt32(2 + bytes_to_skip) > source_size)
+    if (static_cast<UInt32>(2 + bytes_to_skip) > source_size)
         throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);

     memcpy(dest, &source[2], bytes_to_skip);

@@ -186,7 +186,7 @@ UInt8 getDeltaBytesSize(const IDataType * column_type)

 void registerCodecDelta(CompressionCodecFactory & factory)
 {
-    UInt8 method_code = UInt8(CompressionMethodByte::Delta);
+    UInt8 method_code = static_cast<UInt8>(CompressionMethodByte::Delta);
     factory.registerCompressionCodecWithType("Delta", method_code, [&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr
     {
         UInt8 delta_bytes_size = 0;

@@ -520,7 +520,7 @@ void CompressionCodecDoubleDelta::doDecompressData(const char * source, UInt32 s
     UInt8 bytes_to_skip = uncompressed_size % bytes_size;
     UInt32 output_size = uncompressed_size - bytes_to_skip;

-    if (UInt32(2 + bytes_to_skip) > source_size)
+    if (static_cast<UInt32>(2 + bytes_to_skip) > source_size)
         throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);

     memcpy(dest, &source[2], bytes_to_skip);

@@ -544,7 +544,7 @@ void CompressionCodecDoubleDelta::doDecompressData(const char * source, UInt32 s

 void registerCodecDoubleDelta(CompressionCodecFactory & factory)
 {
-    UInt8 method_code = UInt8(CompressionMethodByte::DoubleDelta);
+    UInt8 method_code = static_cast<UInt8>(CompressionMethodByte::DoubleDelta);
     factory.registerCompressionCodecWithType("DoubleDelta", method_code,
         [&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr
         {

@@ -50,11 +50,11 @@ uint8_t getMethodCode(EncryptionMethod Method)
 {
     if (Method == AES_128_GCM_SIV)
     {
-        return uint8_t(CompressionMethodByte::AES_128_GCM_SIV);
+        return static_cast<uint8_t>(CompressionMethodByte::AES_128_GCM_SIV);
     }
     else if (Method == AES_256_GCM_SIV)
     {
-        return uint8_t(CompressionMethodByte::AES_256_GCM_SIV);
+        return static_cast<uint8_t>(CompressionMethodByte::AES_256_GCM_SIV);
     }
     else
     {

@@ -419,7 +419,7 @@ void CompressionCodecGorilla::doDecompressData(const char * source, UInt32 sourc

     UInt8 bytes_to_skip = uncompressed_size % bytes_size;

-    if (UInt32(2 + bytes_to_skip) > source_size)
+    if (static_cast<UInt32>(2 + bytes_to_skip) > source_size)
         throw Exception("Cannot decompress. File has wrong header", ErrorCodes::CANNOT_DECOMPRESS);

     memcpy(dest, &source[2], bytes_to_skip);

@@ -443,7 +443,7 @@ void CompressionCodecGorilla::doDecompressData(const char * source, UInt32 sourc

 void registerCodecGorilla(CompressionCodecFactory & factory)
 {
-    UInt8 method_code = UInt8(CompressionMethodByte::Gorilla);
+    UInt8 method_code = static_cast<UInt8>(CompressionMethodByte::Gorilla);
     factory.registerCompressionCodecWithType("Gorilla", method_code,
         [&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr
         {

@@ -112,7 +112,7 @@ MagicNumber serializeTypeId(TypeIndex type_id)
             break;
     }

-    throw Exception("Type is not supported by T64 codec: " + toString(UInt32(type_id)), ErrorCodes::LOGICAL_ERROR);
+    throw Exception("Type is not supported by T64 codec: " + toString(static_cast<UInt32>(type_id)), ErrorCodes::LOGICAL_ERROR);
 }

 TypeIndex deserializeTypeId(uint8_t serialized_type_id)

@@ -137,7 +137,7 @@ TypeIndex deserializeTypeId(uint8_t serialized_type_id)
         case MagicNumber::Decimal64: return TypeIndex::Decimal64;
     }

-    throw Exception("Bad magic number in T64 codec: " + toString(UInt32(serialized_type_id)), ErrorCodes::LOGICAL_ERROR);
+    throw Exception("Bad magic number in T64 codec: " + toString(static_cast<UInt32>(serialized_type_id)), ErrorCodes::LOGICAL_ERROR);
 }

@@ -284,22 +284,22 @@ void reverseTransposeBytes(const UInt64 * matrix, UInt32 col, T & value)

     if constexpr (sizeof(T) > 4)
     {
-        value |= UInt64(matrix8[64 * 7 + col]) << (8 * 7);
-        value |= UInt64(matrix8[64 * 6 + col]) << (8 * 6);
-        value |= UInt64(matrix8[64 * 5 + col]) << (8 * 5);
-        value |= UInt64(matrix8[64 * 4 + col]) << (8 * 4);
+        value |= static_cast<UInt64>(matrix8[64 * 7 + col]) << (8 * 7);
+        value |= static_cast<UInt64>(matrix8[64 * 6 + col]) << (8 * 6);
+        value |= static_cast<UInt64>(matrix8[64 * 5 + col]) << (8 * 5);
+        value |= static_cast<UInt64>(matrix8[64 * 4 + col]) << (8 * 4);
     }

     if constexpr (sizeof(T) > 2)
     {
-        value |= UInt32(matrix8[64 * 3 + col]) << (8 * 3);
-        value |= UInt32(matrix8[64 * 2 + col]) << (8 * 2);
+        value |= static_cast<UInt32>(matrix8[64 * 3 + col]) << (8 * 3);
+        value |= static_cast<UInt32>(matrix8[64 * 2 + col]) << (8 * 2);
     }

     if constexpr (sizeof(T) > 1)
-        value |= UInt32(matrix8[64 * 1 + col]) << (8 * 1);
+        value |= static_cast<UInt32>(matrix8[64 * 1 + col]) << (8 * 1);

-    value |= UInt32(matrix8[col]);
+    value |= static_cast<UInt32>(matrix8[col]);
 }

@@ -422,12 +422,12 @@ UInt32 getValuableBitsNumber(Int64 min, Int64 max)
     if (min < 0 && max >= 0)
    {
         if (min + max >= 0)
-            return getValuableBitsNumber(0ull, UInt64(max)) + 1;
+            return getValuableBitsNumber(0ull, static_cast<UInt64>(max)) + 1;
         else
-            return getValuableBitsNumber(0ull, UInt64(~min)) + 1;
+            return getValuableBitsNumber(0ull, static_cast<UInt64>(~min)) + 1;
     }
     else
-        return getValuableBitsNumber(UInt64(min), UInt64(max));
+        return getValuableBitsNumber(static_cast<UInt64>(min), static_cast<UInt64>(max));
 }
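Casts like `static_cast<UInt64>(~min)` in the hunk above make a deliberate signed-to-unsigned conversion stand out. For two's-complement integers `~x == -x - 1`, so for a negative `min` the complement is a small non-negative value and the cast preserves it. A tiny self-check (standard C++ only; the variable name mirrors the hunk but the program is illustrative):

#include <cassert>
#include <cstdint>

int main()
{
    int64_t min = -5;
    // ~(-5) == 4 because ~x == -x - 1 in two's complement; the static_cast then
    // maps a non-negative int64_t to uint64_t without changing the value.
    assert(static_cast<uint64_t>(~min) == 4);
    return 0;
}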
@@ -559,14 +559,14 @@ void decompressData(const char * src, UInt32 bytes_size, char * dst, UInt32 unco
     T upper_max [[maybe_unused]] = 0;
     T sign_bit [[maybe_unused]] = 0;
     if (num_bits < 64)
-        upper_min = UInt64(min) >> num_bits << num_bits;
+        upper_min = static_cast<UInt64>(min) >> num_bits << num_bits;

     if constexpr (is_signed_v<T>)
     {
         if (min < 0 && max >= 0 && num_bits < 64)
         {
             sign_bit = 1ull << (num_bits - 1);
-            upper_max = UInt64(max) >> num_bits << num_bits;
+            upper_max = static_cast<UInt64>(max) >> num_bits << num_bits;
         }
     }

@@ -109,7 +109,7 @@ CompressionCodecZSTD::CompressionCodecZSTD(int level_) : level(level_), enable_l

 void registerCodecZSTD(CompressionCodecFactory & factory)
 {
-    UInt8 method_code = UInt8(CompressionMethodByte::ZSTD);
+    UInt8 method_code = static_cast<UInt8>(CompressionMethodByte::ZSTD);
     factory.registerCompressionCodec("ZSTD", method_code, [&](const ASTPtr & arguments) -> CompressionCodecPtr {
         int level = CompressionCodecZSTD::ZSTD_DEFAULT_LEVEL;
         if (arguments && !arguments->children.empty())

@@ -628,12 +628,12 @@ void StreamStatistics::print() const
 {
     std::cerr
         << "Num tokens: " << num_tokens
-        << ", Avg literal length: " << double(sum_literal_lengths) / num_tokens
-        << ", Avg match length: " << double(sum_match_lengths) / num_tokens
-        << ", Avg match offset: " << double(sum_match_offsets) / num_tokens
-        << ", Offset < 8 ratio: " << double(count_match_offset_less_8) / num_tokens
-        << ", Offset < 16 ratio: " << double(count_match_offset_less_16) / num_tokens
-        << ", Match replicate itself: " << double(count_match_replicate_itself) / num_tokens
+        << ", Avg literal length: " << static_cast<double>(sum_literal_lengths) / num_tokens
+        << ", Avg match length: " << static_cast<double>(sum_match_lengths) / num_tokens
+        << ", Avg match offset: " << static_cast<double>(sum_match_offsets) / num_tokens
+        << ", Offset < 8 ratio: " << static_cast<double>(count_match_offset_less_8) / num_tokens
+        << ", Offset < 16 ratio: " << static_cast<double>(count_match_offset_less_16) / num_tokens
+        << ", Match replicate itself: " << static_cast<double>(count_match_replicate_itself) / num_tokens
         << "\n";
 }

@@ -99,20 +99,20 @@ void KeeperConfigurationAndSettings::dump(WriteBufferFromOwnString & buf) const
     writeText("max_requests_batch_size=", buf);
     write_int(coordination_settings->max_requests_batch_size);
     writeText("min_session_timeout_ms=", buf);
-    write_int(uint64_t(coordination_settings->min_session_timeout_ms));
+    write_int(static_cast<uint64_t>(coordination_settings->min_session_timeout_ms));
     writeText("session_timeout_ms=", buf);
-    write_int(uint64_t(coordination_settings->session_timeout_ms));
+    write_int(static_cast<uint64_t>(coordination_settings->session_timeout_ms));
     writeText("operation_timeout_ms=", buf);
-    write_int(uint64_t(coordination_settings->operation_timeout_ms));
+    write_int(static_cast<uint64_t>(coordination_settings->operation_timeout_ms));
     writeText("dead_session_check_period_ms=", buf);
-    write_int(uint64_t(coordination_settings->dead_session_check_period_ms));
+    write_int(static_cast<uint64_t>(coordination_settings->dead_session_check_period_ms));

     writeText("heart_beat_interval_ms=", buf);
-    write_int(uint64_t(coordination_settings->heart_beat_interval_ms));
+    write_int(static_cast<uint64_t>(coordination_settings->heart_beat_interval_ms));
     writeText("election_timeout_lower_bound_ms=", buf);
-    write_int(uint64_t(coordination_settings->election_timeout_lower_bound_ms));
+    write_int(static_cast<uint64_t>(coordination_settings->election_timeout_lower_bound_ms));
     writeText("election_timeout_upper_bound_ms=", buf);
-    write_int(uint64_t(coordination_settings->election_timeout_upper_bound_ms));
+    write_int(static_cast<uint64_t>(coordination_settings->election_timeout_upper_bound_ms));

     writeText("reserved_log_items=", buf);
     write_int(coordination_settings->reserved_log_items);

@@ -122,9 +122,9 @@ void KeeperConfigurationAndSettings::dump(WriteBufferFromOwnString & buf) const
     writeText("auto_forwarding=", buf);
     write_bool(coordination_settings->auto_forwarding);
     writeText("shutdown_timeout=", buf);
-    write_int(uint64_t(coordination_settings->shutdown_timeout));
+    write_int(static_cast<uint64_t>(coordination_settings->shutdown_timeout));
     writeText("startup_timeout=", buf);
-    write_int(uint64_t(coordination_settings->startup_timeout));
+    write_int(static_cast<uint64_t>(coordination_settings->startup_timeout));

     writeText("raft_logs_level=", buf);
     writeText(coordination_settings->raft_logs_level.toString(), buf);

@@ -221,7 +221,7 @@ namespace MySQLReplication
                 case MYSQL_TYPE_BLOB:
                 case MYSQL_TYPE_GEOMETRY:
                 {
-                    column_meta.emplace_back(UInt16(meta[pos]));
+                    column_meta.emplace_back(static_cast<UInt16>(meta[pos]));
                     pos += 1;
                     break;
                 }

@@ -229,9 +229,9 @@ namespace MySQLReplication
                 case MYSQL_TYPE_STRING:
                 {
                     /// Big-Endian
-                    auto b0 = UInt16(meta[pos] << 8);
-                    auto b1 = UInt8(meta[pos + 1]);
-                    column_meta.emplace_back(UInt16(b0 + b1));
+                    auto b0 = static_cast<UInt16>(meta[pos] << 8);
+                    auto b1 = static_cast<UInt8>(meta[pos + 1]);
+                    column_meta.emplace_back(static_cast<UInt16>(b0 + b1));
                     pos += 2;
                     break;
                 }

@@ -239,9 +239,9 @@ namespace MySQLReplication
                 case MYSQL_TYPE_VARCHAR:
                 case MYSQL_TYPE_VAR_STRING: {
                     /// Little-Endian
-                    auto b0 = UInt8(meta[pos]);
-                    auto b1 = UInt16(meta[pos + 1] << 8);
-                    column_meta.emplace_back(UInt16(b0 + b1));
+                    auto b0 = static_cast<UInt8>(meta[pos]);
+                    auto b1 = static_cast<UInt16>(meta[pos + 1] << 8);
+                    column_meta.emplace_back(static_cast<UInt16>(b0 + b1));
                     pos += 2;
                     break;
                 }

@@ -543,7 +543,7 @@ namespace MySQLReplication
                         );

                         if (!meta)
-                            row.push_back(Field{UInt32(date_time)});
+                            row.push_back(Field{static_cast<UInt32>(date_time)});
                         else
                         {
                             DB::DecimalUtils::DecimalComponents<DateTime64> components{

@@ -603,7 +603,7 @@ namespace MySQLReplication
             throw Exception("Attempt to read after EOF.", ErrorCodes::ATTEMPT_TO_READ_AFTER_EOF);

         if ((*payload.position() & 0x80) == 0)
-            mask = UInt32(-1);
+            mask = static_cast<UInt32>(-1);

         *payload.position() ^= 0x80;

@@ -301,7 +301,7 @@ ColumnUInt8::Ptr IPAddressDictionary::hasKeys(const Columns & key_columns, const
         uint8_t addrv6_buf[IPV6_BINARY_LENGTH];
         for (const auto i : collections::range(0, rows))
         {
-            auto addrv4 = UInt32(first_column->get64(i));
+            auto addrv4 = static_cast<UInt32>(first_column->get64(i));
             auto found = tryLookupIPv4(addrv4, addrv6_buf);
             out[i] = (found != ipNotFound());
             keys_found += out[i];

@@ -387,7 +387,7 @@ void IPAddressDictionary::loadData()
             setAttributeValue(attribute, attribute_column[row]);
         }

-        const auto [addr, prefix] = parseIPFromString(std::string_view(key_column_ptr->getDataAt(row)));
+        const auto [addr, prefix] = parseIPFromString(std::string_view{key_column_ptr->getDataAt(row)});
         has_ipv6 = has_ipv6 || (addr.family() == Poco::Net::IPAddress::IPv6);

         size_t row_number = ip_records.size();

@@ -716,7 +716,7 @@ void IPAddressDictionary::getItemsImpl(
         for (const auto i : collections::range(0, rows))
         {
             // addrv4 has native endianness
-            auto addrv4 = UInt32(first_column->get64(i));
+            auto addrv4 = static_cast<UInt32>(first_column->get64(i));
             auto found = tryLookupIPv4(addrv4, addrv6_buf);
             if (found != ipNotFound())
             {

@@ -181,7 +181,7 @@ Pipe MongoDBDictionarySource::loadIds(const std::vector<UInt64> & ids)

     Poco::MongoDB::Array::Ptr ids_array(new Poco::MongoDB::Array);
     for (const UInt64 id : ids)
-        ids_array->add(DB::toString(id), Int32(id));
+        ids_array->add(DB::toString(id), static_cast<Int32>(id));

     cursor->query().selector().addNewDocument(dict_struct.id->name).add("$in", ids_array);

@@ -218,7 +218,7 @@ Pipe MongoDBDictionarySource::loadKeys(const Columns & key_columns, const std::v
             case AttributeUnderlyingType::Int32:
             case AttributeUnderlyingType::Int64:
             {
-                key.add(key_attribute.name, Int32(key_columns[attribute_index]->get64(row_idx)));
+                key.add(key_attribute.name, static_cast<Int32>(key_columns[attribute_index]->get64(row_idx)));
                 break;
             }
             case AttributeUnderlyingType::Float32:

@@ -93,7 +93,7 @@ private:
             if (!first)
                 writeChar(',', out);
             first = false;
-            writeIntText(T(bit), out);
+            writeIntText(static_cast<T>(bit), out);
         }
     }

@@ -651,18 +651,18 @@ struct ParseMACImpl
      */
     static UInt64 parse(const char * pos)
     {
-        return (UInt64(unhex(pos[0])) << 44)
-               | (UInt64(unhex(pos[1])) << 40)
-               | (UInt64(unhex(pos[3])) << 36)
-               | (UInt64(unhex(pos[4])) << 32)
-               | (UInt64(unhex(pos[6])) << 28)
-               | (UInt64(unhex(pos[7])) << 24)
-               | (UInt64(unhex(pos[9])) << 20)
-               | (UInt64(unhex(pos[10])) << 16)
-               | (UInt64(unhex(pos[12])) << 12)
-               | (UInt64(unhex(pos[13])) << 8)
-               | (UInt64(unhex(pos[15])) << 4)
-               | (UInt64(unhex(pos[16])));
+        return (static_cast<UInt64>(unhex(pos[0])) << 44)
+               | (static_cast<UInt64>(unhex(pos[1])) << 40)
+               | (static_cast<UInt64>(unhex(pos[3])) << 36)
+               | (static_cast<UInt64>(unhex(pos[4])) << 32)
+               | (static_cast<UInt64>(unhex(pos[6])) << 28)
+               | (static_cast<UInt64>(unhex(pos[7])) << 24)
+               | (static_cast<UInt64>(unhex(pos[9])) << 20)
+               | (static_cast<UInt64>(unhex(pos[10])) << 16)
+               | (static_cast<UInt64>(unhex(pos[12])) << 12)
+               | (static_cast<UInt64>(unhex(pos[13])) << 8)
+               | (static_cast<UInt64>(unhex(pos[15])) << 4)
+               | (static_cast<UInt64>(unhex(pos[16])));
     }

     static constexpr auto name = "MACStringToNum";
@@ -678,12 +678,12 @@ struct ParseOUIImpl
      */
     static UInt64 parse(const char * pos)
     {
-        return (UInt64(unhex(pos[0])) << 20)
-               | (UInt64(unhex(pos[1])) << 16)
-               | (UInt64(unhex(pos[3])) << 12)
-               | (UInt64(unhex(pos[4])) << 8)
-               | (UInt64(unhex(pos[6])) << 4)
-               | (UInt64(unhex(pos[7])));
+        return (static_cast<UInt64>(unhex(pos[0])) << 20)
+               | (static_cast<UInt64>(unhex(pos[1])) << 16)
+               | (static_cast<UInt64>(unhex(pos[3])) << 12)
+               | (static_cast<UInt64>(unhex(pos[4])) << 8)
+               | (static_cast<UInt64>(unhex(pos[6])) << 4)
+               | (static_cast<UInt64>(unhex(pos[7])));
     }

     static constexpr auto name = "MACStringToOUI";

@@ -895,9 +895,9 @@ private:
         if (bits_to_keep >= 8 * sizeof(UInt32))
             return { src, src };
         if (bits_to_keep == 0)
-            return { UInt32(0), UInt32(-1) };
+            return { static_cast<UInt32>(0), static_cast<UInt32>(-1) };

-        UInt32 mask = UInt32(-1) << (8 * sizeof(UInt32) - bits_to_keep);
+        UInt32 mask = static_cast<UInt32>(-1) << (8 * sizeof(UInt32) - bits_to_keep);
         UInt32 lower = src & mask;
         UInt32 upper = lower | ~mask;

@@ -565,7 +565,7 @@ ColumnPtr FunctionAnyArityLogical<Impl, Name>::executeShortCircuit(ColumnsWithTy
     /// The result is !mask_n.

     bool inverted = Name::name != NameAnd::name;
-    UInt8 null_value = UInt8(Name::name == NameAnd::name);
+    UInt8 null_value = static_cast<UInt8>(Name::name == NameAnd::name);
     IColumn::Filter mask(arguments[0].column->size(), 1);

     /// If result is nullable, we need to create null bytemap of the resulting column.

@@ -271,9 +271,9 @@ struct NgramDistanceImpl
             size_t first_size = dispatchSearcher(calculateHaystackStatsAndMetric<false>, data.data(), data_size, common_stats.get(), distance, nullptr);
             /// For !symmetric version we should not use first_size.
             if constexpr (symmetric)
-                res = distance * 1.f / std::max(first_size + second_size, size_t(1));
+                res = distance * 1.f / std::max(first_size + second_size, static_cast<size_t>(1));
             else
-                res = 1.f - distance * 1.f / std::max(second_size, size_t(1));
+                res = 1.f - distance * 1.f / std::max(second_size, static_cast<size_t>(1));
         }
         else
         {

@@ -339,9 +339,9 @@ struct NgramDistanceImpl

                 /// For !symmetric version we should not use haystack_stats_size.
                 if constexpr (symmetric)
-                    res[i] = distance * 1.f / std::max(haystack_stats_size + needle_stats_size, size_t(1));
+                    res[i] = distance * 1.f / std::max(haystack_stats_size + needle_stats_size, static_cast<size_t>(1));
                 else
-                    res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, size_t(1));
+                    res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, static_cast<size_t>(1));
             }
             else
             {

@@ -410,7 +410,7 @@ struct NgramDistanceImpl
                 for (size_t j = 0; j < needle_stats_size; ++j)
                     --common_stats[needle_ngram_storage[j]];

-                res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, size_t(1));
+                res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, static_cast<size_t>(1));
             }
             else
             {

@@ -457,9 +457,9 @@ struct NgramDistanceImpl
                     ngram_storage.get());
                 /// For !symmetric version we should not use haystack_stats_size.
                 if constexpr (symmetric)
-                    res[i] = distance * 1.f / std::max(haystack_stats_size + needle_stats_size, size_t(1));
+                    res[i] = distance * 1.f / std::max(haystack_stats_size + needle_stats_size, static_cast<size_t>(1));
                 else
-                    res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, size_t(1));
+                    res[i] = 1.f - distance * 1.f / std::max(needle_stats_size, static_cast<size_t>(1));
             }
             else
             {

@@ -23,7 +23,7 @@ public:
         if (txn)
             res = {txn->tid.start_csn, txn->tid.local_tid, txn->tid.host_id};
         else
-            res = {UInt64(0), UInt64(0), UUIDHelpers::Nil};
+            res = {static_cast<UInt64>(0), static_cast<UInt64>(0), UUIDHelpers::Nil};
         return res;
     }

@@ -306,7 +306,7 @@ GeohashesInBoxPreparedArgs geohashesInBoxPrepare(

     return GeohashesInBoxPreparedArgs
     {
-        std::max<UInt64>(1, UInt64(lon_items) * lat_items),
+        std::max<UInt64>(1, static_cast<UInt64>(lon_items) * lat_items),
         lon_items,
         lat_items,
         lon_min,

@@ -113,7 +113,7 @@ private:
                 return default_port;

             port = (port * 10) + (*p - '0');
-            if (port < 0 || port > UInt16(-1))
+            if (port < 0 || port > static_cast<UInt16>(-1))
                 return default_port;
             ++p;
         }

@@ -94,7 +94,7 @@ private:
             src_offset = src_offsets[i];
             dst_offset += src_length;

-            if (src_length > 1 && dst_data[dst_offset - 2] != UInt8(trailing_char_str.front()))
+            if (src_length > 1 && dst_data[dst_offset - 2] != static_cast<UInt8>(trailing_char_str.front()))
             {
                 dst_data[dst_offset - 1] = trailing_char_str.front();
                 dst_data[dst_offset] = 0;

@@ -132,7 +132,7 @@ public:
         if (count_positive == 0 || count_positive == size)
             return std::numeric_limits<ResultType>::quiet_NaN();

-        return ResultType(area) / count_positive / (size - count_positive);
+        return static_cast<ResultType>(area) / count_positive / (size - count_positive);
     }
 };

@@ -183,7 +183,7 @@ struct ArrayAggregateImpl
             {
                 size_t array_size = offsets[i] - pos;
                 /// Just multiply the value by array size.
-                res[i] = x * ResultType(array_size);
+                res[i] = x * static_cast<ResultType>(array_size);
             }
             else if constexpr (aggregate_operation == AggregateOperation::min ||
                 aggregate_operation == AggregateOperation::max)

@@ -152,7 +152,7 @@ public:

     void update()
     {
-        sink_null_map[index] = bool(src_null_map);
+        sink_null_map[index] = static_cast<bool>(src_null_map);
         ++index;
     }

@@ -492,7 +492,7 @@ ColumnPtr FunctionArrayElement::executeNumberConst(
         /// arr[-2] is the element at offset 1 from the last and so on.

         ArrayElementNumImpl<DataType>::template vectorConst<true>(
-            col_nested->getData(), col_array->getOffsets(), -(UInt64(safeGet<Int64>(index)) + 1), col_res->getData(), builder);
+            col_nested->getData(), col_array->getOffsets(), -(static_cast<UInt64>(safeGet<Int64>(index)) + 1), col_res->getData(), builder);
     }
     else
         throw Exception("Illegal type of array index", ErrorCodes::LOGICAL_ERROR);

@@ -605,7 +605,7 @@ ColumnPtr FunctionArrayElement::executeGenericConst(
             col_nested, col_array->getOffsets(), safeGet<UInt64>(index) - 1, *col_res, builder);
     else if (index.getType() == Field::Types::Int64)
         ArrayElementGenericImpl::vectorConst<true>(
-            col_nested, col_array->getOffsets(), -(UInt64(safeGet<Int64>(index) + 1)), *col_res, builder);
+            col_nested, col_array->getOffsets(), -(static_cast<UInt64>(safeGet<Int64>(index) + 1)), *col_res, builder);
     else
         throw Exception("Illegal type of array index", ErrorCodes::LOGICAL_ERROR);

@@ -112,7 +112,7 @@ bool FunctionArrayReverse::executeGeneric(const IColumn & src_data, const Column
     {
         ssize_t src_index = src_array_offsets[i] - 1;

-        while (src_index >= ssize_t(src_prev_offset))
+        while (src_index >= static_cast<ssize_t>(src_prev_offset))
         {
             res_data.insertFrom(src_data, src_index);
             --src_index;

@@ -72,7 +72,7 @@ struct BitShiftLeftImpl
                 if (shift_left_bits)
                 {
                     /// The left b bit of the right byte is moved to the right b bit of this byte
-                    *out = UInt8(UInt8(*(op_pointer) >> (8 - shift_left_bits)) | previous);
+                    *out = static_cast<UInt8>(static_cast<UInt8>(*(op_pointer) >> (8 - shift_left_bits)) | previous);
                     previous = *op_pointer << shift_left_bits;
                 }
                 else

@@ -131,7 +131,7 @@ struct BitShiftLeftImpl
                 if (op_pointer + 1 < end)
                 {
                     /// The left b bit of the right byte is moved to the right b bit of this byte
-                    *out = UInt8(UInt8(*(op_pointer + 1) >> (8 - shift_left_bits)) | *out);
+                    *out = static_cast<UInt8>(static_cast<UInt8>(*(op_pointer + 1) >> (8 - shift_left_bits)) | *out);
                 }
                 op_pointer++;
                 out++;

@@ -41,7 +41,7 @@ struct BitShiftRightImpl
                 if (op_pointer - 1 >= begin)
                 {
                     /// The right b bit of the left byte is moved to the left b bit of this byte
-                    *out = UInt8(UInt8(*(op_pointer - 1) << (8 - shift_right_bits)) | *out);
+                    *out = static_cast<UInt8>(static_cast<UInt8>(*(op_pointer - 1) << (8 - shift_right_bits)) | *out);
                 }
             }
         }

@@ -235,8 +235,8 @@ private:
     template <typename TransformX, typename TransformY, typename T1, typename T2>
     Int64 calculate(const TransformX & transform_x, const TransformY & transform_y, T1 x, T2 y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y) const
     {
-        return Int64(transform_y.execute(y, timezone_y))
-            - Int64(transform_x.execute(x, timezone_x));
+        return static_cast<Int64>(transform_y.execute(y, timezone_y))
+            - static_cast<Int64>(transform_x.execute(x, timezone_x));
     }

     template <typename T>

@@ -150,7 +150,7 @@ ColumnPtr FunctionHasColumnInTable::executeImpl(const ColumnsWithTypeAndName & a
         has_column = remote_columns.hasPhysical(column_name);
     }

-    return DataTypeUInt8().createColumnConst(input_rows_count, Field{UInt64(has_column)});
+    return DataTypeUInt8().createColumnConst(input_rows_count, Field{static_cast<UInt64>(has_column)});
 }

 }

@@ -15,7 +15,7 @@ inline int32_t JumpConsistentHash(uint64_t key, int32_t num_buckets)
     {
         b = j;
         key = key * 2862933555777941757ULL + 1;
-        j = static_cast<int64_t>((b + 1) * (double(1LL << 31) / double((key >> 33) + 1)));
+        j = static_cast<int64_t>((b + 1) * (static_cast<double>(1LL << 31) / static_cast<double>((key >> 33) + 1)));
     }
     return static_cast<int32_t>(b);
 }

@@ -135,7 +135,7 @@ public:
         }
         if (size <= 0)
             return;
-        if (size > Int64(input_rows_count))
+        if (size > static_cast<Int64>(input_rows_count))
            size = input_rows_count;

         if (!src)

@@ -163,14 +163,14 @@ public:
         }
         else if (offset > 0)
         {
-            insert_range_from(source_is_constant, source_column_casted, offset, Int64(input_rows_count) - offset);
-            insert_range_from(default_is_constant, default_column_casted, Int64(input_rows_count) - offset, offset);
+            insert_range_from(source_is_constant, source_column_casted, offset, static_cast<Int64>(input_rows_count) - offset);
+            insert_range_from(default_is_constant, default_column_casted, static_cast<Int64>(input_rows_count) - offset, offset);
             return result_column;
         }
         else
         {
             insert_range_from(default_is_constant, default_column_casted, 0, -offset);
-            insert_range_from(source_is_constant, source_column_casted, 0, Int64(input_rows_count) + offset);
+            insert_range_from(source_is_constant, source_column_casted, 0, static_cast<Int64>(input_rows_count) + offset);
             return result_column;
         }
     }
@@ -188,7 +188,7 @@ public:

             Int64 src_idx = row + offset;

-            if (src_idx >= 0 && src_idx < Int64(input_rows_count))
+            if (src_idx >= 0 && src_idx < static_cast<Int64>(input_rows_count))
                 result_column->insertFrom(*source_column_casted, source_is_constant ? 0 : src_idx);
             else if (has_defaults)
                 result_column->insertFrom(*default_column_casted, default_is_constant ? 0 : row);

@@ -30,7 +30,7 @@ using FunctionSigmoid = FunctionMathUnary<Impl>;

 #else

-static double sigmoid(double x)
+double sigmoid(double x)
 {
     return 1.0 / (1.0 + exp(-x));
 }

@@ -26,7 +26,7 @@ inline bool HadoopSnappyDecoder::checkBufferLength(int max) const

 inline bool HadoopSnappyDecoder::checkAvailIn(size_t avail_in, int min)
 {
-    return avail_in >= size_t(min);
+    return avail_in >= static_cast<size_t>(min);
 }

 inline void HadoopSnappyDecoder::copyToBuffer(size_t * avail_in, const char ** next_in)

@@ -245,7 +245,7 @@ void PeekableReadBuffer::resizeOwnMemoryIfNecessary(size_t bytes_to_append)

         /// Stack memory is not enough, allocate larger buffer.
         use_stack_memory = false;
-        memory.resize(std::max(size_t(DBMS_DEFAULT_BUFFER_SIZE), new_size));
+        memory.resize(std::max(static_cast<size_t>(DBMS_DEFAULT_BUFFER_SIZE), new_size));
         memcpy(memory.data(), stack_memory, sizeof(stack_memory));
         if (need_update_checkpoint)
             checkpoint.emplace(memory.data() + offset);

@@ -16,13 +16,13 @@ off_t ReadBufferFromMemory::seek(off_t offset, int whence)
         {
             pos = internal_buffer.begin() + offset;
             working_buffer = internal_buffer; /// We need to restore `working_buffer` in case the position was at EOF before this seek().
-            return size_t(pos - internal_buffer.begin());
+            return static_cast<size_t>(pos - internal_buffer.begin());
         }
         else
             throw Exception(
                 "Seek position is out of bounds. "
                 "Offset: "
-                    + std::to_string(offset) + ", Max: " + std::to_string(size_t(internal_buffer.end() - internal_buffer.begin())),
+                    + std::to_string(offset) + ", Max: " + std::to_string(static_cast<size_t>(internal_buffer.end() - internal_buffer.begin())),
                 ErrorCodes::SEEK_POSITION_OUT_OF_BOUND);
     }
     else if (whence == SEEK_CUR)

@@ -32,13 +32,13 @@ off_t ReadBufferFromMemory::seek(off_t offset, int whence)
         {
             pos = new_pos;
             working_buffer = internal_buffer; /// We need to restore `working_buffer` in case the position was at EOF before this seek().
-            return size_t(pos - internal_buffer.begin());
+            return static_cast<size_t>(pos - internal_buffer.begin());
         }
         else
             throw Exception(
                 "Seek position is out of bounds. "
                 "Offset: "
-                    + std::to_string(offset) + ", Max: " + std::to_string(size_t(internal_buffer.end() - internal_buffer.begin())),
+                    + std::to_string(offset) + ", Max: " + std::to_string(static_cast<size_t>(internal_buffer.end() - internal_buffer.begin())),
                 ErrorCodes::SEEK_POSITION_OUT_OF_BOUND);
     }
     else

@@ -175,7 +175,7 @@ off_t ReadBufferFromS3::seek(off_t offset_, int whence)
     if (!restricted_seek)
     {
         if (!working_buffer.empty()
-            && size_t(offset_) >= offset - working_buffer.size()
+            && static_cast<size_t>(offset_) >= offset - working_buffer.size()
             && offset_ < offset)
         {
             pos = working_buffer.end() - (offset - offset_);

@@ -379,7 +379,7 @@ void WriteBufferFromS3::completeMultipartUpload()
 void WriteBufferFromS3::makeSinglepartUpload()
 {
     auto size = temporary_buffer->tellp();
-    bool with_pool = bool(schedule);
+    bool with_pool = static_cast<bool>(schedule);

     LOG_TRACE(log, "Making single part upload. Bucket: {}, Key: {}, Size: {}, WithPool: {}", bucket, key, size, with_pool);

@@ -467,7 +467,7 @@ void WriteBufferFromS3::fillPutRequest(Aws::S3::Model::PutObjectRequest & req)
 void WriteBufferFromS3::processPutRequest(PutObjectTask & task)
 {
     auto outcome = client_ptr->PutObject(task.req);
-    bool with_pool = bool(schedule);
+    bool with_pool = static_cast<bool>(schedule);

     if (outcome.IsSuccess())
         LOG_TRACE(log, "Single part upload has completed. Bucket: {}, Key: {}, Object size: {}, WithPool: {}", bucket, key, task.req.GetContentLength(), with_pool);

@@ -59,13 +59,13 @@ Field zeroField(const Field & value)
 {
     switch (value.getType())
     {
-        case Field::Types::UInt64: return UInt64(0);
-        case Field::Types::Int64: return Int64(0);
-        case Field::Types::Float64: return Float64(0);
-        case Field::Types::UInt128: return UInt128(0);
-        case Field::Types::Int128: return Int128(0);
-        case Field::Types::UInt256: return UInt256(0);
-        case Field::Types::Int256: return Int256(0);
+        case Field::Types::UInt64: return static_cast<UInt64>(0);
+        case Field::Types::Int64: return static_cast<Int64>(0);
+        case Field::Types::Float64: return static_cast<Float64>(0);
+        case Field::Types::UInt128: return static_cast<UInt128>(0);
+        case Field::Types::Int128: return static_cast<Int128>(0);
+        case Field::Types::UInt256: return static_cast<UInt256>(0);
+        case Field::Types::Int256: return static_cast<Int256>(0);
         default:
             break;
     }

@@ -24,7 +24,7 @@ void ClientInfo::write(WriteBuffer & out, UInt64 server_protocol_revision) const
     if (server_protocol_revision < DBMS_MIN_REVISION_WITH_CLIENT_INFO)
         throw Exception("Logical error: method ClientInfo::write is called for unsupported server revision", ErrorCodes::LOGICAL_ERROR);

-    writeBinary(UInt8(query_kind), out);
+    writeBinary(static_cast<UInt8>(query_kind), out);
     if (empty())
         return;

@@ -35,7 +35,7 @@ void ClientInfo::write(WriteBuffer & out, UInt64 server_protocol_revision) const
     if (server_protocol_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME)
         writeBinary(initial_query_start_time_microseconds, out);

-    writeBinary(UInt8(interface), out);
+    writeBinary(static_cast<UInt8>(interface), out);

     if (interface == Interface::TCP)
     {

@@ -48,7 +48,7 @@ void ClientInfo::write(WriteBuffer & out, UInt64 server_protocol_revision) const
     }
     else if (interface == Interface::HTTP)
     {
-        writeBinary(UInt8(http_method), out);
+        writeBinary(static_cast<UInt8>(http_method), out);
         writeBinary(http_user_agent, out);

         if (server_protocol_revision >= DBMS_MIN_REVISION_WITH_X_FORWARDED_FOR_IN_CLIENT_INFO)

@@ -86,7 +86,7 @@ void ClientInfo::write(WriteBuffer & out, UInt64 server_protocol_revision) const
         else
         {
             // Don't have OpenTelemetry header.
-            writeBinary(uint8_t(0), out);
+            writeBinary(static_cast<UInt8>(0), out);
         }
     }

@@ -82,7 +82,7 @@ void collectCrashLog(Int32 signal, UInt64 thread_id, const String & query_id, co

         stack_trace.toStringEveryLine([&trace_full](const std::string & line) { trace_full.push_back(line); });

-        CrashLogElement element{time_t(time / 1000000000), time, signal, thread_id, query_id, trace, trace_full};
+        CrashLogElement element{static_cast<time_t>(time / 1000000000), time, signal, thread_id, query_id, trace, trace_full};
         crash_log_owned->add(element);
     }
 }

@@ -39,7 +39,7 @@ void DNSCacheUpdater::run()
      * - automatically throttle when DNS requests take longer time;
      * - add natural randomization on huge clusters - avoid sending all requests at the same moment of time from different servers.
      */
-    task_handle->scheduleAfter(size_t(update_period_seconds) * 1000);
+    task_handle->scheduleAfter(static_cast<size_t>(update_period_seconds) * 1000);
 }

 void DNSCacheUpdater::start()

@@ -48,7 +48,7 @@ BlockIO InterpreterCheckQuery::execute()
     {
         bool result = std::all_of(check_results.begin(), check_results.end(), [] (const CheckResult & res) { return res.success; });
         auto column = ColumnUInt8::create();
-        column->insertValue(UInt64(result));
+        column->insertValue(static_cast<UInt64>(result));
         block = Block{{std::move(column), std::make_shared<DataTypeUInt8>(), "result"}};
     }
     else

@@ -380,7 +380,7 @@ BlockIO InterpreterInsertQuery::execute()
         pipeline.dropTotalsAndExtremes();

         if (table->supportsParallelInsert() && settings.max_insert_threads > 1)
-            out_streams_size = std::min(size_t(settings.max_insert_threads), pipeline.getNumStreams());
+            out_streams_size = std::min(static_cast<size_t>(settings.max_insert_threads), pipeline.getNumStreams());

         pipeline.resize(out_streams_size);

@@ -104,7 +104,7 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
         }
         else if (settings.offset)
         {
-            ASTPtr new_limit_offset_ast = std::make_shared<ASTLiteral>(Field(UInt64(settings.offset)));
+            ASTPtr new_limit_offset_ast = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(settings.offset)));
             select_query->setExpression(ASTSelectQuery::Expression::LIMIT_OFFSET, std::move(new_limit_offset_ast));
         }

@@ -115,15 +115,15 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(

             UInt64 new_limit_length = 0;
             if (settings.offset == 0)
-                new_limit_length = std::min(limit_length, UInt64(settings.limit));
+                new_limit_length = std::min(limit_length, static_cast<UInt64>(settings.limit));
             else if (settings.offset < limit_length)
-                new_limit_length = settings.limit ? std::min(UInt64(settings.limit), limit_length - settings.offset) : (limit_length - settings.offset);
+                new_limit_length = settings.limit ? std::min(static_cast<UInt64>(settings.limit), limit_length - settings.offset) : (limit_length - settings.offset);

             limit_length_ast->as<ASTLiteral &>().value = Field(new_limit_length);
         }
         else if (settings.limit)
         {
-            ASTPtr new_limit_length_ast = std::make_shared<ASTLiteral>(Field(UInt64(settings.limit)));
+            ASTPtr new_limit_length_ast = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(settings.limit)));
             select_query->setExpression(ASTSelectQuery::Expression::LIMIT_LENGTH, std::move(new_limit_length_ast));
         }

@@ -588,7 +588,7 @@ void InterpreterSystemQuery::restartReplicas(ContextMutablePtr system_context)
     for (auto & guard : guards)
         guard.second = catalog.getDDLGuard(guard.first.database_name, guard.first.table_name);

-    ThreadPool pool(std::min(size_t(getNumberOfPhysicalCPUCores()), replica_names.size()));
+    ThreadPool pool(std::min(static_cast<size_t>(getNumberOfPhysicalCPUCores()), replica_names.size()));

     for (auto & replica : replica_names)
     {

@@ -325,7 +325,7 @@ static ASTPtr getPartitionPolicy(const NamesAndTypesList & primary_keys)
             return std::make_shared<ASTIdentifier>(column_name);

         return makeASTFunction("intDiv", std::make_shared<ASTIdentifier>(column_name),
-            std::make_shared<ASTLiteral>(UInt64(type_max_size / 1000)));
+            std::make_shared<ASTLiteral>(static_cast<UInt64>(type_max_size / 1000)));
     };

     ASTPtr best_partition;

@@ -493,7 +493,7 @@ ASTs InterpreterCreateImpl::getRewrittenQueries(
     String sign_column_name = getUniqueColumnName(columns_name_and_type, "_sign");
     String version_column_name = getUniqueColumnName(columns_name_and_type, "_version");
     columns->set(columns->columns, InterpreterCreateQuery::formatColumns(columns_description));
-    columns->columns->children.emplace_back(create_materialized_column_declaration(sign_column_name, "Int8", UInt64(1)));
+    columns->columns->children.emplace_back(create_materialized_column_declaration(sign_column_name, "Int8", static_cast<UInt64>(1)));
     columns->columns->children.emplace_back(create_materialized_column_declaration(version_column_name, "UInt64", UInt64(1)));

     /// Add minmax skipping index for _version column.

@@ -71,7 +71,7 @@ static void dumpProfileEvents(ProfileEventsSnapshot const & snapshot, DB::Mutabl
     {
         size_t i = 0;
         columns[i++]->insertData(host_name.data(), host_name.size());
-        columns[i++]->insert(UInt64(snapshot.current_time));
+        columns[i++]->insert(static_cast<UInt64>(snapshot.current_time));
         columns[i++]->insert(UInt64{snapshot.thread_id});
         columns[i++]->insert(Type::INCREMENT);
     }
@ -81,8 +81,8 @@ static void dumpMemoryTracker(ProfileEventsSnapshot const & snapshot, DB::Mutabl
|
||||
{
|
||||
size_t i = 0;
|
||||
columns[i++]->insertData(host_name.data(), host_name.size());
|
||||
columns[i++]->insert(UInt64(snapshot.current_time));
|
||||
columns[i++]->insert(UInt64{snapshot.thread_id});
|
||||
columns[i++]->insert(static_cast<UInt64>(snapshot.current_time));
|
||||
columns[i++]->insert(static_cast<UInt64>{snapshot.thread_id});
|
||||
columns[i++]->insert(Type::GAUGE);
|
||||
|
||||
columns[i++]->insertData(MemoryTracker::USAGE_EVENT_NAME, strlen(MemoryTracker::USAGE_EVENT_NAME));

@ -279,7 +279,7 @@ void QueryLogElement::appendClientInfo(const ClientInfo & client_info, MutableCo

columns[i++]->insert(client_info.initial_query_start_time);
columns[i++]->insert(client_info.initial_query_start_time_microseconds);

columns[i++]->insert(UInt64(client_info.interface));
columns[i++]->insert(static_cast<UInt64>(client_info.interface));
columns[i++]->insert(static_cast<UInt64>(client_info.is_secure));

columns[i++]->insert(client_info.os_user);

@ -290,7 +290,7 @@ void QueryLogElement::appendClientInfo(const ClientInfo & client_info, MutableCo

columns[i++]->insert(client_info.client_version_minor);
columns[i++]->insert(client_info.client_version_patch);

columns[i++]->insert(UInt64(client_info.http_method));
columns[i++]->insert(static_cast<UInt64>(client_info.http_method));
columns[i++]->insert(client_info.http_user_agent);
columns[i++]->insert(client_info.http_referer);
columns[i++]->insert(client_info.forwarded_for);

@ -49,7 +49,7 @@ void ThreadStatus::applyQuerySettings()

initQueryProfiler();

untracked_memory_limit = settings.max_untracked_memory;
if (settings.memory_profiler_step && settings.memory_profiler_step < UInt64(untracked_memory_limit))
if (settings.memory_profiler_step && settings.memory_profiler_step < static_cast<UInt64>(untracked_memory_limit))
untracked_memory_limit = settings.memory_profiler_step;

#if defined(OS_LINUX)
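
The cast in this hunk also makes an Int64-to-UInt64 conversion explicit in a mixed signed/unsigned comparison, which is a place where implicit conversions easily mislead. A standalone illustration of the pitfall (not ClickHouse code; the values are made up):

    #include <cstdint>
    #include <iostream>

    int main()
    {
        int64_t limit = -1;              // a signed sentinel value
        uint64_t step = 4 * 1024 * 1024; // 4 MiB

        // Converted to unsigned, -1 becomes 18446744073709551615, so this
        // comparison is true for every possible step.
        std::cout << (step < static_cast<uint64_t>(limit)) << '\n'; // 1

        // In the signed domain the answer is the opposite.
        std::cout << (static_cast<int64_t>(step) < limit) << '\n';  // 0
    }

The explicit static_cast performs the same conversion the compiler would apply implicitly here, but it makes that choice visible in the source and keeps -Wsign-compare-style diagnostics quiet.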

@ -84,7 +84,7 @@ void TraceCollector::run()

{
uintptr_t addr = 0;
readPODBinary(addr, in);
trace.emplace_back(UInt64(addr));
trace.emplace_back(static_cast<UInt64>(addr));
}

TraceType trace_type;

@ -103,8 +103,8 @@ void TraceCollector::run()

struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);

UInt64 time = UInt64(ts.tv_sec * 1000000000LL + ts.tv_nsec);
UInt64 time_in_microseconds = UInt64((ts.tv_sec * 1000000LL) + (ts.tv_nsec / 1000));
UInt64 time = static_cast<UInt64>(ts.tv_sec * 1000000000LL + ts.tv_nsec);
UInt64 time_in_microseconds = static_cast<UInt64>((ts.tv_sec * 1000000LL) + (ts.tv_nsec / 1000));
TraceLogElement element{time_t(time / 1000000000), time_in_microseconds, time, trace_type, thread_id, query_id, trace, size};
trace_log->add(element);
}
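
A minimal self-contained version of the timestamp computation in this hunk (POSIX clock_gettime). The multiplication already happens in 64-bit signed arithmetic because of the LL literals; the static_cast only retypes the final result:

    #include <cstdint>
    #include <cstdio>
    #include <ctime>

    int main()
    {
        struct timespec ts;
        clock_gettime(CLOCK_REALTIME, &ts);

        // Nanoseconds and microseconds since the epoch, as in the hunk above.
        uint64_t time_ns = static_cast<uint64_t>(ts.tv_sec * 1000000000LL + ts.tv_nsec);
        uint64_t time_us = static_cast<uint64_t>((ts.tv_sec * 1000000LL) + (ts.tv_nsec / 1000));

        std::printf("%llu ns, %llu us\n",
                    static_cast<unsigned long long>(time_ns),
                    static_cast<unsigned long long>(time_us));
        return 0;
    }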

@ -76,7 +76,7 @@ void appendUnusedGroupByColumn(ASTSelectQuery * select_query)

/// Also start unused_column integer must not intersect with ([1, source_columns.size()])
/// might be in positional GROUP BY.
select_query->setExpression(ASTSelectQuery::Expression::GROUP_BY, std::make_shared<ASTExpressionList>());
select_query->groupBy()->children.emplace_back(std::make_shared<ASTLiteral>(Int64(-1)));
select_query->groupBy()->children.emplace_back(std::make_shared<ASTLiteral>(static_cast<Int64>(-1)));
}

/// Eliminates injective function calls and constant expressions from group by statement.

@ -294,7 +294,7 @@ struct ExistsExpressionData

select_query->setExpression(ASTSelectQuery::Expression::SELECT, select_expr_list);
select_query->setExpression(ASTSelectQuery::Expression::TABLES, tables_in_select);

ASTPtr limit_length_ast = std::make_shared<ASTLiteral>(Field(UInt64(1)));
ASTPtr limit_length_ast = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(1)));
select_query->setExpression(ASTSelectQuery::Expression::LIMIT_LENGTH, std::move(limit_length_ast));

auto select_with_union_query = std::make_shared<ASTSelectWithUnionQuery>();

@ -347,7 +347,7 @@ void replaceWithSumCount(String column_name, ASTFunction & func)

/// Rewrite "avg" to sumCount().1 / sumCount().2
auto new_arg1 = makeASTFunction("tupleElement", func_base, std::make_shared<ASTLiteral>(UInt8(1)));
auto new_arg2 = makeASTFunction("CAST",
makeASTFunction("tupleElement", func_base, std::make_shared<ASTLiteral>(UInt8(2))),
makeASTFunction("tupleElement", func_base, std::make_shared<ASTLiteral>(static_cast<UInt8>(2))),
std::make_shared<ASTLiteral>("Float64"));

func.name = "divide";

@ -892,7 +892,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(

ReadableSize(elem.read_bytes / elapsed_seconds));
}

if (log_queries && elem.type >= log_queries_min_type && Int64(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
if (log_queries && elem.type >= log_queries_min_type && static_cast<Int64>(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
{
if (auto query_log = context->getQueryLog())
query_log->add(elem);

@ -1009,7 +1009,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(

logException(context, elem);

/// In case of exception we log internal queries also
if (log_queries && elem.type >= log_queries_min_type && Int64(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
if (log_queries && elem.type >= log_queries_min_type && static_cast<Int64>(elem.query_duration_ms) >= log_queries_min_query_duration_ms)
{
if (auto query_log = context->getQueryLog())
query_log->add(elem);

@ -85,13 +85,13 @@ ASTPtr ASTDeclareOptions::clone() const

bool ParserAlwaysTrue::parseImpl(IParser::Pos & /*pos*/, ASTPtr & node, Expected & /*expected*/)
{
node = std::make_shared<ASTLiteral>(Field(UInt64(1)));
node = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(1)));
return true;
}

bool ParserAlwaysFalse::parseImpl(IParser::Pos & /*pos*/, ASTPtr & node, Expected & /*expected*/)
{
node = std::make_shared<ASTLiteral>(Field(UInt64(0)));
node = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(0)));
return true;
}

@ -391,7 +391,7 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)

/// Transform `DISTINCT ON expr` to `LIMIT 1 BY expr`
limit_by_expression_list = distinct_on_expression_list;
limit_by_length = std::make_shared<ASTLiteral>(Field{UInt8(1)});
limit_by_length = std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(1)});
distinct_on_expression_list = nullptr;
}

@ -30,9 +30,9 @@ bool ParserSetQuery::parseNameValuePair(SettingChange & change, IParser::Pos & p

return false;

if (ParserKeyword("TRUE").ignore(pos, expected))
value = std::make_shared<ASTLiteral>(Field(UInt64(1)));
value = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(1)));
else if (ParserKeyword("FALSE").ignore(pos, expected))
value = std::make_shared<ASTLiteral>(Field(UInt64(0)));
value = std::make_shared<ASTLiteral>(Field(static_cast<UInt64>(0)));
else if (!value_p.parse(pos, value, expected))
return false;

@ -22,9 +22,9 @@ ASTPtr makeASTForLogicalAnd(ASTs && arguments)

});

if (!partial_result)
return std::make_shared<ASTLiteral>(Field{UInt8(0)});
return std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(0)});
if (arguments.empty())
return std::make_shared<ASTLiteral>(Field{UInt8(1)});
return std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(1)});
if (arguments.size() == 1)
return arguments[0];

@ -51,9 +51,9 @@ ASTPtr makeASTForLogicalOr(ASTs && arguments)

});

if (partial_result)
return std::make_shared<ASTLiteral>(Field{UInt8(1)});
return std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(1)});
if (arguments.empty())
return std::make_shared<ASTLiteral>(Field{UInt8(0)});
return std::make_shared<ASTLiteral>(Field{static_cast<UInt8>(0)});
if (arguments.size() == 1)
return arguments[0];
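
Both helpers apply the same constant-folding rule before building an AST node: a dominating constant (false for AND, true for OR) fixes the result, and an empty argument list yields the neutral element. The rule in isolation, over plain bools (names assumed for illustration, not the AST API):

    #include <optional>
    #include <vector>

    // `known` holds the already-folded constant arguments; `rest` counts the
    // non-constant arguments that survive folding.
    std::optional<bool> foldLogicalAnd(const std::vector<bool> & known, size_t rest)
    {
        for (bool v : known)
            if (!v)
                return false;   // a single constant false decides an AND
        if (rest == 0)
            return true;        // nothing left: neutral element of AND
        return std::nullopt;    // result still depends on remaining arguments
    }

A foldLogicalOr counterpart would be the mirror image: any constant true returns true, and the neutral element is false.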

@ -338,7 +338,7 @@ void registerFileSegmentationEngineTabSeparated(FormatFactory & factory)

{
auto register_func = [&](const String & format_name, bool with_names, bool with_types)
{
size_t min_rows = 1 + int(with_names) + int(with_types);
size_t min_rows = 1 + static_cast<int>(with_names) + static_cast<int>(with_types);
factory.registerFileSegmentationEngine(format_name, [is_raw, min_rows](ReadBuffer & in, DB::Memory<> & memory, size_t min_chunk_size)
{
return fileSegmentationEngineTabSeparatedImpl(in, memory, min_chunk_size, is_raw, min_rows);
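
min_rows here leans on the bool-to-int conversion: each optional header row (names, types) adds one required row on top of a single data row. The casts only make that arithmetic explicit. In isolation:

    #include <cstdio>

    int main()
    {
        bool with_names = true, with_types = false;

        // bool converts to 0 or 1: one data row plus one row per header present.
        int min_rows = 1 + static_cast<int>(with_names) + static_cast<int>(with_types);

        std::printf("%d\n", min_rows); // 2
    }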

@ -45,7 +45,7 @@ Chunk CubeTransform::generate()

consumed_chunks.clear();

auto num_rows = cube_chunk.getNumRows();
mask = (UInt64(1) << keys.size()) - 1;
mask = (static_cast<UInt64>(1) << keys.size()) - 1;

current_columns = cube_chunk.getColumns();
current_zero_columns.clear();
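
The cast in the mask computation is more than style: shifting a plain int literal by keys.size() is undefined behavior once the key count reaches 32 (int is 32-bit on the platforms ClickHouse targets), so the value has to be widened to 64 bits before the shift. Standalone:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        size_t keys = 40;

        // (1 << keys) would shift a 32-bit int by 40 bits: undefined behavior.
        // Widening first keeps the shift within the 64-bit value range.
        uint64_t mask = (static_cast<uint64_t>(1) << keys) - 1;

        std::printf("%#llx\n", static_cast<unsigned long long>(mask)); // 0xffffffffff
    }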

@ -86,7 +86,7 @@ void PostgreSQLSource<T>::onStart()

}
}

stream = std::make_unique<pqxx::stream_from>(*tx, pqxx::from_query, std::string_view(query_str));
stream = std::make_unique<pqxx::stream_from>(*tx, pqxx::from_query, std::string_view{query_str});
}

template<typename T>

@ -33,7 +33,7 @@ static void limitProgressingSpeed(size_t total_progress_size, size_t max_speed_i

/// Never sleep more than one second (it should be enough to limit speed for a reasonable amount,
/// and otherwise it's too easy to make query hang).
sleep_microseconds = std::min(UInt64(1000000), sleep_microseconds);
sleep_microseconds = std::min(static_cast<UInt64>(1000000), sleep_microseconds);

sleepForMicroseconds(sleep_microseconds);

@ -340,7 +340,7 @@ namespace

uint64_t doubleToUInt64(double d)
{
if (d >= double(std::numeric_limits<uint64_t>::max()))
if (d >= static_cast<double>(std::numeric_limits<uint64_t>::max()))
return std::numeric_limits<uint64_t>::max();
return static_cast<uint64_t>(d);
}
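
The >= guard in this helper is load-bearing: 2^64 - 1 has no exact double representation, so the cast target rounds up to 2^64 under the default rounding mode, and any finite value at or beyond it must be clamped first, because converting an out-of-range double to an integer is undefined behavior. A runnable copy with example values:

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    // Same saturating conversion as the helper above, repeated standalone.
    uint64_t doubleToUInt64(double d)
    {
        if (d >= static_cast<double>(std::numeric_limits<uint64_t>::max()))
            return std::numeric_limits<uint64_t>::max();
        return static_cast<uint64_t>(d);
    }

    int main()
    {
        std::printf("%llu\n", static_cast<unsigned long long>(doubleToUInt64(1.5)));   // 1 (truncated)
        std::printf("%llu\n", static_cast<unsigned long long>(doubleToUInt64(1e300))); // 18446744073709551615 (clamped)
    }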

@ -267,7 +267,7 @@ Chunk MergeTreeBaseSelectProcessor::readFromPartImpl()

};

UInt64 recommended_rows = estimate_num_rows(*task, task->range_reader);
UInt64 rows_to_read = std::max(UInt64(1), std::min(current_max_block_size_rows, recommended_rows));
UInt64 rows_to_read = std::max(static_cast<UInt64>(1), std::min(current_max_block_size_rows, recommended_rows));

auto read_result = task->range_reader.read(rows_to_read, task->mark_ranges);

@ -943,7 +943,7 @@ void MergeTreeData::loadDataPartsFromDisk(

const MergeTreeSettingsPtr & settings)
{
/// Parallel loading of data parts.
pool.setMaxThreads(std::min(size_t(settings->max_part_loading_threads), num_parts));
pool.setMaxThreads(std::min(static_cast<size_t>(settings->max_part_loading_threads), num_parts));
size_t num_threads = pool.getMaxThreads();
std::vector<size_t> parts_per_thread(num_threads, num_parts / num_threads);
for (size_t i = 0ul; i < num_parts % num_threads; ++i)

@ -3250,7 +3250,7 @@ void MergeTreeData::delayInsertOrThrowIfNeeded(Poco::Event * until) const

"Too many inactive parts ({}). Parts cleaning are processing significantly slower than inserts",
inactive_parts_count_in_partition);
}
k_inactive = ssize_t(inactive_parts_count_in_partition) - ssize_t(settings->inactive_parts_to_delay_insert);
k_inactive = static_cast<ssize_t>(inactive_parts_count_in_partition) - static_cast<ssize_t>(settings->inactive_parts_to_delay_insert);
}

if (parts_count_in_partition >= settings->parts_to_throw_insert)
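
Casting both operands to ssize_t before subtracting is what keeps k_inactive meaningful in the hunk above: both counts are unsigned, and unsigned subtraction wraps around whenever the threshold exceeds the current part count. Standalone:

    #include <cstdio>
    #include <sys/types.h> // ssize_t

    int main()
    {
        size_t inactive_parts = 3;
        size_t threshold = 10;

        // Unsigned subtraction wraps: 3 - 10 == 18446744073709551609.
        size_t wrong = inactive_parts - threshold;

        // Signed subtraction preserves the intended negative distance.
        ssize_t right = static_cast<ssize_t>(inactive_parts) - static_cast<ssize_t>(threshold);

        std::printf("%zu vs %zd\n", wrong, right); // huge value vs -7
    }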

@ -206,7 +206,7 @@ void MergeTreeDataPartWriterCompact::writeDataBlock(const Block & block, const G

writeIntBinary(plain_hashing.count(), marks);
writeIntBinary(UInt64(0), marks);
writeIntBinary(static_cast<UInt64>(0), marks);

writeColumnSingleGranule(
block.getByName(name_and_type->name), data_part->getSerialization(*name_and_type),

@ -246,9 +246,9 @@ void MergeTreeDataPartWriterCompact::fillDataChecksums(IMergeTreeDataPart::Check

for (size_t i = 0; i < columns_list.size(); ++i)
{
writeIntBinary(plain_hashing.count(), marks);
writeIntBinary(UInt64(0), marks);
writeIntBinary(static_cast<UInt64>(0), marks);
}
writeIntBinary(UInt64(0), marks);
writeIntBinary(static_cast<UInt64>(0), marks);
}

plain_file->next();

@ -1095,7 +1095,7 @@ std::shared_ptr<QueryIdHolder> MergeTreeDataSelectExecutor::checkLimits(

std::set<String> partitions;
for (const auto & part_with_ranges : result.parts_with_ranges)
partitions.insert(part_with_ranges.data_part->info.partition_id);
if (partitions.size() > size_t(max_partitions_to_read))
if (partitions.size() > static_cast<size_t>(max_partitions_to_read))
throw Exception(
ErrorCodes::TOO_MANY_PARTITIONS,
"Too many partitions to read. Current {}, max {}",

@ -178,7 +178,7 @@ T MaterializedPostgreSQLConsumer::unhexN(const char * message, size_t pos, size_

for (size_t i = 0; i < n; ++i)
{
if (i) result <<= 8;
result |= UInt32(unhex2(message + pos + 2 * i));
result |= static_cast<UInt32>(unhex2(message + pos + 2 * i));
}
return result;
}
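
unhexN folds n hexadecimal byte-pairs into a single integer, most significant byte first. A self-contained sketch with a stand-in decoder (unhex2_sketch is an assumption standing in for ClickHouse's real unhex2 helper, which is not reproduced here):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for ClickHouse's unhex2: decode two hex characters into one byte.
    static uint8_t unhex2_sketch(const char * p)
    {
        auto nibble = [](char c) -> uint8_t
        {
            return c <= '9' ? c - '0' : (c | 0x20) - 'a' + 10;
        };
        return static_cast<uint8_t>((nibble(p[0]) << 4) | nibble(p[1]));
    }

    // Fold n byte-pairs starting at message[pos], as in the hunk above.
    static uint32_t unhexN(const char * message, size_t pos, size_t n)
    {
        uint32_t result = 0;
        for (size_t i = 0; i < n; ++i)
        {
            if (i) result <<= 8;
            result |= static_cast<uint32_t>(unhex2_sketch(message + pos + 2 * i));
        }
        return result;
    }

    int main()
    {
        std::printf("%#x\n", unhexN("deadbeef", 0, 4)); // 0xdeadbeef
    }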

@ -276,14 +276,14 @@ void MaterializedPostgreSQLConsumer::readTupleData(

{
case PostgreSQLQuery::INSERT:
{
buffer.columns[num_columns]->insert(Int8(1));
buffer.columns[num_columns]->insert(static_cast<Int8>(1));
buffer.columns[num_columns + 1]->insert(lsn_value);

break;
}
case PostgreSQLQuery::DELETE:
{
buffer.columns[num_columns]->insert(Int8(-1));
buffer.columns[num_columns]->insert(static_cast<Int8>(-1));
buffer.columns[num_columns + 1]->insert(lsn_value);

break;

@ -292,9 +292,9 @@ void MaterializedPostgreSQLConsumer::readTupleData(

{
/// Process old value in case changed value is a primary key.
if (old_value)
buffer.columns[num_columns]->insert(Int8(-1));
buffer.columns[num_columns]->insert(static_cast<Int8>(-1));
else
buffer.columns[num_columns]->insert(Int8(1));
buffer.columns[num_columns]->insert(static_cast<Int8>(1));

buffer.columns[num_columns + 1]->insert(lsn_value);

@ -352,7 +352,7 @@ ASTPtr StorageMaterializedPostgreSQL::getColumnDeclaration(const DataTypePtr & d

ast_expression->name = "DateTime64";
ast_expression->arguments = std::make_shared<ASTExpressionList>();
ast_expression->arguments->children.emplace_back(std::make_shared<ASTLiteral>(UInt32(6)));
ast_expression->arguments->children.emplace_back(std::make_shared<ASTLiteral>(static_cast<UInt32>(6)));
return ast_expression;
}

@ -48,7 +48,7 @@ Pipe readFinalFromNestedStorage(

require_columns_name.emplace_back(sign_column.name);

const auto & sign_column_name = std::make_shared<ASTIdentifier>(sign_column.name);
const auto & fetch_sign_value = std::make_shared<ASTLiteral>(Field(Int8(1)));
const auto & fetch_sign_value = std::make_shared<ASTLiteral>(Field(static_cast<Int8>(1)));

expressions->children.emplace_back(makeASTFunction("equals", sign_column_name, fetch_sign_value));
filter_column_name = expressions->children.back()->getColumnName();

@ -1376,7 +1376,7 @@ void StorageDistributed::delayInsertOrThrowIfNeeded() const

{
/// Step is 5% of the delay and minimal one second.
/// NOTE: max_delay_to_insert is in seconds, and step is in ms.
const size_t step_ms = std::min<double>(1., double(distributed_settings.max_delay_to_insert) * 1'000 * 0.05);
const size_t step_ms = std::min<double>(1., static_cast<double>(distributed_settings.max_delay_to_insert) * 1'000 * 0.05);
UInt64 delayed_ms = 0;

do {

@ -296,7 +296,7 @@ Pipe StorageMerge::read(

size_t tables_count = selected_tables.size();
Float64 num_streams_multiplier
= std::min(unsigned(tables_count), std::max(1U, unsigned(local_context->getSettingsRef().max_streams_multiplier_for_merge_tables)));
= std::min(static_cast<unsigned>(tables_count), std::max(1U, static_cast<unsigned>(local_context->getSettingsRef().max_streams_multiplier_for_merge_tables)));
num_streams *= num_streams_multiplier;
size_t remaining_streams = num_streams;
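
The expression above clamps the setting to the range [1, tables_count] before using it as a multiplier. The same pattern in isolation (clampMultiplier is an illustrative name):

    #include <algorithm>

    // Clamp a settings-driven multiplier to [1, tables_count], as above.
    unsigned clampMultiplier(unsigned tables_count, unsigned setting)
    {
        return std::min(tables_count, std::max(1U, setting));
    }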

@ -327,7 +327,7 @@ Pipe StorageMerge::read(

size_t current_need_streams = tables_count >= num_streams ? 1 : (num_streams / tables_count);
size_t current_streams = std::min(current_need_streams, remaining_streams);
remaining_streams -= current_streams;
current_streams = std::max(size_t(1), current_streams);
current_streams = std::max(static_cast<size_t>(1), current_streams);

const auto & storage = std::get<1>(table);