Merge branch 'master' of github.com:yandex/ClickHouse

Author: Ivan Blinkov
Date: 2018-08-10 10:12:29 +03:00
Commit: 1771d78516
134 changed files with 702 additions and 447 deletions

View File

@ -36,6 +36,10 @@
* Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/yandex/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee))
* Fixed a server crash when using the `countArray()` aggregate function.
### Backward incompatible changes:
* Parameters for the `Kafka` engine were changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql` and add the `kafka_row_delimiter` parameter with the value `''`.
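A hypothetical example of this edit (the table and argument values are illustrative, not from the release notes): a metadata file containing

```sql
ATTACH TABLE queue (payload String) ENGINE = Kafka('localhost:9092', 'topic', 'group', 'CapnProto', 'schema:Message', 2)
```

would be edited to

```sql
ATTACH TABLE queue (payload String) ENGINE = Kafka('localhost:9092', 'topic', 'group', 'CapnProto', '', 'schema:Message', 2)
```

so that `kafka_row_delimiter` (`''`) now occupies the fifth position, before `kafka_schema` and `kafka_num_consumers`.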
## ClickHouse release 18.1.0, 2018-07-23
### New features:

View File

@ -43,6 +43,10 @@
* Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/yandex/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee))
* Fixed a server crash when using the `countArray()` function.
### Backward incompatible changes:
* The list of parameters for `Kafka` tables was changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If you used the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql`, adding the `kafka_row_delimiter` parameter with the value `''` in the appropriate place.
## ClickHouse release 18.1.0, 2018-07-23

View File

@ -61,7 +61,7 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
endif ()
option (TEST_COVERAGE "Enables flags for test coverage" OFF)
option (ENABLE_TESTS "Enables tests" ${NOT_MSVC})
option (ENABLE_TESTS "Enables tests" ON)
option (USE_STATIC_LIBRARIES "Set to FALSE to use shared libraries" ON)
option (MAKE_STATIC_LIBRARIES "Set to FALSE to make shared libraries" ${USE_STATIC_LIBRARIES})
@ -174,7 +174,7 @@ if (OS_LINUX AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
endif ()
if (LIBCXX_PATH)
# include_directories (BEFORE SYSTEM "${LIBCXX_PATH}/include" "${LIBCXX_PATH}/include/c++/v1")
# include_directories (SYSTEM BEFORE "${LIBCXX_PATH}/include" "${LIBCXX_PATH}/include/c++/v1")
link_directories ("${LIBCXX_PATH}/lib")
endif ()
endif ()

View File

@ -1,4 +1,4 @@
option (ENABLE_CAPNP "Enable Cap'n Proto" ${NOT_MSVC})
option (ENABLE_CAPNP "Enable Cap'n Proto" ON)
if (ENABLE_CAPNP)
# cmake 3.5.1 bug:

View File

@ -43,6 +43,12 @@ if (ENABLE_EMBEDDED_COMPILER)
else()
set (USE_EMBEDDED_COMPILER 0)
endif()
if (LLVM_FOUND AND OS_LINUX AND USE_LIBCXX)
message(WARNING "Option USE_INTERNAL_LLVM_LIBRARY is not set but the LLVM library from OS packages on Linux is incompatible with the libc++ ABI. LLVM will be disabled.")
set (LLVM_FOUND 0)
set (USE_EMBEDDED_COMPILER 0)
endif ()
else()
set (LLVM_FOUND 1)
set (USE_EMBEDDED_COMPILER 1)

View File

@ -1,4 +1,4 @@
option (ENABLE_RDKAFKA "Enable kafka" ${NOT_MSVC})
option (ENABLE_RDKAFKA "Enable kafka" ON)
if (ENABLE_RDKAFKA)

View File

@ -98,8 +98,8 @@ if (USE_INTERNAL_SSL_LIBRARY)
set (USE_SHARED ${USE_STATIC_LIBRARIES})
set (LIBRESSL_SKIP_INSTALL 1)
add_subdirectory (ssl)
target_include_directories(${OPENSSL_CRYPTO_LIBRARY} PUBLIC ${OPENSSL_INCLUDE_DIR})
target_include_directories(${OPENSSL_SSL_LIBRARY} PUBLIC ${OPENSSL_INCLUDE_DIR})
target_include_directories(${OPENSSL_CRYPTO_LIBRARY} SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR})
target_include_directories(${OPENSSL_SSL_LIBRARY} SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR})
endif ()
if (ENABLE_MYSQL AND USE_INTERNAL_MYSQL_LIBRARY)
@ -109,26 +109,9 @@ if (ENABLE_MYSQL AND USE_INTERNAL_MYSQL_LIBRARY)
endif ()
if (USE_INTERNAL_RDKAFKA_LIBRARY)
set (RDKAFKA_BUILD_EXAMPLES OFF CACHE INTERNAL "")
set (RDKAFKA_BUILD_TESTS OFF CACHE INTERNAL "")
set (RDKAFKA_BUILD_STATIC ${MAKE_STATIC_LIBRARIES} CACHE INTERNAL "")
mark_as_advanced (ZLIB_INCLUDE_DIR)
if (USE_INTERNAL_SSL_LIBRARY)
if (MAKE_STATIC_LIBRARIES)
add_library(bundled-ssl ALIAS ${OPENSSL_SSL_LIBRARY})
set (WITH_BUNDLED_SSL 1 CACHE INTERNAL "")
else ()
set (WITH_SSL 0 CACHE INTERNAL "")
endif ()
endif ()
add_subdirectory (librdkafka)
if (USE_INTERNAL_SSL_LIBRARY AND MAKE_STATIC_LIBRARIES)
target_include_directories(rdkafka PRIVATE BEFORE ${OPENSSL_INCLUDE_DIR})
endif ()
add_subdirectory (librdkafka-cmake)
target_include_directories(rdkafka PRIVATE BEFORE ${ZLIB_INCLUDE_DIR})
target_include_directories(rdkafka PRIVATE BEFORE ${OPENSSL_INCLUDE_DIR})
endif ()
if (ENABLE_ODBC AND USE_INTERNAL_ODBC_LIBRARY)
@ -162,7 +145,7 @@ if (USE_INTERNAL_POCO_LIBRARY)
if (OPENSSL_FOUND AND TARGET Crypto AND (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL))
# Bug in poco https://github.com/pocoproject/poco/pull/2100 found on macos
target_include_directories(Crypto PUBLIC ${OPENSSL_INCLUDE_DIR})
target_include_directories(Crypto SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR})
endif ()
endif ()

View File

@ -42,9 +42,9 @@ ${LIBRARY_DIR}/libs/filesystem/src/windows_file_codecvt.cpp)
add_library(boost_system_internal
${LIBRARY_DIR}/libs/system/src/error_code.cpp)
target_include_directories (boost_program_options_internal BEFORE PUBLIC ${Boost_INCLUDE_DIRS})
target_include_directories (boost_filesystem_internal BEFORE PUBLIC ${Boost_INCLUDE_DIRS})
target_include_directories (boost_system_internal BEFORE PUBLIC ${Boost_INCLUDE_DIRS})
target_include_directories (boost_program_options_internal SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS})
target_include_directories (boost_filesystem_internal SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS})
target_include_directories (boost_system_internal SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS})
target_compile_definitions (boost_program_options_internal PUBLIC BOOST_SYSTEM_NO_DEPRECATED)
target_compile_definitions (boost_filesystem_internal PUBLIC BOOST_SYSTEM_NO_DEPRECATED)

View File

@ -17,4 +17,4 @@ include/libcpuid/recog_amd.h
include/libcpuid/recog_intel.h
)
target_include_directories (cpuid PUBLIC include)
target_include_directories (cpuid SYSTEM PUBLIC include)

View File

@ -0,0 +1,60 @@
set(RDKAFKA_SOURCE_DIR ${CMAKE_SOURCE_DIR}/contrib/librdkafka/src)
set(SRCS
${RDKAFKA_SOURCE_DIR}/crc32c.c
${RDKAFKA_SOURCE_DIR}/rdaddr.c
${RDKAFKA_SOURCE_DIR}/rdavl.c
${RDKAFKA_SOURCE_DIR}/rdbuf.c
${RDKAFKA_SOURCE_DIR}/rdcrc32.c
${RDKAFKA_SOURCE_DIR}/rdkafka.c
${RDKAFKA_SOURCE_DIR}/rdkafka_assignor.c
${RDKAFKA_SOURCE_DIR}/rdkafka_broker.c
${RDKAFKA_SOURCE_DIR}/rdkafka_buf.c
${RDKAFKA_SOURCE_DIR}/rdkafka_cgrp.c
${RDKAFKA_SOURCE_DIR}/rdkafka_conf.c
${RDKAFKA_SOURCE_DIR}/rdkafka_event.c
${RDKAFKA_SOURCE_DIR}/rdkafka_feature.c
${RDKAFKA_SOURCE_DIR}/rdkafka_lz4.c
${RDKAFKA_SOURCE_DIR}/rdkafka_metadata.c
${RDKAFKA_SOURCE_DIR}/rdkafka_metadata_cache.c
${RDKAFKA_SOURCE_DIR}/rdkafka_msg.c
${RDKAFKA_SOURCE_DIR}/rdkafka_msgset_reader.c
${RDKAFKA_SOURCE_DIR}/rdkafka_msgset_writer.c
${RDKAFKA_SOURCE_DIR}/rdkafka_offset.c
${RDKAFKA_SOURCE_DIR}/rdkafka_op.c
${RDKAFKA_SOURCE_DIR}/rdkafka_partition.c
${RDKAFKA_SOURCE_DIR}/rdkafka_pattern.c
${RDKAFKA_SOURCE_DIR}/rdkafka_queue.c
${RDKAFKA_SOURCE_DIR}/rdkafka_range_assignor.c
${RDKAFKA_SOURCE_DIR}/rdkafka_request.c
${RDKAFKA_SOURCE_DIR}/rdkafka_roundrobin_assignor.c
${RDKAFKA_SOURCE_DIR}/rdkafka_sasl.c
${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_plain.c
${RDKAFKA_SOURCE_DIR}/rdkafka_subscription.c
${RDKAFKA_SOURCE_DIR}/rdkafka_timer.c
${RDKAFKA_SOURCE_DIR}/rdkafka_topic.c
${RDKAFKA_SOURCE_DIR}/rdkafka_transport.c
${RDKAFKA_SOURCE_DIR}/rdkafka_interceptor.c
${RDKAFKA_SOURCE_DIR}/rdkafka_header.c
${RDKAFKA_SOURCE_DIR}/rdlist.c
${RDKAFKA_SOURCE_DIR}/rdlog.c
${RDKAFKA_SOURCE_DIR}/rdmurmur2.c
${RDKAFKA_SOURCE_DIR}/rdports.c
${RDKAFKA_SOURCE_DIR}/rdrand.c
${RDKAFKA_SOURCE_DIR}/rdregex.c
${RDKAFKA_SOURCE_DIR}/rdstring.c
${RDKAFKA_SOURCE_DIR}/rdunittest.c
${RDKAFKA_SOURCE_DIR}/rdvarint.c
${RDKAFKA_SOURCE_DIR}/snappy.c
${RDKAFKA_SOURCE_DIR}/tinycthread.c
${RDKAFKA_SOURCE_DIR}/xxhash.c
${RDKAFKA_SOURCE_DIR}/lz4.c
${RDKAFKA_SOURCE_DIR}/lz4frame.c
${RDKAFKA_SOURCE_DIR}/lz4hc.c
${RDKAFKA_SOURCE_DIR}/rdgz.c
)
add_library(rdkafka STATIC ${SRCS})
target_include_directories(rdkafka PRIVATE include)
target_include_directories(rdkafka SYSTEM PUBLIC ${RDKAFKA_SOURCE_DIR})
target_link_libraries(rdkafka PUBLIC ${ZLIB_LIBRARIES} ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY})

View File

@ -0,0 +1,74 @@
// Automatically generated by ./configure
#ifndef _CONFIG_H_
#define _CONFIG_H_
#define ARCH "x86_64"
#define CPU "generic"
#define WITHOUT_OPTIMIZATION 0
#define ENABLE_DEVEL 0
#define ENABLE_VALGRIND 0
#define ENABLE_REFCNT_DEBUG 0
#define ENABLE_SHAREDPTR_DEBUG 0
#define ENABLE_LZ4_EXT 1
#define ENABLE_SSL 1
//#define ENABLE_SASL 1
#define MKL_APP_NAME "librdkafka"
#define MKL_APP_DESC_ONELINE "The Apache Kafka C/C++ library"
// distro
//#define SOLIB_EXT ".so"
// gcc
//#define WITH_GCC 1
// gxx
//#define WITH_GXX 1
// pkgconfig
//#define WITH_PKGCONFIG 1
// install
//#define WITH_INSTALL 1
// PIC
//#define HAVE_PIC 1
// gnulib
//#define WITH_GNULD 1
// __atomic_32
#define HAVE_ATOMICS_32 1
// __atomic_32
#define HAVE_ATOMICS_32_ATOMIC 1
// atomic_32
#define ATOMIC_OP32(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)
// __atomic_64
#define HAVE_ATOMICS_64 1
// __atomic_64
#define HAVE_ATOMICS_64_ATOMIC 1
// atomic_64
#define ATOMIC_OP64(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)
// atomic_64
#define ATOMIC_OP(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)
// parseversion
#define RDKAFKA_VERSION_STR "0.11.4"
// parseversion
#define MKL_APP_VERSION "0.11.4"
// libdl
//#define WITH_LIBDL 1
// WITH_PLUGINS
//#define WITH_PLUGINS 1
// zlib
#define WITH_ZLIB 1
// WITH_SNAPPY
#define WITH_SNAPPY 1
// WITH_SOCKEM
#define WITH_SOCKEM 1
// libssl
#define WITH_SSL 1
// WITH_SASL_SCRAM
//#define WITH_SASL_SCRAM 1
// crc32chw
#define WITH_CRC32C_HW 1
// regex
#define HAVE_REGEX 1
// strndup
#define HAVE_STRNDUP 1
// strerror_r
#define HAVE_STRERROR_R 1
// pthread_setname_gnu
#define HAVE_PTHREAD_SETNAME_GNU 1
// python
//#define HAVE_PYTHON 1
#endif /* _CONFIG_H_ */
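The `ATOMIC_OP*` defines above rely on preprocessor token pasting to build GCC/Clang `__atomic_*` builtin names. A minimal sketch (not part of the commit; names are illustrative) of how an invocation expands:

```cpp
// Not from the commit: ATOMIC_OP32(add, fetch, &counter, 5) pastes tokens
// into __atomic_add_fetch(&counter, 5, __ATOMIC_SEQ_CST), a GCC/Clang builtin.
#include <cstdint>
#include <cstdio>

#define ATOMIC_OP32(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)

int main()
{
    int32_t counter = 0;
    ATOMIC_OP32(add, fetch, &counter, 5);   /// __atomic_add_fetch(&counter, 5, __ATOMIC_SEQ_CST)
    ATOMIC_OP32(sub, fetch, &counter, 2);   /// __atomic_sub_fetch(&counter, 2, __ATOMIC_SEQ_CST)
    printf("%d\n", (int)counter);           /// prints 3
    return 0;
}
```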

View File

@ -0,0 +1 @@
This directory is needed because the rdkafka source files contain `#include "../config.h"`.

contrib/poco vendored

@ -1 +1 @@
Subproject commit 4ab45bc3bb0d2c476ea5385ec2d398c6bfc9f089
Subproject commit 3df947389e6d9654919002797bdd86ed190b3963

View File

@ -12,7 +12,8 @@ endforeach ()
add_library (re2_st ${RE2_ST_SOURCES})
target_compile_definitions (re2_st PRIVATE NDEBUG NO_THREADS re2=re2_st)
target_include_directories (re2_st PRIVATE . PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ${RE2_SOURCE_DIR})
target_include_directories (re2_st PRIVATE .)
target_include_directories (re2_st SYSTEM PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ${RE2_SOURCE_DIR})
file (MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/re2_st)
foreach (FILENAME filtered_re2.h re2.h set.h stringpiece.h)

contrib/ssl vendored

@ -1 +1 @@
Subproject commit 4f9a7b8745184410dc0b31ba548ce21ac64edd9c
Subproject commit 994687ca6c7b5a2b7e4346bf835a54068b3530a4

View File

@ -156,7 +156,6 @@ target_link_libraries (dbms
${MYSQLXX_LIBRARY}
${RE2_LIBRARY}
${RE2_ST_LIBRARY}
${OPENSSL_CRYPTO_LIBRARY}
${BTRIE_LIBRARIES}
)
@ -219,6 +218,8 @@ if (USE_RDKAFKA)
endif ()
endif ()
target_link_libraries(dbms ${OPENSSL_CRYPTO_LIBRARY})
target_link_libraries (dbms
Threads::Threads
)

View File

@ -1,11 +1,11 @@
# These strings are autochanged from release_lib.sh:
set(VERSION_REVISION 54404 CACHE STRING "")
set(VERSION_REVISION 54405 CACHE STRING "")
set(VERSION_MAJOR 18 CACHE STRING "")
set(VERSION_MINOR 9 CACHE STRING "")
set(VERSION_PATCH 0 CACHE STRING "")
set(VERSION_GITHASH c83721a02db002eef7ff864f82d53ca89d47f9e6 CACHE STRING "")
set(VERSION_DESCRIBE v18.9.0-testing CACHE STRING "")
set(VERSION_STRING 18.9.0 CACHE STRING "")
set(VERSION_MINOR 10 CACHE STRING "")
set(VERSION_PATCH 1 CACHE STRING "")
set(VERSION_GITHASH 419bc587c0079b51a906a65af9a10da3300ddaf2 CACHE STRING "")
set(VERSION_DESCRIBE v18.10.1-testing CACHE STRING "")
set(VERSION_STRING 18.10.1 CACHE STRING "")
# end of autochange
set(VERSION_EXTRA "" CACHE STRING "")

View File

@ -226,6 +226,6 @@ ConnectionPoolWithFailover::tryGetEntry(
}
}
return result;
};
}
}
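This hunk, like several later ones, drops a stray semicolon after a function's closing brace. A minimal sketch (not from the commit) of what gets reported when the build enables Clang's `-Wextra-semi` or GCC's `-Wpedantic`:

```cpp
// Not from the commit: the ';' after the function body is an empty
// declaration, which -Wextra-semi (Clang) / -Wpedantic (GCC) flag.
int answer()
{
    return 42;
};   /// warning: extra ';' after function definition

int main()
{
    return answer() == 42 ? 0 : 1;
}
```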

View File

@ -3,10 +3,7 @@
#include <Common/config.h>
#if USE_ICU
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#include <unicode/ucol.h>
#pragma GCC diagnostic pop
#else
#ifdef __clang__
#pragma clang diagnostic push

View File

@ -1,17 +1,7 @@
#pragma once
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#include <boost/smart_ptr/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#include <initializer_list>

View File

@ -37,7 +37,7 @@ namespace detail
{
MoveOrCopyIfThrow<T>()(std::forward<T>(src), dst);
}
};
}
/** A very simple thread-safe queue of limited size.
* If you try to pop an item from an empty queue, the thread is blocked until the queue becomes nonempty.

View File

@ -130,11 +130,11 @@ int getCurrentExceptionCode()
{
return e.code();
}
catch (const Poco::Exception & e)
catch (const Poco::Exception &)
{
return ErrorCodes::POCO_EXCEPTION;
}
catch (const std::exception & e)
catch (const std::exception &)
{
return ErrorCodes::STD_EXCEPTION;
}
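The caught objects are no longer named because they are never used; only the handler's type filter matters. A minimal sketch (not from the commit), assuming a compiler with Clang's `-Wunused-exception-parameter`:

```cpp
// Not from the commit: an unnamed catch parameter keeps the type-based
// dispatch while avoiding an unused-exception-parameter warning.
#include <stdexcept>

int currentErrorCode()
{
    try
    {
        throw std::runtime_error("boom");
    }
    catch (const std::runtime_error &)   /// object unused, so no name
    {
        return 1;
    }
    catch (...)
    {
        return 2;
    }
}
```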

View File

@ -74,7 +74,7 @@ bool check(const T x) { return x == 0; }
template <typename T>
void set(T & x) { x = 0; }
};
}
/** Compile-time interface for cell of the hash table.

View File

@ -94,8 +94,8 @@ public:
{
if (auto it = aliases.find(name); it != aliases.end())
return it->second;
else if (auto it = case_insensitive_aliases.find(Poco::toLower(name)); it != case_insensitive_aliases.end())
return it->second;
else if (auto jt = case_insensitive_aliases.find(Poco::toLower(name)); jt != case_insensitive_aliases.end())
return jt->second;
throw Exception(getFactoryName() + ": name '" + name + "' is not alias", ErrorCodes::LOGICAL_ERROR);
}
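The second iterator is renamed to `jt` because a variable declared in an `if` init-statement stays in scope through the `else` branch, so a second `auto it` there shadows the first. A minimal sketch (not from the commit; the maps are illustrative):

```cpp
// Not from the commit: `it` from the first init-statement is still alive in
// the else branch, so reusing the name there would shadow it (-Wshadow).
#include <map>
#include <string>

int lookup(const std::map<std::string, int> & aliases,
           const std::map<std::string, int> & ci_aliases,
           const std::string & name)
{
    if (auto it = aliases.find(name); it != aliases.end())
        return it->second;
    else if (auto jt = ci_aliases.find(name); jt != ci_aliases.end())   /// `auto it` here would shadow the outer `it`
        return jt->second;
    return -1;
}
```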

View File

@ -5,24 +5,9 @@
#include <algorithm>
#include <memory>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wreserved-id-macro"
#endif
#include <boost/noncopyable.hpp>
#include <boost/iterator_adaptors.hpp>
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#pragma GCC diagnostic pop
#include <common/likely.h>
#include <common/strong_typedef.h>

View File

@ -340,4 +340,4 @@ private:
size_t m_capacity;
};
};
}

View File

@ -50,4 +50,4 @@ private:
static size_t getFailedOpIndex(int32_t code, const Responses & responses);
};
};
}

View File

@ -83,4 +83,4 @@ void ZooKeeperHolder::init(Args&&... args)
using ZooKeeperHolderPtr = std::shared_ptr<ZooKeeperHolder>;
};
}

View File

@ -646,4 +646,4 @@ private:
CurrentMetrics::Increment active_session_metric_increment{CurrentMetrics::ZooKeeperSession};
};
};
}

View File

@ -77,7 +77,10 @@ void formatIPv6(const unsigned char * src, char *& dst, UInt8 zeroed_tail_bytes_
if (words[i] == 0)
{
if (cur.base == -1)
cur.base = i, cur.len = 1;
{
cur.base = i;
cur.len = 1;
}
else
cur.len++;
}

View File

@ -108,7 +108,7 @@ void localBackup(const Poco::Path & source_path, const Poco::Path & destination_
continue;
}
catch (const Poco::FileNotFoundException & e)
catch (const Poco::FileNotFoundException &)
{
++try_no;
if (try_no == max_tries)

View File

@ -60,7 +60,7 @@ add_executable (space_saving space_saving.cpp)
target_link_libraries (space_saving clickhouse_common_io)
add_executable (integer_hash_tables_and_hashes integer_hash_tables_and_hashes.cpp)
target_include_directories (integer_hash_tables_and_hashes BEFORE PRIVATE ${SPARCEHASH_INCLUDE_DIR})
target_include_directories (integer_hash_tables_and_hashes SYSTEM BEFORE PRIVATE ${SPARCEHASH_INCLUDE_DIR})
target_link_libraries (integer_hash_tables_and_hashes clickhouse_common_io)
add_executable (allocator allocator.cpp)

View File

@ -25,7 +25,7 @@ namespace ErrorCodes
class Field;
using Array = std::vector<Field>;
using TupleBackend = std::vector<Field>;
STRONG_TYPEDEF(TupleBackend, Tuple); /// Array and Tuple are different types with equal representation inside Field.
STRONG_TYPEDEF(TupleBackend, Tuple) /// Array and Tuple are different types with equal representation inside Field.
/** 32 is enough. Round number is used for alignment and for better arithmetic inside std::vector.

View File

@ -6,6 +6,6 @@
namespace DB
{
STRONG_TYPEDEF(UInt128, UUID);
STRONG_TYPEDEF(UInt128, UUID)
}
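The call-site semicolons after `STRONG_TYPEDEF(...)` are dropped because the macro (from `common/strong_typedef.h`) already expands to a complete definition ending in its own semicolon, so an extra `;` becomes an empty declaration. A minimal sketch (not from the commit; the real macro is more elaborate):

```cpp
// Not from the commit: a simplified strong-typedef macro whose expansion
// already ends in ';'. Writing STRONG_TYPEDEF(...); would leave a stray
// empty declaration that -Wextra-semi flags.
#define STRONG_TYPEDEF(T, D)           \
    struct D                           \
    {                                  \
        T value;                       \
        explicit D(T v) : value(v) {}  \
    };

STRONG_TYPEDEF(unsigned long long, UUIDLike)   /// no trailing ';' needed

int main()
{
    UUIDLike id(42);
    return id.value == 42 ? 0 : 1;
}
```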

View File

@ -63,7 +63,7 @@ Block AggregatingSortedBlockInputStream::readImpl()
for (size_t i = 0, size = columns_to_aggregate.size(); i < size; ++i)
columns_to_aggregate[i] = typeid_cast<ColumnAggregateFunction *>(merged_columns[column_numbers_to_aggregate[i]].get());
merge(merged_columns, queue);
merge(merged_columns, queue_without_collation);
return header.cloneWithColumns(std::move(merged_columns));
}

View File

@ -102,7 +102,7 @@ Block CollapsingSortedBlockInputStream::readImpl()
if (merged_columns.empty())
return {};
merge(merged_columns, queue);
merge(merged_columns, queue_without_collation);
return header.cloneWithColumns(std::move(merged_columns));
}

View File

@ -102,7 +102,7 @@ Block GraphiteRollupSortedBlockInputStream::readImpl()
if (merged_columns.empty())
return Block();
merge(merged_columns, queue);
merge(merged_columns, queue_without_collation);
return header.cloneWithColumns(std::move(merged_columns));
}

View File

@ -188,7 +188,7 @@ static bool handleOverflowMode(OverflowMode mode, const String & message, int co
default:
throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
}
};
}
bool IProfilingBlockInputStream::checkTimeLimit()

View File

@ -183,7 +183,7 @@ MergeSortingBlocksBlockInputStream::MergeSortingBlocksBlockInputStream(
if (!has_collation)
{
for (size_t i = 0; i < cursors.size(); ++i)
queue.push(SortCursor(&cursors[i]));
queue_without_collation.push(SortCursor(&cursors[i]));
}
else
{
@ -206,7 +206,7 @@ Block MergeSortingBlocksBlockInputStream::readImpl()
}
return !has_collation
? mergeImpl<SortCursor>(queue)
? mergeImpl<SortCursor>(queue_without_collation)
: mergeImpl<SortCursorWithCollation>(queue_with_collation);
}

View File

@ -55,7 +55,7 @@ private:
bool has_collation = false;
std::priority_queue<SortCursor> queue;
std::priority_queue<SortCursor> queue_without_collation;
std::priority_queue<SortCursorWithCollation> queue_with_collation;
/** Two different cursors are supported - with and without Collation.

View File

@ -320,7 +320,7 @@ void MergingAggregatedMemoryEfficientBlockInputStream::mergeThread(MemoryTracker
* - or, if no next blocks, set 'exhausted' flag.
*/
{
std::lock_guard<std::mutex> lock(parallel_merge_data->get_next_blocks_mutex);
std::lock_guard<std::mutex> lock_next_blocks(parallel_merge_data->get_next_blocks_mutex);
if (parallel_merge_data->exhausted || parallel_merge_data->finish)
break;
@ -330,7 +330,7 @@ void MergingAggregatedMemoryEfficientBlockInputStream::mergeThread(MemoryTracker
if (!blocks_to_merge || blocks_to_merge->empty())
{
{
std::unique_lock<std::mutex> lock(parallel_merge_data->merged_blocks_mutex);
std::unique_lock<std::mutex> lock_merged_blocks(parallel_merge_data->merged_blocks_mutex);
parallel_merge_data->exhausted = true;
}
@ -344,9 +344,9 @@ void MergingAggregatedMemoryEfficientBlockInputStream::mergeThread(MemoryTracker
: blocks_to_merge->front().info.bucket_num;
{
std::unique_lock<std::mutex> lock(parallel_merge_data->merged_blocks_mutex);
std::unique_lock<std::mutex> lock_merged_blocks(parallel_merge_data->merged_blocks_mutex);
parallel_merge_data->have_space.wait(lock, [this]
parallel_merge_data->have_space.wait(lock_merged_blocks, [this]
{
return parallel_merge_data->merged_blocks.size() < merging_threads
|| parallel_merge_data->finish;

View File

@ -58,7 +58,7 @@ void MergingSortedBlockInputStream::init(MutableColumns & merged_columns)
if (has_collation)
initQueue(queue_with_collation);
else
initQueue(queue);
initQueue(queue_without_collation);
}
/// Let's check that all source blocks have the same structure.
@ -105,7 +105,7 @@ Block MergingSortedBlockInputStream::readImpl()
if (has_collation)
merge(merged_columns, queue_with_collation);
else
merge(merged_columns, queue);
merge(merged_columns, queue_without_collation);
return header.cloneWithColumns(std::move(merged_columns));
}
@ -200,7 +200,7 @@ void MergingSortedBlockInputStream::merge(MutableColumns & merged_columns, std::
// std::cerr << "copied columns\n";
size_t merged_rows = merged_columns.at(0)->size();
merged_rows = merged_columns.at(0)->size();
if (limit && total_merged_rows + merged_rows > limit)
{

View File

@ -2,17 +2,8 @@
#include <queue>
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#include <boost/smart_ptr/intrusive_ptr.hpp>
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#include <common/logger_useful.h>
#include <Core/Row.h>
@ -161,7 +152,7 @@ protected:
CursorImpls cursors;
using Queue = std::priority_queue<SortCursor>;
Queue queue;
Queue queue_without_collation;
using QueueWithCollation = std::priority_queue<SortCursorWithCollation>;
QueueWithCollation queue_with_collation;

View File

@ -58,21 +58,21 @@ RemoteBlockInputStream::RemoteBlockInputStream(
create_multiplexed_connections = [this, pool, throttler]()
{
const Settings & settings = context.getSettingsRef();
const Settings & current_settings = context.getSettingsRef();
std::vector<IConnectionPool::Entry> connections;
if (main_table)
{
auto try_results = pool->getManyChecked(&settings, pool_mode, *main_table);
auto try_results = pool->getManyChecked(&current_settings, pool_mode, *main_table);
connections.reserve(try_results.size());
for (auto & try_result : try_results)
connections.emplace_back(std::move(try_result.entry));
}
else
connections = pool->getMany(&settings, pool_mode);
connections = pool->getMany(&current_settings, pool_mode);
return std::make_unique<MultiplexedConnections>(
std::move(connections), settings, throttler, append_extra_info);
std::move(connections), current_settings, throttler, append_extra_info);
};
}

View File

@ -44,7 +44,7 @@ Block ReplacingSortedBlockInputStream::readImpl()
if (merged_columns.empty())
return Block();
merge(merged_columns, queue);
merge(merged_columns, queue_without_collation);
return header.cloneWithColumns(std::move(merged_columns));
}

View File

@ -286,7 +286,7 @@ Block SummingSortedBlockInputStream::readImpl()
desc.merged_column = header.safeGetByPosition(desc.column_numbers[0]).column->cloneEmpty();
}
merge(merged_columns, queue);
merge(merged_columns, queue_without_collation);
Block res = header.cloneWithColumns(std::move(merged_columns));
/// Place aggregation results into block.

View File

@ -76,7 +76,7 @@ Block VersionedCollapsingSortedBlockInputStream::readImpl()
if (merged_columns.empty())
return {};
merge(merged_columns, queue);
merge(merged_columns, queue_without_collation);
return header.cloneWithColumns(std::move(merged_columns));
}

View File

@ -97,7 +97,7 @@ DataTypeEnum<Type>::DataTypeEnum(const Values & values_) : values{values_}
});
fillMaps();
name = generateName(values);
type_name = generateName(values);
}
template <typename Type>
@ -145,9 +145,9 @@ template <typename Type>
void DataTypeEnum<Type>::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings &) const
{
/// NOTE It would be nice to do without creating a temporary object - at least extract std::string out.
std::string name;
readEscapedString(name, istr);
static_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(name)));
std::string field_name;
readEscapedString(field_name, istr);
static_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
}
template <typename Type>
@ -159,9 +159,9 @@ void DataTypeEnum<Type>::serializeTextQuoted(const IColumn & column, size_t row_
template <typename Type>
void DataTypeEnum<Type>::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings &) const
{
std::string name;
readQuotedStringWithSQLStyle(name, istr);
static_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(name)));
std::string field_name;
readQuotedStringWithSQLStyle(field_name, istr);
static_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
}
template <typename Type>
@ -179,9 +179,9 @@ void DataTypeEnum<Type>::serializeTextXML(const IColumn & column, size_t row_num
template <typename Type>
void DataTypeEnum<Type>::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings &) const
{
std::string name;
readJSONString(name, istr);
static_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(name)));
std::string field_name;
readJSONString(field_name, istr);
static_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
}
template <typename Type>
@ -193,9 +193,9 @@ void DataTypeEnum<Type>::serializeTextCSV(const IColumn & column, size_t row_num
template <typename Type>
void DataTypeEnum<Type>::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
std::string name;
readCSVString(name, istr, settings.csv);
static_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(name)));
std::string field_name;
readCSVString(field_name, istr, settings.csv);
static_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
}
template <typename Type>
@ -237,7 +237,7 @@ void DataTypeEnum<Type>::insertDefaultInto(IColumn & column) const
template <typename Type>
bool DataTypeEnum<Type>::equals(const IDataType & rhs) const
{
return typeid(rhs) == typeid(*this) && name == static_cast<const DataTypeEnum<Type> &>(rhs).name;
return typeid(rhs) == typeid(*this) && type_name == static_cast<const DataTypeEnum<Type> &>(rhs).type_name;
}
@ -346,14 +346,14 @@ static DataTypePtr create(const ASTPtr & arguments)
throw Exception("Elements of Enum data type must be of form: 'name' = number, where name is string literal and number is an integer",
ErrorCodes::UNEXPECTED_AST_STRUCTURE);
const String & name = name_literal->value.get<String>();
const String & field_name = name_literal->value.get<String>();
const auto value = value_literal->value.get<typename NearestFieldType<FieldType>::Type>();
if (value > std::numeric_limits<FieldType>::max() || value < std::numeric_limits<FieldType>::min())
throw Exception{"Value " + toString(value) + " for element '" + name + "' exceeds range of " + EnumName<FieldType>::value,
throw Exception{"Value " + toString(value) + " for element '" + field_name + "' exceeds range of " + EnumName<FieldType>::value,
ErrorCodes::ARGUMENT_OUT_OF_BOUND};
values.emplace_back(name, value);
values.emplace_back(field_name, value);
}
return std::make_shared<DataTypeEnum>(values);

View File

@ -53,7 +53,7 @@ private:
Values values;
NameToValueMap name_to_value_map;
ValueToNameMap value_to_name_map;
std::string name;
std::string type_name;
static std::string generateName(const Values & values);
void fillMaps();
@ -62,7 +62,7 @@ public:
explicit DataTypeEnum(const Values & values_);
const Values & getValues() const { return values; }
std::string getName() const override { return name; }
std::string getName() const override { return type_name; }
const char * getFamilyName() const override;
const StringRef & getNameForValue(const FieldType & value) const
@ -74,11 +74,11 @@ public:
return it->second;
}
FieldType getValue(StringRef name) const
FieldType getValue(StringRef field_name) const
{
const auto it = name_to_value_map.find(name);
const auto it = name_to_value_map.find(field_name);
if (it == std::end(name_to_value_map))
throw Exception{"Unknown element '" + name.toString() + "' for type " + getName(), ErrorCodes::LOGICAL_ERROR};
throw Exception{"Unknown element '" + field_name.toString() + "' for type " + getName(), ErrorCodes::LOGICAL_ERROR};
return it->second;
}

View File

@ -22,6 +22,6 @@ namespace Nested
/// Collect Array columns of the form `column_name.element_name` into a single Array(Tuple(...)) column.
NamesAndTypesList collect(const NamesAndTypesList & names_and_types);
};
}
}

View File

@ -39,15 +39,15 @@ Tables DatabaseDictionary::loadTables()
Tables tables;
for (const auto & pair : dictionaries)
{
const std::string & name = pair.first;
if (deleted_tables.count(name))
const std::string & dict_name = pair.first;
if (deleted_tables.count(dict_name))
continue;
auto dict_ptr = std::static_pointer_cast<IDictionaryBase>(pair.second.loadable);
if (dict_ptr)
{
const DictionaryStructure & dictionary_structure = dict_ptr->getStructure();
auto columns = StorageDictionary::getNamesAndTypes(dictionary_structure);
tables[name] = StorageDictionary::create(name, ColumnsDescription{columns}, dictionary_structure, name);
tables[dict_name] = StorageDictionary::create(dict_name, ColumnsDescription{columns}, dictionary_structure, dict_name);
}
}

View File

@ -101,12 +101,12 @@ void CacheDictionary::isInImpl(
{
/// Transform all children to parents until ancestor id or null_value will be reached.
size_t size = out.size();
memset(out.data(), 0xFF, size); /// 0xFF means "not calculated"
size_t out_size = out.size();
memset(out.data(), 0xFF, out_size); /// 0xFF means "not calculated"
const auto null_value = std::get<UInt64>(hierarchical_attribute->null_values);
PaddedPODArray<Key> children(size);
PaddedPODArray<Key> children(out_size);
PaddedPODArray<Key> parents(child_ids.begin(), child_ids.end());
while (true)
@ -115,7 +115,7 @@ void CacheDictionary::isInImpl(
size_t parents_idx = 0;
size_t new_children_idx = 0;
while (out_idx < size)
while (out_idx < out_size)
{
/// Already calculated
if (out[out_idx] != 0xFF)
@ -203,7 +203,7 @@ void CacheDictionary::isInConstantVector(
}
/// Assuming short hierarchy, so linear search is Ok.
for (size_t i = 0, size = out.size(); i < size; ++i)
for (size_t i = 0, out_size = out.size(); i < out_size; ++i)
out[i] = std::find(ancestors.begin(), ancestors.end(), ancestor_ids[i]) != ancestors.end();
}
@ -936,12 +936,12 @@ void CacheDictionary::setAttributeValue(Attribute & attribute, const Key idx, co
if (string_ref.data && string_ref.data != null_value_ref.data())
string_arena->free(const_cast<char *>(string_ref.data), string_ref.size);
const auto size = string.size();
if (size != 0)
const auto str_size = string.size();
if (str_size != 0)
{
auto string_ptr = string_arena->alloc(size + 1);
std::copy(string.data(), string.data() + size + 1, string_ptr);
string_ref = StringRef{string_ptr, size};
auto string_ptr = string_arena->alloc(str_size + 1);
std::copy(string.data(), string.data() + str_size + 1, string_ptr);
string_ref = StringRef{string_ptr, str_size};
}
else
string_ref = {};

View File

@ -307,13 +307,13 @@ private:
/// buffer[column_size * cat_features_count] -> char * => cat_features[column_size][cat_features_count] -> char *
void fillCatFeaturesBuffer(const char *** cat_features, const char ** buffer,
size_t column_size, size_t cat_features_count) const
size_t column_size, size_t cat_features_count_current) const
{
for (size_t i = 0; i < column_size; ++i)
{
*cat_features = buffer;
++cat_features;
buffer += cat_features_count;
buffer += cat_features_count_current;
}
}
@ -321,7 +321,7 @@ private:
/// * CalcModelPredictionFlat if no cat features
/// * CalcModelPrediction if all cat features are strings
/// * CalcModelPredictionWithHashedCatFeatures if has int cat features.
ColumnPtr evalImpl(const ColumnRawPtrs & columns, size_t float_features_count, size_t cat_features_count,
ColumnPtr evalImpl(const ColumnRawPtrs & columns, size_t float_features_count_current, size_t cat_features_count_current,
bool cat_features_are_strings) const
{
std::string error_msg = "Error occurred while applying CatBoost model: ";
@ -334,12 +334,12 @@ private:
PODArray<const float *> float_features(column_size);
auto float_features_buf = float_features.data();
/// Store all float data into single column. float_features is a list of pointers to it.
auto float_features_col = placeNumericColumns<float>(columns, 0, float_features_count, float_features_buf);
auto float_features_col = placeNumericColumns<float>(columns, 0, float_features_count_current, float_features_buf);
if (cat_features_count == 0)
if (cat_features_count_current == 0)
{
if (!api->CalcModelPredictionFlat(handle->get(), column_size,
float_features_buf, float_features_count,
float_features_buf, float_features_count_current,
result_buf, column_size))
{
@ -352,18 +352,18 @@ private:
if (cat_features_are_strings)
{
/// cat_features_holder stores pointers to ColumnString data or fixed_strings_data.
PODArray<const char *> cat_features_holder(cat_features_count * column_size);
PODArray<const char *> cat_features_holder(cat_features_count_current * column_size);
PODArray<const char **> cat_features(column_size);
auto cat_features_buf = cat_features.data();
fillCatFeaturesBuffer(cat_features_buf, cat_features_holder.data(), column_size, cat_features_count);
fillCatFeaturesBuffer(cat_features_buf, cat_features_holder.data(), column_size, cat_features_count_current);
/// Fixed strings are stored without termination zero, so have to copy data into fixed_strings_data.
auto fixed_strings_data = placeStringColumns(columns, float_features_count,
cat_features_count, cat_features_holder.data());
auto fixed_strings_data = placeStringColumns(columns, float_features_count_current,
cat_features_count_current, cat_features_holder.data());
if (!api->CalcModelPrediction(handle->get(), column_size,
float_features_buf, float_features_count,
cat_features_buf, cat_features_count,
float_features_buf, float_features_count_current,
cat_features_buf, cat_features_count_current,
result_buf, column_size))
{
throw Exception(error_msg + api->GetErrorString(), ErrorCodes::CANNOT_APPLY_CATBOOST_MODEL);
@ -373,13 +373,13 @@ private:
{
PODArray<const int *> cat_features(column_size);
auto cat_features_buf = cat_features.data();
auto cat_features_col = placeNumericColumns<int>(columns, float_features_count,
cat_features_count, cat_features_buf);
calcHashes(columns, float_features_count, cat_features_count, cat_features_buf);
auto cat_features_col = placeNumericColumns<int>(columns, float_features_count_current,
cat_features_count_current, cat_features_buf);
calcHashes(columns, float_features_count_current, cat_features_count_current, cat_features_buf);
if (!api->CalcModelPredictionWithHashedCatFeatures(
handle->get(), column_size,
float_features_buf, float_features_count,
cat_features_buf, cat_features_count,
float_features_buf, float_features_count_current,
cat_features_buf, cat_features_count_current,
result_buf, column_size))
{
throw Exception(error_msg + api->GetErrorString(), ErrorCodes::CANNOT_APPLY_CATBOOST_MODEL);
@ -453,7 +453,7 @@ CatBoostModel::CatBoostModel(std::string name_, std::string model_path_, std::st
{
try
{
init(lib_path);
init();
}
catch (...)
{
@ -463,7 +463,7 @@ CatBoostModel::CatBoostModel(std::string name_, std::string model_path_, std::st
creation_time = std::chrono::system_clock::now();
}
void CatBoostModel::init(const std::string & lib_path)
void CatBoostModel::init()
{
api_provider = getCatBoostWrapperHolder(lib_path);
api = &api_provider->getAPI();

View File

@ -80,7 +80,7 @@ private:
std::chrono::time_point<std::chrono::system_clock> creation_time;
std::exception_ptr creation_exception;
void init(const std::string & lib_path);
void init();
};
}

View File

@ -28,12 +28,12 @@ void ComplexKeyCacheDictionary::setAttributeValue(Attribute & attribute, const s
if (string_ref.data && string_ref.data != null_value_ref.data())
string_arena->free(const_cast<char *>(string_ref.data), string_ref.size);
const auto size = string.size();
if (size != 0)
const auto str_size = string.size();
if (str_size != 0)
{
auto string_ptr = string_arena->alloc(size + 1);
std::copy(string.data(), string.data() + size + 1, string_ptr);
string_ref = StringRef{string_ptr, size};
auto string_ptr = string_arena->alloc(str_size + 1);
std::copy(string.data(), string.data() + str_size + 1, string_ptr);
string_ref = StringRef{string_ptr, str_size};
}
else
string_ref = {};

View File

@ -223,47 +223,47 @@ Block DictionaryBlockInputStream<DictionaryType, Key>::getBlock(size_t start, si
template <typename DictionaryType, typename Key>
template <typename Type, typename Container>
void DictionaryBlockInputStream<DictionaryType, Key>::callGetter(
DictionaryGetter<Type> getter, const PaddedPODArray<Key> & ids,
DictionaryGetter<Type> getter, const PaddedPODArray<Key> & ids_to_fill,
const Columns & /*keys*/, const DataTypes & /*data_types*/,
Container & container, const DictionaryAttribute & attribute, const DictionaryType & dictionary) const
Container & container, const DictionaryAttribute & attribute, const DictionaryType & dict) const
{
(dictionary.*getter)(attribute.name, ids, container);
(dict.*getter)(attribute.name, ids_to_fill, container);
}
template <typename DictionaryType, typename Key>
template <typename Container>
void DictionaryBlockInputStream<DictionaryType, Key>::callGetter(
DictionaryStringGetter getter, const PaddedPODArray<Key> & ids,
DictionaryStringGetter getter, const PaddedPODArray<Key> & ids_to_fill,
const Columns & /*keys*/, const DataTypes & /*data_types*/,
Container & container, const DictionaryAttribute & attribute, const DictionaryType & dictionary) const
Container & container, const DictionaryAttribute & attribute, const DictionaryType & dict) const
{
(dictionary.*getter)(attribute.name, ids, container);
(dict.*getter)(attribute.name, ids_to_fill, container);
}
template <typename DictionaryType, typename Key>
template <typename Type, typename Container>
void DictionaryBlockInputStream<DictionaryType, Key>::callGetter(
GetterByKey<Type> getter, const PaddedPODArray<Key> & /*ids*/,
GetterByKey<Type> getter, const PaddedPODArray<Key> & /*ids_to_fill*/,
const Columns & keys, const DataTypes & data_types,
Container & container, const DictionaryAttribute & attribute, const DictionaryType & dictionary) const
Container & container, const DictionaryAttribute & attribute, const DictionaryType & dict) const
{
(dictionary.*getter)(attribute.name, keys, data_types, container);
(dict.*getter)(attribute.name, keys, data_types, container);
}
template <typename DictionaryType, typename Key>
template <typename Container>
void DictionaryBlockInputStream<DictionaryType, Key>::callGetter(
StringGetterByKey getter, const PaddedPODArray<Key> & /*ids*/,
StringGetterByKey getter, const PaddedPODArray<Key> & /*ids_to_fill*/,
const Columns & keys, const DataTypes & data_types,
Container & container, const DictionaryAttribute & attribute, const DictionaryType & dictionary) const
Container & container, const DictionaryAttribute & attribute, const DictionaryType & dict) const
{
(dictionary.*getter)(attribute.name, keys, data_types, container);
(dict.*getter)(attribute.name, keys, data_types, container);
}
template <typename DictionaryType, typename Key>
template <template <typename> class Getter, typename StringGetter>
Block DictionaryBlockInputStream<DictionaryType, Key>::fillBlock(
const PaddedPODArray<Key> & ids, const Columns & keys, const DataTypes & types, ColumnsWithTypeAndName && view) const
const PaddedPODArray<Key> & ids_to_fill, const Columns & keys, const DataTypes & types, ColumnsWithTypeAndName && view) const
{
std::unordered_set<std::string> names(column_names.begin(), column_names.end());
@ -283,7 +283,7 @@ Block DictionaryBlockInputStream<DictionaryType, Key>::fillBlock(
const DictionaryStructure & structure = dictionary->getStructure();
if (structure.id && names.find(structure.id->name) != names.end())
block_columns.emplace_back(getColumnFromIds(ids), std::make_shared<DataTypeUInt64>(), structure.id->name);
block_columns.emplace_back(getColumnFromIds(ids_to_fill), std::make_shared<DataTypeUInt64>(), structure.id->name);
for (const auto idx : ext::range(0, structure.attributes.size()))
{
@ -293,7 +293,7 @@ Block DictionaryBlockInputStream<DictionaryType, Key>::fillBlock(
ColumnPtr column;
#define GET_COLUMN_FORM_ATTRIBUTE(TYPE) \
column = getColumnFromAttribute<TYPE, Getter<TYPE>>( \
&DictionaryType::get##TYPE, ids, keys, data_types, attribute, *dictionary)
&DictionaryType::get##TYPE, ids_to_fill, keys, data_types, attribute, *dictionary)
switch (attribute.underlying_type)
{
case AttributeUnderlyingType::UInt8:
@ -346,37 +346,37 @@ Block DictionaryBlockInputStream<DictionaryType, Key>::fillBlock(
template <typename DictionaryType, typename Key>
template <typename AttributeType, typename Getter>
ColumnPtr DictionaryBlockInputStream<DictionaryType, Key>::getColumnFromAttribute(
Getter getter, const PaddedPODArray<Key> & ids,
Getter getter, const PaddedPODArray<Key> & ids_to_fill,
const Columns & keys, const DataTypes & data_types,
const DictionaryAttribute & attribute, const DictionaryType & dictionary) const
const DictionaryAttribute & attribute, const DictionaryType & dict) const
{
auto size = ids.size();
auto size = ids_to_fill.size();
if (!keys.empty())
size = keys.front()->size();
auto column_vector = ColumnVector<AttributeType>::create(size);
callGetter(getter, ids, keys, data_types, column_vector->getData(), attribute, dictionary);
callGetter(getter, ids_to_fill, keys, data_types, column_vector->getData(), attribute, dict);
return std::move(column_vector);
}
template <typename DictionaryType, typename Key>
template <typename Getter>
ColumnPtr DictionaryBlockInputStream<DictionaryType, Key>::getColumnFromStringAttribute(
Getter getter, const PaddedPODArray<Key> & ids,
Getter getter, const PaddedPODArray<Key> & ids_to_fill,
const Columns & keys, const DataTypes & data_types,
const DictionaryAttribute& attribute, const DictionaryType& dictionary) const
const DictionaryAttribute& attribute, const DictionaryType & dict) const
{
auto column_string = ColumnString::create();
auto ptr = column_string.get();
callGetter(getter, ids, keys, data_types, ptr, attribute, dictionary);
callGetter(getter, ids_to_fill, keys, data_types, ptr, attribute, dict);
return std::move(column_string);
}
template <typename DictionaryType, typename Key>
ColumnPtr DictionaryBlockInputStream<DictionaryType, Key>::getColumnFromIds(const PaddedPODArray<Key> & ids) const
ColumnPtr DictionaryBlockInputStream<DictionaryType, Key>::getColumnFromIds(const PaddedPODArray<Key> & ids_to_fill) const
{
auto column_vector = ColumnVector<UInt64>::create();
column_vector->getData().reserve(ids.size());
for (UInt64 id : ids)
column_vector->getData().reserve(ids_to_fill.size());
for (UInt64 id : ids_to_fill)
column_vector->insert(id);
return std::move(column_vector);
}

View File

@ -7,7 +7,7 @@ namespace DB
class DictionaryBlockInputStreamBase : public IProfilingBlockInputStream
{
protected:
Block block;
//Block block;
DictionaryBlockInputStreamBase(size_t rows_count, size_t max_block_size);

View File

@ -20,10 +20,7 @@
#include <Dictionaries/MongoDBDictionarySource.h>
#endif
#if USE_POCO_SQLODBC || USE_POCO_DATAODBC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#include <Poco/Data/ODBC/Connector.h>
#pragma GCC diagnostic pop
#include <Dictionaries/ODBCDictionarySource.h>
#endif
#if USE_MYSQL

View File

@ -240,22 +240,22 @@ std::vector<DictionaryAttribute> DictionaryStructure::getAttributes(
const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix,
const bool hierarchy_allowed, const bool allow_null_values)
{
Poco::Util::AbstractConfiguration::Keys keys;
config.keys(config_prefix, keys);
Poco::Util::AbstractConfiguration::Keys config_elems;
config.keys(config_prefix, config_elems);
auto has_hierarchy = false;
std::vector<DictionaryAttribute> attributes;
std::vector<DictionaryAttribute> res_attributes;
const FormatSettings format_settings;
for (const auto & key : keys)
for (const auto & config_elem : config_elems)
{
if (!startsWith(key.data(), "attribute"))
if (!startsWith(config_elem.data(), "attribute"))
continue;
const auto prefix = config_prefix + '.' + key + '.';
const auto prefix = config_prefix + '.' + config_elem + '.';
Poco::Util::AbstractConfiguration::Keys attribute_keys;
config.keys(config_prefix + '.' + key, attribute_keys);
config.keys(config_prefix + '.' + config_elem, attribute_keys);
checkAttributeKeys(attribute_keys);
@ -300,12 +300,12 @@ std::vector<DictionaryAttribute> DictionaryStructure::getAttributes(
has_hierarchy = has_hierarchy || hierarchical;
attributes.emplace_back(DictionaryAttribute{
res_attributes.emplace_back(DictionaryAttribute{
name, underlying_type, type, expression, null_value, hierarchical, injective, is_object_id
});
}
return attributes;
return res_attributes;
}
}

View File

@ -5,13 +5,10 @@
#include <string>
#include <sstream>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#include <Poco/MongoDB/Connection.h>
#include <Poco/MongoDB/Cursor.h>
#include <Poco/MongoDB/Element.h>
#include <Poco/MongoDB/ObjectId.h>
#pragma GCC diagnostic pop
#include <Poco/MongoDB/Connection.h>
#include <Poco/MongoDB/Cursor.h>
#include <Poco/MongoDB/Element.h>
#include <Poco/MongoDB/ObjectId.h>
#include <Dictionaries/DictionaryStructure.h>
#include <Dictionaries/MongoDBBlockInputStream.h>

View File

@ -2,14 +2,11 @@
#if USE_POCO_MONGODB
#include <Poco/Util/AbstractConfiguration.h>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#include <Poco/MongoDB/Connection.h>
#include <Poco/MongoDB/Database.h>
#include <Poco/MongoDB/Cursor.h>
#include <Poco/MongoDB/Array.h>
#include <Poco/MongoDB/ObjectId.h>
#pragma GCC diagnostic pop
#include <Poco/MongoDB/Connection.h>
#include <Poco/MongoDB/Database.h>
#include <Poco/MongoDB/Cursor.h>
#include <Poco/MongoDB/Array.h>
#include <Poco/MongoDB/ObjectId.h>
#include <Poco/Version.h>

View File

@ -167,10 +167,10 @@ std::string MySQLDictionarySource::quoteForLike(const std::string s)
LocalDateTime MySQLDictionarySource::getLastModification() const
{
LocalDateTime update_time{std::time(nullptr)};
LocalDateTime modification_time{std::time(nullptr)};
if (dont_check_update_time)
return update_time;
return modification_time;
try
{
@ -190,8 +190,8 @@ LocalDateTime MySQLDictionarySource::getLastModification() const
if (!update_time_value.isNull())
{
update_time = update_time_value.getDateTime();
LOG_TRACE(log, "Got update time: " << update_time);
modification_time = update_time_value.getDateTime();
LOG_TRACE(log, "Got modification time: " << modification_time);
}
/// fetch remaining rows to avoid "commands out of sync" error
@ -211,15 +211,15 @@ LocalDateTime MySQLDictionarySource::getLastModification() const
}
/// we suppose failure to get modification time is not an error, therefore return current time
return update_time;
return modification_time;
}
std::string MySQLDictionarySource::doInvalidateQuery(const std::string & request) const
{
Block sample_block;
Block invalidate_sample_block;
ColumnPtr column(ColumnString::create());
sample_block.insert(ColumnWithTypeAndName(column, std::make_shared<DataTypeString>(), "Sample Block"));
MySQLBlockInputStream block_input_stream(pool.Get(), request, sample_block, 1);
invalidate_sample_block.insert(ColumnWithTypeAndName(column, std::make_shared<DataTypeString>(), "Sample Block"));
MySQLBlockInputStream block_input_stream(pool.Get(), request, invalidate_sample_block, 1);
return readInvalidateQuery(block_input_stream);
}

View File

@ -4,12 +4,9 @@
#include <DataStreams/IProfilingBlockInputStream.h>
#include <Dictionaries/ExternalResultDescription.h>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#include <Poco/Data/Session.h>
#include <Poco/Data/Statement.h>
#include <Poco/Data/RecordSet.h>
#pragma GCC diagnostic pop
#include <Poco/Data/Session.h>
#include <Poco/Data/Statement.h>
#include <Poco/Data/RecordSet.h>
#include <string>

View File

@ -145,10 +145,10 @@ bool ODBCDictionarySource::isModified() const
std::string ODBCDictionarySource::doInvalidateQuery(const std::string & request) const
{
Block sample_block;
Block invalidate_sample_block;
ColumnPtr column(ColumnString::create());
sample_block.insert(ColumnWithTypeAndName(column, std::make_shared<DataTypeString>(), "Sample Block"));
ODBCBlockInputStream block_input_stream(pool->get(), request, sample_block, 1);
invalidate_sample_block.insert(ColumnWithTypeAndName(column, std::make_shared<DataTypeString>(), "Sample Block"));
ODBCBlockInputStream block_input_stream(pool->get(), request, invalidate_sample_block, 1);
return readInvalidateQuery(block_input_stream);
}

View File

@ -1,13 +1,12 @@
#pragma once
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#include <Poco/Data/SessionPool.h>
#pragma GCC diagnostic pop
#include <Dictionaries/IDictionarySource.h>
#include <Dictionaries/ExternalQueryBuilder.h>
#include <Dictionaries/DictionaryStructure.h>
namespace Poco
{
namespace Util

View File

@ -189,22 +189,22 @@ std::string validateODBCConnectionString(const std::string & connection_string)
{
reconstructed_connection_string += '{';
const char * pos = value.data();
const char * end = pos + value.size();
const char * value_pos = value.data();
const char * value_end = value_pos + value.size();
while (true)
{
const char * next_pos = find_first_symbols<'}'>(pos, end);
const char * next_pos = find_first_symbols<'}'>(value_pos, value_end);
if (next_pos == end)
if (next_pos == value_end)
{
reconstructed_connection_string.append(pos, next_pos - pos);
reconstructed_connection_string.append(value_pos, next_pos - value_pos);
break;
}
else
{
reconstructed_connection_string.append(pos, next_pos - pos);
reconstructed_connection_string.append(value_pos, next_pos - value_pos);
reconstructed_connection_string.append("}}");
pos = next_pos + 1;
value_pos = next_pos + 1;
}
}
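The loop rebuilds a `{...}`-quoted ODBC attribute value, doubling every literal `}` inside it. The same escaping rule in isolation, as a minimal sketch (not from the commit; the function name is illustrative):

```cpp
// Not from the commit: ODBC braced values escape '}' by doubling it, so
// ab}cd becomes {ab}}cd}.
#include <string>

std::string escapeODBCValue(const std::string & value)
{
    std::string result = "{";
    for (char c : value)
    {
        result += c;
        if (c == '}')   /// double every literal closing brace
            result += '}';
    }
    result += '}';
    return result;
}
```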

View File

@ -120,6 +120,7 @@ void registerOutputFormatJSON(FormatFactory & factory);
void registerOutputFormatJSONCompact(FormatFactory & factory);
void registerOutputFormatXML(FormatFactory & factory);
void registerOutputFormatODBCDriver(FormatFactory & factory);
void registerOutputFormatODBCDriver2(FormatFactory & factory);
void registerOutputFormatNull(FormatFactory & factory);
/// Input only formats.
@ -153,6 +154,7 @@ FormatFactory::FormatFactory()
registerOutputFormatJSONCompact(*this);
registerOutputFormatXML(*this);
registerOutputFormatODBCDriver(*this);
registerOutputFormatODBCDriver2(*this);
registerOutputFormatNull(*this);
}

View File

@ -0,0 +1,96 @@
#include <Core/Block.h>
#include <Formats/FormatFactory.h>
#include <Formats/ODBCDriver2BlockOutputStream.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h>
#include <Core/iostream_debug_helpers.h>
namespace DB
{
ODBCDriver2BlockOutputStream::ODBCDriver2BlockOutputStream(
WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings)
: out(out_), header(header_), format_settings(format_settings)
{
}
void ODBCDriver2BlockOutputStream::flush()
{
out.next();
}
void writeODBCString(WriteBuffer & out, const std::string & str)
{
writeIntBinary(Int32(str.size()), out);
out.write(str.data(), str.size());
}
void ODBCDriver2BlockOutputStream::write(const Block & block)
{
const size_t rows = block.rows();
const size_t columns = block.columns();
String text_value;
for (size_t i = 0; i < rows; ++i)
{
for (size_t j = 0; j < columns; ++j)
{
text_value.resize(0);
const ColumnWithTypeAndName & col = block.getByPosition(j);
if (col.column->isNullAt(i))
{
writeIntBinary(Int32(-1), out);
}
else
{
{
WriteBufferFromString text_out(text_value);
col.type->serializeText(*col.column, i, text_out, format_settings);
}
writeODBCString(out, text_value);
}
}
}
}
void ODBCDriver2BlockOutputStream::writePrefix()
{
const size_t columns = header.columns();
/// Number of header rows.
writeIntBinary(Int32(2), out);
/// Names of columns.
/// Number of columns + 1 for first name column.
writeIntBinary(Int32(columns + 1), out);
writeODBCString(out, "name");
for (size_t i = 0; i < columns; ++i)
{
const ColumnWithTypeAndName & col = header.getByPosition(i);
writeODBCString(out, col.name);
}
/// Types of columns.
writeIntBinary(Int32(columns + 1), out);
writeODBCString(out, "type");
for (size_t i = 0; i < columns; ++i)
{
const ColumnWithTypeAndName & col = header.getByPosition(i);
writeODBCString(out, col.type->getName());
}
}
void registerOutputFormatODBCDriver2(FormatFactory & factory)
{
factory.registerOutputFormat(
"ODBCDriver2", [](WriteBuffer & buf, const Block & sample, const Context &, const FormatSettings & format_settings)
{
return std::make_shared<ODBCDriver2BlockOutputStream>(buf, sample, format_settings);
});
}
}

View File

@ -0,0 +1,44 @@
#pragma once
#include <string>
#include <Core/Block.h>
#include <DataStreams/IBlockOutputStream.h>
#include <Formats/FormatSettings.h>
namespace DB
{
class WriteBuffer;
/** A data format designed to simplify the implementation of the ODBC driver.
* The ODBC driver is designed to be built for different platforms without dependencies on the main code,
* so the format is made as simple as possible to parse.
* A header with the required information is output first.
* The data is then output row by row. Each value is written as follows: its length as an Int32 (-1 for NULL), then the data in text form.
*/
class ODBCDriver2BlockOutputStream : public IBlockOutputStream
{
public:
ODBCDriver2BlockOutputStream(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings);
Block getHeader() const override
{
return header;
}
void write(const Block & block) override;
void writePrefix() override;
void flush() override;
std::string getContentType() const override
{
return "application/octet-stream";
}
private:
WriteBuffer & out;
const Block header;
const FormatSettings format_settings;
};
}
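Given that framing, a client can decode one field at a time. A minimal sketch (not from the commit) of a reader for the value encoding, assuming the native byte order that `writeIntBinary` produces:

```cpp
// Not from the commit: each field is an Int32 length prefix (-1 for NULL)
// followed by that many bytes of text, as described in the header comment.
#include <cstdint>
#include <istream>
#include <optional>
#include <string>

std::optional<std::string> readODBCField(std::istream & in)
{
    int32_t size = 0;
    in.read(reinterpret_cast<char *>(&size), sizeof(size));   /// length prefix
    if (size == -1)
        return std::nullopt;                                  /// NULL marker
    std::string value(static_cast<size_t>(size), '\0');
    if (size > 0)
        in.read(&value[0], size);                             /// raw text payload
    return value;
}
```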

View File

@ -16,18 +16,18 @@
*/
#if USE_VECTORCLASS
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wshift-negative-value"
#endif
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wshift-negative-value"
#endif
#include <vectorf128.h> // Y_IGNORE
#include <vectormath_exp.h> // Y_IGNORE
#include <vectormath_trig.h> // Y_IGNORE
#include <vectorf128.h> // Y_IGNORE
#include <vectormath_exp.h> // Y_IGNORE
#include <vectormath_trig.h> // Y_IGNORE
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif

View File

@ -44,7 +44,7 @@ public:
return curr_buffer;
}
~CascadeWriteBuffer();
~CascadeWriteBuffer() override;
private:

View File

@ -31,7 +31,7 @@ public:
const Poco::Timespan & send_timeout = Poco::Timespan(DEFAULT_REMOTE_WRITE_BUFFER_SEND_TIMEOUT, 0),
const Poco::Timespan & receive_timeout = Poco::Timespan(DEFAULT_REMOTE_WRITE_BUFFER_RECEIVE_TIMEOUT, 0));
~InterserverWriteBuffer();
~InterserverWriteBuffer() override;
void finalize();
void cancel();

View File

@ -206,10 +206,10 @@ inline void copyOverlap8Shuffle(UInt8 * op, const UInt8 *& match, const size_t o
template <> void inline copy<8>(UInt8 * dst, const UInt8 * src) { copy8(dst, src); };
template <> void inline wildCopy<8>(UInt8 * dst, const UInt8 * src, UInt8 * dst_end) { wildCopy8(dst, src, dst_end); };
template <> void inline copyOverlap<8, false>(UInt8 * op, const UInt8 *& match, const size_t offset) { copyOverlap8(op, match, offset); };
template <> void inline copyOverlap<8, true>(UInt8 * op, const UInt8 *& match, const size_t offset) { copyOverlap8Shuffle(op, match, offset); };
template <> void inline copy<8>(UInt8 * dst, const UInt8 * src) { copy8(dst, src); }
template <> void inline wildCopy<8>(UInt8 * dst, const UInt8 * src, UInt8 * dst_end) { wildCopy8(dst, src, dst_end); }
template <> void inline copyOverlap<8, false>(UInt8 * op, const UInt8 *& match, const size_t offset) { copyOverlap8(op, match, offset); }
template <> void inline copyOverlap<8, true>(UInt8 * op, const UInt8 *& match, const size_t offset) { copyOverlap8Shuffle(op, match, offset); }
inline void copy16(UInt8 * dst, const UInt8 * src)
@ -337,10 +337,10 @@ inline void copyOverlap16Shuffle(UInt8 * op, const UInt8 *& match, const size_t
#endif
template <> void inline copy<16>(UInt8 * dst, const UInt8 * src) { copy16(dst, src); };
template <> void inline wildCopy<16>(UInt8 * dst, const UInt8 * src, UInt8 * dst_end) { wildCopy16(dst, src, dst_end); };
template <> void inline copyOverlap<16, false>(UInt8 * op, const UInt8 *& match, const size_t offset) { copyOverlap16(op, match, offset); };
template <> void inline copyOverlap<16, true>(UInt8 * op, const UInt8 *& match, const size_t offset) { copyOverlap16Shuffle(op, match, offset); };
template <> void inline copy<16>(UInt8 * dst, const UInt8 * src) { copy16(dst, src); }
template <> void inline wildCopy<16>(UInt8 * dst, const UInt8 * src, UInt8 * dst_end) { wildCopy16(dst, src, dst_end); }
template <> void inline copyOverlap<16, false>(UInt8 * op, const UInt8 *& match, const size_t offset) { copyOverlap16(op, match, offset); }
template <> void inline copyOverlap<16, true>(UInt8 * op, const UInt8 *& match, const size_t offset) { copyOverlap16Shuffle(op, match, offset); }
/// See also https://stackoverflow.com/a/30669632

View File

@ -75,7 +75,7 @@ struct PerformanceStatistics
sum += seconds / bytes;
}
double sample(pcg64 & rng) const
double sample(pcg64 & stat_rng) const
{
/// If there is a variant with not enough statistics, always choose it.
/// And in that case prefer the variant with the fewest invocations.
@ -83,7 +83,7 @@ struct PerformanceStatistics
if (adjustedCount() < 2)
return adjustedCount() - 1;
else
return std::normal_distribution<>(mean(), sigma())(rng);
return std::normal_distribution<>(mean(), sigma())(stat_rng);
}
};
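
The comments above describe a simple explore-then-exploit scheme: each variant tracks statistics of its cost, variants with too few observations are forced to run first, and otherwise the variant with the smallest value drawn from a normal distribution around its mean wins. A minimal self-contained sketch of that idea (using std::mt19937_64 in place of pcg64; the names and the fixed sigma are illustrative, not the actual interface):

#include <cstddef>
#include <random>
#include <vector>

struct VariantStats
{
    size_t count = 0;
    double mean = 0.0;   // running mean of the cost (e.g. seconds per byte)
    double sigma = 0.01; // spread estimate; a fixed placeholder here

    double sample(std::mt19937_64 & rng) const
    {
        if (count < 2)
            return static_cast<double>(count) - 2.0; // force under-sampled variants first
        return std::normal_distribution<>(mean, sigma)(rng);
    }
};

// Pick the variant with the smallest sampled cost.
size_t pickVariant(const std::vector<VariantStats> & variants, std::mt19937_64 & rng)
{
    size_t best = 0;
    double best_score = variants[0].sample(rng);
    for (size_t i = 1; i < variants.size(); ++i)
    {
        double score = variants[i].sample(rng);
        if (score < best_score)
        {
            best_score = score;
            best = i;
        }
    }
    return best;
}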

View File

@ -14,7 +14,7 @@ namespace DB
class MMapReadBufferFromFileDescriptor : public ReadBuffer
{
protected:
MMapReadBufferFromFileDescriptor() : ReadBuffer(nullptr, 0) {};
MMapReadBufferFromFileDescriptor() : ReadBuffer(nullptr, 0) {}
void init(int fd_, size_t offset, size_t length_);
void init(int fd_, size_t offset);

View File

@ -33,7 +33,7 @@ public:
return setChunk();
}
~ReadBufferFromMemoryWriteBuffer()
~ReadBufferFromMemoryWriteBuffer() override
{
for (const auto & range : chunk_list)
free(range.begin(), range.size());

View File

@ -133,7 +133,7 @@ bool ReadBufferFromFileDescriptor::poll(size_t timeout_microseconds)
FD_SET(fd, &fds);
timeval timeout = { time_t(timeout_microseconds / 1000000), suseconds_t(timeout_microseconds % 1000000) };
int res = select(1, &fds, 0, 0, &timeout);
int res = select(1, &fds, nullptr, nullptr, &timeout);
if (-1 == res)
throwFromErrno("Cannot select", ErrorCodes::CANNOT_SELECT);

View File

@ -30,7 +30,7 @@ bool ReadBufferFromPocoSocket::nextImpl()
{
throw NetException(e.displayText(), "while reading from socket (" + peer_address.toString() + ")", ErrorCodes::NETWORK_ERROR);
}
catch (const Poco::TimeoutException & e)
catch (const Poco::TimeoutException &)
{
throw NetException("Timeout exceeded while reading from socket (" + peer_address.toString() + ")", ErrorCodes::SOCKET_TIMEOUT);
}

View File

@ -128,7 +128,7 @@ public:
send_progress_interval_ms = send_progress_interval_ms_;
}
~WriteBufferFromHTTPServerResponse();
~WriteBufferFromHTTPServerResponse() override;
};
}

View File

@ -36,7 +36,7 @@ void WriteBufferFromPocoSocket::nextImpl()
{
throw NetException(e.displayText() + " while writing to socket (" + peer_address.toString() + ")", ErrorCodes::NETWORK_ERROR);
}
catch (const Poco::TimeoutException & e)
catch (const Poco::TimeoutException &)
{
throw NetException("Timeout exceeded while writing to socket (" + peer_address.toString() + ")", ErrorCodes::SOCKET_TIMEOUT);
}

View File

@ -419,12 +419,12 @@ inline void writeProbablyQuotedStringImpl(const String & s, WriteBuffer & buf, F
inline void writeProbablyBackQuotedString(const String & s, WriteBuffer & buf)
{
writeProbablyQuotedStringImpl(s, buf, [](const String & s, WriteBuffer & buf) { return writeBackQuotedString(s, buf); });
writeProbablyQuotedStringImpl(s, buf, [](const String & s_, WriteBuffer & buf_) { return writeBackQuotedString(s_, buf_); });
}
inline void writeProbablyDoubleQuotedString(const String & s, WriteBuffer & buf)
{
writeProbablyQuotedStringImpl(s, buf, [](const String & s, WriteBuffer & buf) { return writeDoubleQuotedString(s, buf); });
writeProbablyQuotedStringImpl(s, buf, [](const String & s_, WriteBuffer & buf_) { return writeDoubleQuotedString(s_, buf_); });
}

View File

@ -17,9 +17,9 @@ ZlibDeflatingWriteBuffer::ZlibDeflatingWriteBuffer(
zstr.zalloc = Z_NULL;
zstr.zfree = Z_NULL;
zstr.opaque = Z_NULL;
zstr.next_in = 0;
zstr.next_in = nullptr;
zstr.avail_in = 0;
zstr.next_out = 0;
zstr.next_out = nullptr;
zstr.avail_out = 0;
int window_bits = 15;

View File

@ -17,9 +17,9 @@ ZlibInflatingReadBuffer::ZlibInflatingReadBuffer(
zstr.zalloc = Z_NULL;
zstr.zfree = Z_NULL;
zstr.opaque = Z_NULL;
zstr.next_in = 0;
zstr.next_in = nullptr;
zstr.avail_in = 0;
zstr.next_out = 0;
zstr.next_out = nullptr;
zstr.avail_out = 0;
int window_bits = 15;

View File

@ -1601,7 +1601,7 @@ public:
Block getHeader() const override { return aggregator.getHeader(final); }
~MergingAndConvertingBlockInputStream()
~MergingAndConvertingBlockInputStream() override
{
LOG_TRACE(&Logger::get(__PRETTY_FUNCTION__), "Waiting for threads to finish");

View File

@ -201,23 +201,18 @@ Cluster::Cluster(Poco::Util::AbstractConfiguration & config, const Settings & se
info.weight = weight;
if (address.is_local)
{
info.local_addresses.push_back(address);
info.per_replica_pools = {nullptr};
}
else
{
ConnectionPoolPtr pool = std::make_shared<ConnectionPool>(
settings.distributed_connections_pool_size,
address.host_name, address.port,
address.default_database, address.user, address.password,
ConnectionTimeouts::getTCPTimeoutsWithoutFailover(settings).getSaturated(settings.max_execution_time),
"server", address.compression, address.secure);
info.pool = std::make_shared<ConnectionPoolWithFailover>(
ConnectionPoolPtrs{pool}, settings.load_balancing, settings.connections_with_failover_max_tries);
info.per_replica_pools = {std::move(pool)};
}
ConnectionPoolPtr pool = std::make_shared<ConnectionPool>(
settings.distributed_connections_pool_size,
address.host_name, address.port,
address.default_database, address.user, address.password,
ConnectionTimeouts::getTCPTimeoutsWithoutFailover(settings).getSaturated(settings.max_execution_time),
"server", address.compression, address.secure);
info.pool = std::make_shared<ConnectionPoolWithFailover>(
ConnectionPoolPtrs{pool}, settings.load_balancing, settings.connections_with_failover_max_tries);
info.per_replica_pools = {std::move(pool)};
if (weight)
slot_to_shard.insert(std::end(slot_to_shard), weight, shards_info.size());
@ -276,36 +271,25 @@ Cluster::Cluster(Poco::Util::AbstractConfiguration & config, const Settings & se
Addresses shard_local_addresses;
ConnectionPoolPtrs remote_replicas_pools;
ConnectionPoolPtrs all_replicas_pools;
remote_replicas_pools.reserve(replica_addresses.size());
all_replicas_pools.reserve(replica_addresses.size());
for (const auto & replica : replica_addresses)
{
if (replica.is_local)
{
shard_local_addresses.push_back(replica);
all_replicas_pools.emplace_back(nullptr);
}
else
{
auto replica_pool = std::make_shared<ConnectionPool>(
settings.distributed_connections_pool_size,
replica.host_name, replica.port,
replica.default_database, replica.user, replica.password,
ConnectionTimeouts::getTCPTimeoutsWithFailover(settings).getSaturated(settings.max_execution_time),
"server", replica.compression, replica.secure);
auto replica_pool = std::make_shared<ConnectionPool>(
settings.distributed_connections_pool_size,
replica.host_name, replica.port,
replica.default_database, replica.user, replica.password,
ConnectionTimeouts::getTCPTimeoutsWithFailover(settings).getSaturated(settings.max_execution_time),
"server", replica.compression, replica.secure);
remote_replicas_pools.emplace_back(replica_pool);
all_replicas_pools.emplace_back(replica_pool);
}
all_replicas_pools.emplace_back(replica_pool);
if (replica.is_local)
shard_local_addresses.push_back(replica);
}
ConnectionPoolWithFailoverPtr shard_pool;
if (!remote_replicas_pools.empty())
shard_pool = std::make_shared<ConnectionPoolWithFailover>(
std::move(remote_replicas_pools), settings.load_balancing, settings.connections_with_failover_max_tries);
ConnectionPoolWithFailoverPtr shard_pool = std::make_shared<ConnectionPoolWithFailover>(
all_replicas_pools, settings.load_balancing, settings.connections_with_failover_max_tries);
if (weight)
slot_to_shard.insert(std::end(slot_to_shard), weight, shards_info.size());
@ -341,32 +325,23 @@ Cluster::Cluster(const Settings & settings, const std::vector<std::vector<String
Addresses shard_local_addresses;
ConnectionPoolPtrs all_replicas;
ConnectionPoolPtrs remote_replicas;
all_replicas.reserve(current.size());
remote_replicas.reserve(current.size());
for (const auto & replica : current)
{
if (replica.is_local && !treat_local_as_remote)
{
shard_local_addresses.push_back(replica);
all_replicas.emplace_back(nullptr);
}
else
{
auto replica_pool = std::make_shared<ConnectionPool>(
auto replica_pool = std::make_shared<ConnectionPool>(
settings.distributed_connections_pool_size,
replica.host_name, replica.port,
replica.default_database, replica.user, replica.password,
ConnectionTimeouts::getTCPTimeoutsWithFailover(settings).getSaturated(settings.max_execution_time),
"server", replica.compression, replica.secure);
all_replicas.emplace_back(replica_pool);
remote_replicas.emplace_back(replica_pool);
}
all_replicas.emplace_back(replica_pool);
if (replica.is_local && !treat_local_as_remote)
shard_local_addresses.push_back(replica);
}
ConnectionPoolWithFailoverPtr shard_pool = std::make_shared<ConnectionPoolWithFailover>(
std::move(remote_replicas), settings.load_balancing, settings.connections_with_failover_max_tries);
all_replicas, settings.load_balancing, settings.connections_with_failover_max_tries);
slot_to_shard.insert(std::end(slot_to_shard), default_weight, shards_info.size());
shards_info.push_back({{}, current_shard_num, default_weight, std::move(shard_local_addresses), std::move(shard_pool),

View File

@ -99,7 +99,7 @@ public:
{
public:
bool isLocal() const { return !local_addresses.empty(); }
bool hasRemoteConnections() const { return pool != nullptr; }
bool hasRemoteConnections() const { return local_addresses.size() != per_replica_pools.size(); }
size_t getLocalNodeCount() const { return local_addresses.size(); }
bool hasInternalReplication() const { return has_internal_replication; }

View File

@ -90,7 +90,9 @@ void SelectStreamFactory::createForShard(
res.emplace_back(std::move(stream));
};
if (shard_info.isLocal())
const auto & settings = context.getSettingsRef();
if (settings.prefer_localhost_replica && shard_info.isLocal())
{
StoragePtr main_table_storage;
@ -106,22 +108,18 @@ void SelectStreamFactory::createForShard(
if (!main_table_storage) /// Table is absent on a local server.
{
ProfileEvents::increment(ProfileEvents::DistributedConnectionMissingTable);
if (shard_info.pool)
if (shard_info.hasRemoteConnections())
{
LOG_WARNING(
&Logger::get("ClusterProxy::SelectStreamFactory"),
"There is no table " << main_table.database << "." << main_table.table
<< " on local replica of shard " << shard_info.shard_num << ", will try remote replicas.");
emplace_remote_stream();
return;
}
else
{
/// Let it fail the usual way.
emplace_local_stream();
return;
}
emplace_local_stream(); /// Let it fail the usual way.
return;
}
const auto * replicated_storage = dynamic_cast<const StorageReplicatedMergeTree *>(main_table_storage.get());
@ -133,7 +131,6 @@ void SelectStreamFactory::createForShard(
return;
}
const Settings & settings = context.getSettingsRef();
UInt64 max_allowed_delay = settings.max_replica_delay_for_distributed_queries;
if (!max_allowed_delay)
@ -158,7 +155,7 @@ void SelectStreamFactory::createForShard(
if (!settings.fallback_to_stale_replicas_for_distributed_queries)
{
if (shard_info.pool)
if (shard_info.hasRemoteConnections())
{
/// If we cannot fall back, then we cannot use the local replica. Try our luck with remote replicas.
emplace_remote_stream();
@ -171,7 +168,7 @@ void SelectStreamFactory::createForShard(
ErrorCodes::ALL_REPLICAS_ARE_STALE);
}
if (!shard_info.pool)
if (!shard_info.hasRemoteConnections())
{
/// There are no remote replicas, but we are allowed to fall back to a stale local replica.
emplace_local_stream();
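
Taken together, the branches above form a small decision table over three conditions. A sketch restating that logic in isolation (the names are illustrative, not the actual interface; the case where fallback is allowed and remote replicas exist is simplified, since the diff does not show it):

enum class Choice { Local, Remote, ErrorAllReplicasStale };

Choice chooseReplica(bool local_is_stale, bool fallback_to_stale_allowed, bool has_remote_connections)
{
    if (!local_is_stale)
        return Choice::Local; // fresh local replica: use it
    if (!fallback_to_stale_allowed)
        return has_remote_connections
            ? Choice::Remote                  // try our luck with remote replicas
            : Choice::ErrorAllReplicasStale;  // nothing usable: ALL_REPLICAS_ARE_STALE
    if (!has_remote_connections)
        return Choice::Local; // stale, but we are allowed to fall back to it
    return Choice::Remote;    // simplified: the real code builds a lazy fallback stream here
}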

View File

@ -88,7 +88,7 @@ struct HostID
{
return DB::isLocalAddress(DNSResolver::instance().resolveAddress(host_name, port), clickhouse_port);
}
catch (const Poco::Net::NetException & e)
catch (const Poco::Net::NetException &)
{
/// Avoid "Host not found" exceptions
return false;
@ -578,7 +578,7 @@ void DDLWorker::processTask(DDLTask & task)
tryExecuteQuery(rewritten_query, task, task.execution_status);
}
}
catch (const zkutil::KeeperException & e)
catch (const zkutil::KeeperException &)
{
throw;
}

View File

@ -38,11 +38,11 @@ static bool isNetworkError()
if (e.code() == ErrorCodes::TIMEOUT_EXCEEDED || e.code() == ErrorCodes::ALL_CONNECTION_TRIES_FAILED)
return true;
}
catch (Poco::Net::DNSException & e)
catch (Poco::Net::DNSException &)
{
return true;
}
catch (Poco::TimeoutException & e)
catch (Poco::TimeoutException &)
{
return true;
}

View File

@ -127,7 +127,7 @@ DictionaryPtr DictionaryFactory::create(const std::string & name, const Poco::Ut
throw Exception{name + ": unknown dictionary layout type: " + layout_type,
ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG};
};
}
}

View File

@ -326,7 +326,7 @@ static DatabaseAndTableWithAlias getTableNameWithAliasFromTableExpression(const
throw Exception("Logical error: no known elements in ASTTableExpression", ErrorCodes::LOGICAL_ERROR);
return database_and_table_with_alias;
};
}
void ExpressionAnalyzer::translateQualifiedNames()

View File

@ -24,7 +24,7 @@ public:
size_t subquery_depth_ = 0,
bool only_analyze = false);
~InterpreterSelectWithUnionQuery();
~InterpreterSelectWithUnionQuery() override;
BlockIO execute() override;

View File

@ -268,6 +268,9 @@ struct Settings
M(SettingUInt64, enable_conditional_computation, 0, "Enable conditional computations") \
\
M(SettingDateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic' and 'best_effort'.") \
M(SettingBool, prefer_localhost_replica, 1, "1 - always send the query to the local replica if it exists. 0 - choose between local and remote replicas according to load_balancing") \
M(SettingUInt64, max_fetch_partition_retries_count, 5, "Number of retries when fetching a partition from another host.") \
#define DECLARE(TYPE, NAME, DEFAULT, DESCRIPTION) \
TYPE NAME {DEFAULT};
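
The settings hunk above relies on the X-macro pattern: the list of M(...) entries is expanded once per use site, and the DECLARE expansion shown at the end turns each entry into a struct member initialized with its default. A minimal self-contained sketch of the same pattern (the list and names here are hypothetical, not the actual settings):

#include <cstdint>
#include <iostream>

// X-macro list: each entry carries a type, name, default and description.
#define APPLY_FOR_MY_SETTINGS(M) \
    M(uint64_t, max_retries, 5, "Number of retries.") \
    M(bool, prefer_local, true, "Prefer the local replica.")

struct MySettings
{
    // Expand the list into one member per setting, mirroring DECLARE above.
#define DECLARE_MEMBER(TYPE, NAME, DEFAULT, DESCRIPTION) TYPE NAME {DEFAULT};
    APPLY_FOR_MY_SETTINGS(DECLARE_MEMBER)
#undef DECLARE_MEMBER
};

int main()
{
    MySettings s;
    std::cout << s.max_retries << ' ' << s.prefer_local << '\n'; // prints: 5 1
}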

View File

@ -18,7 +18,7 @@ add_executable (hash_map3 hash_map3.cpp)
target_link_libraries (hash_map3 dbms ${FARMHASH_LIBRARIES} ${METROHASH_LIBRARIES})
add_executable (hash_map_string hash_map_string.cpp)
target_include_directories (hash_map_string BEFORE PRIVATE ${SPARCEHASH_INCLUDE_DIR})
target_include_directories (hash_map_string SYSTEM BEFORE PRIVATE ${SPARCEHASH_INCLUDE_DIR})
target_link_libraries (hash_map_string dbms)
add_executable (hash_map_string_2 hash_map_string_2.cpp)

View File

@ -41,7 +41,7 @@ inline ASTPtr setAlias(ASTPtr ast, const String & alias)
{
ast->setAlias(alias);
return ast;
};
}
}

View File

@ -67,5 +67,5 @@ struct StringRangePointersEqualTo
}
};
};
}

View File

@ -135,7 +135,7 @@ public:
LOG_TRACE(log, "Row delimiter is: " << row_delimiter);
}
~ReadBufferFromKafkaConsumer() { reset(); }
~ReadBufferFromKafkaConsumer() override { reset(); }
/// Commit messages read with this consumer
void commit()
@ -144,7 +144,7 @@ public:
if (read_messages == 0)
return;
auto err = rd_kafka_commit(consumer, NULL, 1 /* async */);
auto err = rd_kafka_commit(consumer, nullptr, 1 /* async */);
if (err)
throw Exception("Failed to commit offsets: " + String(rd_kafka_err2str(err)), ErrorCodes::UNKNOWN_EXCEPTION);
@ -199,7 +199,7 @@ public:
return reader->read();
}
Block getHeader() const override { return reader->getHeader(); };
Block getHeader() const override { return reader->getHeader(); }
void readPrefixImpl() override
{

View File

@ -125,7 +125,7 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo
part->checksums.checkEqual(data_checksums, false);
}
catch (const NetException & e)
catch (const NetException &)
{
/// Network error or error on remote side. No need to enqueue part for check.
throw;

View File

@ -6,7 +6,7 @@
namespace DB
{
STRONG_TYPEDEF(UInt32, MergeTreeDataFormatVersion);
STRONG_TYPEDEF(UInt32, MergeTreeDataFormatVersion)
const MergeTreeDataFormatVersion MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING {1};

View File

@ -339,7 +339,7 @@ void MergeTreeDataPart::remove() const
{
from_dir.renameTo(to);
}
catch (const Poco::FileNotFoundException & e)
catch (const Poco::FileNotFoundException &)
{
LOG_ERROR(storage.log, "Directory " << from << " (part to remove) doesn't exist or one of its nested files has gone."
" Most likely this is due to manual removal. This should be discouraged. Ignoring.");

Some files were not shown because too many files have changed in this diff.