From 882747fde6974dde412711a463bc64a62c2a9f9f Mon Sep 17 00:00:00 2001
From: proller
Date: Fri, 28 Jun 2019 18:22:57 +0300
Subject: [PATCH 01/84] Fix building without submodules

---
 cmake/find_mimalloc.cmake                | 3 ++-
 libs/libcommon/cmake/find_jemalloc.cmake | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/cmake/find_mimalloc.cmake b/cmake/find_mimalloc.cmake
index 9ee785e0753..45ebf90a204 100644
--- a/cmake/find_mimalloc.cmake
+++ b/cmake/find_mimalloc.cmake
@@ -3,7 +3,8 @@ if (OS_LINUX AND NOT SANITIZE AND NOT ARCH_ARM AND NOT ARCH_32 AND NOT ARCH_PPC6
 endif ()

 if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/mimalloc/include/mimalloc.h")
-    message (WARNING "submodule contrib/mimalloc is missing. to fix try run: \n git submodule update --init --recursive")
+    message (WARNING "submodule contrib/mimalloc is missing. to fix try run: \n git submodule update --init --recursive")
+    return ()
 endif ()

 if (ENABLE_MIMALLOC)
diff --git a/libs/libcommon/cmake/find_jemalloc.cmake b/libs/libcommon/cmake/find_jemalloc.cmake
index 3a1b14d9c33..0b1c80c8934 100644
--- a/libs/libcommon/cmake/find_jemalloc.cmake
+++ b/libs/libcommon/cmake/find_jemalloc.cmake
@@ -7,7 +7,7 @@ endif ()
 option (ENABLE_JEMALLOC "Set to TRUE to use jemalloc" ${ENABLE_JEMALLOC_DEFAULT})
 if (OS_LINUX AND NOT ARCH_ARM)
     option (USE_INTERNAL_JEMALLOC_LIBRARY "Set to FALSE to use system jemalloc library instead of bundled" ${NOT_UNBUNDLED})
-elseif ()
+else()
     option (USE_INTERNAL_JEMALLOC_LIBRARY "Set to FALSE to use system jemalloc library instead of bundled" OFF)
 endif()
@@ -30,7 +30,7 @@ if (ENABLE_JEMALLOC)

     if (JEMALLOC_LIBRARIES)
         set (USE_JEMALLOC 1)
-    else ()
+    elseif (NOT MISSING_INTERNAL_JEMALLOC_LIBRARY)
         message (FATAL_ERROR "ENABLE_JEMALLOC is set to true, but library was not found")
     endif ()

From b89c38f0f09697f40053965fef14c19ad307e9e1 Mon Sep 17 00:00:00 2001
From: proller
Date: Fri, 28 Jun 2019 18:24:56 +0300
Subject: [PATCH 02/84] Fix more gcc9 warnings

---
 dbms/src/IO/ReadBufferAIO.cpp  | 2 +-
 dbms/src/IO/WriteBufferAIO.cpp | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/dbms/src/IO/ReadBufferAIO.cpp b/dbms/src/IO/ReadBufferAIO.cpp
index f47e04bff75..7aad9b1eebd 100644
--- a/dbms/src/IO/ReadBufferAIO.cpp
+++ b/dbms/src/IO/ReadBufferAIO.cpp
@@ -254,7 +254,7 @@ void ReadBufferAIO::prepare()
     /// Region of the disk from which we want to read data.
     const off_t region_begin = first_unread_pos_in_file;

-    if ((requested_byte_count > std::numeric_limits<off_t>::max()) ||
+    if ((static_cast<ssize_t>(requested_byte_count) > std::numeric_limits<ssize_t>::max()) ||
         (first_unread_pos_in_file > (std::numeric_limits<off_t>::max() - static_cast<off_t>(requested_byte_count))))
         throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR);

diff --git a/dbms/src/IO/WriteBufferAIO.cpp b/dbms/src/IO/WriteBufferAIO.cpp
index 2fe7da27809..e163124f418 100644
--- a/dbms/src/IO/WriteBufferAIO.cpp
+++ b/dbms/src/IO/WriteBufferAIO.cpp
@@ -274,7 +274,7 @@ void WriteBufferAIO::prepare()
     /// Region of the disk in which we want to write data.
     const off_t region_begin = pos_in_file;

-    if ((flush_buffer.offset() > std::numeric_limits<off_t>::max()) ||
+    if ((static_cast<ssize_t>(flush_buffer.offset()) > std::numeric_limits<ssize_t>::max()) ||
         (pos_in_file > (std::numeric_limits<off_t>::max() - static_cast<off_t>(flush_buffer.offset()))))
         throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR);

From a5e2a725d296d7b67ec6dba6062863aec4da1de2 Mon Sep 17 00:00:00 2001
From: proller
Date: Fri, 28 Jun 2019 20:23:11 +0300
Subject: [PATCH 03/84] was wrong!

../dbms/src/IO/WriteBufferAIO.cpp:277:54: error: result of comparison 'ssize_t' (aka 'long') > 9223372036854775807 is always false [-Werror,-Wtautological-type-limit-compare]
    if ((static_cast<ssize_t>(flush_buffer.offset()) > std::numeric_limits<ssize_t>::max()) ||
         ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
---
 cmake/find_mimalloc.cmake      | 2 +-
 dbms/src/IO/ReadBufferAIO.cpp  | 2 +-
 dbms/src/IO/WriteBufferAIO.cpp | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/cmake/find_mimalloc.cmake b/cmake/find_mimalloc.cmake
index 45ebf90a204..6e3f24625b6 100644
--- a/cmake/find_mimalloc.cmake
+++ b/cmake/find_mimalloc.cmake
@@ -4,7 +4,7 @@ endif ()

 if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/mimalloc/include/mimalloc.h")
     message (WARNING "submodule contrib/mimalloc is missing. to fix try run: \n git submodule update --init --recursive")
-    return ()
+    return()
 endif ()

 if (ENABLE_MIMALLOC)
diff --git a/dbms/src/IO/ReadBufferAIO.cpp b/dbms/src/IO/ReadBufferAIO.cpp
index 7aad9b1eebd..f47e04bff75 100644
--- a/dbms/src/IO/ReadBufferAIO.cpp
+++ b/dbms/src/IO/ReadBufferAIO.cpp
@@ -254,7 +254,7 @@ void ReadBufferAIO::prepare()
     /// Region of the disk from which we want to read data.
     const off_t region_begin = first_unread_pos_in_file;

-    if ((static_cast<ssize_t>(requested_byte_count) > std::numeric_limits<ssize_t>::max()) ||
+    if ((requested_byte_count > std::numeric_limits<off_t>::max()) ||
         (first_unread_pos_in_file > (std::numeric_limits<off_t>::max() - static_cast<off_t>(requested_byte_count))))
         throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR);

diff --git a/dbms/src/IO/WriteBufferAIO.cpp b/dbms/src/IO/WriteBufferAIO.cpp
index e163124f418..2fe7da27809 100644
--- a/dbms/src/IO/WriteBufferAIO.cpp
+++ b/dbms/src/IO/WriteBufferAIO.cpp
@@ -274,7 +274,7 @@ void WriteBufferAIO::prepare()
     /// Region of the disk in which we want to write data.
     const off_t region_begin = pos_in_file;

-    if ((static_cast<ssize_t>(flush_buffer.offset()) > std::numeric_limits<ssize_t>::max()) ||
+    if ((flush_buffer.offset() > std::numeric_limits<off_t>::max()) ||
         (pos_in_file > (std::numeric_limits<off_t>::max() - static_cast<off_t>(flush_buffer.offset()))))
         throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR);
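Patches 02 and 03 circle a real pitfall: casting the unsigned byte count to a signed type makes the comparison against the signed maximum tautologically false, which clang rejects under -Wtautological-type-limit-compare. The form this series finally settles on in patch 18 converts the signed limit to the unsigned type instead. A minimal self-contained sketch of that check (function name and signature are illustrative, not from the diffs):

#include <cstddef>
#include <limits>
#include <sys/types.h>

/// Compare an unsigned size against off_t's range without implicit sign
/// conversion: widen the signed limit to size_t, never the unsigned
/// count to a signed type.
bool fits_into_off_t(size_t byte_count, off_t file_pos)
{
    if (byte_count > static_cast<size_t>(std::numeric_limits<off_t>::max()))
        return false;
    /// Now the cast to off_t cannot overflow, and the headroom check can
    /// be done entirely in the signed domain.
    return file_pos <= std::numeric_limits<off_t>::max() - static_cast<off_t>(byte_count);
}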
From a504baf0b32b7a3a5ea34b9a530f42d54e6d8d50 Mon Sep 17 00:00:00 2001
From: Danila Kutenin
Date: Fri, 28 Jun 2019 20:33:47 +0300
Subject: [PATCH 04/84] mimalloc off MI_OVERRIDE

---
 contrib/mimalloc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/contrib/mimalloc b/contrib/mimalloc
index b4ece3482f9..a787bdebce9 160000
--- a/contrib/mimalloc
+++ b/contrib/mimalloc
@@ -1 +1 @@
-Subproject commit b4ece3482f944b5d07d889cbaebaf9aa6c56cc03
+Subproject commit a787bdebce94bf3776dc0d1ad597917f479ab8d5

From 828f4e1d2923dfdaccbdddc0870faef5d529bf82 Mon Sep 17 00:00:00 2001
From: proller
Date: Mon, 15 Jul 2019 15:27:12 +0300
Subject: [PATCH 05/84] Fix freebsd build

---
 dbms/src/Common/Arena.h              | 26 +++++++++++++++++++++++++-
 dbms/src/Common/ArenaWithFreeLists.h |  8 +++++++-
 2 files changed, 32 insertions(+), 2 deletions(-)

diff --git a/dbms/src/Common/Arena.h b/dbms/src/Common/Arena.h
index 05154f56c19..0f04e7fc762 100644
--- a/dbms/src/Common/Arena.h
+++ b/dbms/src/Common/Arena.h
@@ -5,7 +5,9 @@
 #include
 #include
 #include
-#include <sanitizer/asan_interface.h>
+#if __has_include(<sanitizer/asan_interface.h>)
+#   include <sanitizer/asan_interface.h>
+#endif
 #include
 #include
 #include
@@ -55,7 +57,9 @@ private:
             end = begin + size_ - pad_right;
             prev = prev_;

+#if __has_include(<sanitizer/asan_interface.h>)
             ASAN_POISON_MEMORY_REGION(begin, size_);
+#endif
         }

         ~Chunk()
@@ -64,7 +68,9 @@ private:
             /// because the allocator might not have asan integration, and the
             /// memory would stay poisoned forever. If the allocator supports
             /// asan, it will correctly poison the memory by itself.
+#if __has_include(<sanitizer/asan_interface.h>)
             ASAN_UNPOISON_MEMORY_REGION(begin, size());
+#endif

             Allocator::free(begin, size());
@@ -135,7 +141,11 @@ public:
         char * res = head->pos;
         head->pos += size;
+
+#if __has_include(<sanitizer/asan_interface.h>)
         ASAN_UNPOISON_MEMORY_REGION(res, size + pad_right);
+#endif
+
         return res;
     }
@@ -152,7 +162,11 @@ public:
         {
             head->pos = static_cast<char *>(head_pos);
             head->pos += size;
+
+#if __has_include(<sanitizer/asan_interface.h>)
             ASAN_UNPOISON_MEMORY_REGION(res, size + pad_right);
+#endif
+
             return res;
         }
@@ -172,7 +186,9 @@ public:
     void rollback(size_t size)
     {
         head->pos -= size;
+#if __has_include(<sanitizer/asan_interface.h>)
        ASAN_POISON_MEMORY_REGION(head->pos, size + pad_right);
+#endif
     }

    /** Begin or expand allocation of contiguous piece of memory without alignment.
@@ -199,7 +215,9 @@ public:
         if (!begin)
             begin = res;

+#if __has_include(<sanitizer/asan_interface.h>)
         ASAN_UNPOISON_MEMORY_REGION(res, size + pad_right);
+#endif
         return res;
     }
@@ -232,7 +250,9 @@ public:
         if (!begin)
             begin = res;

+#if __has_include(<sanitizer/asan_interface.h>)
         ASAN_UNPOISON_MEMORY_REGION(res, size + pad_right);
+#endif
         return res;
     }
@@ -243,7 +263,9 @@ public:
         if (old_data)
         {
             memcpy(res, old_data, old_size);
+#if __has_include(<sanitizer/asan_interface.h>)
             ASAN_POISON_MEMORY_REGION(old_data, old_size);
+#endif
         }
         return res;
     }
@@ -254,7 +276,9 @@ public:
         if (old_data)
         {
             memcpy(res, old_data, old_size);
+#if __has_include(<sanitizer/asan_interface.h>)
             ASAN_POISON_MEMORY_REGION(old_data, old_size);
+#endif
         }
         return res;
     }
diff --git a/dbms/src/Common/ArenaWithFreeLists.h b/dbms/src/Common/ArenaWithFreeLists.h
index 2f1f0ddfb0b..79cd45c0572 100644
--- a/dbms/src/Common/ArenaWithFreeLists.h
+++ b/dbms/src/Common/ArenaWithFreeLists.h
@@ -1,6 +1,8 @@
 #pragma once

-#include <sanitizer/asan_interface.h>
+#if __has_include(<sanitizer/asan_interface.h>)
+#   include <sanitizer/asan_interface.h>
+#endif
 #include
 #include
@@ -68,8 +70,10 @@ public:
             /// item in the list. We poisoned the free block before putting
             /// it into the free list, so we have to unpoison it before
             /// reading anything.
+#if __has_include(<sanitizer/asan_interface.h>)
             ASAN_UNPOISON_MEMORY_REGION(free_block_ptr, std::max(size, sizeof(Block)));
+#endif

             const auto res = free_block_ptr->data;
             free_block_ptr = free_block_ptr->next;
@@ -100,7 +104,9 @@ public:
         /// destructor, to support an underlying allocator that doesn't
         /// integrate with asan. We don't do that, and rely on the fact that
         /// our underlying allocator is Arena, which does have asan integration.
+#if __has_include(<sanitizer/asan_interface.h>)
         ASAN_POISON_MEMORY_REGION(ptr, 1ULL << (list_idx + 1));
+#endif
     }

     /// Size of the allocated pool in bytes
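The FreeBSD fix above guards every ASan annotation with __has_include, so the code still compiles where <sanitizer/asan_interface.h> does not exist. A minimal sketch of the pattern on its own (the macro names are the real sanitizer API; the no-op fallback mirrors what patch 12 later centralizes in Core/Defines.h, and the function is illustrative):

#include <cstddef>

#if __has_include(<sanitizer/asan_interface.h>)
#    include <sanitizer/asan_interface.h>
#else
/// Fallback no-ops, so call sites need no #ifdef of their own.
#    define ASAN_POISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
#    define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
#endif

void retire_buffer(char * buf, size_t n)
{
    /// Under ASan, any later read or write of buf[0..n) is reported;
    /// on platforms without the header this compiles to nothing.
    ASAN_POISON_MEMORY_REGION(buf, n);
}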
From b2eb9c3e57436d1737e7140848bc1e3a4e9c58c4 Mon Sep 17 00:00:00 2001
From: proller
Date: Mon, 15 Jul 2019 17:16:29 +0300
Subject: [PATCH 06/84] Fix shared build

---
 CMakeLists.txt | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6262d17f2d2..8d3a2b84864 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -251,7 +251,7 @@ if (USE_STATIC_LIBRARIES AND HAVE_NO_PIE)
     set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${FLAG_NO_PIE}")
 endif ()

-if (NOT SANITIZE)
+if (NOT SANITIZE AND NOT SPLIT_SHARED_LIBRARIES)
     set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-undefined")
     set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-undefined")
 endif()
@@ -301,7 +301,11 @@ if (OS_LINUX AND NOT UNBUNDLED AND (GLIBC_COMPATIBILITY OR USE_INTERNAL_UNWIND_L

     if (USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING)
         # TODO: Allow to use non-static library as well.
-        set (EXCEPTION_HANDLING_LIBRARY "${ClickHouse_BINARY_DIR}/contrib/libunwind-cmake/libunwind_static${${CMAKE_POSTFIX_VARIABLE}}.a")
+        if (USE_STATIC_LIBRARIES)
+            set (EXCEPTION_HANDLING_LIBRARY "${ClickHouse_BINARY_DIR}/contrib/libunwind-cmake/libunwind_static${${CMAKE_POSTFIX_VARIABLE}}.a")
+        else ()
+            set (EXCEPTION_HANDLING_LIBRARY "${ClickHouse_BINARY_DIR}/contrib/libunwind-cmake/libunwind_shared${${CMAKE_POSTFIX_VARIABLE}}.so")
+        endif ()
     else ()
         set (EXCEPTION_HANDLING_LIBRARY "-lgcc_eh")
     endif ()

From 7695ff8035805e7553641112573505e303d0da26 Mon Sep 17 00:00:00 2001
From: proller
Date: Mon, 15 Jul 2019 18:02:43 +0300
Subject: [PATCH 07/84] Fix build in gcc9

---
 dbms/src/IO/ReadBufferAIO.cpp  | 2 +-
 dbms/src/IO/WriteBufferAIO.cpp | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/dbms/src/IO/ReadBufferAIO.cpp b/dbms/src/IO/ReadBufferAIO.cpp
index f47e04bff75..162a83b7b94 100644
--- a/dbms/src/IO/ReadBufferAIO.cpp
+++ b/dbms/src/IO/ReadBufferAIO.cpp
@@ -254,7 +254,7 @@ void ReadBufferAIO::prepare()
     /// Region of the disk from which we want to read data.
     const off_t region_begin = first_unread_pos_in_file;

-    if ((requested_byte_count > std::numeric_limits<off_t>::max()) ||
+    if ((static_cast<ssize_t>(requested_byte_count) > std::numeric_limits<ssize_t>::max()) ||
         (first_unread_pos_in_file > (std::numeric_limits<off_t>::max() - static_cast<off_t>(requested_byte_count))))
         throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR);

diff --git a/dbms/src/IO/WriteBufferAIO.cpp b/dbms/src/IO/WriteBufferAIO.cpp
index 2fe7da27809..37661473d9c 100644
--- a/dbms/src/IO/WriteBufferAIO.cpp
+++ b/dbms/src/IO/WriteBufferAIO.cpp
@@ -274,7 +274,7 @@ void WriteBufferAIO::prepare()
     /// Region of the disk in which we want to write data.
     const off_t region_begin = pos_in_file;

-    if ((flush_buffer.offset() > std::numeric_limits<off_t>::max()) ||
+    if ((static_cast<ssize_t>(flush_buffer.offset()) > std::numeric_limits<ssize_t>::max()) ||
         (pos_in_file > (std::numeric_limits<off_t>::max() - static_cast<off_t>(flush_buffer.offset()))))
         throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR);

From 9144e1f520e484fc0b3cf624d5adf8ea894a6bf0 Mon Sep 17 00:00:00 2001
From: proller
Date: Mon, 15 Jul 2019 18:54:14 +0300
Subject: [PATCH 08/84] Fix split build

---
 contrib/arrow-cmake/CMakeLists.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/contrib/arrow-cmake/CMakeLists.txt b/contrib/arrow-cmake/CMakeLists.txt
index c0b87efc63e..a7b6628ea4e 100644
--- a/contrib/arrow-cmake/CMakeLists.txt
+++ b/contrib/arrow-cmake/CMakeLists.txt
@@ -44,6 +44,7 @@ set( thriftcpp_threads_SOURCES
 add_library(${THRIFT_LIBRARY} ${thriftcpp_SOURCES} ${thriftcpp_threads_SOURCES})
 set_target_properties(${THRIFT_LIBRARY} PROPERTIES CXX_STANDARD 14) # REMOVE after https://github.com/apache/thrift/pull/1641
 target_include_directories(${THRIFT_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp/src PRIVATE ${Boost_INCLUDE_DIRS})
+target_link_libraries(${THRIFT_LIBRARY} PRIVATE Threads::Threads)

From 8caf119e53b88beb3901e44a85553ddea7f6f06b Mon Sep 17 00:00:00 2001
From: proller
Date: Mon, 15 Jul 2019 20:17:36 +0300
Subject: [PATCH 09/84] fix

---
 dbms/src/IO/ReadBufferAIO.cpp  | 2 +-
 dbms/src/IO/WriteBufferAIO.cpp | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/dbms/src/IO/ReadBufferAIO.cpp b/dbms/src/IO/ReadBufferAIO.cpp
index 162a83b7b94..f47e04bff75 100644
--- a/dbms/src/IO/ReadBufferAIO.cpp
+++ b/dbms/src/IO/ReadBufferAIO.cpp
@@ -254,7 +254,7 @@ void ReadBufferAIO::prepare()
     /// Region of the disk from which we want to read data.
     const off_t region_begin = first_unread_pos_in_file;

-    if ((static_cast<ssize_t>(requested_byte_count) > std::numeric_limits<ssize_t>::max()) ||
+    if ((requested_byte_count > std::numeric_limits<off_t>::max()) ||
         (first_unread_pos_in_file > (std::numeric_limits<off_t>::max() - static_cast<off_t>(requested_byte_count))))
         throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR);

diff --git a/dbms/src/IO/WriteBufferAIO.cpp b/dbms/src/IO/WriteBufferAIO.cpp
index 37661473d9c..2fe7da27809 100644
--- a/dbms/src/IO/WriteBufferAIO.cpp
+++ b/dbms/src/IO/WriteBufferAIO.cpp
@@ -274,7 +274,7 @@ void WriteBufferAIO::prepare()
     /// Region of the disk in which we want to write data.
     const off_t region_begin = pos_in_file;

-    if ((static_cast<ssize_t>(flush_buffer.offset()) > std::numeric_limits<ssize_t>::max()) ||
+    if ((flush_buffer.offset() > std::numeric_limits<off_t>::max()) ||
         (pos_in_file > (std::numeric_limits<off_t>::max() - static_cast<off_t>(flush_buffer.offset()))))
         throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR);

From f8d710dd8464c2af6784c399a9cc677624000bae Mon Sep 17 00:00:00 2001
From: proller
Date: Mon, 15 Jul 2019 20:44:03 +0300
Subject: [PATCH 10/84] fix

---
 dbms/src/IO/ReadBufferAIO.cpp  | 3 +++
 dbms/src/IO/WriteBufferAIO.cpp | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/dbms/src/IO/ReadBufferAIO.cpp b/dbms/src/IO/ReadBufferAIO.cpp
index f47e04bff75..eccebf854f0 100644
--- a/dbms/src/IO/ReadBufferAIO.cpp
+++ b/dbms/src/IO/ReadBufferAIO.cpp
@@ -254,9 +254,12 @@ void ReadBufferAIO::prepare()
     /// Region of the disk from which we want to read data.
     const off_t region_begin = first_unread_pos_in_file;

+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsign-compare"
     if ((requested_byte_count > std::numeric_limits<off_t>::max()) ||
         (first_unread_pos_in_file > (std::numeric_limits<off_t>::max() - static_cast<off_t>(requested_byte_count))))
         throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR);
+#pragma GCC diagnostic pop

     const off_t region_end = first_unread_pos_in_file + requested_byte_count;

diff --git a/dbms/src/IO/WriteBufferAIO.cpp b/dbms/src/IO/WriteBufferAIO.cpp
index 2fe7da27809..dcceff96d02 100644
--- a/dbms/src/IO/WriteBufferAIO.cpp
+++ b/dbms/src/IO/WriteBufferAIO.cpp
@@ -274,9 +274,12 @@ void WriteBufferAIO::prepare()
     /// Region of the disk in which we want to write data.
     const off_t region_begin = pos_in_file;

+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsign-compare"
     if ((flush_buffer.offset() > std::numeric_limits<off_t>::max()) ||
         (pos_in_file > (std::numeric_limits<off_t>::max() - static_cast<off_t>(flush_buffer.offset()))))
         throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR);
+#pragma GCC diagnostic pop

     const off_t region_end = pos_in_file + flush_buffer.offset();
     const size_t region_size = region_end - region_begin;

From c13ea83f91f45a2b0a333cd68fac6e0e3a7b78c4 Mon Sep 17 00:00:00 2001
From: proller
Date: Mon, 15 Jul 2019 21:00:40 +0300
Subject: [PATCH 11/84] fix

---
 CMakeLists.txt | 1 -
 1 file changed, 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8d3a2b84864..34b5936773c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -300,7 +300,6 @@ if (OS_LINUX AND NOT UNBUNDLED AND (GLIBC_COMPATIBILITY OR USE_INTERNAL_UNWIND_L
     # There are two variants of C++ library: libc++ (from LLVM compiler infrastructure) and libstdc++ (from GCC).

     if (USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING)
-        # TODO: Allow to use non-static library as well.
         if (USE_STATIC_LIBRARIES)
             set (EXCEPTION_HANDLING_LIBRARY "${ClickHouse_BINARY_DIR}/contrib/libunwind-cmake/libunwind_static${${CMAKE_POSTFIX_VARIABLE}}.a")
         else ()
             set (EXCEPTION_HANDLING_LIBRARY "${ClickHouse_BINARY_DIR}/contrib/libunwind-cmake/libunwind_shared${${CMAKE_POSTFIX_VARIABLE}}.so")
         endif ()

From bdd3bc816971cab1359e70adace15099d0d3eb74 Mon Sep 17 00:00:00 2001
From: proller
Date: Wed, 17 Jul 2019 12:21:32 +0300
Subject: [PATCH 12/84] fix

---
 dbms/src/Common/Arena.h              | 18 ------------------
 dbms/src/Common/ArenaWithFreeLists.h |  5 +----
 dbms/src/Core/Defines.h              |  5 +++++
 3 files changed, 6 insertions(+), 22 deletions(-)

diff --git a/dbms/src/Common/Arena.h b/dbms/src/Common/Arena.h
index 0f04e7fc762..e4c2e973095 100644
--- a/dbms/src/Common/Arena.h
+++ b/dbms/src/Common/Arena.h
@@ -57,9 +57,7 @@ private:
             end = begin + size_ - pad_right;
             prev = prev_;

-#if __has_include(<sanitizer/asan_interface.h>)
             ASAN_POISON_MEMORY_REGION(begin, size_);
-#endif
         }

         ~Chunk()
@@ -68,9 +66,7 @@ private:
             /// because the allocator might not have asan integration, and the
             /// memory would stay poisoned forever. If the allocator supports
             /// asan, it will correctly poison the memory by itself.
-#if __has_include(<sanitizer/asan_interface.h>)
             ASAN_UNPOISON_MEMORY_REGION(begin, size());
-#endif

             Allocator::free(begin, size());
@@ -142,9 +138,7 @@ public:
         char * res = head->pos;
         head->pos += size;

-#if __has_include(<sanitizer/asan_interface.h>)
         ASAN_UNPOISON_MEMORY_REGION(res, size + pad_right);
-#endif

         return res;
     }
@@ -163,9 +157,7 @@ public:
         {
             head->pos = static_cast<char *>(head_pos);
             head->pos += size;

-#if __has_include(<sanitizer/asan_interface.h>)
             ASAN_UNPOISON_MEMORY_REGION(res, size + pad_right);
-#endif

             return res;
         }
@@ -186,9 +178,7 @@ public:
     void rollback(size_t size)
     {
         head->pos -= size;
-#if __has_include(<sanitizer/asan_interface.h>)
        ASAN_POISON_MEMORY_REGION(head->pos, size + pad_right);
-#endif
     }

    /** Begin or expand allocation of contiguous piece of memory without alignment.
@@ -215,9 +205,7 @@ public:
         if (!begin)
             begin = res;

-#if __has_include(<sanitizer/asan_interface.h>)
         ASAN_UNPOISON_MEMORY_REGION(res, size + pad_right);
-#endif
         return res;
     }
@@ -250,9 +238,7 @@ public:
         if (!begin)
             begin = res;

-#if __has_include(<sanitizer/asan_interface.h>)
         ASAN_UNPOISON_MEMORY_REGION(res, size + pad_right);
-#endif
         return res;
     }
@@ -263,9 +249,7 @@ public:
         if (old_data)
         {
             memcpy(res, old_data, old_size);
-#if __has_include(<sanitizer/asan_interface.h>)
             ASAN_POISON_MEMORY_REGION(old_data, old_size);
-#endif
         }
         return res;
     }
@@ -276,9 +260,7 @@ public:
         if (old_data)
         {
             memcpy(res, old_data, old_size);
-#if __has_include(<sanitizer/asan_interface.h>)
             ASAN_POISON_MEMORY_REGION(old_data, old_size);
-#endif
         }
         return res;
     }
diff --git a/dbms/src/Common/ArenaWithFreeLists.h b/dbms/src/Common/ArenaWithFreeLists.h
index 79cd45c0572..6092f03ce19 100644
--- a/dbms/src/Common/ArenaWithFreeLists.h
+++ b/dbms/src/Common/ArenaWithFreeLists.h
@@ -3,6 +3,7 @@
 #if __has_include(<sanitizer/asan_interface.h>)
 #   include <sanitizer/asan_interface.h>
 #endif
+#include <Core/Defines.h>
 #include
 #include
@@ -70,10 +71,8 @@ public:
             /// item in the list. We poisoned the free block before putting
             /// it into the free list, so we have to unpoison it before
             /// reading anything.
-#if __has_include(<sanitizer/asan_interface.h>)
             ASAN_UNPOISON_MEMORY_REGION(free_block_ptr, std::max(size, sizeof(Block)));
-#endif

             const auto res = free_block_ptr->data;
             free_block_ptr = free_block_ptr->next;
@@ -104,9 +103,7 @@ public:
         /// destructor, to support an underlying allocator that doesn't
         /// integrate with asan. We don't do that, and rely on the fact that
         /// our underlying allocator is Arena, which does have asan integration.
-#if __has_include(<sanitizer/asan_interface.h>)
         ASAN_POISON_MEMORY_REGION(ptr, 1ULL << (list_idx + 1));
-#endif
     }

     /// Size of the allocated pool in bytes
diff --git a/dbms/src/Core/Defines.h b/dbms/src/Core/Defines.h
index 461278fad3b..75d1ed2caef 100644
--- a/dbms/src/Core/Defines.h
+++ b/dbms/src/Core/Defines.h
@@ -139,3 +139,8 @@
 /// This number is only used for distributed version compatible.
 /// It could be any magic number.
 #define DBMS_DISTRIBUTED_SENDS_MAGIC_NUMBER 0xCAFECABE
+
+#if !__has_include(<sanitizer/asan_interface.h>)
+#   define ASAN_UNPOISON_MEMORY_REGION(a,b)
+#   define ASAN_POISON_MEMORY_REGION(a,b)
+#endif
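Patch 12 drops the per-call-site guards again and instead defines no-op ASAN_(UN)POISON_MEMORY_REGION fallbacks once, in Core/Defines.h. What the annotations buy in an arena is worth spelling out: to ASan the whole chunk is one live allocation, so only manual poisoning catches reads of bytes the arena has not handed out yet. A toy bump allocator as a sketch (illustrative code, not the ClickHouse Arena):

#include <cstddef>
#include <cstdlib>

#if __has_include(<sanitizer/asan_interface.h>)
#    include <sanitizer/asan_interface.h>
#else
#    define ASAN_POISON_MEMORY_REGION(a, b)
#    define ASAN_UNPOISON_MEMORY_REGION(a, b)
#endif

struct ToyArena
{
    char * begin;
    char * pos;
    size_t capacity;

    explicit ToyArena(size_t capacity_)
        : begin(static_cast<char *>(malloc(capacity_))), pos(begin), capacity(capacity_)
    {
        /// Everything is off-limits until alloc() hands it out.
        ASAN_POISON_MEMORY_REGION(begin, capacity);
    }

    char * alloc(size_t size)
    {
        char * res = pos;
        pos += size;
        ASAN_UNPOISON_MEMORY_REGION(res, size);
        return res;
    }

    ~ToyArena()
    {
        /// Unpoison before freeing, for the reason the Chunk destructor
        /// comment above gives: the underlying allocator may have no
        /// ASan integration of its own.
        ASAN_UNPOISON_MEMORY_REGION(begin, capacity);
        free(begin);
    }
};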
From 67c647bb9ed58e181e4be679694d8678c0280d50 Mon Sep 17 00:00:00 2001
From: proller
Date: Wed, 17 Jul 2019 14:43:17 +0300
Subject: [PATCH 13/84] clean

---
 dbms/src/Common/Arena.h | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/dbms/src/Common/Arena.h b/dbms/src/Common/Arena.h
index e4c2e973095..e8b6ada44cb 100644
--- a/dbms/src/Common/Arena.h
+++ b/dbms/src/Common/Arena.h
@@ -137,9 +137,7 @@ public:
         char * res = head->pos;
         head->pos += size;
-
         ASAN_UNPOISON_MEMORY_REGION(res, size + pad_right);
-
         return res;
     }
@@ -156,9 +154,7 @@ public:
         {
             head->pos = static_cast<char *>(head_pos);
             head->pos += size;
-
             ASAN_UNPOISON_MEMORY_REGION(res, size + pad_right);
-
             return res;
         }

From 436fb279d34751a7f4ad98ad48d3b5c1f16a9399 Mon Sep 17 00:00:00 2001
From: proller
Date: Wed, 17 Jul 2019 15:07:10 +0300
Subject: [PATCH 14/84] zstd

---
 dbms/CMakeLists.txt                               | 9 ++++-----
 dbms/src/Compression/CompressedReadBufferBase.cpp | 3 ---
 dbms/src/Compression/CompressedWriteBuffer.cpp    | 3 ---
 dbms/src/Compression/ICompressionCodec.cpp        | 1 -
 4 files changed, 4 insertions(+), 12 deletions(-)

diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt
index f18608f3256..bcb44b468d8 100644
--- a/dbms/CMakeLists.txt
+++ b/dbms/CMakeLists.txt
@@ -235,10 +235,6 @@ target_link_libraries(clickhouse_common_io
         roaring
 )

-if(ZSTD_LIBRARY)
-    target_link_libraries(clickhouse_common_io PUBLIC ${ZSTD_LIBRARY})
-endif()
-
 if (USE_RDKAFKA)
     target_link_libraries(dbms PRIVATE ${CPPKAFKA_LIBRARY} ${RDKAFKA_LIBRARY})
     if(NOT USE_INTERNAL_RDKAFKA_LIBRARY)
@@ -290,11 +286,14 @@ target_include_directories(dbms SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR})
 if (NOT USE_INTERNAL_LZ4_LIBRARY)
     target_include_directories(dbms SYSTEM BEFORE PRIVATE ${LZ4_INCLUDE_DIR})
 endif ()
+
+if(ZSTD_LIBRARY)
+    target_link_libraries(dbms PRIVATE ${ZSTD_LIBRARY})
+endif()
 if (NOT USE_INTERNAL_ZSTD_LIBRARY AND ZSTD_INCLUDE_DIR)
     target_include_directories(dbms SYSTEM BEFORE PRIVATE ${ZSTD_INCLUDE_DIR})
 endif ()

-
 if (NOT USE_INTERNAL_BOOST_LIBRARY)
     target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS})
 endif ()
diff --git a/dbms/src/Compression/CompressedReadBufferBase.cpp b/dbms/src/Compression/CompressedReadBufferBase.cpp
index bd2078175dc..9f3664b4c9f 100644
--- a/dbms/src/Compression/CompressedReadBufferBase.cpp
+++ b/dbms/src/Compression/CompressedReadBufferBase.cpp
@@ -1,11 +1,8 @@
 #include "CompressedReadBufferBase.h"

 #include
-
 #include
 #include
-#include
-
 #include
 #include
 #include
diff --git a/dbms/src/Compression/CompressedWriteBuffer.cpp b/dbms/src/Compression/CompressedWriteBuffer.cpp
index 1285949c863..9dd3c23100f 100644
--- a/dbms/src/Compression/CompressedWriteBuffer.cpp
+++ b/dbms/src/Compression/CompressedWriteBuffer.cpp
@@ -1,8 +1,5 @@
 #include
 #include
-#include
-#include
-#include

 #include
 #include
diff --git a/dbms/src/Compression/ICompressionCodec.cpp b/dbms/src/Compression/ICompressionCodec.cpp
index ddedf8a4c9c..a50001238da 100644
--- a/dbms/src/Compression/ICompressionCodec.cpp
+++ b/dbms/src/Compression/ICompressionCodec.cpp
@@ -7,7 +7,6 @@
 #include
 #include
 #include
-#include

 namespace ProfileEvents
 {
From 969cb2f826757df38dbccbaa4233e6a28a772bca Mon Sep 17 00:00:00 2001
From: Ivan Lezhankin
Date: Mon, 22 Jul 2019 14:32:11 +0300
Subject: [PATCH 15/84] Append _partition virtual column

---
 dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp | 13 +++++++------
 .../Storages/Kafka/ReadBufferFromKafkaConsumer.h  |  1 +
 dbms/src/Storages/Kafka/StorageKafka.cpp          |  3 ++-
 dbms/tests/integration/test_storage_kafka/test.py |  8 ++++----
 4 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp b/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp
index 5b8d80cb062..3ddb54f848b 100644
--- a/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp
+++ b/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp
@@ -19,7 +19,7 @@ KafkaBlockInputStream::KafkaBlockInputStream(
     if (!storage.getSchemaName().empty())
         context.setSetting("format_schema", storage.getSchemaName());

-    virtual_columns = storage.getSampleBlockForColumns({"_topic", "_key", "_offset"}).cloneEmptyColumns();
+    virtual_columns = storage.getSampleBlockForColumns({"_topic", "_key", "_offset", "_partition"}).cloneEmptyColumns();
 }

 KafkaBlockInputStream::~KafkaBlockInputStream()
@@ -57,9 +57,10 @@ void KafkaBlockInputStream::readPrefixImpl()
     auto read_callback = [this]
     {
         const auto * sub_buffer = buffer->subBufferAs<ReadBufferFromKafkaConsumer>();
-        virtual_columns[0]->insert(sub_buffer->currentTopic());  // "topic"
-        virtual_columns[1]->insert(sub_buffer->currentKey());    // "key"
-        virtual_columns[2]->insert(sub_buffer->currentOffset()); // "offset"
+        virtual_columns[0]->insert(sub_buffer->currentTopic());     // "topic"
+        virtual_columns[1]->insert(sub_buffer->currentKey());       // "key"
+        virtual_columns[2]->insert(sub_buffer->currentOffset());    // "offset"
+        virtual_columns[3]->insert(sub_buffer->currentPartition()); // "partition"
     };

     auto child = FormatFactory::instance().getInput(
@@ -76,8 +77,8 @@ Block KafkaBlockInputStream::readImpl()
     if (!block)
         return block;

-    Block virtual_block = storage.getSampleBlockForColumns({"_topic", "_key", "_offset"}).cloneWithColumns(std::move(virtual_columns));
-    virtual_columns = storage.getSampleBlockForColumns({"_topic", "_key", "_offset"}).cloneEmptyColumns();
+    Block virtual_block = storage.getSampleBlockForColumns({"_topic", "_key", "_offset", "_partition"}).cloneWithColumns(std::move(virtual_columns));
+    virtual_columns = storage.getSampleBlockForColumns({"_topic", "_key", "_offset", "_partition"}).cloneEmptyColumns();

     for (const auto & column : virtual_block.getColumnsWithTypeAndName())
         block.insert(column);
diff --git a/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h b/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h
index ac6011cfed0..0de18ba59bf 100644
--- a/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h
+++ b/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h
@@ -30,6 +30,7 @@ public:
     String currentTopic() const { return current[-1].get_topic(); }
     String currentKey() const { return current[-1].get_key(); }
     auto currentOffset() const { return current[-1].get_offset(); }
+    auto currentPartition() const { return current[-1].get_partition(); }

 private:
     using Messages = std::vector<cppkafka::Message>;
diff --git a/dbms/src/Storages/Kafka/StorageKafka.cpp b/dbms/src/Storages/Kafka/StorageKafka.cpp
index 20599c7e4f8..e591649dd6a 100644
--- a/dbms/src/Storages/Kafka/StorageKafka.cpp
+++ b/dbms/src/Storages/Kafka/StorageKafka.cpp
@@ -85,7 +85,8 @@ StorageKafka::StorageKafka(
         columns_,
         ColumnsDescription({{"_topic", std::make_shared<DataTypeString>()},
                             {"_key", std::make_shared<DataTypeString>()},
-                            {"_offset", std::make_shared<DataTypeUInt64>()}}, true))
+                            {"_offset", std::make_shared<DataTypeUInt64>()},
+                            {"_partition", std::make_shared<DataTypeUInt64>()}}, true))
     , table_name(table_name_)
     , database_name(database_name_)
     , global_context(context_)
diff --git a/dbms/tests/integration/test_storage_kafka/test.py b/dbms/tests/integration/test_storage_kafka/test.py
index 3f38b068a22..915665ba998 100644
--- a/dbms/tests/integration/test_storage_kafka/test.py
+++ b/dbms/tests/integration/test_storage_kafka/test.py
@@ -380,7 +380,7 @@ def test_kafka_virtual_columns(kafka_cluster):
     result = ''
     while True:
         time.sleep(1)
-        result += instance.query('SELECT _key, key, _topic, value, _offset FROM test.kafka')
+        result += instance.query('SELECT _key, key, _topic, value, _offset, _partition FROM test.kafka')
         if kafka_check_result(result, False, 'test_kafka_virtual1.reference'):
             break
     kafka_check_result(result, True, 'test_kafka_virtual1.reference')
@@ -397,11 +397,11 @@ def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):
                      kafka_group_name = 'virt2',
                      kafka_format = 'JSONEachRow',
                      kafka_row_delimiter = '\\n';
-        CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64)
+        CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64)
             ENGINE = MergeTree()
             ORDER BY key;
         CREATE MATERIALIZED VIEW test.consumer TO test.view AS
-            SELECT *, _key as kafka_key, _topic as topic, _offset as offset FROM test.kafka;
+            SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition FROM test.kafka;
     ''')

     messages = []
@@ -411,7 +411,7 @@ def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):

     while True:
         time.sleep(1)
-        result = instance.query('SELECT kafka_key, key, topic, value, offset FROM test.view')
+        result = instance.query('SELECT kafka_key, key, topic, value, offset, partition FROM test.view')
         if kafka_check_result(result, False, 'test_kafka_virtual2.reference'):
             break
     kafka_check_result(result, True, 'test_kafka_virtual2.reference')

From 467b654318dc32e9c912536cf605f433b3dedc56 Mon Sep 17 00:00:00 2001
From: alexey-milovidov
Date: Mon, 22 Jul 2019 22:03:16 +0300
Subject: [PATCH 16/84] Update CMakeLists.txt

---
 dbms/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt
index dced3ed7b93..ee7fba8328d 100644
--- a/dbms/CMakeLists.txt
+++ b/dbms/CMakeLists.txt
@@ -291,7 +291,7 @@ if (NOT USE_INTERNAL_LZ4_LIBRARY)
     target_include_directories(dbms SYSTEM BEFORE PRIVATE ${LZ4_INCLUDE_DIR})
 endif ()

-if(ZSTD_LIBRARY)
+if (ZSTD_LIBRARY)
     target_link_libraries(dbms PRIVATE ${ZSTD_LIBRARY})
 endif()

From 4858b067f5edf19df833efd901e588e89e04677b Mon Sep 17 00:00:00 2001
From: alexey-milovidov
Date: Mon, 22 Jul 2019 22:04:19 +0300
Subject: [PATCH 17/84] Update Defines.h

---
 dbms/src/Core/Defines.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/dbms/src/Core/Defines.h b/dbms/src/Core/Defines.h
index 5cd6326f445..c83aa984fad 100644
--- a/dbms/src/Core/Defines.h
+++ b/dbms/src/Core/Defines.h
@@ -141,8 +141,8 @@
 #define DBMS_DISTRIBUTED_SENDS_MAGIC_NUMBER 0xCAFECABE

 #if !__has_include(<sanitizer/asan_interface.h>)
-#   define ASAN_UNPOISON_MEMORY_REGION(a,b)
-#   define ASAN_POISON_MEMORY_REGION(a,b)
+#   define ASAN_UNPOISON_MEMORY_REGION(a, b)
+#   define ASAN_POISON_MEMORY_REGION(a, b)
 #endif

 /// A macro for suppressing warnings about unused variables or function results.
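The trailing context above is the comment that sits over the UNUSED helper in Defines.h, which patch 36 below leans on for the stock-libunwind fallback path. A sketch of what such a macro typically looks like (the exact ClickHouse definition may differ; this is an assumption for illustration):

/// Casting to void consumes a value without using it, silencing
/// -Wunused-parameter / -Wunused-variable.
#define UNUSED(...) (void)(__VA_ARGS__)

int probe(int thread_id, int clock_type)
{
    UNUSED(thread_id);  /// One value per call: a variadic expansion like
    UNUSED(clock_type); /// (void)(a, b) can itself trip -Wunused-value on
                        /// some compilers, which is plausibly why patch 36
                        /// splits UNUSED(a, b, c, d) into separate calls.
    return 0;
}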
From 8e8ebf9c441d8408c7698d5e1a6fb1fa1572e48d Mon Sep 17 00:00:00 2001
From: proller
Date: Tue, 23 Jul 2019 12:32:08 +0300
Subject: [PATCH 18/84] Try fix -Wsign-compare

---
 contrib/libunwind              | 2 +-
 dbms/src/IO/ReadBufferAIO.cpp  | 5 +----
 dbms/src/IO/WriteBufferAIO.cpp | 5 +----
 3 files changed, 3 insertions(+), 9 deletions(-)

diff --git a/contrib/libunwind b/contrib/libunwind
index 17a48fbfa79..ec86b1c6a2c 160000
--- a/contrib/libunwind
+++ b/contrib/libunwind
@@ -1 +1 @@
-Subproject commit 17a48fbfa7913ee889960a698516bd3ba51d63ee
+Subproject commit ec86b1c6a2c6b8ba316f429db9a6d4122dd12710
diff --git a/dbms/src/IO/ReadBufferAIO.cpp b/dbms/src/IO/ReadBufferAIO.cpp
index eccebf854f0..50845330587 100644
--- a/dbms/src/IO/ReadBufferAIO.cpp
+++ b/dbms/src/IO/ReadBufferAIO.cpp
@@ -254,12 +254,9 @@ void ReadBufferAIO::prepare()
     /// Region of the disk from which we want to read data.
     const off_t region_begin = first_unread_pos_in_file;

-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wsign-compare"
-    if ((requested_byte_count > std::numeric_limits<off_t>::max()) ||
+    if ((requested_byte_count > static_cast<size_t>(std::numeric_limits<off_t>::max())) ||
         (first_unread_pos_in_file > (std::numeric_limits<off_t>::max() - static_cast<off_t>(requested_byte_count))))
         throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR);
-#pragma GCC diagnostic pop

     const off_t region_end = first_unread_pos_in_file + requested_byte_count;

diff --git a/dbms/src/IO/WriteBufferAIO.cpp b/dbms/src/IO/WriteBufferAIO.cpp
index dcceff96d02..a558768c64a 100644
--- a/dbms/src/IO/WriteBufferAIO.cpp
+++ b/dbms/src/IO/WriteBufferAIO.cpp
@@ -274,12 +274,9 @@ void WriteBufferAIO::prepare()
     /// Region of the disk in which we want to write data.
     const off_t region_begin = pos_in_file;

-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wsign-compare"
-    if ((flush_buffer.offset() > std::numeric_limits<off_t>::max()) ||
+    if ((flush_buffer.offset() > static_cast<size_t>(std::numeric_limits<off_t>::max())) ||
         (pos_in_file > (std::numeric_limits<off_t>::max() - static_cast<off_t>(flush_buffer.offset()))))
         throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR);
-#pragma GCC diagnostic pop

     const off_t region_end = pos_in_file + flush_buffer.offset();
     const size_t region_size = region_end - region_begin;

From cb882d61bbedb82be186a08117d942b3564d61bb Mon Sep 17 00:00:00 2001
From: proller
Date: Tue, 23 Jul 2019 12:54:15 +0300
Subject: [PATCH 19/84] Freebsd fix

---
 dbms/src/Common/QueryProfiler.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/dbms/src/Common/QueryProfiler.cpp b/dbms/src/Common/QueryProfiler.cpp
index 9832b64ee8b..aedab2a9eff 100644
--- a/dbms/src/Common/QueryProfiler.cpp
+++ b/dbms/src/Common/QueryProfiler.cpp
@@ -78,7 +78,13 @@ QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(const Int32 thread_id, const
     struct sigevent sev;
     sev.sigev_notify = SIGEV_THREAD_ID;
     sev.sigev_signo = pause_signal;
+
+#if defined(__FreeBSD__)
+    sev._sigev_un._threadid = thread_id;
+#else
     sev._sigev_un._tid = thread_id;
+#endif
+
     if (timer_create(clock_type, &sev, &timer_id))
         throwFromErrno("Failed to create thread timer", ErrorCodes::CANNOT_CREATE_TIMER);

From 24725b62a8f552da81a9936732b16d63e939ff71 Mon Sep 17 00:00:00 2001
From: proller
Date: Tue, 23 Jul 2019 14:03:35 +0300
Subject: [PATCH 20/84] Add missing libs

---
 contrib/brotli-cmake/CMakeLists.txt              | 1 +
 contrib/double-conversion-cmake/CMakeLists.txt   | 1 -
 contrib/h3-cmake/CMakeLists.txt                  | 1 +
 contrib/librdkafka-cmake/CMakeLists.txt          | 4 ++--
 contrib/libunwind-cmake/CMakeLists.txt           | 1 +
 contrib/libxml2-cmake/CMakeLists.txt             | 2 +-
 contrib/mariadb-connector-c-cmake/CMakeLists.txt | 2 ++
 contrib/unixodbc-cmake/CMakeLists.txt            | 1 +
 8 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/contrib/brotli-cmake/CMakeLists.txt b/contrib/brotli-cmake/CMakeLists.txt
index 00fea50fc43..9bc6d3d89e7 100644
--- a/contrib/brotli-cmake/CMakeLists.txt
+++ b/contrib/brotli-cmake/CMakeLists.txt
@@ -31,3 +31,4 @@ set(SRCS
 add_library(brotli ${SRCS})

 target_include_directories(brotli PUBLIC ${BROTLI_SOURCE_DIR}/include)
+target_link_libraries(brotli PRIVATE m)
diff --git a/contrib/double-conversion-cmake/CMakeLists.txt b/contrib/double-conversion-cmake/CMakeLists.txt
index f91b0fb74c1..8b808b8de9a 100644
--- a/contrib/double-conversion-cmake/CMakeLists.txt
+++ b/contrib/double-conversion-cmake/CMakeLists.txt
@@ -11,4 +11,3 @@ ${LIBRARY_DIR}/double-conversion/fixed-dtoa.cc
 ${LIBRARY_DIR}/double-conversion/strtod.cc)

 target_include_directories(double-conversion SYSTEM PUBLIC "${LIBRARY_DIR}")
-
diff --git a/contrib/h3-cmake/CMakeLists.txt b/contrib/h3-cmake/CMakeLists.txt
index 5df0a205a34..951e5e83bdc 100644
--- a/contrib/h3-cmake/CMakeLists.txt
+++ b/contrib/h3-cmake/CMakeLists.txt
@@ -25,3 +25,4 @@ add_library(h3 ${SRCS})
 target_include_directories(h3 SYSTEM PUBLIC ${H3_SOURCE_DIR}/include)
 target_include_directories(h3 SYSTEM PUBLIC ${H3_BINARY_DIR}/include)
 target_compile_definitions(h3 PRIVATE H3_HAVE_VLA)
+target_link_libraries(h3 PRIVATE m)
diff --git a/contrib/librdkafka-cmake/CMakeLists.txt b/contrib/librdkafka-cmake/CMakeLists.txt
index 54149d0db6f..d060994ef05 100644
--- a/contrib/librdkafka-cmake/CMakeLists.txt
+++ b/contrib/librdkafka-cmake/CMakeLists.txt
@@ -61,7 +61,7 @@ add_library(rdkafka ${SRCS})
 target_include_directories(rdkafka SYSTEM PUBLIC include)
 target_include_directories(rdkafka SYSTEM PUBLIC ${RDKAFKA_SOURCE_DIR}) # Because weird logic with "include_next" is used.
 target_include_directories(rdkafka SYSTEM PRIVATE ${ZSTD_INCLUDE_DIR}/common) # Because wrong path to "zstd_errors.h" is used.
-target_link_libraries(rdkafka PUBLIC ${ZLIB_LIBRARIES} ${ZSTD_LIBRARY} ${LZ4_LIBRARY} ${LIBGSASL_LIBRARY})
+target_link_libraries(rdkafka PRIVATE ${ZLIB_LIBRARIES} ${ZSTD_LIBRARY} ${LZ4_LIBRARY} ${LIBGSASL_LIBRARY} Threads::Threads)
 if(OPENSSL_SSL_LIBRARY AND OPENSSL_CRYPTO_LIBRARY)
-    target_link_libraries(rdkafka PUBLIC ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY})
+    target_link_libraries(rdkafka PRIVATE ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY})
 endif()
diff --git a/contrib/libunwind-cmake/CMakeLists.txt b/contrib/libunwind-cmake/CMakeLists.txt
index 3d4cd319089..993323d56c0 100644
--- a/contrib/libunwind-cmake/CMakeLists.txt
+++ b/contrib/libunwind-cmake/CMakeLists.txt
@@ -29,3 +29,4 @@ add_library(unwind_static ${LIBUNWIND_SOURCES})
 target_include_directories(unwind_static PUBLIC ${LIBUNWIND_SOURCE_DIR}/include)
 target_compile_definitions(unwind_static PRIVATE -D_LIBUNWIND_NO_HEAP=1 -D_DEBUG -D_LIBUNWIND_IS_NATIVE_ONLY)
 target_compile_options(unwind_static PRIVATE -fno-exceptions -funwind-tables -fno-sanitize=all -nostdinc++ -fno-rtti)
+target_link_libraries(unwind_static PRIVATE Threads::Threads ${CMAKE_DL_LIBS})
diff --git a/contrib/libxml2-cmake/CMakeLists.txt b/contrib/libxml2-cmake/CMakeLists.txt
index 8783fca774e..827ed8fefd8 100644
--- a/contrib/libxml2-cmake/CMakeLists.txt
+++ b/contrib/libxml2-cmake/CMakeLists.txt
@@ -52,7 +52,7 @@ set(SRCS
 )
 add_library(libxml2 ${SRCS})

-target_link_libraries(libxml2 ${ZLIB_LIBRARIES})
+target_link_libraries(libxml2 PRIVATE ${ZLIB_LIBRARIES} m ${CMAKE_DL_LIBS})

 target_include_directories(libxml2 PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include)
 target_include_directories(libxml2 PUBLIC ${LIBXML2_SOURCE_DIR}/include)
diff --git a/contrib/mariadb-connector-c-cmake/CMakeLists.txt b/contrib/mariadb-connector-c-cmake/CMakeLists.txt
index 1c9b7a5ec9c..f9bd2490fba 100644
--- a/contrib/mariadb-connector-c-cmake/CMakeLists.txt
+++ b/contrib/mariadb-connector-c-cmake/CMakeLists.txt
@@ -60,6 +60,8 @@ endif()

 add_library(mysqlclient ${SRCS})

+target_link_libraries(mysqlclient PRIVATE ${CMAKE_DL_LIBS} m Threads::Threads)
+
 if(OPENSSL_LIBRARIES)
     target_link_libraries(mysqlclient PRIVATE ${OPENSSL_LIBRARIES})
     target_compile_definitions(mysqlclient PRIVATE -D HAVE_OPENSSL -D HAVE_TLS)
diff --git a/contrib/unixodbc-cmake/CMakeLists.txt b/contrib/unixodbc-cmake/CMakeLists.txt
index 1715747191c..98c1e2268cc 100644
--- a/contrib/unixodbc-cmake/CMakeLists.txt
+++ b/contrib/unixodbc-cmake/CMakeLists.txt
@@ -32,6 +32,7 @@ target_include_directories(ltdl PUBLIC ${ODBC_SOURCE_DIR}/libltdl/libltdl)

 target_compile_definitions(ltdl PRIVATE -DHAVE_CONFIG_H -DLTDL -DLTDLOPEN=libltdlc)
 target_compile_options(ltdl PRIVATE -Wno-constant-logical-operand -Wno-unknown-warning-option -O2)
+target_link_libraries(ltdl PRIVATE ${CMAKE_DL_LIBS})


 set(SRCS
From 3a1bc23370156f3bbf7544cbf02da5f8197f3f92 Mon Sep 17 00:00:00 2001
From: proller
Date: Tue, 23 Jul 2019 20:42:48 +0300
Subject: [PATCH 21/84] Fix double-conversion include for copy-headers

---
 cmake/print_include_directories.cmake          | 3 +++
 contrib/double-conversion-cmake/CMakeLists.txt | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/cmake/print_include_directories.cmake b/cmake/print_include_directories.cmake
index 05be8f909ee..b39ca866148 100644
--- a/cmake/print_include_directories.cmake
+++ b/cmake/print_include_directories.cmake
@@ -16,6 +16,9 @@ list(APPEND dirs ${dirs1})
 get_property (dirs1 TARGET roaring PROPERTY INCLUDE_DIRECTORIES)
 list(APPEND dirs ${dirs1})

+get_property (dirs1 TARGET double-conversion PROPERTY INCLUDE_DIRECTORIES)
+list(APPEND dirs ${dirs1})
+
 if (USE_INTERNAL_BOOST_LIBRARY)
     get_property (dirs1 TARGET ${Boost_PROGRAM_OPTIONS_LIBRARY} PROPERTY INCLUDE_DIRECTORIES)
     list(APPEND dirs ${dirs1})
diff --git a/contrib/double-conversion-cmake/CMakeLists.txt b/contrib/double-conversion-cmake/CMakeLists.txt
index 8b808b8de9a..0690731e1b1 100644
--- a/contrib/double-conversion-cmake/CMakeLists.txt
+++ b/contrib/double-conversion-cmake/CMakeLists.txt
@@ -10,4 +10,4 @@ ${LIBRARY_DIR}/double-conversion/fast-dtoa.cc
 ${LIBRARY_DIR}/double-conversion/fixed-dtoa.cc
 ${LIBRARY_DIR}/double-conversion/strtod.cc)

-target_include_directories(double-conversion SYSTEM PUBLIC "${LIBRARY_DIR}")
+target_include_directories(double-conversion SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}")

From 60c4eaedb8f23f9c0030e245128cc378d2f8445c Mon Sep 17 00:00:00 2001
From: proller
Date: Tue, 23 Jul 2019 22:23:57 +0300
Subject: [PATCH 22/84] Fix zlib link

---
 dbms/src/Functions/CMakeLists.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/dbms/src/Functions/CMakeLists.txt b/dbms/src/Functions/CMakeLists.txt
index 13b294197cf..5b8bfca2cd6 100644
--- a/dbms/src/Functions/CMakeLists.txt
+++ b/dbms/src/Functions/CMakeLists.txt
@@ -22,6 +22,7 @@ target_link_libraries(clickhouse_functions
         ${BASE64_LIBRARY}
     PRIVATE
+        ${ZLIB_LIBRARIES}
         ${Boost_FILESYSTEM_LIBRARY}
 )

From 7d8e9658827239a750f22b0d1dba9a5abcb8a210 Mon Sep 17 00:00:00 2001
From: proller
Date: Wed, 24 Jul 2019 19:58:37 +0300
Subject: [PATCH 23/84] Fix includes for arcadia

---
 dbms/src/Formats/ParquetBlockInputStream.cpp  | 3 +--
 dbms/src/Formats/ParquetBlockOutputStream.cpp | 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/dbms/src/Formats/ParquetBlockInputStream.cpp b/dbms/src/Formats/ParquetBlockInputStream.cpp
index deba953bab4..6b5b29e1e11 100644
--- a/dbms/src/Formats/ParquetBlockInputStream.cpp
+++ b/dbms/src/Formats/ParquetBlockInputStream.cpp
@@ -1,6 +1,5 @@
-#include "config_formats.h"
+#include "ParquetBlockInputStream.h"
 #if USE_PARQUET
-#    include "ParquetBlockInputStream.h"

 #    include
 #    include
diff --git a/dbms/src/Formats/ParquetBlockOutputStream.cpp b/dbms/src/Formats/ParquetBlockOutputStream.cpp
index f03aa181bc3..6b595962437 100644
--- a/dbms/src/Formats/ParquetBlockOutputStream.cpp
+++ b/dbms/src/Formats/ParquetBlockOutputStream.cpp
@@ -1,6 +1,5 @@
-#include "config_formats.h"
+#include "ParquetBlockOutputStream.h"
 #if USE_PARQUET
-#    include "ParquetBlockOutputStream.h"

 // TODO: clean includes
 #    include

From c8a274f2ed95127da3a216107ff771fe30b7ae3f Mon Sep 17 00:00:00 2001
From: proller
Date: Wed, 24 Jul 2019 20:09:14 +0300
Subject: [PATCH 24/84] Fix includes for arcadia

---
 dbms/src/Formats/ParquetBlockInputStream.cpp  | 2 +-
 dbms/src/Formats/ParquetBlockOutputStream.cpp | 2 +-
 dbms/src/Formats/ProtobufColumnMatcher.cpp    | 5 +----
 dbms/src/Formats/ProtobufRowInputStream.cpp   | 4 +---
 4 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/dbms/src/Formats/ParquetBlockInputStream.cpp b/dbms/src/Formats/ParquetBlockInputStream.cpp
index 6b5b29e1e11..e75d5e1d455 100644
--- a/dbms/src/Formats/ParquetBlockInputStream.cpp
+++ b/dbms/src/Formats/ParquetBlockInputStream.cpp
@@ -1,6 +1,6 @@
 #include "ParquetBlockInputStream.h"
-#if USE_PARQUET
+#if USE_PARQUET

 #    include
 #    include
 #    include
diff --git a/dbms/src/Formats/ParquetBlockOutputStream.cpp b/dbms/src/Formats/ParquetBlockOutputStream.cpp
index 6b595962437..b0d4cd7818c 100644
--- a/dbms/src/Formats/ParquetBlockOutputStream.cpp
+++ b/dbms/src/Formats/ParquetBlockOutputStream.cpp
@@ -1,6 +1,6 @@
 #include "ParquetBlockOutputStream.h"
-#if USE_PARQUET
+#if USE_PARQUET

 // TODO: clean includes
 #    include
 #    include
diff --git a/dbms/src/Formats/ProtobufColumnMatcher.cpp b/dbms/src/Formats/ProtobufColumnMatcher.cpp
index 317b6388ea5..ed9bd27bb34 100644
--- a/dbms/src/Formats/ProtobufColumnMatcher.cpp
+++ b/dbms/src/Formats/ProtobufColumnMatcher.cpp
@@ -1,8 +1,5 @@
-#include "config_formats.h"
-#if USE_PROTOBUF
-
 #include "ProtobufColumnMatcher.h"
-
+#if USE_PROTOBUF
 #include
 #include
 #include
diff --git a/dbms/src/Formats/ProtobufRowInputStream.cpp b/dbms/src/Formats/ProtobufRowInputStream.cpp
index 1c4193b9f1a..ff5717c1bca 100644
--- a/dbms/src/Formats/ProtobufRowInputStream.cpp
+++ b/dbms/src/Formats/ProtobufRowInputStream.cpp
@@ -1,8 +1,6 @@
-#include "config_formats.h"
-#if USE_PROTOBUF
-
 #include "ProtobufRowInputStream.h"

+#if USE_PROTOBUF
 #include
 #include
 #include

From a734231d425e2ade6cc1421a5c88f27510f26291 Mon Sep 17 00:00:00 2001
From: proller
Date: Wed, 24 Jul 2019 20:49:02 +0300
Subject: [PATCH 25/84] Fix includes for arcadia

---
 dbms/src/Common/MiAllocator.cpp              | 4 +---
 dbms/src/Common/MiAllocator.h                | 7 +++----
 dbms/src/Formats/CapnProtoRowInputStream.cpp | 4 +---
 dbms/src/IO/ReadBufferFromHDFS.cpp           | 4 +---
 dbms/src/IO/ReadBufferFromHDFS.h             | 2 ++
 5 files changed, 8 insertions(+), 13 deletions(-)

diff --git a/dbms/src/Common/MiAllocator.cpp b/dbms/src/Common/MiAllocator.cpp
index cafa6c135f7..04e61a5de16 100644
--- a/dbms/src/Common/MiAllocator.cpp
+++ b/dbms/src/Common/MiAllocator.cpp
@@ -1,8 +1,6 @@
-#include <Common/config.h>
+#include "MiAllocator.h"

 #if USE_MIMALLOC
-
-#include "MiAllocator.h"
 #include
 #include
diff --git a/dbms/src/Common/MiAllocator.h b/dbms/src/Common/MiAllocator.h
index 48cfc6f9ab4..127be82434b 100644
--- a/dbms/src/Common/MiAllocator.h
+++ b/dbms/src/Common/MiAllocator.h
@@ -2,10 +2,7 @@

 #include <Common/config.h>

-#if !USE_MIMALLOC
-#error "do not include this file until USE_MIMALLOC is set to 1"
-#endif
-
+#if USE_MIMALLOC
 #include

 namespace DB
@@ -26,3 +23,5 @@ struct MiAllocator
 };

 }
+
+#endif
diff --git a/dbms/src/Formats/CapnProtoRowInputStream.cpp b/dbms/src/Formats/CapnProtoRowInputStream.cpp
index 96c3c5fded3..89d652ebb12 100644
--- a/dbms/src/Formats/CapnProtoRowInputStream.cpp
+++ b/dbms/src/Formats/CapnProtoRowInputStream.cpp
@@ -1,8 +1,6 @@
-#include "config_formats.h"
-#if USE_CAPNP
-
 #include "CapnProtoRowInputStream.h"

+#if USE_CAPNP
 #include
 #include
 #include
diff --git a/dbms/src/IO/ReadBufferFromHDFS.cpp b/dbms/src/IO/ReadBufferFromHDFS.cpp
index daedd9b0481..9c44048d4ce 100644
--- a/dbms/src/IO/ReadBufferFromHDFS.cpp
+++ b/dbms/src/IO/ReadBufferFromHDFS.cpp
@@ -1,8 +1,6 @@
-#include <Common/config.h>
+#include "ReadBufferFromHDFS.h"

 #if USE_HDFS
-
-#include <IO/ReadBufferFromHDFS.h>
 #include
 #include
 #include
diff --git a/dbms/src/IO/ReadBufferFromHDFS.h b/dbms/src/IO/ReadBufferFromHDFS.h
index 81a1d4b75dc..6d00c8b2310 100644
--- a/dbms/src/IO/ReadBufferFromHDFS.h
+++ b/dbms/src/IO/ReadBufferFromHDFS.h
@@ -1,5 +1,7 @@
 #pragma once

+#include <Common/config.h>
+
 #if USE_HDFS
 #include
 #include

From 16de323bbea40f1a134b9d80a82e9209d517b2e4 Mon Sep 17 00:00:00 2001
From: proller
Date: Wed, 24 Jul 2019 21:04:26 +0300
Subject: [PATCH 26/84] Freebsd fix

---
 cmake/print_include_directories.cmake | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/cmake/print_include_directories.cmake b/cmake/print_include_directories.cmake
index b39ca866148..fe5e9e8e6e9 100644
--- a/cmake/print_include_directories.cmake
+++ b/cmake/print_include_directories.cmake
@@ -16,10 +16,12 @@ list(APPEND dirs ${dirs1})
 get_property (dirs1 TARGET roaring PROPERTY INCLUDE_DIRECTORIES)
 list(APPEND dirs ${dirs1})

-get_property (dirs1 TARGET double-conversion PROPERTY INCLUDE_DIRECTORIES)
-list(APPEND dirs ${dirs1})
+if (TARGET double-conversion)
+    get_property (dirs1 TARGET double-conversion PROPERTY INCLUDE_DIRECTORIES)
+    list(APPEND dirs ${dirs1})
+endif ()

-if (USE_INTERNAL_BOOST_LIBRARY)
+if (TARGET ${Boost_PROGRAM_OPTIONS_LIBRARY})
     get_property (dirs1 TARGET ${Boost_PROGRAM_OPTIONS_LIBRARY} PROPERTY INCLUDE_DIRECTORIES)
     list(APPEND dirs ${dirs1})
 endif ()

From 6cec39ce9641cbb142a0d5fe7d52a9ef07145d7e Mon Sep 17 00:00:00 2001
From: proller
Date: Wed, 24 Jul 2019 21:35:45 +0300
Subject: [PATCH 27/84] Arcadia fixes

---
 dbms/src/Formats/ProtobufReader.h | 2 +-
 dbms/src/Formats/ProtobufWriter.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/dbms/src/Formats/ProtobufReader.h b/dbms/src/Formats/ProtobufReader.h
index 41088e54ac3..9c5fdfb5fe4 100644
--- a/dbms/src/Formats/ProtobufReader.h
+++ b/dbms/src/Formats/ProtobufReader.h
@@ -9,7 +9,7 @@
 #if USE_PROTOBUF

 #include
-#include <Formats/ProtobufColumnMatcher.h>
+#include "ProtobufColumnMatcher.h"
 #include
 #include
diff --git a/dbms/src/Formats/ProtobufWriter.h b/dbms/src/Formats/ProtobufWriter.h
index d133a8b1214..f11fbcbc391 100644
--- a/dbms/src/Formats/ProtobufWriter.h
+++ b/dbms/src/Formats/ProtobufWriter.h
@@ -7,7 +7,7 @@
 #include "config_formats.h"
 #if USE_PROTOBUF

-#include <Formats/ProtobufColumnMatcher.h>
+#include "ProtobufColumnMatcher.h"
 #include
 #include
 #include

From 232d59bdcfa0a12fc6918d4d2580e60958237c3a Mon Sep 17 00:00:00 2001
From: alesapin
Date: Thu, 25 Jul 2019 23:26:27 +0300
Subject: [PATCH 28/84] Update QueryProfiler.cpp

---
 dbms/src/Common/QueryProfiler.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/dbms/src/Common/QueryProfiler.cpp b/dbms/src/Common/QueryProfiler.cpp
index aedab2a9eff..5fd1c181994 100644
--- a/dbms/src/Common/QueryProfiler.cpp
+++ b/dbms/src/Common/QueryProfiler.cpp
@@ -84,7 +84,6 @@ QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(const Int32 thread_id, const
 #else
     sev._sigev_un._tid = thread_id;
 #endif
-
     if (timer_create(clock_type, &sev, &timer_id))
         throwFromErrno("Failed to create thread timer", ErrorCodes::CANNOT_CREATE_TIMER);
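Patches 19 and 28 both touch this sigevent setup: glibc and FreeBSD expose the target thread id under different internal union member names, hence the #if. A stripped-down sketch of the per-thread profiling timer this code configures, with error handling elided (field spellings as in the diff; treat this as a sketch, not the full QueryProfiler):

#include <csignal>
#include <ctime>

timer_t make_thread_timer(int thread_id, int pause_signal, clockid_t clock_type)
{
    struct sigevent sev = {};
    /// Deliver pause_signal to one specific thread, not the whole process.
    sev.sigev_notify = SIGEV_THREAD_ID;
    sev.sigev_signo = pause_signal;
#if defined(__FreeBSD__)
    sev._sigev_un._threadid = thread_id;
#else
    sev._sigev_un._tid = thread_id;
#endif
    timer_t timer_id;
    /// clock_type is e.g. CLOCK_MONOTONIC for real-time profiling or
    /// CLOCK_THREAD_CPUTIME_ID for CPU profiling; returns -1 on failure.
    timer_create(clock_type, &sev, &timer_id);
    return timer_id;
}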
From 1bbd192543361b78df22180e0aeaa443fc64508b Mon Sep 17 00:00:00 2001
From: proller
Date: Mon, 29 Jul 2019 16:45:04 +0300
Subject: [PATCH 29/84] Freebsd fix

---
 dbms/src/Common/TraceCollector.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/dbms/src/Common/TraceCollector.cpp b/dbms/src/Common/TraceCollector.cpp
index e66a580289d..bfb49c4ef75 100644
--- a/dbms/src/Common/TraceCollector.cpp
+++ b/dbms/src/Common/TraceCollector.cpp
@@ -46,6 +46,7 @@ TraceCollector::TraceCollector(std::shared_ptr<TraceLog> & trace_log)
     if (-1 == fcntl(trace_pipe.fds_rw[1], F_SETFL, flags | O_NONBLOCK))
         throwFromErrno("Cannot set non-blocking mode of pipe", ErrorCodes::CANNOT_FCNTL);

+#if !defined(__FreeBSD__)
     /** Increase pipe size to avoid slowdown during fine-grained trace collection.
       */
     constexpr int max_pipe_capacity_to_set = 1048576;
@@ -57,6 +58,7 @@ TraceCollector::TraceCollector(std::shared_ptr<TraceLog> & trace_log)
         throwFromErrno("Cannot increase pipe capacity to " + toString(pipe_size * 2), ErrorCodes::CANNOT_FCNTL);

     LOG_TRACE(log, "Pipe capacity is " << formatReadableSizeWithBinarySuffix(std::min(pipe_size, max_pipe_capacity_to_set)));
+#endif

     thread = ThreadFromGlobalPool(&TraceCollector::run, this);
 }

From ce682a81b9e353c795040941372a5c0c55b9a1d3 Mon Sep 17 00:00:00 2001
From: proller
Date: Mon, 29 Jul 2019 21:56:55 +0300
Subject: [PATCH 30/84] clean

---
 contrib/libunwind | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/contrib/libunwind b/contrib/libunwind
index ec86b1c6a2c..5afe6d87ae9 160000
--- a/contrib/libunwind
+++ b/contrib/libunwind
@@ -1 +1 @@
-Subproject commit ec86b1c6a2c6b8ba316f429db9a6d4122dd12710
+Subproject commit 5afe6d87ae9e66485c7fcb106d2f7c2c0359c8f6

From e63c1e4b811bff61ff2d1731e9b19085f410abb1 Mon Sep 17 00:00:00 2001
From: proller
Date: Tue, 30 Jul 2019 22:00:30 +0300
Subject: [PATCH 31/84] 19.11: Fixes for arcadia porting (#6223)

---
 CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0ed76689a5c..ab13edb8940 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -258,7 +258,7 @@ endif ()
 if (NOT SANITIZE AND NOT SPLIT_SHARED_LIBRARIES)
     set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-undefined")
     set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-undefined")
-endif()
+endif ()

 include (cmake/find_unwind.cmake)

From fffa1ec387f416adb5a43dddd62af15e48898d96 Mon Sep 17 00:00:00 2001
From: proller
Date: Wed, 31 Jul 2019 17:00:45 +0300
Subject: [PATCH 32/84] fix glibc-compatibility

---
 CMakeLists.txt                                   | 6 ++++++
 contrib/brotli-cmake/CMakeLists.txt              | 2 +-
 contrib/h3-cmake/CMakeLists.txt                  | 2 +-
 contrib/libxml2-cmake/CMakeLists.txt             | 2 +-
 contrib/mariadb-connector-c-cmake/CMakeLists.txt | 2 +-
 5 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index e9a433d80b4..3223130632f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -342,6 +342,7 @@ if (OS_LINUX AND NOT UNBUNDLED AND (GLIBC_COMPATIBILITY OR USE_INTERNAL_UNWIND_L
         # FIXME: glibc-compatibility may be non-static in some builds!
         set (DEFAULT_LIBS "${DEFAULT_LIBS} ${ClickHouse_BINARY_DIR}/libs/libglibc-compatibility/libglibc-compatibility${${CMAKE_POSTFIX_VARIABLE}}.a")
+        set (M_OR_GLIBC_LIBRARY glibc-compatibility)
     endif ()

     # Add Libc. GLIBC is actually a collection of interdependent libraries.
@@ -352,6 +353,11 @@ if (OS_LINUX AND NOT UNBUNDLED AND (GLIBC_COMPATIBILITY OR USE_INTERNAL_UNWIND_L
     message(STATUS "Default libraries: ${DEFAULT_LIBS}")
 endif ()

+if (NOT M_OR_GLIBC_LIBRARY)
+    set (M_OR_GLIBC_LIBRARY m)
+endif ()
+message(STATUS "Using m library=${M_OR_GLIBC_LIBRARY}")
+
 if (DEFAULT_LIBS)
     # Add default libs to all targets as the last dependency.
     set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
diff --git a/contrib/brotli-cmake/CMakeLists.txt b/contrib/brotli-cmake/CMakeLists.txt
index 9bc6d3d89e7..3c995467c9d 100644
--- a/contrib/brotli-cmake/CMakeLists.txt
+++ b/contrib/brotli-cmake/CMakeLists.txt
@@ -31,4 +31,4 @@ set(SRCS
 add_library(brotli ${SRCS})

 target_include_directories(brotli PUBLIC ${BROTLI_SOURCE_DIR}/include)
-target_link_libraries(brotli PRIVATE m)
+target_link_libraries(brotli PRIVATE ${M_OR_GLIBC_LIBRARY})
diff --git a/contrib/h3-cmake/CMakeLists.txt b/contrib/h3-cmake/CMakeLists.txt
index 951e5e83bdc..5ad5f3fdb8d 100644
--- a/contrib/h3-cmake/CMakeLists.txt
+++ b/contrib/h3-cmake/CMakeLists.txt
@@ -25,4 +25,4 @@ add_library(h3 ${SRCS})
 target_include_directories(h3 SYSTEM PUBLIC ${H3_SOURCE_DIR}/include)
 target_include_directories(h3 SYSTEM PUBLIC ${H3_BINARY_DIR}/include)
 target_compile_definitions(h3 PRIVATE H3_HAVE_VLA)
-target_link_libraries(h3 PRIVATE m)
+target_link_libraries(h3 PRIVATE ${M_OR_GLIBC_LIBRARY})
diff --git a/contrib/libxml2-cmake/CMakeLists.txt b/contrib/libxml2-cmake/CMakeLists.txt
index 827ed8fefd8..ef362875e08 100644
--- a/contrib/libxml2-cmake/CMakeLists.txt
+++ b/contrib/libxml2-cmake/CMakeLists.txt
@@ -52,7 +52,7 @@ set(SRCS
 )
 add_library(libxml2 ${SRCS})

-target_link_libraries(libxml2 PRIVATE ${ZLIB_LIBRARIES} m ${CMAKE_DL_LIBS})
+target_link_libraries(libxml2 PRIVATE ${ZLIB_LIBRARIES} ${M_OR_GLIBC_LIBRARY} ${CMAKE_DL_LIBS})

 target_include_directories(libxml2 PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include)
 target_include_directories(libxml2 PUBLIC ${LIBXML2_SOURCE_DIR}/include)
diff --git a/contrib/mariadb-connector-c-cmake/CMakeLists.txt b/contrib/mariadb-connector-c-cmake/CMakeLists.txt
index 1c9b7a5ec9c..f9bd2490fba 100644
--- a/contrib/mariadb-connector-c-cmake/CMakeLists.txt
+++ b/contrib/mariadb-connector-c-cmake/CMakeLists.txt
@@ -60,7 +60,7 @@ endif()

 add_library(mysqlclient ${SRCS})

-target_link_libraries(mysqlclient PRIVATE ${CMAKE_DL_LIBS} m Threads::Threads)
+target_link_libraries(mysqlclient PRIVATE ${CMAKE_DL_LIBS} ${M_OR_GLIBC_LIBRARY} Threads::Threads)

 if(OPENSSL_LIBRARIES)
     target_link_libraries(mysqlclient PRIVATE ${OPENSSL_LIBRARIES})
@@ -353,10 +352,9 @@ if (OS_LINUX AND NOT UNBUNDLED AND (GLIBC_COMPATIBILITY OR USE_INTERNAL_UNWIND_L message(STATUS "Default libraries: ${DEFAULT_LIBS}") endif () -if (NOT M_OR_GLIBC_LIBRARY) - set (M_OR_GLIBC_LIBRARY m) +if (NOT GLIBC_COMPATIBILITY) + set (M_LIBRARY m) endif () -message(STATUS "Using m library=${M_OR_GLIBC_LIBRARY}") if (DEFAULT_LIBS) # Add default libs to all targets as the last dependency. diff --git a/contrib/brotli-cmake/CMakeLists.txt b/contrib/brotli-cmake/CMakeLists.txt index 3c995467c9d..e22f4593c02 100644 --- a/contrib/brotli-cmake/CMakeLists.txt +++ b/contrib/brotli-cmake/CMakeLists.txt @@ -31,4 +31,7 @@ set(SRCS add_library(brotli ${SRCS}) target_include_directories(brotli PUBLIC ${BROTLI_SOURCE_DIR}/include) -target_link_libraries(brotli PRIVATE ${M_OR_GLIBC_LIBRARY}) + +if(M_LIBRARY) + target_link_libraries(brotli PRIVATE ${M_LIBRARY}) +endif() diff --git a/contrib/h3-cmake/CMakeLists.txt b/contrib/h3-cmake/CMakeLists.txt index 5ad5f3fdb8d..ddd0b1e35ec 100644 --- a/contrib/h3-cmake/CMakeLists.txt +++ b/contrib/h3-cmake/CMakeLists.txt @@ -25,4 +25,6 @@ add_library(h3 ${SRCS}) target_include_directories(h3 SYSTEM PUBLIC ${H3_SOURCE_DIR}/include) target_include_directories(h3 SYSTEM PUBLIC ${H3_BINARY_DIR}/include) target_compile_definitions(h3 PRIVATE H3_HAVE_VLA) -target_link_libraries(h3 PRIVATE ${M_OR_GLIBC_LIBRARY}) +if(M_LIBRARY) + target_link_libraries(h3 PRIVATE ${M_LIBRARY}) +endif() \ No newline at end of file diff --git a/contrib/libxml2-cmake/CMakeLists.txt b/contrib/libxml2-cmake/CMakeLists.txt index ef362875e08..71127fb9e35 100644 --- a/contrib/libxml2-cmake/CMakeLists.txt +++ b/contrib/libxml2-cmake/CMakeLists.txt @@ -52,7 +52,10 @@ set(SRCS ) add_library(libxml2 ${SRCS}) -target_link_libraries(libxml2 PRIVATE ${ZLIB_LIBRARIES} ${M_OR_GLIBC_LIBRARY} ${CMAKE_DL_LIBS}) +target_link_libraries(libxml2 PRIVATE ${ZLIB_LIBRARIES} ${CMAKE_DL_LIBS}) +if(M_LIBRARY) + target_link_libraries(libxml2 PRIVATE ${M_LIBRARY}) +endif() target_include_directories(libxml2 PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include) target_include_directories(libxml2 PUBLIC ${LIBXML2_SOURCE_DIR}/include) diff --git a/contrib/mariadb-connector-c-cmake/CMakeLists.txt b/contrib/mariadb-connector-c-cmake/CMakeLists.txt index d7ea148cf40..a0582d89685 100644 --- a/contrib/mariadb-connector-c-cmake/CMakeLists.txt +++ b/contrib/mariadb-connector-c-cmake/CMakeLists.txt @@ -60,7 +60,10 @@ endif() add_library(mysqlclient ${SRCS}) -target_link_libraries(mysqlclient PRIVATE ${CMAKE_DL_LIBS} ${M_OR_GLIBC_LIBRARY} Threads::Threads) +target_link_libraries(mysqlclient PRIVATE ${CMAKE_DL_LIBS} Threads::Threads) +if(M_LIBRARY) + target_link_libraries(mysqlclient PRIVATE ${M_LIBRARY}) +endif() if(OPENSSL_LIBRARIES) target_link_libraries(mysqlclient PRIVATE ${OPENSSL_LIBRARIES}) From d86c072147ac0611ee0417a2f975d0fddd3835d7 Mon Sep 17 00:00:00 2001 From: proller Date: Fri, 28 Jun 2019 20:15:48 +0300 Subject: [PATCH 34/84] wip --- cmake/Modules/FindODBC.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/Modules/FindODBC.cmake b/cmake/Modules/FindODBC.cmake index 55318b52061..9e209c15777 100644 --- a/cmake/Modules/FindODBC.cmake +++ b/cmake/Modules/FindODBC.cmake @@ -129,7 +129,7 @@ find_package_handle_standard_args(ODBC ) if(ODBC_FOUND) - set(ODBC_LIBRARIES ${ODBC_LIBRARY} ${_odbc_required_libs_paths}) + set(ODBC_LIBRARIES ${ODBC_LIBRARY} ${_odbc_required_libs_paths} ${LTDL_LIBRARY}) set(ODBC_INCLUDE_DIRS ${ODBC_INCLUDE_DIR}) set(ODBC_DEFINITIONS 
${PC_ODBC_CFLAGS_OTHER}) endif() From b1b46e6dee9df003cb91c968cfc879dcec847573 Mon Sep 17 00:00:00 2001 From: proller Date: Fri, 2 Aug 2019 17:23:17 +0300 Subject: [PATCH 35/84] Fix --- contrib/libunwind-cmake/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/libunwind-cmake/CMakeLists.txt b/contrib/libunwind-cmake/CMakeLists.txt index 993323d56c0..4f24fe249f5 100644 --- a/contrib/libunwind-cmake/CMakeLists.txt +++ b/contrib/libunwind-cmake/CMakeLists.txt @@ -26,7 +26,7 @@ set(LIBUNWIND_SOURCES add_library(unwind_static ${LIBUNWIND_SOURCES}) -target_include_directories(unwind_static PUBLIC ${LIBUNWIND_SOURCE_DIR}/include) +target_include_directories(unwind_static SYSTEM BEFORE PUBLIC ${LIBUNWIND_SOURCE_DIR}/include) target_compile_definitions(unwind_static PRIVATE -D_LIBUNWIND_NO_HEAP=1 -D_DEBUG -D_LIBUNWIND_IS_NATIVE_ONLY) target_compile_options(unwind_static PRIVATE -fno-exceptions -funwind-tables -fno-sanitize=all -nostdinc++ -fno-rtti) target_link_libraries(unwind_static PRIVATE Threads::Threads ${CMAKE_DL_LIBS}) From 18ade1708c029c78e6f159ea4454972aea2c0606 Mon Sep 17 00:00:00 2001 From: proller Date: Thu, 1 Aug 2019 12:56:17 +0000 Subject: [PATCH 36/84] Fix arm build --- dbms/src/Common/QueryProfiler.cpp | 5 ++++- dbms/src/Common/StackTrace.cpp | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/dbms/src/Common/QueryProfiler.cpp b/dbms/src/Common/QueryProfiler.cpp index db977903c3b..def1d6c3f6e 100644 --- a/dbms/src/Common/QueryProfiler.cpp +++ b/dbms/src/Common/QueryProfiler.cpp @@ -186,7 +186,10 @@ QueryProfilerBase::QueryProfilerBase(const Int32 thread_id, const throw; } #else - UNUSED(thread_id, clock_type, period, pause_signal); + UNUSED(thread_id); + UNUSED(clock_type); + UNUSED(period); + UNUSED(pause_signal); throw Exception("QueryProfiler cannot work with stock libunwind", ErrorCodes::NOT_IMPLEMENTED); #endif } diff --git a/dbms/src/Common/StackTrace.cpp b/dbms/src/Common/StackTrace.cpp index a39eaf484e4..35399dd90d5 100644 --- a/dbms/src/Common/StackTrace.cpp +++ b/dbms/src/Common/StackTrace.cpp @@ -31,6 +31,8 @@ std::string signalToErrorMessage(int sig, const siginfo_t & info, const ucontext error << " Access: write."; else error << " Access: read."; +#else + UNUSED(context); #endif switch (info.si_code) From a309754aa9f967adc97c600f085bbdd75dda37a7 Mon Sep 17 00:00:00 2001 From: proller Date: Sat, 3 Aug 2019 16:14:09 +0300 Subject: [PATCH 37/84] unwind fix --- dbms/CMakeLists.txt | 3 --- dbms/src/Common/QueryProfiler.cpp | 2 +- dbms/src/Common/StackTrace.cpp | 3 +-- dbms/src/Common/config.h.in | 2 +- 4 files changed, 3 insertions(+), 7 deletions(-) diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt index 19ca188438e..a38fa210b7f 100644 --- a/dbms/CMakeLists.txt +++ b/dbms/CMakeLists.txt @@ -160,9 +160,6 @@ if (OS_FREEBSD) endif () if (USE_UNWIND) - target_compile_definitions (clickhouse_common_io PRIVATE USE_UNWIND=1) - target_include_directories (clickhouse_common_io SYSTEM BEFORE PRIVATE ${UNWIND_INCLUDE_DIR}) - if (NOT USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING) target_link_libraries (clickhouse_common_io PRIVATE ${UNWIND_LIBRARY}) endif () diff --git a/dbms/src/Common/QueryProfiler.cpp b/dbms/src/Common/QueryProfiler.cpp index def1d6c3f6e..85a6d73d7ce 100644 --- a/dbms/src/Common/QueryProfiler.cpp +++ b/dbms/src/Common/QueryProfiler.cpp @@ -114,7 +114,7 @@ namespace out.next(); } - const UInt32 TIMER_PRECISION = 1e9; + [[maybe_unused]] const UInt32 TIMER_PRECISION = 1e9; } namespace ErrorCodes diff 
--git a/dbms/src/Common/StackTrace.cpp b/dbms/src/Common/StackTrace.cpp index 35399dd90d5..debcd00b05c 100644 --- a/dbms/src/Common/StackTrace.cpp +++ b/dbms/src/Common/StackTrace.cpp @@ -1,11 +1,10 @@ #include #include - +#include #include #include #include #include - #include #include #include diff --git a/dbms/src/Common/config.h.in b/dbms/src/Common/config.h.in index 9b38dd9fc04..b6a5b6de2b8 100644 --- a/dbms/src/Common/config.h.in +++ b/dbms/src/Common/config.h.in @@ -9,5 +9,5 @@ #cmakedefine01 USE_CPUINFO #cmakedefine01 USE_BROTLI #cmakedefine01 USE_MIMALLOC - +#cmakedefine01 USE_UNWIND #cmakedefine01 CLICKHOUSE_SPLIT_BINARY From 6b94991eece9918855fd207168c1c5430544c95e Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 4 Aug 2019 03:17:27 +0300 Subject: [PATCH 38/84] Update CMakeLists.txt --- contrib/h3-cmake/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/h3-cmake/CMakeLists.txt b/contrib/h3-cmake/CMakeLists.txt index ddd0b1e35ec..80e76311f70 100644 --- a/contrib/h3-cmake/CMakeLists.txt +++ b/contrib/h3-cmake/CMakeLists.txt @@ -27,4 +27,4 @@ target_include_directories(h3 SYSTEM PUBLIC ${H3_BINARY_DIR}/include) target_compile_definitions(h3 PRIVATE H3_HAVE_VLA) if(M_LIBRARY) target_link_libraries(h3 PRIVATE ${M_LIBRARY}) -endif() \ No newline at end of file +endif() From 4f9d870fcadc8f3b9f247b64cc85ce7d1ca998e8 Mon Sep 17 00:00:00 2001 From: proller Date: Sun, 4 Aug 2019 12:37:59 +0000 Subject: [PATCH 39/84] Fix arm and freebsd build --- cmake/find_fastops.cmake | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cmake/find_fastops.cmake b/cmake/find_fastops.cmake index c8ddbaf80a7..bafefd72756 100644 --- a/cmake/find_fastops.cmake +++ b/cmake/find_fastops.cmake @@ -1,4 +1,6 @@ -option (ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Michael Parakhin" ${NOT_UNBUNDLED}) +if (NOT ARCH_ARM AND NOT OS_FREEBSD) + option (ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Michael Parakhin" ${NOT_UNBUNDLED}) +endif () if (ENABLE_FASTOPS) if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/fastops/fastops/fastops.h") @@ -12,4 +14,4 @@ else () set(USE_FASTOPS 0) endif () -message (STATUS "Using fastops") +[1~message (STATUS "Using fastops=${USE_FASTOPS}: ${FASTOPS_INCLUDE_DIR} : ${FASTOPS_LIBRARY}") From 274eb836027915fca902854247c9fda04f79b456 Mon Sep 17 00:00:00 2001 From: proller Date: Mon, 5 Aug 2019 10:52:41 +0000 Subject: [PATCH 40/84] fix --- cmake/find_fastops.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/find_fastops.cmake b/cmake/find_fastops.cmake index bafefd72756..b231b78a258 100644 --- a/cmake/find_fastops.cmake +++ b/cmake/find_fastops.cmake @@ -14,4 +14,4 @@ else () set(USE_FASTOPS 0) endif () -[1~message (STATUS "Using fastops=${USE_FASTOPS}: ${FASTOPS_INCLUDE_DIR} : ${FASTOPS_LIBRARY}") +message (STATUS "Using fastops=${USE_FASTOPS}: ${FASTOPS_INCLUDE_DIR} : ${FASTOPS_LIBRARY}") From b55da1888a310479f2b5cb5ba29f85364e0d0d72 Mon Sep 17 00:00:00 2001 From: Ivan Lezhankin Date: Tue, 6 Aug 2019 17:18:37 +0300 Subject: [PATCH 41/84] Add "_timestamp" virtual column --- .../Storages/Kafka/KafkaBlockInputStream.cpp | 10 +++++++--- .../Kafka/ReadBufferFromKafkaConsumer.h | 1 + dbms/src/Storages/Kafka/StorageKafka.cpp | 5 ++++- .../integration/test_storage_kafka/test.py | 18 +++++++++--------- 4 files changed, 21 insertions(+), 13 deletions(-) diff --git a/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp 
b/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp index 8607d1e1e81..aeecc772dce 100644 --- a/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp +++ b/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp @@ -19,7 +19,7 @@ KafkaBlockInputStream::KafkaBlockInputStream( if (!storage.getSchemaName().empty()) context.setSetting("format_schema", storage.getSchemaName()); - virtual_columns = storage.getSampleBlockForColumns({"_topic", "_key", "_offset", "_partition"}).cloneEmptyColumns(); + virtual_columns = storage.getSampleBlockForColumns({"_topic", "_key", "_offset", "_partition", "_timestamp"}).cloneEmptyColumns(); } KafkaBlockInputStream::~KafkaBlockInputStream() @@ -64,6 +64,10 @@ void KafkaBlockInputStream::readPrefixImpl() virtual_columns[1]->insert(sub_buffer->currentKey()); // "key" virtual_columns[2]->insert(sub_buffer->currentOffset()); // "offset" virtual_columns[3]->insert(sub_buffer->currentPartition()); // "partition" + + auto timestamp = sub_buffer->currentTimestamp(); + if (timestamp) + virtual_columns[4]->insert(std::chrono::duration_cast(timestamp->get_timestamp()).count()); // "timestamp" }; auto child = FormatFactory::instance().getInput( @@ -80,8 +84,8 @@ Block KafkaBlockInputStream::readImpl() if (!block) return block; - Block virtual_block = storage.getSampleBlockForColumns({"_topic", "_key", "_offset", "_partition"}).cloneWithColumns(std::move(virtual_columns)); - virtual_columns = storage.getSampleBlockForColumns({"_topic", "_key", "_offset", "_partition"}).cloneEmptyColumns(); + Block virtual_block = storage.getSampleBlockForColumns({"_topic", "_key", "_offset", "_partition", "_timestamp"}).cloneWithColumns(std::move(virtual_columns)); + virtual_columns = storage.getSampleBlockForColumns({"_topic", "_key", "_offset", "_partition", "_timestamp"}).cloneEmptyColumns(); for (const auto & column : virtual_block.getColumnsWithTypeAndName()) block.insert(column); diff --git a/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h b/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h index d1ea51b896e..a8295152d91 100644 --- a/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h +++ b/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h @@ -31,6 +31,7 @@ public: String currentKey() const { return current[-1].get_key(); } auto currentOffset() const { return current[-1].get_offset(); } auto currentPartition() const { return current[-1].get_partition(); } + auto currentTimestamp() const { return current[-1].get_timestamp(); } private: using Messages = std::vector; diff --git a/dbms/src/Storages/Kafka/StorageKafka.cpp b/dbms/src/Storages/Kafka/StorageKafka.cpp index e591649dd6a..7cfcb4caf10 100644 --- a/dbms/src/Storages/Kafka/StorageKafka.cpp +++ b/dbms/src/Storages/Kafka/StorageKafka.cpp @@ -4,6 +4,8 @@ #include #include #include +#include +#include #include #include #include @@ -86,7 +88,8 @@ StorageKafka::StorageKafka( ColumnsDescription({{"_topic", std::make_shared()}, {"_key", std::make_shared()}, {"_offset", std::make_shared()}, - {"_partition", std::make_shared()}}, true)) + {"_partition", std::make_shared()}, + {"_timestamp", std::make_shared(std::make_shared())}}, true)) , table_name(table_name_) , database_name(database_name_) , global_context(context_) diff --git a/dbms/tests/integration/test_storage_kafka/test.py b/dbms/tests/integration/test_storage_kafka/test.py index 8ab046b894b..7c7a30c9fa8 100644 --- a/dbms/tests/integration/test_storage_kafka/test.py +++ b/dbms/tests/integration/test_storage_kafka/test.py @@ -61,10 +61,10 @@ def 
wait_kafka_is_available(max_retries=50): time.sleep(1) -def kafka_produce(topic, messages): +def kafka_produce(topic, messages, timestamp=None): producer = KafkaProducer(bootstrap_servers="localhost:9092") for message in messages: - producer.send(topic=topic, value=message) + producer.send(topic=topic, value=message, timestamp_ms=timestamp) producer.flush() print ("Produced {} messages for topic {}".format(len(messages), topic)) @@ -376,16 +376,16 @@ def test_kafka_virtual_columns(kafka_cluster): messages = '' for i in range(25): messages += json.dumps({'key': i, 'value': i}) + '\n' - kafka_produce('virt1', [messages]) + kafka_produce('virt1', [messages], 0) messages = '' for i in range(25, 50): messages += json.dumps({'key': i, 'value': i}) + '\n' - kafka_produce('virt1', [messages]) + kafka_produce('virt1', [messages], 0) result = '' for i in range(50): - result += instance.query('SELECT _key, key, _topic, value, _offset, _partition FROM test.kafka') + result += instance.query('SELECT _key, key, _topic, value, _offset, _partition, _timestamp FROM test.kafka') if kafka_check_result(result, False, 'test_kafka_virtual1.reference'): break time.sleep(0.5) @@ -403,20 +403,20 @@ def test_kafka_virtual_columns_with_materialized_view(kafka_cluster): kafka_group_name = 'virt2', kafka_format = 'JSONEachRow', kafka_row_delimiter = '\\n'; - CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64) + CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime)) ENGINE = MergeTree() ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS - SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition FROM test.kafka; + SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp as timestamp FROM test.kafka; ''') messages = [] for i in range(50): messages.append(json.dumps({'key': i, 'value': i})) - kafka_produce('virt2', messages) + kafka_produce('virt2', messages, 0) for i in range(50): - result = instance.query('SELECT kafka_key, key, topic, value, offset, partition FROM test.view') + result = instance.query('SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view') if kafka_check_result(result, False, 'test_kafka_virtual2.reference'): break time.sleep(0.5) From 0bc6847111aee4ca7175fd19bdbf504bf18427fa Mon Sep 17 00:00:00 2001 From: Ivan Lezhankin Date: Wed, 7 Aug 2019 12:54:29 +0300 Subject: [PATCH 42/84] Update references --- .../test_kafka_virtual1.reference | 100 +++++++++--------- .../test_kafka_virtual2.reference | 100 +++++++++--------- 2 files changed, 100 insertions(+), 100 deletions(-) diff --git a/dbms/tests/integration/test_storage_kafka/test_kafka_virtual1.reference b/dbms/tests/integration/test_storage_kafka/test_kafka_virtual1.reference index 5956210d25e..30c2f6e1c59 100644 --- a/dbms/tests/integration/test_storage_kafka/test_kafka_virtual1.reference +++ b/dbms/tests/integration/test_storage_kafka/test_kafka_virtual1.reference @@ -1,50 +1,50 @@ - 0 virt1 0 0 - 1 virt1 1 0 - 2 virt1 2 0 - 3 virt1 3 0 - 4 virt1 4 0 - 5 virt1 5 0 - 6 virt1 6 0 - 7 virt1 7 0 - 8 virt1 8 0 - 9 virt1 9 0 - 10 virt1 10 0 - 11 virt1 11 0 - 12 virt1 12 0 - 13 virt1 13 0 - 14 virt1 14 0 - 15 virt1 15 0 - 16 virt1 16 0 - 17 virt1 17 0 - 18 virt1 18 0 - 19 virt1 19 0 - 20 virt1 20 0 - 21 virt1 21 0 - 22 virt1 22 0 - 23 virt1 23 0 - 24 virt1 24 0 - 25 virt1 25 1 - 26 virt1 
26 1 - 27 virt1 27 1 - 28 virt1 28 1 - 29 virt1 29 1 - 30 virt1 30 1 - 31 virt1 31 1 - 32 virt1 32 1 - 33 virt1 33 1 - 34 virt1 34 1 - 35 virt1 35 1 - 36 virt1 36 1 - 37 virt1 37 1 - 38 virt1 38 1 - 39 virt1 39 1 - 40 virt1 40 1 - 41 virt1 41 1 - 42 virt1 42 1 - 43 virt1 43 1 - 44 virt1 44 1 - 45 virt1 45 1 - 46 virt1 46 1 - 47 virt1 47 1 - 48 virt1 48 1 - 49 virt1 49 1 + 0 virt1 0 0 0 0000-00-00 00:00:00 + 1 virt1 1 0 0 0000-00-00 00:00:00 + 2 virt1 2 0 0 0000-00-00 00:00:00 + 3 virt1 3 0 0 0000-00-00 00:00:00 + 4 virt1 4 0 0 0000-00-00 00:00:00 + 5 virt1 5 0 0 0000-00-00 00:00:00 + 6 virt1 6 0 0 0000-00-00 00:00:00 + 7 virt1 7 0 0 0000-00-00 00:00:00 + 8 virt1 8 0 0 0000-00-00 00:00:00 + 9 virt1 9 0 0 0000-00-00 00:00:00 + 10 virt1 10 0 0 0000-00-00 00:00:00 + 11 virt1 11 0 0 0000-00-00 00:00:00 + 12 virt1 12 0 0 0000-00-00 00:00:00 + 13 virt1 13 0 0 0000-00-00 00:00:00 + 14 virt1 14 0 0 0000-00-00 00:00:00 + 15 virt1 15 0 0 0000-00-00 00:00:00 + 16 virt1 16 0 0 0000-00-00 00:00:00 + 17 virt1 17 0 0 0000-00-00 00:00:00 + 18 virt1 18 0 0 0000-00-00 00:00:00 + 19 virt1 19 0 0 0000-00-00 00:00:00 + 20 virt1 20 0 0 0000-00-00 00:00:00 + 21 virt1 21 0 0 0000-00-00 00:00:00 + 22 virt1 22 0 0 0000-00-00 00:00:00 + 23 virt1 23 0 0 0000-00-00 00:00:00 + 24 virt1 24 0 0 0000-00-00 00:00:00 + 25 virt1 25 1 0 0000-00-00 00:00:00 + 26 virt1 26 1 0 0000-00-00 00:00:00 + 27 virt1 27 1 0 0000-00-00 00:00:00 + 28 virt1 28 1 0 0000-00-00 00:00:00 + 29 virt1 29 1 0 0000-00-00 00:00:00 + 30 virt1 30 1 0 0000-00-00 00:00:00 + 31 virt1 31 1 0 0000-00-00 00:00:00 + 32 virt1 32 1 0 0000-00-00 00:00:00 + 33 virt1 33 1 0 0000-00-00 00:00:00 + 34 virt1 34 1 0 0000-00-00 00:00:00 + 35 virt1 35 1 0 0000-00-00 00:00:00 + 36 virt1 36 1 0 0000-00-00 00:00:00 + 37 virt1 37 1 0 0000-00-00 00:00:00 + 38 virt1 38 1 0 0000-00-00 00:00:00 + 39 virt1 39 1 0 0000-00-00 00:00:00 + 40 virt1 40 1 0 0000-00-00 00:00:00 + 41 virt1 41 1 0 0000-00-00 00:00:00 + 42 virt1 42 1 0 0000-00-00 00:00:00 + 43 virt1 43 1 0 0000-00-00 00:00:00 + 44 virt1 44 1 0 0000-00-00 00:00:00 + 45 virt1 45 1 0 0000-00-00 00:00:00 + 46 virt1 46 1 0 0000-00-00 00:00:00 + 47 virt1 47 1 0 0000-00-00 00:00:00 + 48 virt1 48 1 0 0000-00-00 00:00:00 + 49 virt1 49 1 0 0000-00-00 00:00:00 diff --git a/dbms/tests/integration/test_storage_kafka/test_kafka_virtual2.reference b/dbms/tests/integration/test_storage_kafka/test_kafka_virtual2.reference index 50c2edbf802..afb9a64f4fc 100644 --- a/dbms/tests/integration/test_storage_kafka/test_kafka_virtual2.reference +++ b/dbms/tests/integration/test_storage_kafka/test_kafka_virtual2.reference @@ -1,50 +1,50 @@ - 0 virt2 0 0 - 1 virt2 1 1 - 2 virt2 2 2 - 3 virt2 3 3 - 4 virt2 4 4 - 5 virt2 5 5 - 6 virt2 6 6 - 7 virt2 7 7 - 8 virt2 8 8 - 9 virt2 9 9 - 10 virt2 10 10 - 11 virt2 11 11 - 12 virt2 12 12 - 13 virt2 13 13 - 14 virt2 14 14 - 15 virt2 15 15 - 16 virt2 16 16 - 17 virt2 17 17 - 18 virt2 18 18 - 19 virt2 19 19 - 20 virt2 20 20 - 21 virt2 21 21 - 22 virt2 22 22 - 23 virt2 23 23 - 24 virt2 24 24 - 25 virt2 25 25 - 26 virt2 26 26 - 27 virt2 27 27 - 28 virt2 28 28 - 29 virt2 29 29 - 30 virt2 30 30 - 31 virt2 31 31 - 32 virt2 32 32 - 33 virt2 33 33 - 34 virt2 34 34 - 35 virt2 35 35 - 36 virt2 36 36 - 37 virt2 37 37 - 38 virt2 38 38 - 39 virt2 39 39 - 40 virt2 40 40 - 41 virt2 41 41 - 42 virt2 42 42 - 43 virt2 43 43 - 44 virt2 44 44 - 45 virt2 45 45 - 46 virt2 46 46 - 47 virt2 47 47 - 48 virt2 48 48 - 49 virt2 49 49 + 0 virt2 0 0 0 0000-00-00 00:00:00 + 1 virt2 1 1 0 0000-00-00 00:00:00 + 2 virt2 2 2 0 0000-00-00 00:00:00 + 
3 virt2 3 3 0 0000-00-00 00:00:00 + 4 virt2 4 4 0 0000-00-00 00:00:00 + 5 virt2 5 5 0 0000-00-00 00:00:00 + 6 virt2 6 6 0 0000-00-00 00:00:00 + 7 virt2 7 7 0 0000-00-00 00:00:00 + 8 virt2 8 8 0 0000-00-00 00:00:00 + 9 virt2 9 9 0 0000-00-00 00:00:00 + 10 virt2 10 10 0 0000-00-00 00:00:00 + 11 virt2 11 11 0 0000-00-00 00:00:00 + 12 virt2 12 12 0 0000-00-00 00:00:00 + 13 virt2 13 13 0 0000-00-00 00:00:00 + 14 virt2 14 14 0 0000-00-00 00:00:00 + 15 virt2 15 15 0 0000-00-00 00:00:00 + 16 virt2 16 16 0 0000-00-00 00:00:00 + 17 virt2 17 17 0 0000-00-00 00:00:00 + 18 virt2 18 18 0 0000-00-00 00:00:00 + 19 virt2 19 19 0 0000-00-00 00:00:00 + 20 virt2 20 20 0 0000-00-00 00:00:00 + 21 virt2 21 21 0 0000-00-00 00:00:00 + 22 virt2 22 22 0 0000-00-00 00:00:00 + 23 virt2 23 23 0 0000-00-00 00:00:00 + 24 virt2 24 24 0 0000-00-00 00:00:00 + 25 virt2 25 25 0 0000-00-00 00:00:00 + 26 virt2 26 26 0 0000-00-00 00:00:00 + 27 virt2 27 27 0 0000-00-00 00:00:00 + 28 virt2 28 28 0 0000-00-00 00:00:00 + 29 virt2 29 29 0 0000-00-00 00:00:00 + 30 virt2 30 30 0 0000-00-00 00:00:00 + 31 virt2 31 31 0 0000-00-00 00:00:00 + 32 virt2 32 32 0 0000-00-00 00:00:00 + 33 virt2 33 33 0 0000-00-00 00:00:00 + 34 virt2 34 34 0 0000-00-00 00:00:00 + 35 virt2 35 35 0 0000-00-00 00:00:00 + 36 virt2 36 36 0 0000-00-00 00:00:00 + 37 virt2 37 37 0 0000-00-00 00:00:00 + 38 virt2 38 38 0 0000-00-00 00:00:00 + 39 virt2 39 39 0 0000-00-00 00:00:00 + 40 virt2 40 40 0 0000-00-00 00:00:00 + 41 virt2 41 41 0 0000-00-00 00:00:00 + 42 virt2 42 42 0 0000-00-00 00:00:00 + 43 virt2 43 43 0 0000-00-00 00:00:00 + 44 virt2 44 44 0 0000-00-00 00:00:00 + 45 virt2 45 45 0 0000-00-00 00:00:00 + 46 virt2 46 46 0 0000-00-00 00:00:00 + 47 virt2 47 47 0 0000-00-00 00:00:00 + 48 virt2 48 48 0 0000-00-00 00:00:00 + 49 virt2 49 49 0 0000-00-00 00:00:00 From 3c227dce13e58bade4ebd191bf16ef830c5394fe Mon Sep 17 00:00:00 2001 From: CurtizJ Date: Wed, 7 Aug 2019 13:15:25 +0300 Subject: [PATCH 43/84] fix topK and topKWeighted functions --- dbms/src/Common/SpaceSaving.h | 79 ++++++++++++++++++++++------------- 1 file changed, 50 insertions(+), 29 deletions(-) diff --git a/dbms/src/Common/SpaceSaving.h b/dbms/src/Common/SpaceSaving.h index 56dbf382293..e5173027fa5 100644 --- a/dbms/src/Common/SpaceSaving.h +++ b/dbms/src/Common/SpaceSaving.h @@ -113,7 +113,8 @@ public: } TKey key; - size_t slot, hash; + ssize_t slot; + size_t hash; UInt64 count; UInt64 error; }; @@ -147,15 +148,13 @@ public: void insert(const TKey & key, UInt64 increment = 1, UInt64 error = 0) { // Increase weight of a key that already exists - // It uses hashtable for both value mapping as a presence test (c_i != 0) auto hash = counter_map.hash(key); - auto it = counter_map.find(key, hash); - if (it != counter_map.end()) + auto counter = findCounter(key, hash); + if (counter) { - auto c = it->getSecond(); - c->count += increment; - c->error += error; - percolate(c); + counter->count += increment; + counter->error += error; + percolate(counter); return; } // Key doesn't exist, but can fit in the top K @@ -177,6 +176,7 @@ public: push(new Counter(arena.emplace(key), increment, error, hash)); return; } + const size_t alpha_mask = alpha_map.size() - 1; auto & alpha = alpha_map[hash & alpha_mask]; if (alpha + increment < min->count) @@ -187,22 +187,16 @@ public: // Erase the current minimum element alpha_map[min->hash & alpha_mask] = min->count; - it = counter_map.find(min->key, min->hash); - // Replace minimum with newly inserted element - if (it != counter_map.end()) - { - arena.free(min->key); - min->hash 
= hash; - min->key = arena.emplace(key); - min->count = alpha + increment; - min->error = alpha + error; - percolate(min); + counter_list.pop_back(); + counter = findCounter(min->key, min->hash); + counter->slot = -1; - it->getSecond() = min; - it->getFirstMutable() = min->key; - counter_map.reinsert(it, hash); - } + ++removed_keys; + if (removed_keys * 2 > counter_map.size()) + rebuildCounterMap(); + + push(new Counter(arena.emplace(key), alpha + increment, alpha + error, hash)); } /* @@ -242,7 +236,7 @@ public: // The list is sorted in descending order, we have to scan in reverse for (auto counter : boost::adaptors::reverse(rhs.counter_list)) { - if (counter_map.find(counter->key) != counter_map.end()) + if (findCounter(counter->key, counter_map.hash(counter->key))) { // Subtract m2 previously added, guaranteed not negative insert(counter->key, counter->count - m2, counter->error - m2); @@ -346,19 +340,46 @@ private: void destroyLastElement() { auto last_element = counter_list.back(); - auto cell = counter_map.find(last_element->key, last_element->hash); - cell->setZero(); - counter_map.reinsert(cell, last_element->hash); + last_element->slot = -1; counter_list.pop_back(); - arena.free(last_element->key); - delete last_element; + + ++removed_keys; + if (removed_keys * 2 > counter_map.size()) + rebuildCounterMap(); } - HashMap counter_map; + Counter * findCounter(const TKey & key, size_t hash) + { + auto it = counter_map.find(key, hash); + if (it == counter_map.end() || it->getSecond()->slot == -1) + return nullptr; + + return it->getSecond(); + } + + void rebuildCounterMap() + { + removed_keys = 0; + for (const auto & cell : counter_map) + { + auto counter = cell.getSecond(); + if (counter->slot == -1) + delete counter; + } + + counter_map.clear(); + for (auto counter : counter_list) + counter_map[counter->key] = counter; + } + + using CounterMap = HashMap; + + CounterMap counter_map; std::vector counter_list; std::vector alpha_map; SpaceSavingArena arena; size_t m_capacity; + size_t removed_keys = 0; }; } From 61a8abb813fceb5565d48fafb4fb829f767ce825 Mon Sep 17 00:00:00 2001 From: CurtizJ Date: Thu, 8 Aug 2019 15:42:48 +0300 Subject: [PATCH 44/84] add more tests for functions topK and topKWeighted --- .../0_stateless/00981_topK_topKWeighted.reference | 3 +++ .../queries/0_stateless/00981_topK_topKWeighted.sql | 12 ++++++++++++ 2 files changed, 15 insertions(+) create mode 100644 dbms/tests/queries/0_stateless/00981_topK_topKWeighted.reference create mode 100644 dbms/tests/queries/0_stateless/00981_topK_topKWeighted.sql diff --git a/dbms/tests/queries/0_stateless/00981_topK_topKWeighted.reference b/dbms/tests/queries/0_stateless/00981_topK_topKWeighted.reference new file mode 100644 index 00000000000..ca039c9c498 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00981_topK_topKWeighted.reference @@ -0,0 +1,3 @@ +['0','1','2','3','4','5','6','7','8','9'] +['0','1','2','3','4','5','6','7','8','9'] +['2999999','2999998','2999997','2999996','2999995','2999994','2999993','2999992','2999991','2999990'] diff --git a/dbms/tests/queries/0_stateless/00981_topK_topKWeighted.sql b/dbms/tests/queries/0_stateless/00981_topK_topKWeighted.sql new file mode 100644 index 00000000000..c1f385fae80 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00981_topK_topKWeighted.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS topk; + +CREATE TABLE topk (val1 String, val2 UInt32) ENGINE = MergeTree ORDER BY val1; + +INSERT INTO topk SELECT toString(number), number FROM numbers(3000000); +INSERT INTO topk SELECT 
toString(number % 10), 999999999 FROM numbers(1000000); + +SELECT arraySort(topK(10)(val1)) FROM topk; +SELECT arraySort(topKWeighted(10)(val1, val2)) FROM topk; +SELECT topKWeighted(10)(toString(number), number) from numbers(3000000); + +DROP TABLE topk; From a56d897c2cfc75dcd44eff5c978eaff035cf717d Mon Sep 17 00:00:00 2001 From: CurtizJ Date: Thu, 8 Aug 2019 15:55:08 +0300 Subject: [PATCH 45/84] better performance in topK function --- dbms/src/Common/SpaceSaving.h | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/dbms/src/Common/SpaceSaving.h b/dbms/src/Common/SpaceSaving.h index e5173027fa5..832a528bd48 100644 --- a/dbms/src/Common/SpaceSaving.h +++ b/dbms/src/Common/SpaceSaving.h @@ -187,14 +187,7 @@ public: // Erase the current minimum element alpha_map[min->hash & alpha_mask] = min->count; - - counter_list.pop_back(); - counter = findCounter(min->key, min->hash); - counter->slot = -1; - - ++removed_keys; - if (removed_keys * 2 > counter_map.size()) - rebuildCounterMap(); + destroyLastElement(); push(new Counter(arena.emplace(key), alpha + increment, alpha + error, hash)); } From f609e8a7856cbb4d530fce601d17f47af592a9aa Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2019 03:00:32 +0300 Subject: [PATCH 46/84] Slightly more safe parsing of NamesAndTypesList --- dbms/src/Core/NamesAndTypes.cpp | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/dbms/src/Core/NamesAndTypes.cpp b/dbms/src/Core/NamesAndTypes.cpp index 09985d97463..15c2be689bf 100644 --- a/dbms/src/Core/NamesAndTypes.cpp +++ b/dbms/src/Core/NamesAndTypes.cpp @@ -26,16 +26,20 @@ void NamesAndTypesList::readText(ReadBuffer & buf) size_t count; DB::readText(count, buf); assertString(" columns:\n", buf); - resize(count); - for (NameAndTypePair & it : *this) + + String column_name; + String type_name; + for (size_t i = 0; i < count; ++i) { - readBackQuotedStringWithSQLStyle(it.name, buf); + readBackQuotedStringWithSQLStyle(column_name, buf); assertChar(' ', buf); - String type_name; readString(type_name, buf); - it.type = data_type_factory.get(type_name); assertChar('\n', buf); + + emplace_back(column_name, data_type_factory.get(type_name)); } + + assertEOF(buf); } void NamesAndTypesList::writeText(WriteBuffer & buf) const From ead6336d2c1e36bb4af464ab0e515f504202cec4 Mon Sep 17 00:00:00 2001 From: CurtizJ Date: Fri, 9 Aug 2019 13:11:50 +0300 Subject: [PATCH 47/84] function topK: fix merge stage and fix memory leaks --- dbms/src/Common/SpaceSaving.h | 41 +++++++++++++------ ...=> 00981_topK_topKWeighted_long.reference} | 0 ...d.sql => 00981_topK_topKWeighted_long.sql} | 0 3 files changed, 28 insertions(+), 13 deletions(-) rename dbms/tests/queries/0_stateless/{00981_topK_topKWeighted.reference => 00981_topK_topKWeighted_long.reference} (100%) rename dbms/tests/queries/0_stateless/{00981_topK_topKWeighted.sql => 00981_topK_topKWeighted_long.sql} (100%) diff --git a/dbms/src/Common/SpaceSaving.h b/dbms/src/Common/SpaceSaving.h index 832a528bd48..da7e9293723 100644 --- a/dbms/src/Common/SpaceSaving.h +++ b/dbms/src/Common/SpaceSaving.h @@ -113,7 +113,7 @@ public: } TKey key; - ssize_t slot; + size_t slot; size_t hash; UInt64 count; UInt64 error; @@ -229,17 +229,35 @@ public: // The list is sorted in descending order, we have to scan in reverse for (auto counter : boost::adaptors::reverse(rhs.counter_list)) { - if (findCounter(counter->key, counter_map.hash(counter->key))) + size_t hash = counter_map.hash(counter->key); + if (auto current = 
findCounter(counter->key, hash)) { // Subtract m2 previously added, guaranteed not negative - insert(counter->key, counter->count - m2, counter->error - m2); + current->count += (counter->count - m2); + current->error += (counter->error - m2); } else { // Counters not monitored in S1 - insert(counter->key, counter->count + m1, counter->error + m1); + counter_list.push_back(new Counter(arena.emplace(counter->key), counter->count + m1, counter->error + m1, hash)); } } + + std::sort(counter_list.begin(), counter_list.end(), [](Counter * l, Counter * r) { return *l > *r; }); + + if (counter_list.size() > m_capacity) + { + for (size_t i = m_capacity; i < counter_list.size(); ++i) + { + arena.free(counter_list[i]->key); + delete counter_list[i]; + } + counter_list.resize(m_capacity); + } + + for (size_t i = 0; i < counter_list.size(); ++i) + counter_list[i]->slot = i; + rebuildCounterMap(); } std::vector topK(size_t k) const @@ -323,7 +341,10 @@ private: void destroyElements() { for (auto counter : counter_list) + { + arena.free(counter->key); delete counter; + } counter_map.clear(); counter_list.clear(); @@ -333,7 +354,8 @@ private: void destroyLastElement() { auto last_element = counter_list.back(); - last_element->slot = -1; + arena.free(last_element->key); + delete last_element; counter_list.pop_back(); ++removed_keys; @@ -344,7 +366,7 @@ private: Counter * findCounter(const TKey & key, size_t hash) { auto it = counter_map.find(key, hash); - if (it == counter_map.end() || it->getSecond()->slot == -1) + if (it == counter_map.end()) return nullptr; return it->getSecond(); @@ -353,13 +375,6 @@ private: void rebuildCounterMap() { removed_keys = 0; - for (const auto & cell : counter_map) - { - auto counter = cell.getSecond(); - if (counter->slot == -1) - delete counter; - } - counter_map.clear(); for (auto counter : counter_list) counter_map[counter->key] = counter; diff --git a/dbms/tests/queries/0_stateless/00981_topK_topKWeighted.reference b/dbms/tests/queries/0_stateless/00981_topK_topKWeighted_long.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00981_topK_topKWeighted.reference rename to dbms/tests/queries/0_stateless/00981_topK_topKWeighted_long.reference diff --git a/dbms/tests/queries/0_stateless/00981_topK_topKWeighted.sql b/dbms/tests/queries/0_stateless/00981_topK_topKWeighted_long.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00981_topK_topKWeighted.sql rename to dbms/tests/queries/0_stateless/00981_topK_topKWeighted_long.sql From 39eaba4aa4c6ad569f913d7ad93bc7807e305c85 Mon Sep 17 00:00:00 2001 From: BayoNet Date: Fri, 9 Aug 2019 16:22:07 +0300 Subject: [PATCH 48/84] DOCAPI-7436: enum (#6411) * Link fix. * DOCAPI-7436: Updated Enum docs. --- docs/en/data_types/enum.md | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/docs/en/data_types/enum.md b/docs/en/data_types/enum.md index 754b6651f56..247ec070190 100644 --- a/docs/en/data_types/enum.md +++ b/docs/en/data_types/enum.md @@ -1,26 +1,29 @@ +# Enum -# Enum8, Enum16 +Enumerated type storing pairs of the `'string' = integer` format. -Includes the `Enum8` and `Enum16` types. `Enum` saves the finite set of pairs of `'string' = integer`. In ClickHouse, all operations with the `Enum` data type are performed as if value contains integers, although the user is working with string constants. This is more effective in terms of performance than working with the `String` data type. 
+ClickHouse supports:

-- `Enum8` is described by pairs of `'String' = Int8`.
-- `Enum16` is described by pairs of `'String' = Int16`.
+- 8-bit `Enum`. It can contain up to 256 values enumerated in the `[-128, 127]` range.
+- 16-bit `Enum`. It can contain up to 65536 values enumerated in the `[-32768, 32767]` range.
+
+ClickHouse automatically chooses the width of a generic `Enum` from the values you declare. To fix the storage size explicitly, use the `Enum8` or `Enum16` types.

## Usage examples

Here we create a table with an `Enum8('hello' = 1, 'world' = 2)` type column:

-```
+```sql
CREATE TABLE t_enum
(
-    x Enum8('hello' = 1, 'world' = 2)
+    x Enum('hello' = 1, 'world' = 2)
)
ENGINE = TinyLog
```

-This column `x` can only store the values that are listed in the type definition: `'hello'` or `'world'`. If you try to save any other value, ClickHouse will generate an exception.
+This column `x` can only store the values that are listed in the type definition: `'hello'` or `'world'`. If you try to save any other value, ClickHouse will generate an exception. ClickHouse automatically chooses the 8-bit width for this `Enum`.

-```
+```sql
:) INSERT INTO t_enum VALUES ('hello'), ('world'), ('hello')

INSERT INTO t_enum VALUES

@@ -35,12 +38,12 @@ INSERT INTO t_enum VALUES

Exception on client:

-Code: 49. DB::Exception: Unknown element 'a' for type Enum8('hello' = 1, 'world' = 2)
+Code: 49. DB::Exception: Unknown element 'a' for type Enum('hello' = 1, 'world' = 2)
```

When you query data from the table, ClickHouse outputs the string values from `Enum`.

-```
+```sql
SELECT * FROM t_enum

┌─x─────┐
│ hello │
│ world │
│ hello │
└───────┘
```

If you need to see the numeric equivalents of the rows, you must cast the `Enum` value to integer type.

-```
+```sql
SELECT CAST(x, 'Int8') FROM t_enum

┌─CAST(x, 'Int8')─┐
│               1 │
│               2 │
│               1 │
└─────────────────┘
```

To create an Enum value in a query, you also need to use `CAST`.
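A hedged aside before the `CAST` example resumes below: the width rule can be checked directly. The query and the expected type here are derived from the rule above and are not part of the original page:

```sql
-- 1000 does not fit into Int8, so this generic Enum is expected to
-- resolve to the 16-bit variant: Enum16('a' = 1, 'b' = 1000).
SELECT toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 1000)'))
```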
-``` -SELECT toTypeName(CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)')) +```sql +SELECT toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)')) -┌─toTypeName(CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)'))─┐ -│ Enum8('a' = 1, 'b' = 2) │ -└──────────────────────────────────────────────────────┘ +┌─toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)'))─┐ +│ Enum8('a' = 1, 'b' = 2) │ +└─────────────────────────────────────────────────────┘ ``` ## General rules and usage From e06c994b0e97c0553dc415d1a2826d8c3b2efecc Mon Sep 17 00:00:00 2001 From: chertus Date: Fri, 9 Aug 2019 17:50:04 +0300 Subject: [PATCH 49/84] refactoring: move collectUsedColumns from ExpressionAnalyzer to SyntaxAnalyzer --- dbms/src/Interpreters/AnalyzedJoin.cpp | 2 +- dbms/src/Interpreters/AnalyzedJoin.h | 1 + dbms/src/Interpreters/ExpressionAnalyzer.cpp | 224 ++---------------- dbms/src/Interpreters/ExpressionAnalyzer.h | 32 +-- .../Interpreters/InterpreterSelectQuery.cpp | 21 +- .../src/Interpreters/MutationsInterpreter.cpp | 3 +- .../RequiredSourceColumnsVisitor.cpp | 12 +- .../RequiredSourceColumnsVisitor.h | 12 +- dbms/src/Interpreters/SyntaxAnalyzer.cpp | 174 +++++++++++++- dbms/src/Interpreters/SyntaxAnalyzer.h | 13 +- .../Interpreters/evaluateMissingDefaults.cpp | 2 +- dbms/src/Storages/MergeTree/MergeTreeData.cpp | 6 +- .../transformQueryForExternalDatabase.cpp | 4 +- 13 files changed, 248 insertions(+), 258 deletions(-) diff --git a/dbms/src/Interpreters/AnalyzedJoin.cpp b/dbms/src/Interpreters/AnalyzedJoin.cpp index 8357ab80aba..44e145dba8a 100644 --- a/dbms/src/Interpreters/AnalyzedJoin.cpp +++ b/dbms/src/Interpreters/AnalyzedJoin.cpp @@ -80,7 +80,7 @@ ExpressionActionsPtr AnalyzedJoin::createJoinedBlockActions( ASTPtr query = expression_list; auto syntax_result = SyntaxAnalyzer(context).analyze(query, columns_from_joined_table, required_columns); - ExpressionAnalyzer analyzer(query, syntax_result, context, {}, required_columns_set); + ExpressionAnalyzer analyzer(query, syntax_result, context, required_columns_set); return analyzer.getActions(true, false); } diff --git a/dbms/src/Interpreters/AnalyzedJoin.h b/dbms/src/Interpreters/AnalyzedJoin.h index d820cb0da7b..db76acd5ac3 100644 --- a/dbms/src/Interpreters/AnalyzedJoin.h +++ b/dbms/src/Interpreters/AnalyzedJoin.h @@ -33,6 +33,7 @@ struct AnalyzedJoin private: friend class SyntaxAnalyzer; + friend struct SyntaxAnalyzerResult; friend class ExpressionAnalyzer; Names key_names_left; diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.cpp b/dbms/src/Interpreters/ExpressionAnalyzer.cpp index fdc8226a42a..9405f7cedeb 100644 --- a/dbms/src/Interpreters/ExpressionAnalyzer.cpp +++ b/dbms/src/Interpreters/ExpressionAnalyzer.cpp @@ -58,7 +58,6 @@ #include #include #include -#include namespace DB { @@ -77,28 +76,15 @@ ExpressionAnalyzer::ExpressionAnalyzer( const ASTPtr & query_, const SyntaxAnalyzerResultPtr & syntax_analyzer_result_, const Context & context_, - const NamesAndTypesList & additional_source_columns, const NameSet & required_result_columns_, size_t subquery_depth_, bool do_global_, const SubqueriesForSets & subqueries_for_sets_) - : ExpressionAnalyzerData(syntax_analyzer_result_->source_columns, required_result_columns_, subqueries_for_sets_) + : ExpressionAnalyzerData(required_result_columns_, subqueries_for_sets_) , query(query_), context(context_), settings(context.getSettings()) , subquery_depth(subquery_depth_), do_global(do_global_) , syntax(syntax_analyzer_result_) { - storage = syntax->storage; - rewrite_subqueries = syntax->rewrite_subqueries; - - if 
(!additional_source_columns.empty()) - { - source_columns.insert(source_columns.end(), additional_source_columns.begin(), additional_source_columns.end()); - removeDuplicateColumns(source_columns); - } - - /// Delete the unnecessary from `source_columns` list. Form `columns_added_by_join`. - collectUsedColumns(); - /// external_tables, subqueries_for_sets for global subqueries. /// Replaces global subqueries with the generated names of temporary tables that will be sent to remote servers. initGlobalSubqueriesAndExternalTables(); @@ -115,7 +101,7 @@ ExpressionAnalyzer::ExpressionAnalyzer( bool ExpressionAnalyzer::isRemoteStorage() const { - return storage && storage->isRemote(); + return storage() && storage()->isRemote(); } @@ -133,7 +119,7 @@ void ExpressionAnalyzer::analyzeAggregation() if (select_query && (select_query->groupBy() || select_query->having())) has_aggregation = true; - ExpressionActionsPtr temp_actions = std::make_shared(source_columns, context); + ExpressionActionsPtr temp_actions = std::make_shared(sourceColumns(), context); if (select_query) { @@ -256,7 +242,7 @@ void ExpressionAnalyzer::makeSetsForIndex() { const auto * select_query = query->as(); - if (storage && select_query && storage->supportsIndexForIn()) + if (storage() && select_query && storage()->supportsIndexForIn()) { if (select_query->where()) makeSetsForIndexImpl(select_query->where()); @@ -312,7 +298,7 @@ void ExpressionAnalyzer::makeSetsForIndexImpl(const ASTPtr & node) { const IAST & args = *func->arguments; - if (storage && storage->mayBenefitFromIndexForIn(args.children.at(0), context)) + if (storage() && storage()->mayBenefitFromIndexForIn(args.children.at(0), context)) { const ASTPtr & arg = args.children.at(1); if (arg->as() || arg->as()) @@ -322,9 +308,9 @@ void ExpressionAnalyzer::makeSetsForIndexImpl(const ASTPtr & node) } else { - NamesAndTypesList temp_columns = source_columns; + NamesAndTypesList temp_columns = sourceColumns(); temp_columns.insert(temp_columns.end(), array_join_columns.begin(), array_join_columns.end()); - for (const auto & joined_column : columns_added_by_join) + for (const auto & joined_column : columnsAddedByJoin()) temp_columns.push_back(joined_column); ExpressionActionsPtr temp_actions = std::make_shared(temp_columns, context); getRootActions(func->arguments->children.at(0), true, temp_actions); @@ -343,7 +329,7 @@ void ExpressionAnalyzer::getRootActions(const ASTPtr & ast, bool no_subqueries, { LogAST log; ActionsVisitor actions_visitor(context, settings.size_limits_for_set, subquery_depth, - source_columns, actions, prepared_sets, subqueries_for_sets, + sourceColumns(), actions, prepared_sets, subqueries_for_sets, no_subqueries, only_consts, !isRemoteStorage(), log.stream()); actions_visitor.visit(ast); actions = actions_visitor.popActionsLevel(); @@ -356,7 +342,7 @@ void ExpressionAnalyzer::getActionsFromJoinKeys(const ASTTableJoin & table_join, LogAST log; ActionsVisitor actions_visitor(context, settings.size_limits_for_set, subquery_depth, - source_columns, actions, prepared_sets, subqueries_for_sets, + sourceColumns(), actions, prepared_sets, subqueries_for_sets, no_subqueries, only_consts, !isRemoteStorage(), log.stream()); if (table_join.using_expression_list) @@ -494,7 +480,7 @@ bool ExpressionAnalyzer::appendArrayJoin(ExpressionActionsChain & chain, bool on if (!array_join_expression_list) return false; - initChain(chain, source_columns); + initChain(chain, sourceColumns()); ExpressionActionsChain::Step & step = chain.steps.back(); 
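The mechanical change repeated throughout this file: data that `ExpressionAnalyzer` used to copy into its own members now stays in `SyntaxAnalyzerResult`, reached through thin read-only accessors. A reduced, compilable sketch of that ownership split, with stand-in types (the real `NamesAndTypesList` and the exact member set are simplified here):

```cpp
#include <list>
#include <memory>
#include <string>
#include <utility>

// Stand-in for DB::NamesAndTypesList, just for this sketch.
using NamesAndTypesList = std::list<std::string>;

// Analysis outputs live in the syntax-analysis result...
struct SyntaxAnalyzerResultSketch
{
    NamesAndTypesList required_source_columns;  // pruned by collectUsedColumns
    NamesAndTypesList columns_added_by_join;    // filled while collecting JOIN columns
};

// ...and the expression analyzer only holds a shared pointer to them.
class ExpressionAnalyzerSketch
{
public:
    explicit ExpressionAnalyzerSketch(std::shared_ptr<const SyntaxAnalyzerResultSketch> syntax_)
        : syntax(std::move(syntax_)) {}

    // Accessors replace the old copied members of ExpressionAnalyzerData.
    const NamesAndTypesList & sourceColumns() const { return syntax->required_source_columns; }
    const NamesAndTypesList & columnsAddedByJoin() const { return syntax->columns_added_by_join; }

private:
    std::shared_ptr<const SyntaxAnalyzerResultSketch> syntax;
};
```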
getRootActions(array_join_expression_list, only_types, step.actions); @@ -507,12 +493,12 @@ bool ExpressionAnalyzer::appendArrayJoin(ExpressionActionsChain & chain, bool on void ExpressionAnalyzer::addJoinAction(ExpressionActionsPtr & actions, bool only_types) const { if (only_types) - actions->add(ExpressionAction::ordinaryJoin(nullptr, analyzedJoin().key_names_left, columns_added_by_join)); + actions->add(ExpressionAction::ordinaryJoin(nullptr, analyzedJoin().key_names_left, columnsAddedByJoin())); else for (auto & subquery_for_set : subqueries_for_sets) if (subquery_for_set.second.join) actions->add(ExpressionAction::ordinaryJoin(subquery_for_set.second.join, analyzedJoin().key_names_left, - columns_added_by_join)); + columnsAddedByJoin())); } static void appendRequiredColumns( @@ -536,7 +522,7 @@ bool ExpressionAnalyzer::appendJoin(ExpressionActionsChain & chain, bool only_ty if (!select_query->join()) return false; - initChain(chain, source_columns); + initChain(chain, sourceColumns()); ExpressionActionsChain::Step & step = chain.steps.back(); const auto & join_element = select_query->join()->as(); @@ -588,7 +574,7 @@ bool ExpressionAnalyzer::appendJoin(ExpressionActionsChain & chain, bool only_ty auto & analyzed_join = analyzedJoin(); /// Actions which need to be calculated on joined block. ExpressionActionsPtr joined_block_actions = - analyzed_join.createJoinedBlockActions(columns_added_by_join, select_query, context); + analyzed_join.createJoinedBlockActions(columnsAddedByJoin(), select_query, context); /** For GLOBAL JOINs (in the case, for example, of the push method for executing GLOBAL subqueries), the following occurs * - in the addExternalStorage function, the JOIN (SELECT ...) subquery is replaced with JOIN _data1, @@ -610,7 +596,7 @@ bool ExpressionAnalyzer::appendJoin(ExpressionActionsChain & chain, bool only_ty NameSet required_columns(action_columns.begin(), action_columns.end()); appendRequiredColumns( - required_columns, joined_block_actions->getSampleBlock(), analyzed_join.key_names_right, columns_added_by_join); + required_columns, joined_block_actions->getSampleBlock(), analyzed_join.key_names_right, columnsAddedByJoin()); auto original_map = analyzed_join.getOriginalColumnsMap(required_columns); Names original_columns; @@ -647,7 +633,7 @@ bool ExpressionAnalyzer::appendPrewhere( if (!select_query->prewhere()) return false; - initChain(chain, source_columns); + initChain(chain, sourceColumns()); auto & step = chain.getLastStep(); getRootActions(select_query->prewhere(), only_types, step.actions); String prewhere_column_name = select_query->prewhere()->getColumnName(); @@ -656,7 +642,7 @@ bool ExpressionAnalyzer::appendPrewhere( { /// Remove unused source_columns from prewhere actions. 
- auto tmp_actions = std::make_shared(source_columns, context); + auto tmp_actions = std::make_shared(sourceColumns(), context); getRootActions(select_query->prewhere(), only_types, tmp_actions); tmp_actions->finalize({prewhere_column_name}); auto required_columns = tmp_actions->getRequiredColumns(); @@ -676,7 +662,7 @@ bool ExpressionAnalyzer::appendPrewhere( auto names = step.actions->getSampleBlock().getNames(); NameSet name_set(names.begin(), names.end()); - for (const auto & column : source_columns) + for (const auto & column : sourceColumns()) if (required_source_columns.count(column.name) == 0) name_set.erase(column.name); @@ -697,7 +683,7 @@ bool ExpressionAnalyzer::appendPrewhere( NameSet prewhere_input_names(required_columns.begin(), required_columns.end()); NameSet unused_source_columns; - for (const auto & column : source_columns) + for (const auto & column : sourceColumns()) { if (prewhere_input_names.count(column.name) == 0) { @@ -722,7 +708,7 @@ bool ExpressionAnalyzer::appendWhere(ExpressionActionsChain & chain, bool only_t if (!select_query->where()) return false; - initChain(chain, source_columns); + initChain(chain, sourceColumns()); ExpressionActionsChain::Step & step = chain.steps.back(); step.required_output.push_back(select_query->where()->getColumnName()); @@ -742,7 +728,7 @@ bool ExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain, bool only if (!select_query->groupBy()) return false; - initChain(chain, source_columns); + initChain(chain, sourceColumns()); ExpressionActionsChain::Step & step = chain.steps.back(); ASTs asts = select_query->groupBy()->children; @@ -761,7 +747,7 @@ void ExpressionAnalyzer::appendAggregateFunctionsArguments(ExpressionActionsChai assertAggregation(); - initChain(chain, source_columns); + initChain(chain, sourceColumns()); ExpressionActionsChain::Step & step = chain.steps.back(); for (size_t i = 0; i < aggregate_descriptions.size(); ++i) @@ -899,7 +885,7 @@ void ExpressionAnalyzer::appendProjectResult(ExpressionActionsChain & chain) con void ExpressionAnalyzer::appendExpression(ExpressionActionsChain & chain, const ASTPtr & expr, bool only_types) { - initChain(chain, source_columns); + initChain(chain, sourceColumns()); ExpressionActionsChain::Step & step = chain.steps.back(); getRootActions(expr, only_types, step.actions); step.required_output.push_back(expr->getColumnName()); @@ -921,7 +907,7 @@ void ExpressionAnalyzer::getActionsBeforeAggregation(const ASTPtr & ast, Express ExpressionActionsPtr ExpressionAnalyzer::getActions(bool add_aliases, bool project_result) { - ExpressionActionsPtr actions = std::make_shared(source_columns, context); + ExpressionActionsPtr actions = std::make_shared(sourceColumns(), context); NamesWithAliases result_columns; Names result_names; @@ -956,7 +942,7 @@ ExpressionActionsPtr ExpressionAnalyzer::getActions(bool add_aliases, bool proje if (!(add_aliases && project_result)) { /// We will not delete the original columns. - for (const auto & column_name_type : source_columns) + for (const auto & column_name_type : sourceColumns()) result_names.push_back(column_name_type.name); } @@ -982,164 +968,4 @@ void ExpressionAnalyzer::getAggregateInfo(Names & key_names, AggregateDescriptio aggregates = aggregate_descriptions; } -void ExpressionAnalyzer::collectUsedColumns() -{ - /** Calculate which columns are required to execute the expression. - * Then, delete all other columns from the list of available columns. 
- * After execution, columns will only contain the list of columns needed to read from the table. - */ - - RequiredSourceColumnsVisitor::Data columns_context; - RequiredSourceColumnsVisitor(columns_context).visit(query); - - NameSet source_column_names; - for (const auto & column : source_columns) - source_column_names.insert(column.name); - - NameSet required = columns_context.requiredColumns(); - - if (columns_context.has_table_join) - { - const AnalyzedJoin & analyzed_join = analyzedJoin(); - NameSet avaliable_columns; - for (const auto & name : source_columns) - avaliable_columns.insert(name.name); - - /// Add columns obtained by JOIN (if needed). - columns_added_by_join.clear(); - for (const auto & joined_column : analyzed_join.available_joined_columns) - { - auto & name = joined_column.name; - if (avaliable_columns.count(name)) - continue; - - if (required.count(name)) - { - /// Optimisation: do not add columns needed only in JOIN ON section. - if (columns_context.nameInclusion(name) > analyzed_join.rightKeyInclusion(name)) - columns_added_by_join.push_back(joined_column); - required.erase(name); - } - } - } - - NameSet array_join_sources; - if (columns_context.has_array_join) - { - /// Insert the columns required for the ARRAY JOIN calculation into the required columns list. - for (const auto & result_source : syntax->array_join_result_to_source) - array_join_sources.insert(result_source.second); - - for (const auto & column_name_type : source_columns) - if (array_join_sources.count(column_name_type.name)) - required.insert(column_name_type.name); - } - - const auto * select_query = query->as(); - - /// You need to read at least one column to find the number of rows. - if (select_query && required.empty()) - { - /// We will find a column with minimum . - /// Because it is the column that is cheapest to read. - struct ColumnSizeTuple - { - size_t compressed_size; - size_t type_size; - size_t uncompressed_size; - String name; - bool operator<(const ColumnSizeTuple & that) const - { - return std::tie(compressed_size, type_size, uncompressed_size) - < std::tie(that.compressed_size, that.type_size, that.uncompressed_size); - } - }; - std::vector columns; - if (storage) - { - auto column_sizes = storage->getColumnSizes(); - for (auto & source_column : source_columns) - { - auto c = column_sizes.find(source_column.name); - if (c == column_sizes.end()) - continue; - size_t type_size = source_column.type->haveMaximumSizeOfValue() ? source_column.type->getMaximumSizeOfValueInMemory() : 100; - columns.emplace_back(ColumnSizeTuple{c->second.data_compressed, type_size, c->second.data_uncompressed, source_column.name}); - } - } - if (columns.size()) - required.insert(std::min_element(columns.begin(), columns.end())->name); - else - /// If we have no information about columns sizes, choose a column of minimum size of its data type. - required.insert(ExpressionActions::getSmallestColumn(source_columns)); - } - - NameSet unknown_required_source_columns = required; - - for (NamesAndTypesList::iterator it = source_columns.begin(); it != source_columns.end();) - { - const String & column_name = it->name; - unknown_required_source_columns.erase(column_name); - - if (!required.count(column_name)) - source_columns.erase(it++); - else - ++it; - } - - /// If there are virtual columns among the unknown columns. Remove them from the list of unknown and add - /// in columns list, so that when further processing they are also considered. 
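The bulk of what this patch moves out of `ExpressionAnalyzer` is `collectUsedColumns`, and the removed block above contains a heuristic worth restating: when a query requires no columns at all (for example `SELECT count()`), one column must still be read to know the row count, so the cheapest one is chosen, by compressed size first, then by value width, then by uncompressed size. A self-contained sketch of just that comparator (names follow the removed code; `cheapestColumn` is a hypothetical wrapper around the `std::min_element` call the diff inlines):

```cpp
#include <algorithm>
#include <cstddef>
#include <string>
#include <tuple>
#include <vector>

struct ColumnSizeTuple
{
    size_t compressed_size;
    size_t type_size;          // fixed-width types beat variable-width strings
    size_t uncompressed_size;
    std::string name;

    bool operator<(const ColumnSizeTuple & that) const
    {
        // Lexicographic comparison: compressed size dominates.
        return std::tie(compressed_size, type_size, uncompressed_size)
             < std::tie(that.compressed_size, that.type_size, that.uncompressed_size);
    }
};

// Assumes a non-empty vector, mirroring the guarded call in the diff.
std::string cheapestColumn(const std::vector<ColumnSizeTuple> & columns)
{
    return std::min_element(columns.begin(), columns.end())->name;
}
```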
- if (storage) - { - for (auto it = unknown_required_source_columns.begin(); it != unknown_required_source_columns.end();) - { - if (storage->hasColumn(*it)) - { - source_columns.push_back(storage->getColumn(*it)); - unknown_required_source_columns.erase(it++); - } - else - ++it; - } - } - - if (!unknown_required_source_columns.empty()) - { - std::stringstream ss; - ss << "Missing columns:"; - for (const auto & name : unknown_required_source_columns) - ss << " '" << name << "'"; - ss << " while processing query: '" << query << "'"; - - ss << ", required columns:"; - for (const auto & name : columns_context.requiredColumns()) - ss << " '" << name << "'"; - - if (!source_column_names.empty()) - { - ss << ", source columns:"; - for (const auto & name : source_column_names) - ss << " '" << name << "'"; - } - else - ss << ", no source columns"; - - if (columns_context.has_table_join) - { - ss << ", joined columns:"; - for (const auto & column : analyzedJoin().available_joined_columns) - ss << " '" << column.name << "'"; - } - - if (!array_join_sources.empty()) - { - ss << ", arrayJoin columns:"; - for (const auto & name : array_join_sources) - ss << " '" << name << "'"; - } - - throw Exception(ss.str(), ErrorCodes::UNKNOWN_IDENTIFIER); - } -} - } diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.h b/dbms/src/Interpreters/ExpressionAnalyzer.h index 644d10da1be..3bb8a2bab07 100644 --- a/dbms/src/Interpreters/ExpressionAnalyzer.h +++ b/dbms/src/Interpreters/ExpressionAnalyzer.h @@ -29,13 +29,8 @@ struct SyntaxAnalyzerResult; using SyntaxAnalyzerResultPtr = std::shared_ptr; /// ExpressionAnalyzer sources, intermediates and results. It splits data and logic, allows to test them separately. -/// If you are not writing a test you probably don't need it. Use ExpressionAnalyzer itself. struct ExpressionAnalyzerData { - /// Original columns. - /// First, all available columns of the table are placed here. Then (when analyzing the query), unused columns are deleted. - NamesAndTypesList source_columns; - /// If non-empty, ignore all expressions in not from this list. NameSet required_result_columns; @@ -58,15 +53,10 @@ struct ExpressionAnalyzerData /// Predicate optimizer overrides the sub queries bool rewrite_subqueries = false; - /// Columns will be added to block by join. - NamesAndTypesList columns_added_by_join; /// Subset of analyzed_join.available_joined_columns - protected: - ExpressionAnalyzerData(const NamesAndTypesList & source_columns_, - const NameSet & required_result_columns_, + ExpressionAnalyzerData(const NameSet & required_result_columns_, const SubqueriesForSets & subqueries_for_sets_) - : source_columns(source_columns_), - required_result_columns(required_result_columns_), + : required_result_columns(required_result_columns_), subqueries_for_sets(subqueries_for_sets_) {} }; @@ -102,7 +92,6 @@ public: const ASTPtr & query_, const SyntaxAnalyzerResultPtr & syntax_analyzer_result_, const Context & context_, - const NamesAndTypesList & additional_source_columns = {}, const NameSet & required_result_columns_ = {}, size_t subquery_depth_ = 0, bool do_global_ = false, @@ -114,11 +103,6 @@ public: /// Get a list of aggregation keys and descriptions of aggregate functions if the query contains GROUP BY. void getAggregateInfo(Names & key_names, AggregateDescriptions & aggregates) const; - /** Get a set of columns that are enough to read from the table to evaluate the expression. - * Columns added from another table by JOIN are not counted. 
- */ - Names getRequiredSourceColumns() const { return source_columns.getNames(); } - /** These methods allow you to build a chain of transformations over a block, that receives values in the desired sections of the query. * * Example usage: @@ -182,25 +166,21 @@ public: /// Create Set-s that we can from IN section to use the index on them. void makeSetsForIndex(); - bool isRewriteSubqueriesPredicate() { return rewrite_subqueries; } - bool hasGlobalSubqueries() { return has_global_subqueries; } private: ASTPtr query; const Context & context; const ExtractedSettings settings; - StoragePtr storage; /// The main table in FROM clause, if exists. size_t subquery_depth; bool do_global; /// Do I need to prepare for execution global subqueries when analyzing the query. SyntaxAnalyzerResultPtr syntax; - const AnalyzedJoin & analyzedJoin() const { return syntax->analyzed_join; } - /** Remove all unnecessary columns from the list of all available columns of the table (`columns`). - * At the same time, form a set of columns added by JOIN (`columns_added_by_join`). - */ - void collectUsedColumns(); + const StoragePtr & storage() const { return syntax->storage; } /// The main table in FROM clause, if exists. + const AnalyzedJoin & analyzedJoin() const { return syntax->analyzed_join; } + const NamesAndTypesList & sourceColumns() const { return syntax->required_source_columns; } + const NamesAndTypesList & columnsAddedByJoin() const { return syntax->columns_added_by_join; } /// Find global subqueries in the GLOBAL IN/JOIN sections. Fills in external_tables. void initGlobalSubqueriesAndExternalTables(); diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.cpp b/dbms/src/Interpreters/InterpreterSelectQuery.cpp index 9682d0e29e4..0335fbd2a81 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.cpp +++ b/dbms/src/Interpreters/InterpreterSelectQuery.cpp @@ -292,9 +292,9 @@ InterpreterSelectQuery::InterpreterSelectQuery( table_lock = storage->lockStructureForShare(false, context.getCurrentQueryId()); syntax_analyzer_result = SyntaxAnalyzer(context, options).analyze( - query_ptr, source_header.getNamesAndTypesList(), required_result_column_names, storage); + query_ptr, source_header.getNamesAndTypesList(), required_result_column_names, storage, NamesAndTypesList()); query_analyzer = std::make_unique( - query_ptr, syntax_analyzer_result, context, NamesAndTypesList(), + query_ptr, syntax_analyzer_result, context, NameSet(required_result_column_names.begin(), required_result_column_names.end()), options.subquery_depth, !options.only_analyze); @@ -317,7 +317,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( if (!options.only_analyze || options.modify_inplace) { - if (query_analyzer->isRewriteSubqueriesPredicate()) + if (syntax_analyzer_result->rewrite_subqueries) { /// remake interpreter_subquery when PredicateOptimizer rewrites subqueries and main table is subquery if (is_subquery) @@ -336,7 +336,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( interpreter_subquery->ignoreWithTotals(); } - required_columns = query_analyzer->getRequiredSourceColumns(); + required_columns = syntax_analyzer_result->requiredSourceColumns(); if (storage) source_header = storage->getSampleBlockForColumns(required_columns); @@ -675,7 +675,16 @@ static SortingInfoPtr optimizeReadInOrder(const MergeTreeData & merge_tree, cons size_t prefix_size = std::min(order_descr.size(), sorting_key_columns.size()); auto order_by_expr = query.orderBy(); - auto syntax_result = SyntaxAnalyzer(context).analyze(order_by_expr, 
merge_tree.getColumns().getAllPhysical()); + SyntaxAnalyzerResultPtr syntax_result; + try + { + syntax_result = SyntaxAnalyzer(context).analyze(order_by_expr, merge_tree.getColumns().getAllPhysical()); + } + catch (const Exception &) + { + return {}; + } + for (size_t i = 0; i < prefix_size; ++i) { /// Read in pk order in case of exact match with order key element @@ -789,7 +798,7 @@ void InterpreterSelectQuery::executeImpl(TPipeline & pipeline, const BlockInputS /// Try transferring some condition from WHERE to PREWHERE if enabled and viable if (settings.optimize_move_to_prewhere && query.where() && !query.prewhere() && !query.final()) - MergeTreeWhereOptimizer{current_info, context, merge_tree, query_analyzer->getRequiredSourceColumns(), log}; + MergeTreeWhereOptimizer{current_info, context, merge_tree, syntax_analyzer_result->requiredSourceColumns(), log}; }; if (const MergeTreeData * merge_tree_data = dynamic_cast(storage.get())) diff --git a/dbms/src/Interpreters/MutationsInterpreter.cpp b/dbms/src/Interpreters/MutationsInterpreter.cpp index 69339f66712..b5a60666ae0 100644 --- a/dbms/src/Interpreters/MutationsInterpreter.cpp +++ b/dbms/src/Interpreters/MutationsInterpreter.cpp @@ -186,8 +186,7 @@ void MutationsInterpreter::prepare(bool dry_run) { auto query = column.default_desc.expression->clone(); auto syntax_result = SyntaxAnalyzer(context).analyze(query, all_columns); - ExpressionAnalyzer analyzer(query, syntax_result, context); - for (const String & dependency : analyzer.getRequiredSourceColumns()) + for (const String & dependency : syntax_result->requiredSourceColumns()) { if (updated_columns.count(dependency)) column_to_affected_materialized[dependency].push_back(column.name); diff --git a/dbms/src/Interpreters/RequiredSourceColumnsVisitor.cpp b/dbms/src/Interpreters/RequiredSourceColumnsVisitor.cpp index a6e581cb6a8..b5f9c83db50 100644 --- a/dbms/src/Interpreters/RequiredSourceColumnsVisitor.cpp +++ b/dbms/src/Interpreters/RequiredSourceColumnsVisitor.cpp @@ -40,7 +40,7 @@ static std::vector extractNamesFromLambda(const ASTFunction & node) return names; } -bool RequiredSourceColumnsMatcher::needChildVisit(ASTPtr & node, const ASTPtr & child) +bool RequiredSourceColumnsMatcher::needChildVisit(const ASTPtr & node, const ASTPtr & child) { if (child->as()) return false; @@ -60,7 +60,7 @@ bool RequiredSourceColumnsMatcher::needChildVisit(ASTPtr & node, const ASTPtr & return true; } -void RequiredSourceColumnsMatcher::visit(ASTPtr & ast, Data & data) +void RequiredSourceColumnsMatcher::visit(const ASTPtr & ast, Data & data) { /// results are columns @@ -111,7 +111,7 @@ void RequiredSourceColumnsMatcher::visit(ASTPtr & ast, Data & data) } } -void RequiredSourceColumnsMatcher::visit(ASTSelectQuery & select, const ASTPtr &, Data & data) +void RequiredSourceColumnsMatcher::visit(const ASTSelectQuery & select, const ASTPtr &, Data & data) { /// special case for top-level SELECT items: they are publics for (auto & node : select.select()->children) @@ -128,7 +128,7 @@ void RequiredSourceColumnsMatcher::visit(ASTSelectQuery & select, const ASTPtr & Visitor(data).visit(node); /// revisit select_expression_list (with children) when all the aliases are set - Visitor(data).visit(select.refSelect()); + Visitor(data).visit(select.select()); } void RequiredSourceColumnsMatcher::visit(const ASTIdentifier & node, const ASTPtr &, Data & data) @@ -158,7 +158,7 @@ void RequiredSourceColumnsMatcher::visit(const ASTFunction & node, const ASTPtr } } -void 
RequiredSourceColumnsMatcher::visit(ASTTablesInSelectQueryElement & node, const ASTPtr &, Data & data) +void RequiredSourceColumnsMatcher::visit(const ASTTablesInSelectQueryElement & node, const ASTPtr &, Data & data) { ASTTableExpression * expr = nullptr; ASTTableJoin * join = nullptr; @@ -177,7 +177,7 @@ void RequiredSourceColumnsMatcher::visit(ASTTablesInSelectQueryElement & node, c } /// ASTIdentifiers here are tables. Do not visit them as generic ones. -void RequiredSourceColumnsMatcher::visit(ASTTableExpression & node, const ASTPtr &, Data & data) +void RequiredSourceColumnsMatcher::visit(const ASTTableExpression & node, const ASTPtr &, Data & data) { if (node.database_and_table_name) data.addTableAliasIfAny(*node.database_and_table_name); diff --git a/dbms/src/Interpreters/RequiredSourceColumnsVisitor.h b/dbms/src/Interpreters/RequiredSourceColumnsVisitor.h index ed3ec75ddc9..b42a95f29ee 100644 --- a/dbms/src/Interpreters/RequiredSourceColumnsVisitor.h +++ b/dbms/src/Interpreters/RequiredSourceColumnsVisitor.h @@ -21,19 +21,19 @@ struct ASTTableExpression; class RequiredSourceColumnsMatcher { public: - using Visitor = InDepthNodeVisitor; + using Visitor = ConstInDepthNodeVisitor; using Data = ColumnNamesContext; - static bool needChildVisit(ASTPtr & node, const ASTPtr & child); - static void visit(ASTPtr & ast, Data & data); + static bool needChildVisit(const ASTPtr & node, const ASTPtr & child); + static void visit(const ASTPtr & ast, Data & data); private: static void visit(const ASTIdentifier & node, const ASTPtr &, Data & data); static void visit(const ASTFunction & node, const ASTPtr &, Data & data); - static void visit(ASTTablesInSelectQueryElement & node, const ASTPtr &, Data & data); - static void visit(ASTTableExpression & node, const ASTPtr &, Data & data); + static void visit(const ASTTablesInSelectQueryElement & node, const ASTPtr &, Data & data); + static void visit(const ASTTableExpression & node, const ASTPtr &, Data & data); static void visit(const ASTArrayJoin & node, const ASTPtr &, Data & data); - static void visit(ASTSelectQuery & select, const ASTPtr &, Data & data); + static void visit(const ASTSelectQuery & select, const ASTPtr &, Data & data); }; /// Extracts all the information about columns and tables from ASTSelectQuery block into ColumnNamesContext object. diff --git a/dbms/src/Interpreters/SyntaxAnalyzer.cpp b/dbms/src/Interpreters/SyntaxAnalyzer.cpp index 0cb833e9bc7..282b19991b1 100644 --- a/dbms/src/Interpreters/SyntaxAnalyzer.cpp +++ b/dbms/src/Interpreters/SyntaxAnalyzer.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -44,6 +45,7 @@ namespace ErrorCodes extern const int INVALID_JOIN_ON_EXPRESSION; extern const int EMPTY_LIST_OF_COLUMNS_QUERIED; extern const int NOT_IMPLEMENTED; + extern const int UNKNOWN_IDENTIFIER; } NameSet removeDuplicateColumns(NamesAndTypesList & columns) @@ -558,12 +560,181 @@ void checkJoin(const ASTTablesInSelectQueryElement * join) } +/// Calculate which columns are required to execute the expression. +/// Then, delete all other columns from the list of available columns. +/// After execution, columns will only contain the list of columns needed to read from the table. 
+void SyntaxAnalyzerResult::collectUsedColumns(const ASTPtr & query, const NamesAndTypesList & additional_source_columns)
+{
+    /// We calculate required_source_columns with source_columns modifications and swap them on exit
+    required_source_columns = source_columns;
+
+    if (!additional_source_columns.empty())
+    {
+        source_columns.insert(source_columns.end(), additional_source_columns.begin(), additional_source_columns.end());
+        removeDuplicateColumns(source_columns);
+    }
+
+    RequiredSourceColumnsVisitor::Data columns_context;
+    RequiredSourceColumnsVisitor(columns_context).visit(query);
+
+    NameSet source_column_names;
+    for (const auto & column : source_columns)
+        source_column_names.insert(column.name);
+
+    NameSet required = columns_context.requiredColumns();
+
+    if (columns_context.has_table_join)
+    {
+        NameSet avaliable_columns;
+        for (const auto & name : source_columns)
+            avaliable_columns.insert(name.name);
+
+        /// Add columns obtained by JOIN (if needed).
+        columns_added_by_join.clear();
+        for (const auto & joined_column : analyzed_join.available_joined_columns)
+        {
+            auto & name = joined_column.name;
+            if (avaliable_columns.count(name))
+                continue;
+
+            if (required.count(name))
+            {
+                /// Optimisation: do not add columns needed only in JOIN ON section.
+                if (columns_context.nameInclusion(name) > analyzed_join.rightKeyInclusion(name))
+                    columns_added_by_join.push_back(joined_column);
+                required.erase(name);
+            }
+        }
+    }
+
+    NameSet array_join_sources;
+    if (columns_context.has_array_join)
+    {
+        /// Insert the columns required for the ARRAY JOIN calculation into the required columns list.
+        for (const auto & result_source : array_join_result_to_source)
+            array_join_sources.insert(result_source.second);
+
+        for (const auto & column_name_type : source_columns)
+            if (array_join_sources.count(column_name_type.name))
+                required.insert(column_name_type.name);
+    }
+
+    const auto * select_query = query->as<ASTSelectQuery>();
+
+    /// You need to read at least one column to find the number of rows.
+    if (select_query && required.empty())
+    {
+        /// We will find a column with minimum <compressed_size, type_size, uncompressed_size>.
+        /// Because it is the column that is cheapest to read.
+        struct ColumnSizeTuple
+        {
+            size_t compressed_size;
+            size_t type_size;
+            size_t uncompressed_size;
+            String name;
+            bool operator<(const ColumnSizeTuple & that) const
+            {
+                return std::tie(compressed_size, type_size, uncompressed_size)
+                    < std::tie(that.compressed_size, that.type_size, that.uncompressed_size);
+            }
+        };
+        std::vector<ColumnSizeTuple> columns;
+        if (storage)
+        {
+            auto column_sizes = storage->getColumnSizes();
+            for (auto & source_column : source_columns)
+            {
+                auto c = column_sizes.find(source_column.name);
+                if (c == column_sizes.end())
+                    continue;
+                size_t type_size = source_column.type->haveMaximumSizeOfValue() ? source_column.type->getMaximumSizeOfValueInMemory() : 100;
+                columns.emplace_back(ColumnSizeTuple{c->second.data_compressed, type_size, c->second.data_uncompressed, source_column.name});
+            }
+        }
+        if (columns.size())
+            required.insert(std::min_element(columns.begin(), columns.end())->name);
+        else
+            /// If we have no information about columns sizes, choose a column of minimum size of its data type.
+            required.insert(ExpressionActions::getSmallestColumn(source_columns));
+    }
+
+    NameSet unknown_required_source_columns = required;
+
+    for (NamesAndTypesList::iterator it = source_columns.begin(); it != source_columns.end();)
+    {
+        const String & column_name = it->name;
+        unknown_required_source_columns.erase(column_name);
+
+        if (!required.count(column_name))
+            source_columns.erase(it++);
+        else
+            ++it;
+    }
+
+    /// If there are virtual columns among the unknown columns, remove them from the list of unknown columns
+    /// and add them to the columns list, so that they are also considered during further processing.
+    if (storage)
+    {
+        for (auto it = unknown_required_source_columns.begin(); it != unknown_required_source_columns.end();)
+        {
+            if (storage->hasColumn(*it))
+            {
+                source_columns.push_back(storage->getColumn(*it));
+                unknown_required_source_columns.erase(it++);
+            }
+            else
+                ++it;
+        }
+    }
+
+    if (!unknown_required_source_columns.empty())
+    {
+        std::stringstream ss;
+        ss << "Missing columns:";
+        for (const auto & name : unknown_required_source_columns)
+            ss << " '" << name << "'";
+        ss << " while processing query: '" << queryToString(query) << "'";
+
+        ss << ", required columns:";
+        for (const auto & name : columns_context.requiredColumns())
+            ss << " '" << name << "'";
+
+        if (!source_column_names.empty())
+        {
+            ss << ", source columns:";
+            for (const auto & name : source_column_names)
+                ss << " '" << name << "'";
+        }
+        else
+            ss << ", no source columns";
+
+        if (columns_context.has_table_join)
+        {
+            ss << ", joined columns:";
+            for (const auto & column : analyzed_join.available_joined_columns)
+                ss << " '" << column.name << "'";
+        }
+
+        if (!array_join_sources.empty())
+        {
+            ss << ", arrayJoin columns:";
+            for (const auto & name : array_join_sources)
+                ss << " '" << name << "'";
+        }
+
+        throw Exception(ss.str(), ErrorCodes::UNKNOWN_IDENTIFIER);
+    }
+
+    required_source_columns.swap(source_columns);
+}
+
 SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyze(
     ASTPtr & query,
     const NamesAndTypesList & source_columns_,
     const Names & required_result_columns,
-    StoragePtr storage) const
+    StoragePtr storage,
+    const NamesAndTypesList & additional_source_columns) const
 {
     auto * select_query = query->as<ASTSelectQuery>();
     if (!storage && select_query)
@@ -669,6 +840,7 @@ SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyze(
             collectJoinedColumns(result.analyzed_join, *select_query, source_columns_set, result.aliases, settings.join_use_nulls);
     }
 
+    result.collectUsedColumns(query, additional_source_columns);
     return std::make_shared<const SyntaxAnalyzerResult>(result);
 }
 
diff --git a/dbms/src/Interpreters/SyntaxAnalyzer.h b/dbms/src/Interpreters/SyntaxAnalyzer.h
index 5896358eb85..c7addb03526 100644
--- a/dbms/src/Interpreters/SyntaxAnalyzer.h
+++ b/dbms/src/Interpreters/SyntaxAnalyzer.h
@@ -13,8 +13,13 @@ NameSet removeDuplicateColumns(NamesAndTypesList & columns);
 struct SyntaxAnalyzerResult
 {
     StoragePtr storage;
+    AnalyzedJoin analyzed_join;
 
     NamesAndTypesList source_columns;
+    /// Set of columns that are enough to read from the table to evaluate the expression. It does not include joined columns.
+    NamesAndTypesList required_source_columns;
+    /// Columns will be added to block by JOIN. It's a subset of analyzed_join.available_joined_columns
+    NamesAndTypesList columns_added_by_join;
 
     Aliases aliases;
 
@@ -31,10 +36,11 @@ struct SyntaxAnalyzerResult
     /// Note: not used further.
     NameToNameMap array_join_name_to_alias;
 
-    AnalyzedJoin analyzed_join;
-
     /// Predicate optimizer overrides the sub queries
     bool rewrite_subqueries = false;
+
+    void collectUsedColumns(const ASTPtr & query, const NamesAndTypesList & additional_source_columns);
+    Names requiredSourceColumns() const { return required_source_columns.getNames(); }
 };
 
 using SyntaxAnalyzerResultPtr = std::shared_ptr<const SyntaxAnalyzerResult>;
@@ -64,7 +70,8 @@ public:
         ASTPtr & query,
         const NamesAndTypesList & source_columns_,
         const Names & required_result_columns = {},
-        StoragePtr storage = {}) const;
+        StoragePtr storage = {},
+        const NamesAndTypesList & additional_source_columns = {}) const;
 
 private:
     const Context & context;
diff --git a/dbms/src/Interpreters/evaluateMissingDefaults.cpp b/dbms/src/Interpreters/evaluateMissingDefaults.cpp
index 83af15d1924..bef41488793 100644
--- a/dbms/src/Interpreters/evaluateMissingDefaults.cpp
+++ b/dbms/src/Interpreters/evaluateMissingDefaults.cpp
@@ -61,7 +61,7 @@ void evaluateMissingDefaults(Block & block,
     auto syntax_result = SyntaxAnalyzer(context).analyze(default_expr_list, block.getNamesAndTypesList());
     auto expression_analyzer = ExpressionAnalyzer{default_expr_list, syntax_result, context};
-    auto required_source_columns = expression_analyzer.getRequiredSourceColumns();
+    auto required_source_columns = syntax_result->requiredSourceColumns();
     auto rows_was = copy_block.rows();
 
     // Delete all not needed columns in DEFAULT expression.
diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp
index dfc59654629..74e5df41217 100644
--- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp
@@ -134,8 +134,7 @@ MergeTreeData::MergeTreeData(
             throw Exception("Sampling expression must be present in the primary key", ErrorCodes::BAD_ARGUMENTS);
 
         auto syntax = SyntaxAnalyzer(global_context).analyze(sample_by_ast, getColumns().getAllPhysical());
-        columns_required_for_sampling = ExpressionAnalyzer(sample_by_ast, syntax, global_context)
-            .getRequiredSourceColumns();
+        columns_required_for_sampling = syntax->requiredSourceColumns();
     }
     MergeTreeDataFormatVersion min_format_version(0);
     if (!date_column_name.empty())
@@ -295,8 +294,7 @@ void MergeTreeData::setPrimaryKeyIndicesAndColumns(
     if (!added_key_column_expr_list->children.empty())
     {
         auto syntax = SyntaxAnalyzer(global_context).analyze(added_key_column_expr_list, all_columns);
-        Names used_columns = ExpressionAnalyzer(added_key_column_expr_list, syntax, global_context)
-            .getRequiredSourceColumns();
+        Names used_columns = syntax->requiredSourceColumns();
 
         NamesAndTypesList deleted_columns;
         NamesAndTypesList added_columns;
diff --git a/dbms/src/Storages/transformQueryForExternalDatabase.cpp b/dbms/src/Storages/transformQueryForExternalDatabase.cpp
index c0fcbabba42..55a0ef95200 100644
--- a/dbms/src/Storages/transformQueryForExternalDatabase.cpp
+++ b/dbms/src/Storages/transformQueryForExternalDatabase.cpp
@@ -8,7 +8,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 
@@ -111,8 +110,7 @@ String transformQueryForExternalDatabase(
 {
     auto clone_query = query.clone();
     auto syntax_result = SyntaxAnalyzer(context).analyze(clone_query, available_columns);
-    ExpressionAnalyzer analyzer(clone_query, syntax_result, context);
-    const Names & used_columns = analyzer.getRequiredSourceColumns();
+    const Names used_columns = syntax_result->requiredSourceColumns();
 
     auto select = std::make_shared<ASTSelectQuery>();
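Note on the refactoring above: every call site that previously constructed an ExpressionAnalyzer only to call getRequiredSourceColumns() now reads the precomputed list from SyntaxAnalyzerResult, since collectUsedColumns() runs inside SyntaxAnalyzer::analyze(). A condensed sketch of the new idiom follows; it uses only the types and signatures visible in the hunks above, the helper name is ours, and the include paths are assumed from the tree layout in these diffs, so treat it as an illustration rather than part of the patch:

    #include <Interpreters/Context.h>
    #include <Interpreters/SyntaxAnalyzer.h>
    #include <Core/Names.h>
    #include <Core/NamesAndTypes.h>
    #include <Parsers/IAST.h>

    namespace DB
    {

    /// Hypothetical helper: which of the available columns does `expr` actually read?
    Names requiredColumnsOf(ASTPtr & expr, const NamesAndTypesList & available_columns, const Context & context)
    {
        /// collectUsedColumns() already ran inside analyze(), so the answer is precomputed.
        auto syntax_result = SyntaxAnalyzer(context).analyze(expr, available_columns);
        return syntax_result->requiredSourceColumns();
    }

    }

This is exactly the shape of the converted call sites in evaluateMissingDefaults.cpp, MergeTreeData.cpp and transformQueryForExternalDatabase.cpp above: one analyze() call, no throwaway analyzer.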
From 98ea652ad63a2cc8037b50f1bf5eff432de0256c Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Fri, 9 Aug 2019 18:30:39 +0300
Subject: [PATCH 50/84] Whitespaces

---
 dbms/src/Compression/LZ4_decompress_faster.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/dbms/src/Compression/LZ4_decompress_faster.cpp b/dbms/src/Compression/LZ4_decompress_faster.cpp
index 65ffdb2173b..77f9e226de4 100644
--- a/dbms/src/Compression/LZ4_decompress_faster.cpp
+++ b/dbms/src/Compression/LZ4_decompress_faster.cpp
@@ -537,7 +537,6 @@ void decompress(
     if (source_size == 0 || dest_size == 0)
         return;
 
-
     /// Don't run timer if the block is too small.
     if (dest_size >= 32768)
     {

From ff009a28d26ac4f6584d53cc7911d75706e8932f Mon Sep 17 00:00:00 2001
From: proller
Date: Fri, 9 Aug 2019 18:37:45 +0300
Subject: [PATCH 51/84] Arcadia build fixes

---
 libs/libcommon/CMakeLists.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/libs/libcommon/CMakeLists.txt b/libs/libcommon/CMakeLists.txt
index ce8c5801613..885a6f0ec0b 100644
--- a/libs/libcommon/CMakeLists.txt
+++ b/libs/libcommon/CMakeLists.txt
@@ -123,6 +123,7 @@ target_link_libraries (common
         PUBLIC
             ${Boost_SYSTEM_LIBRARY}
         PRIVATE
+            ${CMAKE_DL_LIBS}
             ${MALLOC_LIBRARIES}
             Threads::Threads
             ${MEMCPY_LIBRARIES})

From 9a438d9f7d7c8e006f9b8c4167e9cb1d0b3ff657 Mon Sep 17 00:00:00 2001
From: proller
Date: Fri, 9 Aug 2019 19:09:24 +0300
Subject: [PATCH 52/84] Fix build without protobuf

---
 dbms/src/Formats/ProtobufReader.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dbms/src/Formats/ProtobufReader.h b/dbms/src/Formats/ProtobufReader.h
index c2660369c67..d848215c294 100644
--- a/dbms/src/Formats/ProtobufReader.h
+++ b/dbms/src/Formats/ProtobufReader.h
@@ -231,7 +231,7 @@ public:
     bool readDecimal(Decimal64 &, UInt32, UInt32) { return false; }
     bool readDecimal(Decimal128 &, UInt32, UInt32) { return false; }
     bool readAggregateFunction(const AggregateFunctionPtr &, AggregateDataPtr, Arena &) { return false; }
-    bool maybeCanReadValue() const { return false; }
+    bool canReadMoreValues() const { return false; }
 };
 
 }

From 6491de7eddcd3097cde4a288593aab16a349c59b Mon Sep 17 00:00:00 2001
From: chertus
Date: Fri, 9 Aug 2019 19:17:01 +0300
Subject: [PATCH 53/84] forget to remove field in last patch

---
 dbms/src/Interpreters/ExpressionAnalyzer.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.h b/dbms/src/Interpreters/ExpressionAnalyzer.h
index 3bb8a2bab07..ed35bafbe75 100644
--- a/dbms/src/Interpreters/ExpressionAnalyzer.h
+++ b/dbms/src/Interpreters/ExpressionAnalyzer.h
@@ -50,9 +50,6 @@ struct ExpressionAnalyzerData
     /// All new temporary tables obtained by performing the GLOBAL IN/JOIN subqueries.
Tables external_tables; - /// Predicate optimizer overrides the sub queries - bool rewrite_subqueries = false; - protected: ExpressionAnalyzerData(const NameSet & required_result_columns_, const SubqueriesForSets & subqueries_for_sets_) From 1e8f04aaa755c1efa993c2c5175b8d1d1f2a4aa3 Mon Sep 17 00:00:00 2001 From: proller Date: Fri, 9 Aug 2019 20:14:41 +0300 Subject: [PATCH 54/84] Fix unbundled build --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5e03af27cfa..d369dca7e78 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -437,10 +437,10 @@ message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE include(GNUInstallDirs) include (cmake/find_contrib_lib.cmake) +include (cmake/lib_name.cmake) find_contrib_lib(double-conversion) # Must be before parquet include (cmake/find_ssl.cmake) -include (cmake/lib_name.cmake) include (cmake/find_icu.cmake) include (cmake/find_boost.cmake) include (cmake/find_zlib.cmake) From f636a4c2c1773de8949a31e72fa541d45a33887d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2019 20:25:19 +0300 Subject: [PATCH 55/84] Fixed arrayEnumerateUniqRanked function (TODO: simplify code as much as possible) --- .../Functions/array/arrayEnumerateRanked.h | 45 ++++++++++++------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/dbms/src/Functions/array/arrayEnumerateRanked.h b/dbms/src/Functions/array/arrayEnumerateRanked.h index ed7f0d647d1..ab46af2266d 100644 --- a/dbms/src/Functions/array/arrayEnumerateRanked.h +++ b/dbms/src/Functions/array/arrayEnumerateRanked.h @@ -307,10 +307,8 @@ void FunctionArrayEnumerateRankedExtended::executeMethodImpl( ColumnUInt32::Container & res_values) { /// Offsets at the depth we want to look. - const size_t current_offset_depth = arrays_depths.max_array_depth; - const auto & offsets = *offsets_by_depth[current_offset_depth - 1]; - - ColumnArray::Offset prev_off = 0; + const size_t depth_to_look = arrays_depths.max_array_depth; + const auto & offsets = *offsets_by_depth[depth_to_look - 1]; using Map = ClearableHashMap< UInt128, @@ -320,14 +318,17 @@ void FunctionArrayEnumerateRankedExtended::executeMethodImpl( HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(UInt128)>>; Map indices; - std::vector indices_by_depth(arrays_depths.max_array_depth); - std::vector current_offset_n_by_depth(arrays_depths.max_array_depth); - std::vector last_offset_by_depth(arrays_depths.max_array_depth, 0); // For skipping empty arrays + std::vector indices_by_depth(depth_to_look); + std::vector current_offset_n_by_depth(depth_to_look); + std::vector last_offset_by_depth(depth_to_look, 0); // For skipping empty arrays + /// For arrayEnumerateDense variant: to calculate every distinct value. UInt32 rank = 0; std::vector columns_indices(columns.size()); + /// For each array at the depth we want to look. + ColumnArray::Offset prev_off = 0; for (size_t off : offsets) { bool want_clear = false; @@ -336,17 +337,29 @@ void FunctionArrayEnumerateRankedExtended::executeMethodImpl( if (prev_off == off) { want_clear = true; - if (arrays_depths.max_array_depth > 1) - ++indices_by_depth[0]; - for (ssize_t depth = current_offset_depth - 1; depth >= 0; --depth) + if (depth_to_look >= 2) { - const auto offsets_by_depth_size = offsets_by_depth[depth]->size(); - while (last_offset_by_depth[depth] == (*offsets_by_depth[depth])[current_offset_n_by_depth[depth]]) + /// Advance to the next element of the parent array. 
+                for (ssize_t depth = depth_to_look - 2; depth >= 0; --depth)
                {
+                    /// Skipping offsets for empty arrays
+                    while (last_offset_by_depth[depth] == (*offsets_by_depth[depth])[current_offset_n_by_depth[depth]])
+                    {
+                        ++current_offset_n_by_depth[depth];
+                    }
+
+                    ++indices_by_depth[depth];
+
+                    if (indices_by_depth[depth] == (*offsets_by_depth[depth])[current_offset_n_by_depth[depth]])
+                    {
+                        last_offset_by_depth[depth] = (*offsets_by_depth[depth])[current_offset_n_by_depth[depth]];
+                        ++current_offset_n_by_depth[depth];
+                    }
+                    else
+                    {
+                        break;
+                    }
                }
            }
        }

@@ -377,7 +390,7 @@ void FunctionArrayEnumerateRankedExtended::executeMethodImpl(
 
         // Debug: DUMP(off, prev_off, j, columns_indices, res_values[j], columns);
 
-        for (ssize_t depth = current_offset_depth - 1; depth >= 0; --depth)
+        for (ssize_t depth = depth_to_look - 1; depth >= 0; --depth)
         {
             /// Skipping offsets for empty arrays
             while (last_offset_by_depth[depth] == (*offsets_by_depth[depth])[current_offset_n_by_depth[depth]])

From 7b375032b59ceca448c412416a07586f5baffd72 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Fri, 9 Aug 2019 20:26:25 +0300
Subject: [PATCH 56/84] Added a test

---
 .../0_stateless/00982_array_enumerate_uniq_ranked.reference   | 3 +++
 .../queries/0_stateless/00982_array_enumerate_uniq_ranked.sql | 1 +
 2 files changed, 4 insertions(+)
 create mode 100644 dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.reference
 create mode 100644 dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.sql

diff --git a/dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.reference b/dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.reference
new file mode 100644
index 00000000000..cba4f6fbff1
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.reference
@@ -0,0 +1,3 @@
+[[]]
+[[1],[2],[1]]
+[[1],[2],[1]]
diff --git a/dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.sql b/dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.sql
new file mode 100644
index 00000000000..46bb03e4682
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.sql
@@ -0,0 +1 @@
+SELECT arrayEnumerateUniqRanked(x, 2) FROM VALUES('x Array(Array(String))', ([[]]), ([['a'], ['a'], ['b']]), ([['a'], ['a'], ['b']]));
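The .reference file above pins down the intended semantics: within one row, every depth-2 element gets its occurrence index among equal values, with counters reset between rows. The standalone sketch below models that behaviour in plain C++ with the standard library only; the function name and the printing are ours, not the real implementation, which walks flattened offset arrays as in the hunks above:

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    using Row = std::vector<std::vector<std::string>>;

    /// For every depth-2 element, emit its occurrence index among equal values
    /// seen so far in the same row; counters reset between rows.
    std::vector<std::vector<unsigned>> enumerateUniqDepth2(const Row & row)
    {
        std::map<std::string, unsigned> occurrences;
        std::vector<std::vector<unsigned>> result;
        for (const auto & inner : row)
        {
            std::vector<unsigned> ranks;
            for (const auto & value : inner)
                ranks.push_back(++occurrences[value]);
            result.push_back(ranks);
        }
        return result;
    }

    int main()
    {
        for (const Row & row : {Row{{}}, Row{{"a"}, {"a"}, {"b"}}, Row{{"a"}, {"a"}, {"b"}}})
        {
            std::cout << "[";
            bool first_inner = true;
            for (const auto & ranks : enumerateUniqDepth2(row))
            {
                std::cout << (first_inner ? "[" : ",[");
                first_inner = false;
                for (size_t i = 0; i < ranks.size(); ++i)
                    std::cout << (i ? "," : "") << ranks[i];
                std::cout << "]";
            }
            std::cout << "]\n";
        }
    }

Compiled and run, it prints [[]], [[1],[2],[1]] and [[1],[2],[1]], one row per line, matching the expected test output.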
From f91af03d2a61e9990478aeb89e74c9a8f0ceb356 Mon Sep 17 00:00:00 2001
From: stavrolia
Date: Fri, 9 Aug 2019 21:15:04 +0300
Subject: [PATCH 57/84] fix

---
 CHANGELOG.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f9c461e63e4..2a0b69bcc6d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -185,7 +185,7 @@
 * Fixed "select_format" performance test for `Pretty` formats [#5642](https://github.com/yandex/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov))
 
-## ClickHouse release 19.9.4.1, 2019-07-05
+## ClickHouse release 19.9.3.31, 2019-07-05
 
 ### Bug Fix
 * Fix segfault in Delta codec which affects columns with values less than 32 bits size. The bug led to random memory corruption. [#5786](https://github.com/yandex/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
 * Fix rare bug in checking of part with LowCardinality column. [#5832](https://github.com/yandex/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
 * Fix segfault in TTL merge with non-physical columns in block. [#5819](https://github.com/yandex/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
 * Fix potential infinite sleeping of low-priority queries. [#5842](https://github.com/yandex/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
 * Fix race condition, which cause that some queries may not appear in query_log instantly after SYSTEM FLUSH LOGS query. [#5685](https://github.com/yandex/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
 * Added missing support for constant arguments to `evalMLModel` function. [#5820](https://github.com/yandex/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
 
-## ClickHouse release 19.7.6.1, 2019-07-05
+## ClickHouse release 19.7.5.29, 2019-07-05
 
 ### Bug Fix
 * Fix performance regression in some queries with JOIN. [#5192](https://github.com/yandex/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))

From 626b75b6a5549a2f56dec329146478e4daffc1a4 Mon Sep 17 00:00:00 2001
From: stavrolia
Date: Fri, 9 Aug 2019 22:17:19 +0300
Subject: [PATCH 58/84] Fix behavior of low cardinality setting in creating
 materialized view

---
 dbms/src/Interpreters/InterpreterCreateQuery.cpp   |  2 +-
 .../00982_low_cardinality_setting_in_mv.reference  |  0
 .../00982_low_cardinality_setting_in_mv.sql        | 11 +++++++++++
 3 files changed, 12 insertions(+), 1 deletion(-)
 create mode 100644 dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.reference
 create mode 100644 dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql

diff --git a/dbms/src/Interpreters/InterpreterCreateQuery.cpp b/dbms/src/Interpreters/InterpreterCreateQuery.cpp
index 4b9271df0e8..0467e91c6d1 100644
--- a/dbms/src/Interpreters/InterpreterCreateQuery.cpp
+++ b/dbms/src/Interpreters/InterpreterCreateQuery.cpp
@@ -542,7 +542,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
         columns = setColumns(create, as_select_sample, as_storage);
 
     /// Check low cardinality types in creating table if it was not allowed in setting
-    if (!create.attach && !context.getSettingsRef().allow_suspicious_low_cardinality_types)
+    if (!create.attach && !context.getSettingsRef().allow_suspicious_low_cardinality_types && !create.is_materialized_view)
     {
         for (const auto & name_and_type_pair : columns.getAllPhysical())
         {
diff --git a/dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.reference b/dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql b/dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql
new file mode 100644
index 00000000000..7192642bcde
--- /dev/null
+++ b/dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql
@@ -0,0 +1,11 @@
+DROP TABLE IF EXISTS test1;
+DROP TABLE IF EXISTS test2;
+DROP TABLE IF EXISTS test_mv;
+
+CREATE TABLE test1 (a LowCardinality(String)) ENGINE=MergeTree() ORDER BY a;
+CREATE TABLE test2 (a UInt64) engine=MergeTree() ORDER BY a;
+CREATE MATERIALIZED VIEW test_mv TO test2 AS SELECT toUInt64(a = 'test') FROM test1;
+
+DROP TABLE test_mv;
+DROP TABLE test1;
+DROP TABLE test2;

From 112fc71276517adf29939baf1353048e414f4877 Mon Sep 17 00:00:00 2001
From: kreuzerkrieg
Date: Sat, 3 Aug 2019 14:02:40 +0300
Subject: [PATCH 59/84] adding -Wshadow for GCC

---
 dbms/CMakeLists.txt                           |  6 +-
 dbms/programs/copier/ClusterCopier.cpp        |  6 +-
 dbms/programs/obfuscator/Obfuscator.cpp       | 20 +-
 .../odbc-bridge/ODBCBlockInputStream.cpp      | 34 ++--
 .../odbc-bridge/ODBCBlockInputStream.h        |  2 +-
 dbms/programs/server/MetricsTransmitter.cpp   |  4 +-
 dbms/programs/server/MetricsTransmitter.h     |  2 +-
 dbms/programs/server/MySQLHandler.cpp         |  8 +-
 dbms/programs/server/MySQLHandler.h           |  2 +-
 dbms/programs/server/Server.cpp               |  8 +-
 .../AggregateFunctionArgMinMax.h              |  4 +-
.../AggregateFunctionGroupArray.h | 4 +- .../AggregateFunctionGroupUniqArray.h | 4 +- .../AggregateFunctionHistogram.h | 4 +- .../AggregateFunctionMLMethod.cpp | 26 +-- .../AggregateFunctionMLMethod.h | 36 ++-- .../AggregateFunctionMinMaxAny.h | 4 +- .../AggregateFunctionQuantile.h | 4 +- .../AggregateFunctionResample.h | 16 +- .../AggregateFunctionSequenceMatch.h | 18 +- .../AggregateFunctionState.h | 6 +- .../AggregateFunctionSumMap.h | 8 +- .../AggregateFunctionTopK.h | 10 +- .../AggregateFunctionUniqUpTo.h | 8 +- dbms/src/AggregateFunctions/QuantileTDigest.h | 6 +- .../ReservoirSamplerDeterministic.h | 4 +- dbms/src/Columns/ColumnConst.cpp | 4 +- dbms/src/Columns/ColumnConst.h | 2 +- dbms/src/Columns/ColumnFunction.cpp | 4 +- dbms/src/Columns/ColumnFunction.h | 2 +- dbms/src/Columns/ColumnLowCardinality.cpp | 4 +- dbms/src/Columns/ColumnLowCardinality.h | 4 +- dbms/src/Columns/ColumnTuple.cpp | 4 +- dbms/src/Columns/ColumnUnique.h | 6 +- dbms/src/Columns/ReverseIndex.h | 4 +- dbms/src/Common/ColumnsHashing.h | 22 +- dbms/src/Common/ColumnsHashingImpl.h | 10 +- dbms/src/Common/CurrentMetrics.h | 8 +- dbms/src/Common/Elf.cpp | 4 +- dbms/src/Common/Elf.h | 2 +- dbms/src/Common/FieldVisitors.cpp | 2 +- dbms/src/Common/FieldVisitors.h | 2 +- dbms/src/Common/MemoryTracker.h | 6 +- dbms/src/Common/ProfileEvents.cpp | 6 +- dbms/src/Common/ProfileEvents.h | 2 +- dbms/src/Common/QueryProfiler.cpp | 4 +- dbms/src/Common/QueryProfiler.h | 2 +- dbms/src/Common/RWLock.cpp | 6 +- dbms/src/Common/RWLock.h | 2 +- dbms/src/Common/ShellCommand.cpp | 10 +- dbms/src/Common/ShellCommand.h | 2 +- dbms/src/Common/Stopwatch.h | 2 +- dbms/src/Common/StringSearcher.h | 12 +- dbms/src/Common/ThreadPool.cpp | 8 +- dbms/src/Common/ThreadPool.h | 8 +- dbms/src/Common/Throttler.h | 8 +- dbms/src/Common/TraceCollector.cpp | 4 +- dbms/src/Common/TraceCollector.h | 2 +- dbms/src/Common/UInt128.h | 2 +- dbms/src/Common/Volnitsky.h | 8 +- dbms/src/Common/ZooKeeper/IKeeper.cpp | 16 +- dbms/src/Common/ZooKeeper/IKeeper.h | 8 +- dbms/src/Common/ZooKeeper/TestKeeper.cpp | 4 +- dbms/src/Common/ZooKeeper/TestKeeper.h | 2 +- dbms/src/Common/ZooKeeper/ZooKeeper.cpp | 10 +- dbms/src/Common/ZooKeeper/ZooKeeper.h | 8 +- dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp | 14 +- dbms/src/Common/ZooKeeper/ZooKeeperImpl.h | 4 +- dbms/src/Common/hex.h | 20 +- .../Common/tests/arena_with_free_lists.cpp | 46 ++--- dbms/src/Common/tests/cow_columns.cpp | 2 +- dbms/src/Common/tests/cow_compositions.cpp | 2 +- dbms/src/Common/tests/mi_malloc_test.cpp | 4 +- .../Compression/CompressionCodecMultiple.cpp | 4 +- .../Compression/CompressionCodecMultiple.h | 2 +- dbms/src/Compression/LZ4_decompress_faster.h | 2 +- .../tests/gtest_compressionCodec.cpp | 10 +- dbms/src/Core/BackgroundSchedulePool.cpp | 6 +- dbms/src/Core/BackgroundSchedulePool.h | 2 +- dbms/src/Core/ExternalResultDescription.cpp | 28 +-- dbms/src/Core/ExternalResultDescription.h | 28 +-- dbms/src/Core/MySQLProtocol.h | 120 +++++------ dbms/src/Core/Types.h | 70 +++---- .../CollapsingFinalBlockInputStream.h | 8 +- .../DataStreams/DistinctBlockInputStream.cpp | 6 +- .../DataStreams/DistinctBlockInputStream.h | 2 +- .../DistinctSortedBlockInputStream.cpp | 4 +- .../DistinctSortedBlockInputStream.h | 2 +- .../DataStreams/FilterBlockInputStream.cpp | 4 +- dbms/src/DataStreams/FilterBlockInputStream.h | 2 +- .../GraphiteRollupSortedBlockInputStream.cpp | 4 +- .../GraphiteRollupSortedBlockInputStream.h | 2 +- .../MaterializingBlockOutputStream.h | 4 +- .../MergeSortingBlockInputStream.h | 4 +- 
dbms/src/DataStreams/NullBlockOutputStream.h | 2 +- dbms/src/DataStreams/OwningBlockInputStream.h | 4 +- .../ParallelAggregatingBlockInputStream.h | 8 +- dbms/src/DataStreams/SizeLimits.h | 4 +- .../SquashingBlockOutputStream.cpp | 4 +- .../DataStreams/SquashingBlockOutputStream.h | 2 +- dbms/src/DataStreams/SquashingTransform.cpp | 4 +- dbms/src/DataStreams/SquashingTransform.h | 4 +- dbms/src/DataTypes/DataTypeInterval.h | 2 +- dbms/src/DataTypes/DataTypeLowCardinality.cpp | 16 +- dbms/src/DataTypes/IDataType.h | 2 +- dbms/src/Dictionaries/CacheDictionary.cpp | 92 ++++----- dbms/src/Dictionaries/CacheDictionary.h | 10 +- .../CacheDictionary_generate1.cpp.in | 2 +- .../CacheDictionary_generate2.cpp.in | 2 +- .../CacheDictionary_generate3.cpp.in | 2 +- .../ClickHouseDictionarySource.cpp | 4 +- .../Dictionaries/ClickHouseDictionarySource.h | 2 +- .../ComplexKeyCacheDictionary.cpp | 30 +-- .../Dictionaries/ComplexKeyCacheDictionary.h | 10 +- ...acheDictionary_createAttributeWithType.cpp | 4 +- ...ComplexKeyCacheDictionary_generate1.cpp.in | 2 +- ...ComplexKeyCacheDictionary_generate2.cpp.in | 2 +- ...ComplexKeyCacheDictionary_generate3.cpp.in | 2 +- ...exKeyCacheDictionary_setAttributeValue.cpp | 30 +-- ...cheDictionary_setDefaultAttributeValue.cpp | 30 +-- .../ComplexKeyHashedDictionary.cpp | 186 ++++++++--------- .../Dictionaries/ComplexKeyHashedDictionary.h | 12 +- .../Dictionaries/DictionaryBlockInputStream.h | 76 +++---- .../DictionaryBlockInputStreamBase.cpp | 4 +- .../DictionaryBlockInputStreamBase.h | 2 +- dbms/src/Dictionaries/DictionaryStructure.cpp | 66 +++--- dbms/src/Dictionaries/DictionaryStructure.h | 30 +-- .../GeodataProviders/HierarchiesProvider.cpp | 2 +- .../GeodataProviders/HierarchiesProvider.h | 2 +- .../ExecutableDictionarySource.cpp | 8 +- .../Dictionaries/ExecutableDictionarySource.h | 4 +- .../src/Dictionaries/FileDictionarySource.cpp | 4 +- dbms/src/Dictionaries/FileDictionarySource.h | 2 +- dbms/src/Dictionaries/FlatDictionary.cpp | 158 +++++++-------- dbms/src/Dictionaries/FlatDictionary.h | 12 +- .../src/Dictionaries/HTTPDictionarySource.cpp | 8 +- dbms/src/Dictionaries/HTTPDictionarySource.h | 4 +- dbms/src/Dictionaries/HashedDictionary.cpp | 188 +++++++++--------- dbms/src/Dictionaries/HashedDictionary.h | 12 +- .../Dictionaries/LibraryDictionarySource.cpp | 8 +- .../Dictionaries/LibraryDictionarySource.h | 4 +- .../Dictionaries/MongoDBBlockInputStream.cpp | 32 +-- .../Dictionaries/MongoDBBlockInputStream.h | 2 +- .../Dictionaries/MongoDBDictionarySource.cpp | 74 +++---- .../Dictionaries/MongoDBDictionarySource.h | 18 +- .../Dictionaries/MySQLDictionarySource.cpp | 4 +- dbms/src/Dictionaries/MySQLDictionarySource.h | 2 +- .../RangeDictionaryBlockInputStream.h | 46 ++--- .../Dictionaries/RangeHashedDictionary.cpp | 146 +++++++------- dbms/src/Dictionaries/RangeHashedDictionary.h | 10 +- dbms/src/Dictionaries/TrieDictionary.cpp | 152 +++++++------- dbms/src/Dictionaries/TrieDictionary.h | 10 +- .../src/Dictionaries/XDBCDictionarySource.cpp | 4 +- .../BlockInputStreamFromRowInputStream.cpp | 8 +- dbms/src/Formats/CSVRowInputStream.cpp | 4 +- dbms/src/Formats/CSVRowInputStream.h | 2 +- dbms/src/Formats/MySQLBlockInputStream.cpp | 32 +-- dbms/src/Formats/MySQLBlockInputStream.h | 6 +- dbms/src/Formats/ProtobufWriter.h | 6 +- .../Formats/TabSeparatedRowInputStream.cpp | 4 +- dbms/src/Formats/TabSeparatedRowInputStream.h | 2 +- dbms/src/Functions/FunctionJoinGet.h | 14 +- dbms/src/Functions/FunctionsComparison.h | 4 +- dbms/src/Functions/FunctionsConversion.h | 14 
+- .../Functions/FunctionsExternalDictionaries.h | 22 +- dbms/src/Functions/FunctionsExternalModels.h | 2 +- dbms/src/Functions/FunctionsMiscellaneous.h | 20 +- dbms/src/Functions/FunctionsRound.h | 2 +- dbms/src/Functions/GatherUtils/Sinks.h | 4 +- dbms/src/Functions/GatherUtils/Sources.h | 6 +- dbms/src/Functions/GeoUtils.h | 6 +- dbms/src/Functions/IFunction.h | 8 +- dbms/src/Functions/RapidJSONParser.h | 2 +- dbms/src/Functions/array/array.cpp | 4 +- dbms/src/Functions/array/arrayConcat.cpp | 2 +- dbms/src/Functions/array/arrayIntersect.cpp | 6 +- dbms/src/Functions/array/arrayPop.h | 2 +- dbms/src/Functions/array/arrayPush.h | 4 +- dbms/src/Functions/array/arrayPushBack.cpp | 2 +- dbms/src/Functions/array/arrayPushFront.cpp | 2 +- dbms/src/Functions/array/arrayResize.cpp | 2 +- dbms/src/Functions/array/arraySort.cpp | 2 +- dbms/src/Functions/array/hasAll.cpp | 2 +- dbms/src/Functions/array/hasAllAny.h | 4 +- dbms/src/Functions/array/hasAny.cpp | 2 +- dbms/src/Functions/coalesce.cpp | 2 +- dbms/src/Functions/concat.cpp | 4 +- dbms/src/Functions/currentDatabase.cpp | 2 +- dbms/src/Functions/evalMLMethod.cpp | 2 +- dbms/src/Functions/formatDateTime.cpp | 2 +- dbms/src/Functions/if.cpp | 2 +- dbms/src/Functions/ifNull.cpp | 2 +- dbms/src/Functions/multiIf.cpp | 2 +- dbms/src/Functions/nullIf.cpp | 2 +- dbms/src/Functions/reverse.cpp | 2 +- dbms/src/IO/LimitReadBuffer.cpp | 4 +- dbms/src/IO/LimitReadBuffer.h | 2 +- dbms/src/IO/MMapReadBufferFromFile.cpp | 4 +- dbms/src/IO/MMapReadBufferFromFile.h | 2 +- .../IO/MMapReadBufferFromFileDescriptor.cpp | 8 +- .../src/IO/MMapReadBufferFromFileDescriptor.h | 4 +- dbms/src/IO/ReadWriteBufferFromHTTP.h | 8 +- dbms/src/IO/WriteBufferValidUTF8.cpp | 6 +- dbms/src/IO/WriteBufferValidUTF8.h | 6 +- dbms/src/Interpreters/Aggregator.cpp | 2 +- dbms/src/Interpreters/CatBoostModel.cpp | 8 +- dbms/src/Interpreters/Context.cpp | 6 +- dbms/src/Interpreters/DDLWorker.cpp | 4 +- dbms/src/Interpreters/ExpressionAnalyzer.h | 12 +- .../src/Interpreters/ExternalDictionaries.cpp | 4 +- dbms/src/Interpreters/ExternalDictionaries.h | 2 +- dbms/src/Interpreters/ExternalLoader.h | 4 +- dbms/src/Interpreters/ExternalModels.cpp | 6 +- dbms/src/Interpreters/ExternalModels.h | 2 +- .../InJoinSubqueriesPreprocessor.h | 4 +- .../Interpreters/InterpreterSelectQuery.cpp | 14 +- .../src/Interpreters/InterpreterSelectQuery.h | 2 +- dbms/src/Interpreters/Join.cpp | 4 +- dbms/src/Interpreters/Join.h | 2 +- dbms/src/Interpreters/MutationsInterpreter.h | 2 +- .../PredicateExpressionsOptimizer.h | 14 +- dbms/src/Interpreters/Set.h | 4 +- dbms/src/Interpreters/loadMetadata.cpp | 4 +- dbms/src/Parsers/CommonParsers.h | 2 +- dbms/src/Parsers/Lexer.h | 6 +- dbms/src/Parsers/ParserInsertQuery.h | 2 +- dbms/src/Parsers/ParserQuery.h | 4 +- dbms/src/Parsers/ParserTablesInSelectQuery.h | 2 +- dbms/src/Parsers/TokenIterator.h | 2 +- .../Processors/Executors/PipelineExecutor.cpp | 4 +- .../Processors/Executors/PipelineExecutor.h | 2 +- dbms/src/Processors/Formats/IInputFormat.h | 4 +- dbms/src/Processors/Formats/IOutputFormat.cpp | 4 +- dbms/src/Processors/Formats/IOutputFormat.h | 2 +- dbms/src/Processors/Formats/IRowInputFormat.h | 4 +- .../Formats/Impl/BinaryRowInputFormat.cpp | 4 +- .../Formats/Impl/BinaryRowInputFormat.h | 2 +- .../Formats/Impl/CSVRowInputFormat.cpp | 8 +- .../Formats/Impl/CSVRowInputFormat.h | 2 +- .../Formats/Impl/CSVRowOutputFormat.cpp | 4 +- .../Formats/Impl/CSVRowOutputFormat.h | 2 +- .../Formats/Impl/CapnProtoRowInputFormat.cpp | 4 +- 
.../Formats/Impl/CapnProtoRowInputFormat.h | 2 +- .../Impl/JSONEachRowRowInputFormat.cpp | 6 +- .../Formats/Impl/JSONEachRowRowInputFormat.h | 2 +- .../Impl/JSONEachRowRowOutputFormat.cpp | 4 +- .../Formats/Impl/JSONEachRowRowOutputFormat.h | 2 +- .../Formats/Impl/MySQLOutputFormat.cpp | 12 +- .../Formats/Impl/MySQLOutputFormat.h | 2 +- .../Impl/ODBCDriver2BlockOutputFormat.cpp | 4 +- .../Impl/ODBCDriver2BlockOutputFormat.h | 2 +- .../Impl/ODBCDriverBlockOutputFormat.cpp | 4 +- .../Impl/ODBCDriverBlockOutputFormat.h | 2 +- .../Formats/Impl/ParquetBlockInputFormat.cpp | 4 +- .../Formats/Impl/ParquetBlockInputFormat.h | 2 +- .../Formats/Impl/ParquetBlockOutputFormat.cpp | 4 +- .../Formats/Impl/ParquetBlockOutputFormat.h | 2 +- .../Formats/Impl/PrettyBlockOutputFormat.cpp | 8 +- .../Formats/Impl/PrettyBlockOutputFormat.h | 2 +- .../Formats/Impl/ProtobufRowInputFormat.cpp | 8 +- .../Formats/Impl/ProtobufRowInputFormat.h | 2 +- .../Formats/Impl/TSKVRowInputFormat.cpp | 4 +- .../Formats/Impl/TSKVRowInputFormat.h | 2 +- .../Impl/TabSeparatedRawRowOutputFormat.h | 4 +- .../Impl/TabSeparatedRowInputFormat.cpp | 4 +- .../Formats/Impl/TabSeparatedRowInputFormat.h | 2 +- .../Impl/TabSeparatedRowOutputFormat.cpp | 4 +- .../Impl/TabSeparatedRowOutputFormat.h | 2 +- .../Formats/Impl/ValuesRowInputFormat.cpp | 6 +- .../Formats/Impl/ValuesRowInputFormat.h | 2 +- .../Formats/Impl/ValuesRowOutputFormat.cpp | 4 +- .../Formats/Impl/ValuesRowOutputFormat.h | 2 +- .../Formats/Impl/VerticalRowOutputFormat.cpp | 4 +- .../Formats/Impl/VerticalRowOutputFormat.h | 2 +- .../Formats/Impl/XMLRowOutputFormat.cpp | 4 +- .../Formats/Impl/XMLRowOutputFormat.h | 2 +- dbms/src/Processors/ISimpleTransform.cpp | 6 +- dbms/src/Processors/ISimpleTransform.h | 2 +- dbms/src/Processors/LimitTransform.cpp | 10 +- dbms/src/Processors/LimitTransform.h | 4 +- dbms/src/Processors/Port.h | 4 +- .../Sources/SourceFromInputStream.cpp | 4 +- .../Sources/SourceFromInputStream.h | 2 +- .../Transforms/AggregatingTransform.cpp | 10 +- .../Transforms/AggregatingTransform.h | 4 +- .../Transforms/ConvertingTransform.cpp | 16 +- .../Transforms/ConvertingTransform.h | 8 +- .../Transforms/CreatingSetsTransform.cpp | 12 +- .../Transforms/CreatingSetsTransform.h | 6 +- .../Transforms/DistinctTransform.cpp | 24 +-- .../Processors/Transforms/DistinctTransform.h | 8 +- .../Transforms/ExpressionTransform.cpp | 10 +- .../Transforms/ExpressionTransform.h | 2 +- .../Processors/Transforms/FilterTransform.cpp | 8 +- .../Processors/Transforms/FilterTransform.h | 2 +- .../Transforms/LimitsCheckingTransform.cpp | 6 +- .../Transforms/LimitsCheckingTransform.h | 2 +- ...gingAggregatedMemoryEfficientTransform.cpp | 20 +- ...ergingAggregatedMemoryEfficientTransform.h | 2 +- .../Transforms/MergingAggregatedTransform.cpp | 6 +- .../Transforms/MergingAggregatedTransform.h | 2 +- .../Transforms/MergingSortedTransform.cpp | 12 +- .../Transforms/PartialSortingTransform.cpp | 6 +- .../Transforms/PartialSortingTransform.h | 8 +- dbms/src/Processors/tests/processors_test.cpp | 12 +- .../tests/processors_test_aggregation.cpp | 8 +- .../tests/processors_test_chain.cpp | 12 +- .../tests/processors_test_expand_pipeline.cpp | 8 +- .../tests/processors_test_merge.cpp | 12 +- ...rocessors_test_merge_sorting_transform.cpp | 4 +- ...ocessors_test_merging_sorted_transform.cpp | 12 +- dbms/src/Storages/AlterCommands.h | 14 +- .../Storages/Distributed/DirectoryMonitor.cpp | 6 +- .../Storages/Distributed/DirectoryMonitor.h | 2 +- .../DistributedBlockOutputStream.cpp | 4 +- 
.../DistributedBlockOutputStream.h | 6 +- .../Storages/Kafka/KafkaBlockInputStream.cpp | 6 +- dbms/src/Storages/MarkCache.h | 4 +- .../MergeTree/IMergedBlockOutputStream.cpp | 18 +- .../MergeTree/IMergedBlockOutputStream.h | 12 +- .../Storages/MergeTree/LevelMergeSelector.h | 2 +- dbms/src/Storages/MergeTree/MarkRange.h | 2 +- dbms/src/Storages/MergeTree/MergeList.cpp | 4 +- dbms/src/Storages/MergeTree/MergeList.h | 2 +- .../MergeTreeBaseSelectBlockInputStream.cpp | 40 ++-- .../MergeTreeBaseSelectBlockInputStream.h | 20 +- .../MergeTree/MergeTreeBlockOutputStream.h | 4 +- .../MergeTree/MergeTreeBlockReadUtils.cpp | 14 +- .../MergeTree/MergeTreeBlockReadUtils.h | 8 +- .../Storages/MergeTree/MergeTreeDataPart.h | 2 +- .../MergeTree/MergeTreeIndexBloomFilter.cpp | 4 +- .../MergeTreeIndexConditionBloomFilter.cpp | 6 +- .../MergeTreeIndexConditionBloomFilter.h | 2 +- .../MergeTree/MergeTreeIndexFullText.cpp | 8 +- .../MergeTree/MergeTreeIndexFullText.h | 2 +- .../MergeTreeIndexGranuleBloomFilter.cpp | 18 +- .../MergeTreeIndexGranuleBloomFilter.h | 4 +- .../MergeTree/MergeTreeIndexMinMax.cpp | 20 +- .../Storages/MergeTree/MergeTreeIndexMinMax.h | 4 +- .../MergeTree/MergeTreeIndexReader.cpp | 12 +- .../Storages/MergeTree/MergeTreeIndexReader.h | 8 +- .../Storages/MergeTree/MergeTreeIndexSet.cpp | 18 +- .../Storages/MergeTree/MergeTreeIndexSet.h | 4 +- .../src/Storages/MergeTree/MergeTreeIndices.h | 24 +-- .../MergeTree/MergeTreeRangeReader.cpp | 26 +-- .../Storages/MergeTree/MergeTreeRangeReader.h | 8 +- .../Storages/MergeTree/MergeTreeReadPool.cpp | 20 +- .../Storages/MergeTree/MergeTreeReadPool.h | 10 +- .../Storages/MergeTree/MergeTreeReader.cpp | 22 +- dbms/src/Storages/MergeTree/MergeTreeReader.h | 20 +- .../MergeTreeSelectBlockInputStream.cpp | 4 +- .../MergeTreeSelectBlockInputStream.h | 2 +- .../MergeTreeThreadSelectBlockInputStream.cpp | 10 +- .../MergeTreeThreadSelectBlockInputStream.h | 22 +- .../MergeTree/MergeTreeWhereOptimizer.cpp | 8 +- .../MergeTree/MergeTreeWhereOptimizer.h | 4 +- .../MergedColumnOnlyOutputStream.cpp | 4 +- .../MergeTree/MergedColumnOnlyOutputStream.h | 2 +- .../src/Storages/MergeTree/RangesInDataPart.h | 6 +- .../ReplicatedMergeTreeBlockOutputStream.cpp | 4 +- .../ReplicatedMergeTreeBlockOutputStream.h | 2 +- .../ReplicatedMergeTreePartCheckThread.h | 2 +- .../MergeTree/ReplicatedMergeTreeQueue.cpp | 14 +- .../MergeTree/ReplicatedMergeTreeQueue.h | 4 +- .../Storages/MergeTree/SimpleMergeSelector.h | 2 +- dbms/src/Storages/MergeTree/checkDataPart.cpp | 4 +- dbms/src/Storages/StorageDistributed.cpp | 14 +- dbms/src/Storages/StorageDistributed.h | 4 +- dbms/src/Storages/StorageFile.cpp | 4 +- dbms/src/Storages/StorageLog.cpp | 4 +- dbms/src/Storages/StorageMergeTree.cpp | 4 +- dbms/src/Storages/StorageMySQL.cpp | 56 +++--- dbms/src/Storages/StorageMySQL.h | 14 +- .../Storages/StorageReplicatedMergeTree.cpp | 4 +- dbms/src/Storages/StorageTinyLog.cpp | 4 +- dbms/src/Storages/StorageValues.cpp | 4 +- dbms/src/Storages/StorageValues.h | 2 +- .../Storages/System/StorageSystemColumns.cpp | 16 +- .../System/StorageSystemDataTypeFamilies.cpp | 10 +- .../System/StorageSystemDetachedParts.cpp | 20 +- .../Storages/System/StorageSystemFormats.cpp | 4 +- .../System/StorageSystemFunctions.cpp | 8 +- .../Storages/System/StorageSystemModels.cpp | 4 +- .../Storages/System/StorageSystemParts.cpp | 70 +++---- dbms/src/Storages/System/StorageSystemParts.h | 2 +- .../System/StorageSystemPartsBase.cpp | 8 +- .../System/StorageSystemPartsColumns.cpp | 78 ++++---- 
.../System/StorageSystemReplicationQueue.cpp | 4 +- .../System/StorageSystemTableFunctions.cpp | 4 +- .../Storages/System/StorageSystemTables.cpp | 12 +- .../TableFunctions/TableFunctionRemote.cpp | 4 +- dbms/src/TableFunctions/TableFunctionRemote.h | 2 +- libs/libcommon/include/ext/enumerate.h | 4 +- libs/libcommon/include/ext/scope_guard.h | 6 +- libs/libloggers/loggers/ExtendedLogChannel.h | 2 +- 395 files changed, 2080 insertions(+), 2078 deletions(-) diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt index a9e276852c1..b589c398238 100644 --- a/dbms/CMakeLists.txt +++ b/dbms/CMakeLists.txt @@ -48,7 +48,7 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wshadow -Wshadow-uncaptured-local -Wextra-semi -Wcomma -Winconsistent-missing-destructor-override -Wunused-exception-parameter -Wcovered-switch-default -Wold-style-cast -Wrange-loop-analysis -Wunused-member-function -Wunreachable-code -Wunreachable-code-return -Wnewline-eof -Wembedded-directive -Wgnu-case-range -Wunused-macros -Wconditional-uninitialized -Wdeprecated -Wundef -Wreserved-id-macro -Wredundant-parens -Wzero-as-null-pointer-constant") if (WEVERYTHING) - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Weverything -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-padded -Wno-switch-enum -Wno-shadow-field-in-constructor -Wno-deprecated-dynamic-exception-spec -Wno-float-equal -Wno-weak-vtables -Wno-shift-sign-overflow -Wno-sign-conversion -Wno-conversion -Wno-exit-time-destructors -Wno-undefined-func-template -Wno-documentation-unknown-command -Wno-missing-variable-declarations -Wno-unused-template -Wno-global-constructors -Wno-c99-extensions -Wno-missing-prototypes -Wno-weak-template-vtables -Wno-zero-length-array -Wno-gnu-anonymous-struct -Wno-nested-anon-types -Wno-double-promotion -Wno-disabled-macro-expansion -Wno-vla-extension -Wno-vla -Wno-packed") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Weverything -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-padded -Wno-switch-enum -Wno-deprecated-dynamic-exception-spec -Wno-float-equal -Wno-weak-vtables -Wno-shift-sign-overflow -Wno-sign-conversion -Wno-conversion -Wno-exit-time-destructors -Wno-undefined-func-template -Wno-documentation-unknown-command -Wno-missing-variable-declarations -Wno-unused-template -Wno-global-constructors -Wno-c99-extensions -Wno-missing-prototypes -Wno-weak-template-vtables -Wno-zero-length-array -Wno-gnu-anonymous-struct -Wno-nested-anon-types -Wno-double-promotion -Wno-disabled-macro-expansion -Wno-vla-extension -Wno-vla -Wno-packed") # TODO Enable conversion, sign-conversion, double-promotion warnings. 
endif () @@ -71,7 +71,9 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-ctad-maybe-unsupported") endif () endif () -endif () +elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wshadow") +endif() if (USE_DEBUG_HELPERS) set (INCLUDE_DEBUG_HELPERS "-include ${ClickHouse_SOURCE_DIR}/libs/libcommon/include/common/iostream_debug_helpers.h") diff --git a/dbms/programs/copier/ClusterCopier.cpp b/dbms/programs/copier/ClusterCopier.cpp index 43158dedd71..435d06da854 100644 --- a/dbms/programs/copier/ClusterCopier.cpp +++ b/dbms/programs/copier/ClusterCopier.cpp @@ -123,7 +123,7 @@ enum class TaskState struct TaskStateWithOwner { TaskStateWithOwner() = default; - TaskStateWithOwner(TaskState state, const String & owner) : state(state), owner(owner) {} + TaskStateWithOwner(TaskState state_, const String & owner_) : state(state_), owner(owner_) {} TaskState state{TaskState::Unknown}; String owner; @@ -2100,9 +2100,9 @@ void ClusterCopierApp::initialize(Poco::Util::Application & self) // process_id is '#_' time_t timestamp = Poco::Timestamp().epochTime(); - auto pid = Poco::Process::id(); + auto curr_pid = Poco::Process::id(); - process_id = std::to_string(DateLUT::instance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(pid); + process_id = std::to_string(DateLUT::instance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(curr_pid); host_id = escapeForFileName(getFQDNOrHostName()) + '#' + process_id; process_path = Poco::Path(base_dir + "/clickhouse-copier_" + process_id).absolute().toString(); Poco::File(process_path).createDirectories(); diff --git a/dbms/programs/obfuscator/Obfuscator.cpp b/dbms/programs/obfuscator/Obfuscator.cpp index 3c20510d481..a96c10072dc 100644 --- a/dbms/programs/obfuscator/Obfuscator.cpp +++ b/dbms/programs/obfuscator/Obfuscator.cpp @@ -176,7 +176,7 @@ private: const UInt64 seed; public: - UnsignedIntegerModel(UInt64 seed) : seed(seed) {} + UnsignedIntegerModel(UInt64 seed_) : seed(seed_) {} void train(const IColumn &) override {} void finalize() override {} @@ -212,7 +212,7 @@ private: const UInt64 seed; public: - SignedIntegerModel(UInt64 seed) : seed(seed) {} + SignedIntegerModel(UInt64 seed_) : seed(seed_) {} void train(const IColumn &) override {} void finalize() override {} @@ -256,7 +256,7 @@ private: Float res_prev_value = 0; public: - FloatModel(UInt64 seed) : seed(seed) {} + FloatModel(UInt64 seed_) : seed(seed_) {} void train(const IColumn &) override {} void finalize() override {} @@ -348,7 +348,7 @@ private: const UInt64 seed; public: - FixedStringModel(UInt64 seed) : seed(seed) {} + FixedStringModel(UInt64 seed_) : seed(seed_) {} void train(const IColumn &) override {} void finalize() override {} @@ -385,7 +385,7 @@ private: const DateLUTImpl & date_lut; public: - DateTimeModel(UInt64 seed) : seed(seed), date_lut(DateLUT::instance()) {} + DateTimeModel(UInt64 seed_) : seed(seed_), date_lut(DateLUT::instance()) {} void train(const IColumn &) override {} void finalize() override {} @@ -533,8 +533,8 @@ private: } public: - MarkovModel(MarkovModelParameters params) - : params(std::move(params)), code_points(params.order, BEGIN) {} + MarkovModel(MarkovModelParameters params_) + : params(std::move(params_)), code_points(params.order, BEGIN) {} void consume(const char * data, size_t size) { @@ -745,7 +745,7 @@ private: MarkovModel markov_model; public: - StringModel(UInt64 seed, MarkovModelParameters params) : seed(seed), markov_model(std::move(params)) {} + 
StringModel(UInt64 seed_, MarkovModelParameters params_) : seed(seed_), markov_model(std::move(params_)) {} void train(const IColumn & column) override { @@ -797,7 +797,7 @@ private: ModelPtr nested_model; public: - ArrayModel(ModelPtr nested_model) : nested_model(std::move(nested_model)) {} + ArrayModel(ModelPtr nested_model_) : nested_model(std::move(nested_model_)) {} void train(const IColumn & column) override { @@ -830,7 +830,7 @@ private: ModelPtr nested_model; public: - NullableModel(ModelPtr nested_model) : nested_model(std::move(nested_model)) {} + NullableModel(ModelPtr nested_model_) : nested_model(std::move(nested_model_)) {} void train(const IColumn & column) override { diff --git a/dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp b/dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp index dcedac47f42..70aaba3f137 100644 --- a/dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp +++ b/dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp @@ -18,12 +18,12 @@ namespace ErrorCodes ODBCBlockInputStream::ODBCBlockInputStream( - Poco::Data::Session && session, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size) - : session{session} + Poco::Data::Session && session_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_) + : session{session_} , statement{(this->session << query_str, Poco::Data::Keywords::now)} , result{statement} , iterator{result.begin()} - , max_block_size{max_block_size} + , max_block_size{max_block_size_} , log(&Logger::get("ODBCBlockInputStream")) { if (sample_block.columns() != result.columnCount()) @@ -43,46 +43,46 @@ namespace { switch (type) { - case ValueType::UInt8: + case ValueType::vtUInt8: static_cast(column).insertValue(value.convert()); break; - case ValueType::UInt16: + case ValueType::vtUInt16: static_cast(column).insertValue(value.convert()); break; - case ValueType::UInt32: + case ValueType::vtUInt32: static_cast(column).insertValue(value.convert()); break; - case ValueType::UInt64: + case ValueType::vtUInt64: static_cast(column).insertValue(value.convert()); break; - case ValueType::Int8: + case ValueType::vtInt8: static_cast(column).insertValue(value.convert()); break; - case ValueType::Int16: + case ValueType::vtInt16: static_cast(column).insertValue(value.convert()); break; - case ValueType::Int32: + case ValueType::vtInt32: static_cast(column).insertValue(value.convert()); break; - case ValueType::Int64: + case ValueType::vtInt64: static_cast(column).insertValue(value.convert()); break; - case ValueType::Float32: + case ValueType::vtFloat32: static_cast(column).insertValue(value.convert()); break; - case ValueType::Float64: + case ValueType::vtFloat64: static_cast(column).insertValue(value.convert()); break; - case ValueType::String: + case ValueType::vtString: static_cast(column).insert(value.convert()); break; - case ValueType::Date: + case ValueType::vtDate: static_cast(column).insertValue(UInt16{LocalDate{value.convert()}.getDayNum()}); break; - case ValueType::DateTime: + case ValueType::vtDateTime: static_cast(column).insertValue(time_t{LocalDateTime{value.convert()}}); break; - case ValueType::UUID: + case ValueType::vtUUID: static_cast(column).insert(parse(value.convert())); break; } diff --git a/dbms/programs/odbc-bridge/ODBCBlockInputStream.h b/dbms/programs/odbc-bridge/ODBCBlockInputStream.h index d22aad91232..13491e05822 100644 --- a/dbms/programs/odbc-bridge/ODBCBlockInputStream.h +++ b/dbms/programs/odbc-bridge/ODBCBlockInputStream.h @@ -16,7 +16,7 @@ class 
ODBCBlockInputStream final : public IBlockInputStream { public: ODBCBlockInputStream( - Poco::Data::Session && session, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size); + Poco::Data::Session && session_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_); String getName() const override { return "ODBC"; } diff --git a/dbms/programs/server/MetricsTransmitter.cpp b/dbms/programs/server/MetricsTransmitter.cpp index e8cf4a2c21b..8419d3e1b8c 100644 --- a/dbms/programs/server/MetricsTransmitter.cpp +++ b/dbms/programs/server/MetricsTransmitter.cpp @@ -16,8 +16,8 @@ namespace DB { MetricsTransmitter::MetricsTransmitter( - const Poco::Util::AbstractConfiguration & config, const std::string & config_name, const AsynchronousMetrics & async_metrics) - : async_metrics(async_metrics), config_name(config_name) + const Poco::Util::AbstractConfiguration & config, const std::string & config_name_, const AsynchronousMetrics & async_metrics_) + : async_metrics(async_metrics_), config_name(config_name_) { interval_seconds = config.getInt(config_name + ".interval", 60); send_events = config.getBool(config_name + ".events", true); diff --git a/dbms/programs/server/MetricsTransmitter.h b/dbms/programs/server/MetricsTransmitter.h index 69a11bf2bad..b9c7fd7f179 100644 --- a/dbms/programs/server/MetricsTransmitter.h +++ b/dbms/programs/server/MetricsTransmitter.h @@ -32,7 +32,7 @@ class AsynchronousMetrics; class MetricsTransmitter { public: - MetricsTransmitter(const Poco::Util::AbstractConfiguration & config, const std::string & config_name, const AsynchronousMetrics & async_metrics); + MetricsTransmitter(const Poco::Util::AbstractConfiguration & config, const std::string & config_name_, const AsynchronousMetrics & async_metrics_); ~MetricsTransmitter(); private: diff --git a/dbms/programs/server/MySQLHandler.cpp b/dbms/programs/server/MySQLHandler.cpp index 6a943183104..ffc2c9ae200 100644 --- a/dbms/programs/server/MySQLHandler.cpp +++ b/dbms/programs/server/MySQLHandler.cpp @@ -37,14 +37,14 @@ namespace ErrorCodes extern const int OPENSSL_ERROR; } -MySQLHandler::MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key, RSA & private_key, bool ssl_enabled, size_t connection_id) +MySQLHandler::MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key_, RSA & private_key_, bool ssl_enabled, size_t connection_id_) : Poco::Net::TCPServerConnection(socket_) , server(server_) , log(&Poco::Logger::get("MySQLHandler")) , connection_context(server.context()) - , connection_id(connection_id) - , public_key(public_key) - , private_key(private_key) + , connection_id(connection_id_) + , public_key(public_key_) + , private_key(private_key_) { server_capability_flags = CLIENT_PROTOCOL_41 | CLIENT_SECURE_CONNECTION | CLIENT_PLUGIN_AUTH | CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA | CLIENT_CONNECT_WITH_DB | CLIENT_DEPRECATE_EOF; if (ssl_enabled) diff --git a/dbms/programs/server/MySQLHandler.h b/dbms/programs/server/MySQLHandler.h index e899d8ef501..a5465916f8e 100644 --- a/dbms/programs/server/MySQLHandler.h +++ b/dbms/programs/server/MySQLHandler.h @@ -14,7 +14,7 @@ namespace DB class MySQLHandler : public Poco::Net::TCPServerConnection { public: - MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key, RSA & private_key, bool ssl_enabled, size_t connection_id); + MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key_, 
RSA & private_key_, bool ssl_enabled, size_t connection_id_); void run() final; diff --git a/dbms/programs/server/Server.cpp b/dbms/programs/server/Server.cpp index c2fbce603d4..464de1c7066 100644 --- a/dbms/programs/server/Server.cpp +++ b/dbms/programs/server/Server.cpp @@ -156,19 +156,19 @@ std::string Server::getDefaultCorePath() const return getCanonicalPath(config().getString("path", DBMS_DEFAULT_PATH)) + "cores"; } -void Server::defineOptions(Poco::Util::OptionSet & _options) +void Server::defineOptions(Poco::Util::OptionSet & options) { - _options.addOption( + options.addOption( Poco::Util::Option("help", "h", "show help and exit") .required(false) .repeatable(false) .binding("help")); - _options.addOption( + options.addOption( Poco::Util::Option("version", "V", "show version and exit") .required(false) .repeatable(false) .binding("version")); - BaseDaemon::defineOptions(_options); + BaseDaemon::defineOptions(options); } int Server::main(const std::vector & /*args*/) diff --git a/dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h b/dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h index a5a1b777fa3..ec151baa305 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h @@ -35,8 +35,8 @@ private: const DataTypePtr & type_val; public: - AggregateFunctionArgMinMax(const DataTypePtr & type_res, const DataTypePtr & type_val) - : IAggregateFunctionDataHelper>({type_res, type_val}, {}), + AggregateFunctionArgMinMax(const DataTypePtr & type_res_, const DataTypePtr & type_val_) + : IAggregateFunctionDataHelper>({type_res_, type_val_}, {}), type_res(this->argument_types[0]), type_val(this->argument_types[1]) { if (!type_val->isComparable()) diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupArray.h b/dbms/src/AggregateFunctions/AggregateFunctionGroupArray.h index d732d65ecf8..f578cee9d00 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionGroupArray.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionGroupArray.h @@ -253,8 +253,8 @@ class GroupArrayGeneralListImpl final UInt64 max_elems; public: - GroupArrayGeneralListImpl(const DataTypePtr & data_type, UInt64 max_elems_ = std::numeric_limits::max()) - : IAggregateFunctionDataHelper, GroupArrayGeneralListImpl>({data_type}, {}) + GroupArrayGeneralListImpl(const DataTypePtr & data_type_, UInt64 max_elems_ = std::numeric_limits::max()) + : IAggregateFunctionDataHelper, GroupArrayGeneralListImpl>({data_type_}, {}) , data_type(this->argument_types[0]), max_elems(max_elems_) {} String getName() const override { return "groupArray"; } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h b/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h index 7a913c48ffa..029bf6efe83 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h @@ -164,8 +164,8 @@ class AggregateFunctionGroupUniqArrayGeneric } public: - AggregateFunctionGroupUniqArrayGeneric(const DataTypePtr & input_data_type, UInt64 max_elems_ = std::numeric_limits::max()) - : IAggregateFunctionDataHelper>({input_data_type}, {}) + AggregateFunctionGroupUniqArrayGeneric(const DataTypePtr & input_data_type_, UInt64 max_elems_ = std::numeric_limits::max()) + : IAggregateFunctionDataHelper>({input_data_type_}, {}) , input_data_type(this->argument_types[0]) , max_elems(max_elems_) {} diff --git a/dbms/src/AggregateFunctions/AggregateFunctionHistogram.h 
b/dbms/src/AggregateFunctions/AggregateFunctionHistogram.h index df6078d86fc..04aa88a806c 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionHistogram.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionHistogram.h @@ -304,9 +304,9 @@ private: const UInt32 max_bins; public: - AggregateFunctionHistogram(UInt32 max_bins, const DataTypes & arguments, const Array & params) + AggregateFunctionHistogram(UInt32 max_bins_, const DataTypes & arguments, const Array & params) : IAggregateFunctionDataHelper>(arguments, params) - , max_bins(max_bins) + , max_bins(max_bins_) { } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.cpp b/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.cpp index 9d82e6930ee..2f4962f26a2 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.cpp +++ b/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.cpp @@ -104,21 +104,21 @@ void registerAggregateFunctionMLMethod(AggregateFunctionFactory & factory) } LinearModelData::LinearModelData( - Float64 learning_rate, - Float64 l2_reg_coef, - UInt64 param_num, - UInt64 batch_capacity, - std::shared_ptr gradient_computer, - std::shared_ptr weights_updater) - : learning_rate(learning_rate) - , l2_reg_coef(l2_reg_coef) - , batch_capacity(batch_capacity) + Float64 learning_rate_, + Float64 l2_reg_coef_, + UInt64 param_num_, + UInt64 batch_capacity_, + std::shared_ptr gradient_computer_, + std::shared_ptr weights_updater_) + : learning_rate(learning_rate_) + , l2_reg_coef(l2_reg_coef_) + , batch_capacity(batch_capacity_) , batch_size(0) - , gradient_computer(std::move(gradient_computer)) - , weights_updater(std::move(weights_updater)) + , gradient_computer(std::move(gradient_computer_)) + , weights_updater(std::move(weights_updater_)) { - weights.resize(param_num, Float64{0.0}); - gradient_batch.resize(param_num + 1, Float64{0.0}); + weights.resize(param_num_, Float64{0.0}); + gradient_batch.resize(param_num_ + 1, Float64{0.0}); } void LinearModelData::update_state() diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.h b/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.h index 95ac64c21d8..a5d558364a5 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.h @@ -248,12 +248,12 @@ public: LinearModelData() {} LinearModelData( - Float64 learning_rate, - Float64 l2_reg_coef, - UInt64 param_num, - UInt64 batch_capacity, - std::shared_ptr gradient_computer, - std::shared_ptr weights_updater); + Float64 learning_rate_, + Float64 l2_reg_coef_, + UInt64 param_num_, + UInt64 batch_capacity_, + std::shared_ptr gradient_computer_, + std::shared_ptr weights_updater_); void add(const IColumn ** columns, size_t row_num); @@ -304,21 +304,21 @@ public: String getName() const override { return Name::name; } explicit AggregateFunctionMLMethod( - UInt32 param_num, - std::unique_ptr gradient_computer, - std::string weights_updater_name, - Float64 learning_rate, - Float64 l2_reg_coef, - UInt64 batch_size, + UInt32 param_num_, + std::unique_ptr gradient_computer_, + std::string weights_updater_name_, + Float64 learning_rate_, + Float64 l2_reg_coef_, + UInt64 batch_size_, const DataTypes & arguments_types, const Array & params) : IAggregateFunctionDataHelper>(arguments_types, params) - , param_num(param_num) - , learning_rate(learning_rate) - , l2_reg_coef(l2_reg_coef) - , batch_size(batch_size) - , gradient_computer(std::move(gradient_computer)) - , weights_updater_name(std::move(weights_updater_name)) + , 
param_num(param_num_) + , learning_rate(learning_rate_) + , l2_reg_coef(l2_reg_coef_) + , batch_size(batch_size_) + , gradient_computer(std::move(gradient_computer_)) + , weights_updater_name(std::move(weights_updater_name_)) { } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index 6660e03b529..019968994b1 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -679,8 +679,8 @@ private: DataTypePtr & type; public: - AggregateFunctionsSingleValue(const DataTypePtr & type) - : IAggregateFunctionDataHelper>({type}, {}) + AggregateFunctionsSingleValue(const DataTypePtr & type_) + : IAggregateFunctionDataHelper>({type_}, {}) , type(this->argument_types[0]) { if (StringRef(Data::name()) == StringRef("min") diff --git a/dbms/src/AggregateFunctions/AggregateFunctionQuantile.h b/dbms/src/AggregateFunctions/AggregateFunctionQuantile.h index 2e9ec914b99..1461b1bcae9 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionQuantile.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionQuantile.h @@ -76,8 +76,8 @@ private: DataTypePtr & argument_type; public: - AggregateFunctionQuantile(const DataTypePtr & argument_type, const Array & params) - : IAggregateFunctionDataHelper>({argument_type}, params) + AggregateFunctionQuantile(const DataTypePtr & argument_type_, const Array & params) + : IAggregateFunctionDataHelper>({argument_type_}, params) , levels(params, returns_many), level(levels.levels[0]), argument_type(this->argument_types[0]) { if (!returns_many && levels.size() > 1) diff --git a/dbms/src/AggregateFunctions/AggregateFunctionResample.h b/dbms/src/AggregateFunctions/AggregateFunctionResample.h index e82f08366df..894e0e18f51 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionResample.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionResample.h @@ -33,18 +33,18 @@ private: public: AggregateFunctionResample( - AggregateFunctionPtr nested_function, - Key begin, - Key end, - size_t step, + AggregateFunctionPtr nested_function_, + Key begin_, + Key end_, + size_t step_, const DataTypes & arguments, const Array & params) : IAggregateFunctionHelper>{arguments, params} - , nested_function{nested_function} + , nested_function{nested_function_} , last_col{arguments.size() - 1} - , begin{begin} - , end{end} - , step{step} + , begin{begin_} + , end{end_} + , step{step_} , total{0} , aod{nested_function->alignOfData()} , sod{(nested_function->sizeOfData() + aod - 1) / aod * aod} diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.h b/dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.h index 80860fdb62a..e4b6985316f 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.h @@ -142,9 +142,9 @@ template class AggregateFunctionSequenceBase : public IAggregateFunctionDataHelper { public: - AggregateFunctionSequenceBase(const DataTypes & arguments, const Array & params, const String & pattern) + AggregateFunctionSequenceBase(const DataTypes & arguments, const Array & params, const String & pattern_) : IAggregateFunctionDataHelper(arguments, params) - , pattern(pattern) + , pattern(pattern_) { arg_count = arguments.size(); parsePattern(); @@ -199,7 +199,7 @@ private: std::uint64_t extra; PatternAction() = default; - PatternAction(const PatternActionType type, const std::uint64_t extra = 0) : type{type}, extra{extra} {} + 
PatternAction(const PatternActionType type_, const std::uint64_t extra_ = 0) : type{type_}, extra{extra_} {} }; using PatternActions = PODArrayWithStackMemory; @@ -520,8 +520,8 @@ private: struct DFAState { - DFAState(bool has_kleene = false) - : has_kleene{has_kleene}, event{0}, transition{DFATransition::None} + DFAState(bool has_kleene_ = false) + : has_kleene{has_kleene_}, event{0}, transition{DFATransition::None} {} /// .-------. @@ -554,8 +554,8 @@ template class AggregateFunctionSequenceMatch final : public AggregateFunctionSequenceBase> { public: - AggregateFunctionSequenceMatch(const DataTypes & arguments, const Array & params, const String & pattern) - : AggregateFunctionSequenceBase>(arguments, params, pattern) {} + AggregateFunctionSequenceMatch(const DataTypes & arguments, const Array & params, const String & pattern_) + : AggregateFunctionSequenceBase>(arguments, params, pattern_) {} using AggregateFunctionSequenceBase>::AggregateFunctionSequenceBase; @@ -582,8 +582,8 @@ template class AggregateFunctionSequenceCount final : public AggregateFunctionSequenceBase> { public: - AggregateFunctionSequenceCount(const DataTypes & arguments, const Array & params, const String & pattern) - : AggregateFunctionSequenceBase>(arguments, params, pattern) {} + AggregateFunctionSequenceCount(const DataTypes & arguments, const Array & params, const String & pattern_) + : AggregateFunctionSequenceBase>(arguments, params, pattern_) {} using AggregateFunctionSequenceBase>::AggregateFunctionSequenceBase; diff --git a/dbms/src/AggregateFunctions/AggregateFunctionState.h b/dbms/src/AggregateFunctions/AggregateFunctionState.h index 2d8e5c6a537..1f49ac80db9 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionState.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionState.h @@ -23,9 +23,9 @@ private: Array params; public: - AggregateFunctionState(AggregateFunctionPtr nested, const DataTypes & arguments, const Array & params) - : IAggregateFunctionHelper(arguments, params) - , nested_func(nested), arguments(arguments), params(params) {} + AggregateFunctionState(AggregateFunctionPtr nested_, const DataTypes & arguments_, const Array & params_) + : IAggregateFunctionHelper(arguments_, params_) + , nested_func(nested_), arguments(arguments_), params(params_) {} String getName() const override { diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSumMap.h b/dbms/src/AggregateFunctions/AggregateFunctionSumMap.h index 6837379f98f..34bc92edfa0 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionSumMap.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionSumMap.h @@ -62,10 +62,10 @@ private: public: AggregateFunctionSumMapBase( - const DataTypePtr & keys_type, const DataTypes & values_types, + const DataTypePtr & keys_type_, const DataTypes & values_types_, const DataTypes & argument_types_, const Array & params_) : IAggregateFunctionDataHelper>, Derived>(argument_types_, params_) - , keys_type(keys_type), values_types(values_types) {} + , keys_type(keys_type_), values_types(values_types_) {} String getName() const override { return "sumMap"; } @@ -295,9 +295,9 @@ private: public: AggregateFunctionSumMapFiltered( - const DataTypePtr & keys_type, const DataTypes & values_types, const Array & keys_to_keep_, + const DataTypePtr & keys_type_, const DataTypes & values_types_, const Array & keys_to_keep_, const DataTypes & argument_types_, const Array & params_) - : Base{keys_type, values_types, argument_types_, params_} + : Base{keys_type_, values_types_, argument_types_, params_} { 
keys_to_keep.reserve(keys_to_keep_.size()); for (const Field & f : keys_to_keep_) diff --git a/dbms/src/AggregateFunctions/AggregateFunctionTopK.h b/dbms/src/AggregateFunctions/AggregateFunctionTopK.h index 72b724843a1..9bcfa07b78e 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionTopK.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionTopK.h @@ -44,9 +44,9 @@ protected: UInt64 reserved; public: - AggregateFunctionTopK(UInt64 threshold, UInt64 load_factor, const DataTypes & argument_types_, const Array & params) + AggregateFunctionTopK(UInt64 threshold_, UInt64 load_factor, const DataTypes & argument_types_, const Array & params) : IAggregateFunctionDataHelper, AggregateFunctionTopK>(argument_types_, params) - , threshold(threshold), reserved(load_factor * threshold) {} + , threshold(threshold_), reserved(load_factor * threshold) {} String getName() const override { return is_weighted ? "topKWeighted" : "topK"; } @@ -139,9 +139,9 @@ private: public: AggregateFunctionTopKGeneric( - UInt64 threshold, UInt64 load_factor, const DataTypePtr & input_data_type, const Array & params) - : IAggregateFunctionDataHelper>({input_data_type}, params) - , threshold(threshold), reserved(load_factor * threshold), input_data_type(this->argument_types[0]) {} + UInt64 threshold_, UInt64 load_factor, const DataTypePtr & input_data_type_, const Array & params) + : IAggregateFunctionDataHelper>({input_data_type_}, params) + , threshold(threshold_), reserved(load_factor * threshold), input_data_type(this->argument_types[0]) {} String getName() const override { return is_weighted ? "topKWeighted" : "topK"; } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.h b/dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.h index de9ca69c17f..88e16f330ce 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.h @@ -136,9 +136,9 @@ private: UInt8 threshold; public: - AggregateFunctionUniqUpTo(UInt8 threshold, const DataTypes & argument_types_, const Array & params_) + AggregateFunctionUniqUpTo(UInt8 threshold_, const DataTypes & argument_types_, const Array & params_) : IAggregateFunctionDataHelper, AggregateFunctionUniqUpTo>(argument_types_, params_) - , threshold(threshold) + , threshold(threshold_) { } @@ -196,9 +196,9 @@ private: UInt8 threshold; public: - AggregateFunctionUniqUpToVariadic(const DataTypes & arguments, const Array & params, UInt8 threshold) + AggregateFunctionUniqUpToVariadic(const DataTypes & arguments, const Array & params, UInt8 threshold_) : IAggregateFunctionDataHelper, AggregateFunctionUniqUpToVariadic>(arguments, params) - , threshold(threshold) + , threshold(threshold_) { if (argument_is_tuple) num_args = typeid_cast(*arguments[0]).getElements().size(); diff --git a/dbms/src/AggregateFunctions/QuantileTDigest.h b/dbms/src/AggregateFunctions/QuantileTDigest.h index f7201ef3b0d..91211d25173 100644 --- a/dbms/src/AggregateFunctions/QuantileTDigest.h +++ b/dbms/src/AggregateFunctions/QuantileTDigest.h @@ -50,9 +50,9 @@ class QuantileTDigest Centroid() = default; - explicit Centroid(Value mean, Count count) - : mean(mean) - , count(count) + explicit Centroid(Value mean_, Count count_) + : mean(mean_) + , count(count_) {} Centroid & operator+=(const Centroid & other) diff --git a/dbms/src/AggregateFunctions/ReservoirSamplerDeterministic.h b/dbms/src/AggregateFunctions/ReservoirSamplerDeterministic.h index 4beeecd93bc..52d0181fce1 100644 --- a/dbms/src/AggregateFunctions/ReservoirSamplerDeterministic.h 
+++ b/dbms/src/AggregateFunctions/ReservoirSamplerDeterministic.h @@ -53,8 +53,8 @@ class ReservoirSamplerDeterministic } public: - ReservoirSamplerDeterministic(const size_t sample_count = DEFAULT_SAMPLE_COUNT) - : sample_count{sample_count} + ReservoirSamplerDeterministic(const size_t sample_count_ = DEFAULT_SAMPLE_COUNT) + : sample_count{sample_count_} { } diff --git a/dbms/src/Columns/ColumnConst.cpp b/dbms/src/Columns/ColumnConst.cpp index 3703d24f1cb..91036499871 100644 --- a/dbms/src/Columns/ColumnConst.cpp +++ b/dbms/src/Columns/ColumnConst.cpp @@ -13,8 +13,8 @@ namespace ErrorCodes extern const int SIZES_OF_COLUMNS_DOESNT_MATCH; } -ColumnConst::ColumnConst(const ColumnPtr & data_, size_t s) - : data(data_), s(s) +ColumnConst::ColumnConst(const ColumnPtr & data_, size_t s_) + : data(data_), s(s_) { /// Squash Const of Const. while (const ColumnConst * const_data = typeid_cast(data.get())) diff --git a/dbms/src/Columns/ColumnConst.h b/dbms/src/Columns/ColumnConst.h index 6731061a797..6b320f12f28 100644 --- a/dbms/src/Columns/ColumnConst.h +++ b/dbms/src/Columns/ColumnConst.h @@ -26,7 +26,7 @@ private: WrappedPtr data; size_t s; - ColumnConst(const ColumnPtr & data, size_t s); + ColumnConst(const ColumnPtr & data, size_t s_); ColumnConst(const ColumnConst & src) = default; public: diff --git a/dbms/src/Columns/ColumnFunction.cpp b/dbms/src/Columns/ColumnFunction.cpp index 4fb34959ddc..75fe22446f4 100644 --- a/dbms/src/Columns/ColumnFunction.cpp +++ b/dbms/src/Columns/ColumnFunction.cpp @@ -13,8 +13,8 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -ColumnFunction::ColumnFunction(size_t size, FunctionBasePtr function, const ColumnsWithTypeAndName & columns_to_capture) - : size_(size), function(function) +ColumnFunction::ColumnFunction(size_t size, FunctionBasePtr function_, const ColumnsWithTypeAndName & columns_to_capture) + : size_(size), function(function_) { appendArguments(columns_to_capture); } diff --git a/dbms/src/Columns/ColumnFunction.h b/dbms/src/Columns/ColumnFunction.h index 571123ae892..77475b97da8 100644 --- a/dbms/src/Columns/ColumnFunction.h +++ b/dbms/src/Columns/ColumnFunction.h @@ -20,7 +20,7 @@ class ColumnFunction final : public COWHelper private: friend class COWHelper; - ColumnFunction(size_t size, FunctionBasePtr function, const ColumnsWithTypeAndName & columns_to_capture); + ColumnFunction(size_t size, FunctionBasePtr function_, const ColumnsWithTypeAndName & columns_to_capture); public: const char * getFamilyName() const override { return "Function"; } diff --git a/dbms/src/Columns/ColumnLowCardinality.cpp b/dbms/src/Columns/ColumnLowCardinality.cpp index 1dbb3f8574f..152570f1a6f 100644 --- a/dbms/src/Columns/ColumnLowCardinality.cpp +++ b/dbms/src/Columns/ColumnLowCardinality.cpp @@ -360,12 +360,12 @@ bool ColumnLowCardinality::containsNull() const ColumnLowCardinality::Index::Index() : positions(ColumnUInt8::create()), size_of_type(sizeof(UInt8)) {} -ColumnLowCardinality::Index::Index(MutableColumnPtr && positions) : positions(std::move(positions)) +ColumnLowCardinality::Index::Index(MutableColumnPtr && positions_) : positions(std::move(positions_)) { updateSizeOfType(); } -ColumnLowCardinality::Index::Index(ColumnPtr positions) : positions(std::move(positions)) +ColumnLowCardinality::Index::Index(ColumnPtr positions_) : positions(std::move(positions_)) { updateSizeOfType(); } diff --git a/dbms/src/Columns/ColumnLowCardinality.h b/dbms/src/Columns/ColumnLowCardinality.h index a6a129fbb09..9081938e2c6 100644 --- 
a/dbms/src/Columns/ColumnLowCardinality.h +++ b/dbms/src/Columns/ColumnLowCardinality.h @@ -201,8 +201,8 @@ public: public: Index(); Index(const Index & other) = default; - explicit Index(MutableColumnPtr && positions); - explicit Index(ColumnPtr positions); + explicit Index(MutableColumnPtr && positions_); + explicit Index(ColumnPtr positions_); const ColumnPtr & getPositions() const { return positions; } WrappedPtr & getPositionsPtr() { return positions; } diff --git a/dbms/src/Columns/ColumnTuple.cpp b/dbms/src/Columns/ColumnTuple.cpp index 3ad7f007edf..bef717361df 100644 --- a/dbms/src/Columns/ColumnTuple.cpp +++ b/dbms/src/Columns/ColumnTuple.cpp @@ -257,8 +257,8 @@ struct ColumnTuple::Less TupleColumns columns; int nan_direction_hint; - Less(const TupleColumns & columns, int nan_direction_hint_) - : columns(columns), nan_direction_hint(nan_direction_hint_) + Less(const TupleColumns & columns_, int nan_direction_hint_) + : columns(columns_), nan_direction_hint(nan_direction_hint_) { } diff --git a/dbms/src/Columns/ColumnUnique.h b/dbms/src/Columns/ColumnUnique.h index 0c5efd8058d..154bb457f01 100644 --- a/dbms/src/Columns/ColumnUnique.h +++ b/dbms/src/Columns/ColumnUnique.h @@ -186,10 +186,10 @@ ColumnUnique::ColumnUnique(const IDataType & type) } template -ColumnUnique::ColumnUnique(MutableColumnPtr && holder, bool is_nullable) +ColumnUnique::ColumnUnique(MutableColumnPtr && holder, bool is_nullable_) : column_holder(std::move(holder)) - , is_nullable(is_nullable) - , index(numSpecialValues(is_nullable), 0) + , is_nullable(is_nullable_) + , index(numSpecialValues(is_nullable_), 0) { if (column_holder->size() < numSpecialValues()) throw Exception("Too small holder column for ColumnUnique.", ErrorCodes::ILLEGAL_COLUMN); diff --git a/dbms/src/Columns/ReverseIndex.h b/dbms/src/Columns/ReverseIndex.h index 43d191bbc3e..8fa4e87680b 100644 --- a/dbms/src/Columns/ReverseIndex.h +++ b/dbms/src/Columns/ReverseIndex.h @@ -235,8 +235,8 @@ template class ReverseIndex { public: - explicit ReverseIndex(UInt64 num_prefix_rows_to_skip, UInt64 base_index) - : num_prefix_rows_to_skip(num_prefix_rows_to_skip), base_index(base_index), saved_hash_ptr(nullptr) {} + explicit ReverseIndex(UInt64 num_prefix_rows_to_skip_, UInt64 base_index_) + : num_prefix_rows_to_skip(num_prefix_rows_to_skip_), base_index(base_index_), saved_hash_ptr(nullptr) {} void setColumn(ColumnType * column_); diff --git a/dbms/src/Common/ColumnsHashing.h b/dbms/src/Common/ColumnsHashing.h index 661f6527d8e..bf564738f7a 100644 --- a/dbms/src/Common/ColumnsHashing.h +++ b/dbms/src/Common/ColumnsHashing.h @@ -243,11 +243,11 @@ struct HashMethodSingleLowCardinalityColumn : public SingleColumnMethod throw Exception("Cache wasn't created for HashMethodSingleLowCardinalityColumn", ErrorCodes::LOGICAL_ERROR); - LowCardinalityDictionaryCache * cache; + LowCardinalityDictionaryCache * lcd_cache; if constexpr (use_cache) { - cache = typeid_cast(context.get()); - if (!cache) + lcd_cache = typeid_cast(context.get()); + if (!lcd_cache) { const auto & cached_val = *context; throw Exception("Invalid type for HashMethodSingleLowCardinalityColumn cache: " @@ -267,7 +267,7 @@ struct HashMethodSingleLowCardinalityColumn : public SingleColumnMethod { dictionary_key = {column->getDictionary().getHash(), dict->size()}; if constexpr (use_cache) - cached_values = cache->get(dictionary_key); + cached_values = lcd_cache->get(dictionary_key); } if (cached_values) @@ -288,7 +288,7 @@ struct HashMethodSingleLowCardinalityColumn : public SingleColumnMethod 
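The cache -> lcd_cache renaming in the surrounding ColumnsHashing hunks follows the same motivation: presumably the local pointer's generic name collided with another 'cache' in scope once -Wshadow started reporting it. A small compilable sketch (hypothetical Lookup type, not the patch's class) of why such shadowing is worth renaming away:

    #include <cstdio>

    struct Lookup
    {
        int cache = 1;

        int get(int cache) const   /// -Wshadow (GCC): parameter shadows the member 'cache'
        {
            return cache;          /// silently refers to the parameter, not the member
        }
    };

    int main()
    {
        std::printf("%d\n", Lookup{}.get(2));   /// prints 2, not 1; easy to misread
    }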
cached_values->saved_hash = saved_hash; cached_values->dictionary_holder = dictionary_holder; - cache->set(dictionary_key, cached_values); + lcd_cache->set(dictionary_key, cached_values); } } } @@ -470,8 +470,8 @@ struct HashMethodKeysFixed Sizes key_sizes; size_t keys_size; - HashMethodKeysFixed(const ColumnRawPtrs & key_columns, const Sizes & key_sizes, const HashMethodContextPtr &) - : Base(key_columns), key_sizes(std::move(key_sizes)), keys_size(key_columns.size()) + HashMethodKeysFixed(const ColumnRawPtrs & key_columns, const Sizes & key_sizes_, const HashMethodContextPtr &) + : Base(key_columns), key_sizes(std::move(key_sizes_)), keys_size(key_columns.size()) { if constexpr (has_low_cardinality) { @@ -525,8 +525,8 @@ struct HashMethodSerialized ColumnRawPtrs key_columns; size_t keys_size; - HashMethodSerialized(const ColumnRawPtrs & key_columns, const Sizes & /*key_sizes*/, const HashMethodContextPtr &) - : key_columns(key_columns), keys_size(key_columns.size()) {} + HashMethodSerialized(const ColumnRawPtrs & key_columns_, const Sizes & /*key_sizes*/, const HashMethodContextPtr &) + : key_columns(key_columns_), keys_size(key_columns_.size()) {} protected: friend class columns_hashing_impl::HashMethodBase; @@ -550,8 +550,8 @@ struct HashMethodHashed ColumnRawPtrs key_columns; - HashMethodHashed(ColumnRawPtrs key_columns, const Sizes &, const HashMethodContextPtr &) - : key_columns(std::move(key_columns)) {} + HashMethodHashed(ColumnRawPtrs key_columns_, const Sizes &, const HashMethodContextPtr &) + : key_columns(std::move(key_columns_)) {} ALWAYS_INLINE Key getKey(size_t row, Arena &) const { return hash128(row, key_columns.size(), key_columns); } diff --git a/dbms/src/Common/ColumnsHashingImpl.h b/dbms/src/Common/ColumnsHashingImpl.h index 2a6cdb6cd69..d980a3f1b64 100644 --- a/dbms/src/Common/ColumnsHashingImpl.h +++ b/dbms/src/Common/ColumnsHashingImpl.h @@ -56,8 +56,8 @@ class EmplaceResultImpl bool inserted; public: - EmplaceResultImpl(Mapped & value, Mapped & cached_value, bool inserted) - : value(value), cached_value(cached_value), inserted(inserted) {} + EmplaceResultImpl(Mapped & value_, Mapped & cached_value_, bool inserted_) + : value(value_), cached_value(cached_value_), inserted(inserted_) {} bool isInserted() const { return inserted; } auto & getMapped() const { return value; } @@ -75,7 +75,7 @@ class EmplaceResultImpl bool inserted; public: - explicit EmplaceResultImpl(bool inserted) : inserted(inserted) {} + explicit EmplaceResultImpl(bool inserted_) : inserted(inserted_) {} bool isInserted() const { return inserted; } }; @@ -86,7 +86,7 @@ class FindResultImpl bool found; public: - FindResultImpl(Mapped * value, bool found) : value(value), found(found) {} + FindResultImpl(Mapped * value_, bool found_) : value(value_), found(found_) {} bool isFound() const { return found; } Mapped & getMapped() const { return *value; } }; @@ -97,7 +97,7 @@ class FindResultImpl bool found; public: - explicit FindResultImpl(bool found) : found(found) {} + explicit FindResultImpl(bool found_) : found(found_) {} bool isFound() const { return found; } }; diff --git a/dbms/src/Common/CurrentMetrics.h b/dbms/src/Common/CurrentMetrics.h index 43c85caba3c..b87504ef49a 100644 --- a/dbms/src/Common/CurrentMetrics.h +++ b/dbms/src/Common/CurrentMetrics.h @@ -59,15 +59,15 @@ namespace CurrentMetrics std::atomic * what; Value amount; - Increment(std::atomic * what, Value amount) - : what(what), amount(amount) + Increment(std::atomic * what_, Value amount_) + : what(what_), amount(amount_) { *what += 
amount; } public: - Increment(Metric metric, Value amount = 1) - : Increment(&values[metric], amount) {} + Increment(Metric metric, Value amount_ = 1) + : Increment(&values[metric], amount_) {} ~Increment() { diff --git a/dbms/src/Common/Elf.cpp b/dbms/src/Common/Elf.cpp index bb51b837a13..035477d0243 100644 --- a/dbms/src/Common/Elf.cpp +++ b/dbms/src/Common/Elf.cpp @@ -55,8 +55,8 @@ Elf::Elf(const std::string & path) } -Elf::Section::Section(const ElfShdr & header, const Elf & elf) - : header(header), elf(elf) +Elf::Section::Section(const ElfShdr & header_, const Elf & elf_) + : header(header_), elf(elf_) { } diff --git a/dbms/src/Common/Elf.h b/dbms/src/Common/Elf.h index 7f7fcc538b5..869b869b530 100644 --- a/dbms/src/Common/Elf.h +++ b/dbms/src/Common/Elf.h @@ -35,7 +35,7 @@ public: const char * end() const; size_t size() const; - Section(const ElfShdr & header, const Elf & elf); + Section(const ElfShdr & header_, const Elf & elf_); private: const Elf & elf; diff --git a/dbms/src/Common/FieldVisitors.cpp b/dbms/src/Common/FieldVisitors.cpp index f77977b3eed..9a437d5ffe6 100644 --- a/dbms/src/Common/FieldVisitors.cpp +++ b/dbms/src/Common/FieldVisitors.cpp @@ -167,7 +167,7 @@ String FieldVisitorToString::operator() (const Tuple & x_def) const } -FieldVisitorHash::FieldVisitorHash(SipHash & hash) : hash(hash) {} +FieldVisitorHash::FieldVisitorHash(SipHash & hash_) : hash(hash_) {} void FieldVisitorHash::operator() (const Null &) const { diff --git a/dbms/src/Common/FieldVisitors.h b/dbms/src/Common/FieldVisitors.h index 5575c607b3b..56d3c84decc 100644 --- a/dbms/src/Common/FieldVisitors.h +++ b/dbms/src/Common/FieldVisitors.h @@ -222,7 +222,7 @@ class FieldVisitorHash : public StaticVisitor<> private: SipHash & hash; public: - FieldVisitorHash(SipHash & hash); + FieldVisitorHash(SipHash & hash_); void operator() (const Null & x) const; void operator() (const UInt64 & x) const; diff --git a/dbms/src/Common/MemoryTracker.h b/dbms/src/Common/MemoryTracker.h index 4ce0ac262fa..620e1c44c83 100644 --- a/dbms/src/Common/MemoryTracker.h +++ b/dbms/src/Common/MemoryTracker.h @@ -31,9 +31,9 @@ class MemoryTracker const char * description = nullptr; public: - MemoryTracker(VariableContext level = VariableContext::Thread) : level(level) {} - MemoryTracker(Int64 limit_, VariableContext level = VariableContext::Thread) : limit(limit_), level(level) {} - MemoryTracker(MemoryTracker * parent_, VariableContext level = VariableContext::Thread) : parent(parent_), level(level) {} + MemoryTracker(VariableContext level_ = VariableContext::Thread) : level(level_) {} + MemoryTracker(Int64 limit_, VariableContext level_ = VariableContext::Thread) : limit(limit_), level(level_) {} + MemoryTracker(MemoryTracker * parent_, VariableContext level_ = VariableContext::Thread) : parent(parent_), level(level_) {} ~MemoryTracker(); diff --git a/dbms/src/Common/ProfileEvents.cpp b/dbms/src/Common/ProfileEvents.cpp index e9b11c823ed..22cc9e57a2f 100644 --- a/dbms/src/Common/ProfileEvents.cpp +++ b/dbms/src/Common/ProfileEvents.cpp @@ -191,10 +191,10 @@ Counters global_counters(global_counters_array); const Event Counters::num_counters = END; -Counters::Counters(VariableContext level, Counters * parent) +Counters::Counters(VariableContext level_, Counters * parent_) : counters_holder(new Counter[num_counters] {}), - parent(parent), - level(level) + parent(parent_), + level(level_) { counters = counters_holder.get(); } diff --git a/dbms/src/Common/ProfileEvents.h b/dbms/src/Common/ProfileEvents.h index 
78b4ebbf42e..ca327c9810b 100644 --- a/dbms/src/Common/ProfileEvents.h +++ b/dbms/src/Common/ProfileEvents.h @@ -33,7 +33,7 @@ namespace ProfileEvents VariableContext level = VariableContext::Thread; /// By default, any instance have to increment global counters - Counters(VariableContext level = VariableContext::Thread, Counters * parent = &global_counters); + Counters(VariableContext level_ = VariableContext::Thread, Counters * parent_ = &global_counters); /// Global level static initializer Counters(Counter * allocated_counters) diff --git a/dbms/src/Common/QueryProfiler.cpp b/dbms/src/Common/QueryProfiler.cpp index 64ac6311065..51d139d8fe0 100644 --- a/dbms/src/Common/QueryProfiler.cpp +++ b/dbms/src/Common/QueryProfiler.cpp @@ -127,9 +127,9 @@ namespace ErrorCodes } template -QueryProfilerBase::QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal) +QueryProfilerBase::QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal_) : log(&Logger::get("QueryProfiler")) - , pause_signal(pause_signal) + , pause_signal(pause_signal_) { #if USE_INTERNAL_UNWIND_LIBRARY /// Sanity check. diff --git a/dbms/src/Common/QueryProfiler.h b/dbms/src/Common/QueryProfiler.h index 5eaf5e2e7f7..48b5ffc8b2c 100644 --- a/dbms/src/Common/QueryProfiler.h +++ b/dbms/src/Common/QueryProfiler.h @@ -35,7 +35,7 @@ template class QueryProfilerBase { public: - QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal); + QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal_); ~QueryProfilerBase(); private: diff --git a/dbms/src/Common/RWLock.cpp b/dbms/src/Common/RWLock.cpp index cbdf8b842c6..e343ce0b0cd 100644 --- a/dbms/src/Common/RWLock.cpp +++ b/dbms/src/Common/RWLock.cpp @@ -161,9 +161,9 @@ RWLockImpl::LockHolderImpl::~LockHolderImpl() } -RWLockImpl::LockHolderImpl::LockHolderImpl(RWLock && parent, RWLockImpl::GroupsContainer::iterator it_group, - RWLockImpl::ClientsContainer::iterator it_client) - : parent{std::move(parent)}, it_group{it_group}, it_client{it_client}, +RWLockImpl::LockHolderImpl::LockHolderImpl(RWLock && parent_, RWLockImpl::GroupsContainer::iterator it_group_, + RWLockImpl::ClientsContainer::iterator it_client_) + : parent{std::move(parent_)}, it_group{it_group_}, it_client{it_client_}, active_client_increment{(*it_client == RWLockImpl::Read) ? 
CurrentMetrics::RWLockActiveReaders : CurrentMetrics::RWLockActiveWriters} {} diff --git a/dbms/src/Common/RWLock.h b/dbms/src/Common/RWLock.h index 02a1e7cdb94..0467901fa27 100644 --- a/dbms/src/Common/RWLock.h +++ b/dbms/src/Common/RWLock.h @@ -68,7 +68,7 @@ private: std::condition_variable cv; /// all clients of the group wait group condvar - explicit Group(Type type) : type{type} {} + explicit Group(Type type_) : type{type_} {} }; mutable std::mutex mutex; diff --git a/dbms/src/Common/ShellCommand.cpp b/dbms/src/Common/ShellCommand.cpp index 66dbab35a20..8807d795a0d 100644 --- a/dbms/src/Common/ShellCommand.cpp +++ b/dbms/src/Common/ShellCommand.cpp @@ -34,13 +34,13 @@ namespace ErrorCodes extern const int CANNOT_CREATE_CHILD_PROCESS; } -ShellCommand::ShellCommand(pid_t pid, int in_fd, int out_fd, int err_fd, bool terminate_in_destructor_) - : pid(pid) +ShellCommand::ShellCommand(pid_t pid_, int in_fd_, int out_fd_, int err_fd_, bool terminate_in_destructor_) + : pid(pid_) , terminate_in_destructor(terminate_in_destructor_) , log(&Poco::Logger::get("ShellCommand")) - , in(in_fd) - , out(out_fd) - , err(err_fd) {} + , in(in_fd_) + , out(out_fd_) + , err(err_fd_) {} ShellCommand::~ShellCommand() { diff --git a/dbms/src/Common/ShellCommand.h b/dbms/src/Common/ShellCommand.h index 3d1308272e9..0298ec73a2b 100644 --- a/dbms/src/Common/ShellCommand.h +++ b/dbms/src/Common/ShellCommand.h @@ -32,7 +32,7 @@ private: Poco::Logger * log; - ShellCommand(pid_t pid, int in_fd, int out_fd, int err_fd, bool terminate_in_destructor_); + ShellCommand(pid_t pid_, int in_fd_, int out_fd_, int err_fd_, bool terminate_in_destructor_); static std::unique_ptr executeImpl(const char * filename, char * const argv[], bool pipe_stdin_only, bool terminate_in_destructor); diff --git a/dbms/src/Common/Stopwatch.h b/dbms/src/Common/Stopwatch.h index 7bfaccc72b2..6db345c219a 100644 --- a/dbms/src/Common/Stopwatch.h +++ b/dbms/src/Common/Stopwatch.h @@ -86,7 +86,7 @@ public: operator bool() const { return parent != nullptr; } - Lock(AtomicStopwatch * parent) : parent(parent) {} + Lock(AtomicStopwatch * parent_) : parent(parent_) {} Lock(Lock &&) = default; diff --git a/dbms/src/Common/StringSearcher.h b/dbms/src/Common/StringSearcher.h index f722ebc6c55..5e78ff23df1 100644 --- a/dbms/src/Common/StringSearcher.h +++ b/dbms/src/Common/StringSearcher.h @@ -75,8 +75,8 @@ private: #endif public: - StringSearcher(const char * const needle_, const size_t needle_size) - : needle{reinterpret_cast(needle_)}, needle_size{needle_size} + StringSearcher(const char * const needle_, const size_t needle_size_) + : needle{reinterpret_cast(needle_)}, needle_size{needle_size_} { if (0 == needle_size) return; @@ -714,8 +714,8 @@ struct LibCASCIICaseSensitiveStringSearcher { const char * const needle; - LibCASCIICaseSensitiveStringSearcher(const char * const needle, const size_t /* needle_size */) - : needle(needle) {} + LibCASCIICaseSensitiveStringSearcher(const char * const needle_, const size_t /* needle_size */) + : needle(needle_) {} const UInt8 * search(const UInt8 * haystack, const UInt8 * const haystack_end) const { @@ -735,8 +735,8 @@ struct LibCASCIICaseInsensitiveStringSearcher { const char * const needle; - LibCASCIICaseInsensitiveStringSearcher(const char * const needle, const size_t /* needle_size */) - : needle(needle) {} + LibCASCIICaseInsensitiveStringSearcher(const char * const needle_, const size_t /* needle_size */) + : needle(needle_) {} const UInt8 * search(const UInt8 * haystack, const UInt8 * const haystack_end) 
const { diff --git a/dbms/src/Common/ThreadPool.cpp b/dbms/src/Common/ThreadPool.cpp index ce004ed7674..e790ac07839 100644 --- a/dbms/src/Common/ThreadPool.cpp +++ b/dbms/src/Common/ThreadPool.cpp @@ -22,14 +22,14 @@ namespace CurrentMetrics template -ThreadPoolImpl::ThreadPoolImpl(size_t max_threads) - : ThreadPoolImpl(max_threads, max_threads, max_threads) +ThreadPoolImpl::ThreadPoolImpl(size_t max_threads_) + : ThreadPoolImpl(max_threads_, max_threads_, max_threads_) { } template -ThreadPoolImpl::ThreadPoolImpl(size_t max_threads, size_t max_free_threads, size_t queue_size) - : max_threads(max_threads), max_free_threads(max_free_threads), queue_size(queue_size) +ThreadPoolImpl::ThreadPoolImpl(size_t max_threads_, size_t max_free_threads_, size_t queue_size_) + : max_threads(max_threads_), max_free_threads(max_free_threads_), queue_size(queue_size_) { } diff --git a/dbms/src/Common/ThreadPool.h b/dbms/src/Common/ThreadPool.h index 23c0848e931..4354b9194b0 100644 --- a/dbms/src/Common/ThreadPool.h +++ b/dbms/src/Common/ThreadPool.h @@ -31,10 +31,10 @@ public: using Job = std::function; /// Size is constant. Up to num_threads are created on demand and then run until shutdown. - explicit ThreadPoolImpl(size_t max_threads); + explicit ThreadPoolImpl(size_t max_threads_); /// queue_size - maximum number of running plus scheduled jobs. It can be greater than max_threads. Zero means unlimited. - ThreadPoolImpl(size_t max_threads, size_t max_free_threads, size_t queue_size); + ThreadPoolImpl(size_t max_threads_, size_t max_free_threads_, size_t queue_size_); /// Add new job. Locks until number of scheduled jobs is less than maximum or exception in one of threads was thrown. /// If an exception in some thread was thrown, method silently returns, and exception will be rethrown only on call to 'wait' function. 
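The ThreadPoolImpl hunks just above keep the existing delegating-constructor structure and only rename the parameters. A self-contained sketch of that shape (hypothetical Pool class, not the patch's ThreadPoolImpl):

    #include <cstddef>

    class Pool
    {
        size_t max_threads;
        size_t max_free_threads;
        size_t queue_size;

    public:
        /// The one-argument form delegates to the full constructor, mirroring how
        /// ThreadPoolImpl(max_threads_) forwards (max_threads_, max_threads_, max_threads_).
        explicit Pool(size_t max_threads_) : Pool(max_threads_, max_threads_, max_threads_) {}

        Pool(size_t max_threads_, size_t max_free_threads_, size_t queue_size_)
            : max_threads(max_threads_), max_free_threads(max_free_threads_), queue_size(queue_size_) {}
    };

    int main()
    {
        Pool pool(8);   /// all three limits default to 8
        (void)pool;
    }

The underscore suffix keeps each initializer, such as max_threads(max_threads_), unambiguous to both the compiler and the reader.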
@@ -81,8 +81,8 @@ private: Job job; int priority; - JobWithPriority(Job job, int priority) - : job(job), priority(priority) {} + JobWithPriority(Job job_, int priority_) + : job(job_), priority(priority_) {} bool operator< (const JobWithPriority & rhs) const { diff --git a/dbms/src/Common/Throttler.h b/dbms/src/Common/Throttler.h index 3ad50215b9e..a23b0f9db22 100644 --- a/dbms/src/Common/Throttler.h +++ b/dbms/src/Common/Throttler.h @@ -36,12 +36,12 @@ namespace ErrorCodes class Throttler { public: - Throttler(size_t max_speed_, const std::shared_ptr & parent = nullptr) - : max_speed(max_speed_), limit_exceeded_exception_message(""), parent(parent) {} + Throttler(size_t max_speed_, const std::shared_ptr & parent_ = nullptr) + : max_speed(max_speed_), limit_exceeded_exception_message(""), parent(parent_) {} Throttler(size_t max_speed_, size_t limit_, const char * limit_exceeded_exception_message_, - const std::shared_ptr & parent = nullptr) - : max_speed(max_speed_), limit(limit_), limit_exceeded_exception_message(limit_exceeded_exception_message_), parent(parent) {} + const std::shared_ptr & parent_ = nullptr) + : max_speed(max_speed_), limit(limit_), limit_exceeded_exception_message(limit_exceeded_exception_message_), parent(parent_) {} void add(const size_t amount) { diff --git a/dbms/src/Common/TraceCollector.cpp b/dbms/src/Common/TraceCollector.cpp index ff5de756886..6ed2074e47d 100644 --- a/dbms/src/Common/TraceCollector.cpp +++ b/dbms/src/Common/TraceCollector.cpp @@ -28,9 +28,9 @@ namespace ErrorCodes extern const int CANNOT_FCNTL; } -TraceCollector::TraceCollector(std::shared_ptr & trace_log) +TraceCollector::TraceCollector(std::shared_ptr & trace_log_) : log(&Poco::Logger::get("TraceCollector")) - , trace_log(trace_log) + , trace_log(trace_log_) { if (trace_log == nullptr) throw Exception("Invalid trace log pointer passed", ErrorCodes::NULL_POINTER_DEREFERENCE); diff --git a/dbms/src/Common/TraceCollector.h b/dbms/src/Common/TraceCollector.h index 7c07f48776f..5d1b3775356 100644 --- a/dbms/src/Common/TraceCollector.h +++ b/dbms/src/Common/TraceCollector.h @@ -24,7 +24,7 @@ private: static void notifyToStop(); public: - TraceCollector(std::shared_ptr & trace_log); + TraceCollector(std::shared_ptr & trace_log_); ~TraceCollector(); }; diff --git a/dbms/src/Common/UInt128.h b/dbms/src/Common/UInt128.h index 92758ec80ff..b895c514c3e 100644 --- a/dbms/src/Common/UInt128.h +++ b/dbms/src/Common/UInt128.h @@ -28,7 +28,7 @@ struct UInt128 UInt64 high; UInt128() = default; - explicit UInt128(const UInt64 low, const UInt64 high) : low(low), high(high) {} + explicit UInt128(const UInt64 low_, const UInt64 high_) : low(low_), high(high_) {} explicit UInt128(const UInt64 rhs) : low(rhs), high() {} auto tuple() const { return std::tie(high, low); } diff --git a/dbms/src/Common/Volnitsky.h b/dbms/src/Common/Volnitsky.h index 907af50aefa..748cbe09138 100644 --- a/dbms/src/Common/Volnitsky.h +++ b/dbms/src/Common/Volnitsky.h @@ -331,11 +331,11 @@ public: * If you specify it small enough, the fallback algorithm will be used, * since it is considered that it's useless to waste time initializing the hash table. 
*/ - VolnitskyBase(const char * const needle, const size_t needle_size, size_t haystack_size_hint = 0) - : needle{reinterpret_cast(needle)} - , needle_size{needle_size} + VolnitskyBase(const char * const needle_, const size_t needle_size_, size_t haystack_size_hint = 0) + : needle{reinterpret_cast(needle_)} + , needle_size{needle_size_} , fallback{VolnitskyTraits::isFallbackNeedle(needle_size, haystack_size_hint)} - , fallback_searcher{needle, needle_size} + , fallback_searcher{needle_, needle_size} { if (fallback) return; diff --git a/dbms/src/Common/ZooKeeper/IKeeper.cpp b/dbms/src/Common/ZooKeeper/IKeeper.cpp index 114b14d0118..34cfd02b78d 100644 --- a/dbms/src/Common/ZooKeeper/IKeeper.cpp +++ b/dbms/src/Common/ZooKeeper/IKeeper.cpp @@ -23,8 +23,8 @@ namespace ProfileEvents namespace Coordination { -Exception::Exception(const std::string & msg, const int32_t code, int) - : DB::Exception(msg, DB::ErrorCodes::KEEPER_EXCEPTION), code(code) +Exception::Exception(const std::string & msg, const int32_t code_, int) + : DB::Exception(msg, DB::ErrorCodes::KEEPER_EXCEPTION), code(code_) { if (Coordination::isUserError(code)) ProfileEvents::increment(ProfileEvents::ZooKeeperUserExceptions); @@ -34,18 +34,18 @@ Exception::Exception(const std::string & msg, const int32_t code, int) ProfileEvents::increment(ProfileEvents::ZooKeeperOtherExceptions); } -Exception::Exception(const std::string & msg, const int32_t code) - : Exception(msg + " (" + errorMessage(code) + ")", code, 0) +Exception::Exception(const std::string & msg, const int32_t code_) + : Exception(msg + " (" + errorMessage(code_) + ")", code_, 0) { } -Exception::Exception(const int32_t code) - : Exception(errorMessage(code), code, 0) +Exception::Exception(const int32_t code_) + : Exception(errorMessage(code_), code_, 0) { } -Exception::Exception(const int32_t code, const std::string & path) - : Exception(std::string{errorMessage(code)} + ", path: " + path, code, 0) +Exception::Exception(const int32_t code_, const std::string & path) + : Exception(std::string{errorMessage(code_)} + ", path: " + path, code_, 0) { } diff --git a/dbms/src/Common/ZooKeeper/IKeeper.h b/dbms/src/Common/ZooKeeper/IKeeper.h index b4ecb9a7ceb..f415e0306e8 100644 --- a/dbms/src/Common/ZooKeeper/IKeeper.h +++ b/dbms/src/Common/ZooKeeper/IKeeper.h @@ -301,12 +301,12 @@ class Exception : public DB::Exception { private: /// Delegate constructor, used to minimize repetition; last parameter used for overload resolution. 
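The "last parameter used for overload resolution" comment just above names a small idiom worth spelling out: the private delegate constructor takes a trailing dummy int solely so its signature cannot collide with the public two-argument constructor it serves. A compilable sketch with hypothetical names (Error, describe) rather than the patch's Coordination::Exception:

    #include <string>

    static std::string describe(int code) { return "code " + std::to_string(code); }

    class Error
    {
        std::string message;
        int code;

        /// Private delegate target; the unused trailing int only disambiguates overloads.
        Error(const std::string & msg, int code_, int) : message(msg), code(code_) {}

    public:
        /// Public form decorates the message once, then delegates with tag 0.
        Error(const std::string & msg, int code_) : Error(msg + " (" + describe(code_) + ")", code_, 0) {}
    };

    int main()
    {
        Error e("disk full", 28);
        (void)e;
    }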
- Exception(const std::string & msg, const int32_t code, int); + Exception(const std::string & msg, const int32_t code_, int); public: - explicit Exception(const int32_t code); - Exception(const std::string & msg, const int32_t code); - Exception(const int32_t code, const std::string & path); + explicit Exception(const int32_t code_); + Exception(const std::string & msg, const int32_t code_); + Exception(const int32_t code_, const std::string & path); Exception(const Exception & exc); const char * name() const throw() override { return "Coordination::Exception"; } diff --git a/dbms/src/Common/ZooKeeper/TestKeeper.cpp b/dbms/src/Common/ZooKeeper/TestKeeper.cpp index eb42c6d0464..7c7d9bba016 100644 --- a/dbms/src/Common/ZooKeeper/TestKeeper.cpp +++ b/dbms/src/Common/ZooKeeper/TestKeeper.cpp @@ -418,8 +418,8 @@ ResponsePtr TestKeeperCheckRequest::createResponse() const { return std::make_sh ResponsePtr TestKeeperMultiRequest::createResponse() const { return std::make_shared(); } -TestKeeper::TestKeeper(const String & root_path_, Poco::Timespan operation_timeout) - : root_path(root_path_), operation_timeout(operation_timeout) +TestKeeper::TestKeeper(const String & root_path_, Poco::Timespan operation_timeout_) + : root_path(root_path_), operation_timeout(operation_timeout_) { container.emplace("/", Node()); diff --git a/dbms/src/Common/ZooKeeper/TestKeeper.h b/dbms/src/Common/ZooKeeper/TestKeeper.h index 6b26e4cf8a1..01c92c98778 100644 --- a/dbms/src/Common/ZooKeeper/TestKeeper.h +++ b/dbms/src/Common/ZooKeeper/TestKeeper.h @@ -33,7 +33,7 @@ using TestKeeperRequestPtr = std::shared_ptr; class TestKeeper : public IKeeper { public: - TestKeeper(const String & root_path, Poco::Timespan operation_timeout); + TestKeeper(const String & root_path_, Poco::Timespan operation_timeout_); ~TestKeeper() override; bool isExpired() const override { return expired; } diff --git a/dbms/src/Common/ZooKeeper/ZooKeeper.cpp b/dbms/src/Common/ZooKeeper/ZooKeeper.cpp index caebc59ce7f..f60085195ed 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/dbms/src/Common/ZooKeeper/ZooKeeper.cpp @@ -106,10 +106,10 @@ void ZooKeeper::init(const std::string & implementation, const std::string & hos throw KeeperException("Zookeeper root doesn't exist. 
You should create root node " + chroot + " before start.", Coordination::ZNONODE); } -ZooKeeper::ZooKeeper(const std::string & hosts, const std::string & identity, int32_t session_timeout_ms, - int32_t operation_timeout_ms, const std::string & chroot, const std::string & implementation) +ZooKeeper::ZooKeeper(const std::string & hosts_, const std::string & identity_, int32_t session_timeout_ms_, + int32_t operation_timeout_ms_, const std::string & chroot_, const std::string & implementation) { - init(implementation, hosts, identity, session_timeout_ms, operation_timeout_ms, chroot); + init(implementation, hosts_, identity_, session_timeout_ms_, operation_timeout_ms_, chroot_); } struct ZooKeeperArgs @@ -891,9 +891,9 @@ size_t KeeperMultiException::getFailedOpIndex(int32_t exception_code, const Coor } -KeeperMultiException::KeeperMultiException(int32_t exception_code, const Coordination::Requests & requests, const Coordination::Responses & responses) +KeeperMultiException::KeeperMultiException(int32_t exception_code, const Coordination::Requests & requests_, const Coordination::Responses & responses_) : KeeperException("Transaction failed", exception_code), - requests(requests), responses(responses), failed_op_index(getFailedOpIndex(exception_code, responses)) + requests(requests_), responses(responses_), failed_op_index(getFailedOpIndex(exception_code, responses)) { addMessage("Op #" + std::to_string(failed_op_index) + ", path: " + getPathForFirstFailedOp()); } diff --git a/dbms/src/Common/ZooKeeper/ZooKeeper.h b/dbms/src/Common/ZooKeeper/ZooKeeper.h index a888759c134..5bae272102d 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeper.h +++ b/dbms/src/Common/ZooKeeper/ZooKeeper.h @@ -52,10 +52,10 @@ class ZooKeeper public: using Ptr = std::shared_ptr; - ZooKeeper(const std::string & hosts, const std::string & identity = "", - int32_t session_timeout_ms = DEFAULT_SESSION_TIMEOUT, - int32_t operation_timeout_ms = DEFAULT_OPERATION_TIMEOUT, - const std::string & chroot = "", + ZooKeeper(const std::string & hosts_, const std::string & identity_ = "", + int32_t session_timeout_ms_ = DEFAULT_SESSION_TIMEOUT, + int32_t operation_timeout_ms_ = DEFAULT_OPERATION_TIMEOUT, + const std::string & chroot_ = "", const std::string & implementation = "zookeeper"); /** Config of the form: diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp index 41e18b95fcf..be91d4b6d93 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -758,17 +758,17 @@ struct ZooKeeperMultiResponse final : MultiResponse, ZooKeeperResponse { ZooKeeper::OpNum op_num; bool done; - int32_t error; + int32_t error_; Coordination::read(op_num, in); Coordination::read(done, in); - Coordination::read(error, in); + Coordination::read(error_, in); if (!done) throw Exception("Too many results received for multi transaction", ZMARSHALLINGERROR); if (op_num != -1) throw Exception("Unexpected op_num received at the end of results for multi transaction", ZMARSHALLINGERROR); - if (error != -1) + if (error_ != -1) throw Exception("Unexpected error value received at the end of results for multi transaction", ZMARSHALLINGERROR); } } @@ -821,12 +821,12 @@ ZooKeeper::ZooKeeper( const String & root_path_, const String & auth_scheme, const String & auth_data, - Poco::Timespan session_timeout, + Poco::Timespan session_timeout_, Poco::Timespan connection_timeout, - Poco::Timespan operation_timeout) + Poco::Timespan operation_timeout_) : root_path(root_path_), - 
session_timeout(session_timeout), - operation_timeout(std::min(operation_timeout, session_timeout)) + session_timeout(session_timeout_), + operation_timeout(std::min(operation_timeout_, session_timeout_)) { if (!root_path.empty()) { diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h index 24868571d0d..1588c74b412 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h +++ b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h @@ -108,9 +108,9 @@ public: const String & root_path, const String & auth_scheme, const String & auth_data, - Poco::Timespan session_timeout, + Poco::Timespan session_timeout_, Poco::Timespan connection_timeout, - Poco::Timespan operation_timeout); + Poco::Timespan operation_timeout_); ~ZooKeeper() override; diff --git a/dbms/src/Common/hex.h b/dbms/src/Common/hex.h index f5ca4904b8c..81fa725e17d 100644 --- a/dbms/src/Common/hex.h +++ b/dbms/src/Common/hex.h @@ -42,7 +42,7 @@ inline void writeHexByteLowercase(UInt8 byte, void * out) /// Produces hex representation of an unsigned int with leading zeros (for checksums) template -inline void writeHexUIntImpl(TUInt uint, char * out, const char * const table) +inline void writeHexUIntImpl(TUInt uint_, char * out, const char * const table) { union { @@ -50,7 +50,7 @@ inline void writeHexUIntImpl(TUInt uint, char * out, const char * const table) UInt8 uint8[sizeof(TUInt)]; }; - value = uint; + value = uint_; /// Use little endian for (size_t i = 0; i < sizeof(TUInt); ++i) @@ -58,30 +58,30 @@ inline void writeHexUIntImpl(TUInt uint, char * out, const char * const table) } template -inline void writeHexUIntUppercase(TUInt uint, char * out) +inline void writeHexUIntUppercase(TUInt uint_, char * out) { - writeHexUIntImpl(uint, out, hex_byte_to_char_uppercase_table); + writeHexUIntImpl(uint_, out, hex_byte_to_char_uppercase_table); } template -inline void writeHexUIntLowercase(TUInt uint, char * out) +inline void writeHexUIntLowercase(TUInt uint_, char * out) { - writeHexUIntImpl(uint, out, hex_byte_to_char_lowercase_table); + writeHexUIntImpl(uint_, out, hex_byte_to_char_lowercase_table); } template -std::string getHexUIntUppercase(TUInt uint) +std::string getHexUIntUppercase(TUInt uint_) { std::string res(sizeof(TUInt) * 2, '\0'); - writeHexUIntUppercase(uint, res.data()); + writeHexUIntUppercase(uint_, res.data()); return res; } template -std::string getHexUIntLowercase(TUInt uint) +std::string getHexUIntLowercase(TUInt uint_) { std::string res(sizeof(TUInt) * 2, '\0'); - writeHexUIntLowercase(uint, res.data()); + writeHexUIntLowercase(uint_, res.data()); return res; } diff --git a/dbms/src/Common/tests/arena_with_free_lists.cpp b/dbms/src/Common/tests/arena_with_free_lists.cpp index 4d4915f5dcc..20a3e547da0 100644 --- a/dbms/src/Common/tests/arena_with_free_lists.cpp +++ b/dbms/src/Common/tests/arena_with_free_lists.cpp @@ -137,17 +137,17 @@ struct Dictionary enum class AttributeUnderlyingType { - UInt8, - UInt16, - UInt32, - UInt64, - Int8, - Int16, - Int32, - Int64, - Float32, - Float64, - String + utUInt8, + utUInt16, + utUInt32, + utUInt64, + utInt8, + utInt16, + utInt32, + utInt64, + utFloat32, + utFloat64, + utString }; struct Attribute final @@ -172,17 +172,17 @@ struct Dictionary { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt32: std::get>(attribute.arrays)[idx] = value.get(); 
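The hex.h change is the same rename, but the function is worth a gloss: it type-puns the integer into bytes through a union, then writes the bytes in reverse so the text reads big-endian even though the machine stores the value little-endian. A standalone sketch of the idea, assuming a little-endian host and nibble lookups instead of the original's per-byte table:

    #include <cstddef>
    #include <cstring>
    #include <string>

    template <typename UInt>
    std::string to_hex(UInt value)
    {
        static const char digits[] = "0123456789abcdef";
        std::string out(sizeof(UInt) * 2, '0');
        unsigned char bytes[sizeof(UInt)];
        std::memcpy(bytes, &value, sizeof(UInt));  // byte view, like the union
        for (size_t i = 0; i < sizeof(UInt); ++i)
        {
            unsigned char b = bytes[sizeof(UInt) - 1 - i];  // reverse: print MSB first
            out[2 * i] = digits[b >> 4];
            out[2 * i + 1] = digits[b & 0x0f];
        }
        return out;
    }
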
break; - case AttributeUnderlyingType::UInt64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Float32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Float64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utUInt8: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utUInt16: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utUInt32: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utUInt64: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utInt8: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utInt16: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utInt32: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utInt64: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utFloat32: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utFloat64: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utString: { const auto & string = value.get(); auto & string_ref = std::get>(attribute.arrays)[idx]; @@ -308,7 +308,7 @@ int main(int argc, char ** argv) constexpr size_t cache_size = 1024; Dictionary::Attribute attr; - attr.type = Dictionary::AttributeUnderlyingType::String; + attr.type = Dictionary::AttributeUnderlyingType::utString; std::get>(attr.arrays).reset(new StringRef[cache_size]{}); while (true) diff --git a/dbms/src/Common/tests/cow_columns.cpp b/dbms/src/Common/tests/cow_columns.cpp index dad2ba13de5..7b629e264e5 100644 --- a/dbms/src/Common/tests/cow_columns.cpp +++ b/dbms/src/Common/tests/cow_columns.cpp @@ -28,7 +28,7 @@ private: friend class COWHelper; int data; - ConcreteColumn(int data) : data(data) {} + ConcreteColumn(int data_) : data(data_) {} ConcreteColumn(const ConcreteColumn &) = default; MutableColumnPtr test() const override diff --git a/dbms/src/Common/tests/cow_compositions.cpp b/dbms/src/Common/tests/cow_compositions.cpp index a48624d7d64..8d0110a0290 100644 --- a/dbms/src/Common/tests/cow_compositions.cpp +++ b/dbms/src/Common/tests/cow_compositions.cpp @@ -30,7 +30,7 @@ private: friend class COWHelper; int data; - ConcreteColumn(int data) : data(data) {} + ConcreteColumn(int data_) : data(data_) {} ConcreteColumn(const ConcreteColumn &) = default; public: diff --git a/dbms/src/Common/tests/mi_malloc_test.cpp b/dbms/src/Common/tests/mi_malloc_test.cpp index d9ee75fba6e..ce1e4a3a770 100644 --- a/dbms/src/Common/tests/mi_malloc_test.cpp +++ b/dbms/src/Common/tests/mi_malloc_test.cpp @@ -58,8 +58,8 @@ struct Allocation Allocation() {} - Allocation(size_t size) - : size(size) + Allocation(size_t size_) + : size(size_) { ptr = malloc(size); if (!ptr) diff --git a/dbms/src/Compression/CompressionCodecMultiple.cpp b/dbms/src/Compression/CompressionCodecMultiple.cpp index 
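The switches above dispatch on the attribute's type tag and pull the matching array out of a variant-like 'arrays' member; in the original source each std::get names the container explicitly, e.g. std::get<ContainerPtrType<UInt64>>(attribute.arrays). A self-contained sketch of the storage scheme, with illustrative names and std::vector standing in for the real containers:

    #include <cstddef>
    #include <cstdint>
    #include <string>
    #include <variant>
    #include <vector>

    enum class AttrType { utUInt64, utString };

    struct Attribute
    {
        AttrType type;
        std::variant<std::vector<uint64_t>, std::vector<std::string>> arrays;
    };

    void set_value(Attribute & attr, size_t idx, uint64_t value)
    {
        switch (attr.type)
        {
            case AttrType::utUInt64:
                std::get<std::vector<uint64_t>>(attr.arrays)[idx] = value;
                break;
            case AttrType::utString:
                break;  // the real code copies the string into an arena first
        }
    }
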
a40fbafefc9..23c244f4dcb 100644 --- a/dbms/src/Compression/CompressionCodecMultiple.cpp +++ b/dbms/src/Compression/CompressionCodecMultiple.cpp @@ -18,8 +18,8 @@ extern const int UNKNOWN_CODEC; extern const int CORRUPTED_DATA; } -CompressionCodecMultiple::CompressionCodecMultiple(Codecs codecs) - : codecs(codecs) +CompressionCodecMultiple::CompressionCodecMultiple(Codecs codecs_) + : codecs(codecs_) { } diff --git a/dbms/src/Compression/CompressionCodecMultiple.h b/dbms/src/Compression/CompressionCodecMultiple.h index 3770266e915..8702a7ab538 100644 --- a/dbms/src/Compression/CompressionCodecMultiple.h +++ b/dbms/src/Compression/CompressionCodecMultiple.h @@ -9,7 +9,7 @@ class CompressionCodecMultiple final : public ICompressionCodec { public: CompressionCodecMultiple() = default; - explicit CompressionCodecMultiple(Codecs codecs); + explicit CompressionCodecMultiple(Codecs codecs_); UInt8 getMethodByte() const override; diff --git a/dbms/src/Compression/LZ4_decompress_faster.h b/dbms/src/Compression/LZ4_decompress_faster.h index ff29c205276..dd923279ebf 100644 --- a/dbms/src/Compression/LZ4_decompress_faster.h +++ b/dbms/src/Compression/LZ4_decompress_faster.h @@ -123,7 +123,7 @@ struct PerformanceStatistics } PerformanceStatistics() {} - PerformanceStatistics(ssize_t choose_method) : choose_method(choose_method) {} + PerformanceStatistics(ssize_t choose_method_) : choose_method(choose_method_) {} }; diff --git a/dbms/src/Compression/tests/gtest_compressionCodec.cpp b/dbms/src/Compression/tests/gtest_compressionCodec.cpp index 2b2f6927ed4..0f03070fff3 100644 --- a/dbms/src/Compression/tests/gtest_compressionCodec.cpp +++ b/dbms/src/Compression/tests/gtest_compressionCodec.cpp @@ -334,10 +334,10 @@ auto SequentialGenerator = [](auto stride = 1) template struct MonotonicGenerator { - MonotonicGenerator(T stride = 1, size_t max_step = 10) + MonotonicGenerator(T stride_ = 1, size_t max_step_ = 10) : prev_value(0), - stride(stride), - max_step(max_step) + stride(stride_), + max_step(max_step_) {} template @@ -369,9 +369,9 @@ auto MinMaxGenerator = [](auto i) template struct RandomGenerator { - RandomGenerator(T seed = 0, T value_cap = std::numeric_limits::max()) + RandomGenerator(T seed = 0, T value_cap_ = std::numeric_limits::max()) : e(seed), - value_cap(value_cap) + value_cap(value_cap_) { } diff --git a/dbms/src/Core/BackgroundSchedulePool.cpp b/dbms/src/Core/BackgroundSchedulePool.cpp index ee63fdbadff..732be068569 100644 --- a/dbms/src/Core/BackgroundSchedulePool.cpp +++ b/dbms/src/Core/BackgroundSchedulePool.cpp @@ -23,7 +23,7 @@ namespace DB class TaskNotification final : public Poco::Notification { public: - explicit TaskNotification(const BackgroundSchedulePoolTaskInfoPtr & task) : task(task) {} + explicit TaskNotification(const BackgroundSchedulePoolTaskInfoPtr & task_) : task(task_) {} void execute() { task->execute(); } private: @@ -155,8 +155,8 @@ Coordination::WatchCallback BackgroundSchedulePoolTaskInfo::getWatchCallback() } -BackgroundSchedulePool::BackgroundSchedulePool(size_t size) - : size(size) +BackgroundSchedulePool::BackgroundSchedulePool(size_t size_) + : size(size_) { LOG_INFO(&Logger::get("BackgroundSchedulePool"), "Create BackgroundSchedulePool with " << size << " threads"); diff --git a/dbms/src/Core/BackgroundSchedulePool.h b/dbms/src/Core/BackgroundSchedulePool.h index f2627366da7..5a1f6489a7b 100644 --- a/dbms/src/Core/BackgroundSchedulePool.h +++ b/dbms/src/Core/BackgroundSchedulePool.h @@ -49,7 +49,7 @@ public: size_t getNumberOfThreads() const { return size; 
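MonotonicGenerator and RandomGenerator get the same treatment (note RandomGenerator's seed parameter keeps its name: it initializes the engine e, so nothing is shadowed). For context, a sketch of what such a generator looks like, with rand() standing in for the test's randomness:

    #include <cstddef>
    #include <cstdlib>

    // Yields a non-decreasing sequence with a bounded random step, the
    // interesting shape for the delta-style codecs under test. Illustrative.
    template <typename T>
    struct MonotonicGenerator
    {
        MonotonicGenerator(T stride_ = 1, size_t max_step_ = 10)
            : prev_value(0), stride(stride_), max_step(max_step_)
        {
        }

        T operator()()
        {
            prev_value += stride * static_cast<T>(rand() % max_step);
            return prev_value;
        }

        T prev_value;
        const T stride;
        const size_t max_step;
    };
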
} - BackgroundSchedulePool(size_t size); + BackgroundSchedulePool(size_t size_); ~BackgroundSchedulePool(); private: diff --git a/dbms/src/Core/ExternalResultDescription.cpp b/dbms/src/Core/ExternalResultDescription.cpp index c41d8486552..8ca6795d2e9 100644 --- a/dbms/src/Core/ExternalResultDescription.cpp +++ b/dbms/src/Core/ExternalResultDescription.cpp @@ -33,33 +33,33 @@ void ExternalResultDescription::init(const Block & sample_block_) const IDataType * type = type_not_nullable.get(); if (typeid_cast(type)) - types.emplace_back(ValueType::UInt8, is_nullable); + types.emplace_back(ValueType::vtUInt8, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::UInt16, is_nullable); + types.emplace_back(ValueType::vtUInt16, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::UInt32, is_nullable); + types.emplace_back(ValueType::vtUInt32, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::UInt64, is_nullable); + types.emplace_back(ValueType::vtUInt64, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::Int8, is_nullable); + types.emplace_back(ValueType::vtInt8, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::Int16, is_nullable); + types.emplace_back(ValueType::vtInt16, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::Int32, is_nullable); + types.emplace_back(ValueType::vtInt32, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::Int64, is_nullable); + types.emplace_back(ValueType::vtInt64, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::Float32, is_nullable); + types.emplace_back(ValueType::vtFloat32, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::Float64, is_nullable); + types.emplace_back(ValueType::vtFloat64, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::String, is_nullable); + types.emplace_back(ValueType::vtString, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::Date, is_nullable); + types.emplace_back(ValueType::vtDate, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::DateTime, is_nullable); + types.emplace_back(ValueType::vtDateTime, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::UUID, is_nullable); + types.emplace_back(ValueType::vtUUID, is_nullable); else throw Exception{"Unsupported type " + type->getName(), ErrorCodes::UNKNOWN_TYPE}; } diff --git a/dbms/src/Core/ExternalResultDescription.h b/dbms/src/Core/ExternalResultDescription.h index c59104019b7..0bd77afa628 100644 --- a/dbms/src/Core/ExternalResultDescription.h +++ b/dbms/src/Core/ExternalResultDescription.h @@ -12,20 +12,20 @@ struct ExternalResultDescription { enum struct ValueType { - UInt8, - UInt16, - UInt32, - UInt64, - Int8, - Int16, - Int32, - Int64, - Float32, - Float64, - String, - Date, - DateTime, - UUID, + vtUInt8, + vtUInt16, + vtUInt32, + vtUInt64, + vtInt8, + vtInt16, + vtInt32, + vtInt64, + vtFloat32, + vtFloat64, + vtString, + vtDate, + vtDateTime, + vtUUID, }; Block sample_block; diff --git a/dbms/src/Core/MySQLProtocol.h b/dbms/src/Core/MySQLProtocol.h index ccd127352ed..b7a8d514bc2 100644 --- a/dbms/src/Core/MySQLProtocol.h +++ b/dbms/src/Core/MySQLProtocol.h @@ -137,10 +137,10 @@ public: class PacketPayloadReadBuffer : public ReadBuffer { public: - PacketPayloadReadBuffer(ReadBuffer & in, uint8_t & sequence_id) - : ReadBuffer(in.position(), 0) // not in.buffer().begin(), because working buffer 
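ExternalResultDescription::init maps each concrete IDataType to a flat (ValueType, is_nullable) pair; in the original source each probe spells out the subclass, e.g. typeid_cast<const DataTypeUInt8 *>(type). A self-contained analog, using dynamic_cast as a stand-in for ClickHouse's typeid_cast:

    #include <stdexcept>

    enum class ValueType { vtUInt8, vtString };

    struct IDataType { virtual ~IDataType() = default; };
    struct DataTypeUInt8 : IDataType {};
    struct DataTypeString : IDataType {};

    ValueType to_value_type(const IDataType & type)
    {
        if (dynamic_cast<const DataTypeUInt8 *>(&type))
            return ValueType::vtUInt8;
        if (dynamic_cast<const DataTypeString *>(&type))
            return ValueType::vtString;
        throw std::runtime_error("Unsupported type");
    }
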
may include previous packet - , in(in) - , sequence_id(sequence_id) + PacketPayloadReadBuffer(ReadBuffer & in_, uint8_t & sequence_id_) + : ReadBuffer(in_.position(), 0) // not in.buffer().begin(), because working buffer may include previous packet + , in(in_) + , sequence_id(sequence_id_) { } @@ -245,8 +245,8 @@ public: class PacketPayloadWriteBuffer : public WriteBuffer { public: - PacketPayloadWriteBuffer(WriteBuffer & out, size_t payload_length, uint8_t & sequence_id) - : WriteBuffer(out.position(), 0), out(out), sequence_id(sequence_id), total_left(payload_length) + PacketPayloadWriteBuffer(WriteBuffer & out_, size_t payload_length_, uint8_t & sequence_id_) + : WriteBuffer(out_.position(), 0), out(out_), sequence_id(sequence_id_), total_left(payload_length_) { startNewPacket(); setWorkingBuffer(); @@ -347,18 +347,18 @@ public: size_t max_packet_size = MAX_PACKET_LENGTH; /// For reading and writing. - PacketSender(ReadBuffer & in, WriteBuffer & out, uint8_t & sequence_id) - : sequence_id(sequence_id) - , in(&in) - , out(&out) + PacketSender(ReadBuffer & in_, WriteBuffer & out_, uint8_t & sequence_id_) + : sequence_id(sequence_id_) + , in(&in_) + , out(&out_) { } /// For writing. - PacketSender(WriteBuffer & out, uint8_t & sequence_id) - : sequence_id(sequence_id) + PacketSender(WriteBuffer & out_, uint8_t & sequence_id_) + : sequence_id(sequence_id_) , in(nullptr) - , out(&out) + , out(&out_) { } @@ -421,15 +421,15 @@ class Handshake : public WritePacket String auth_plugin_name; String auth_plugin_data; public: - explicit Handshake(uint32_t capability_flags, uint32_t connection_id, String server_version, String auth_plugin_name, String auth_plugin_data) + explicit Handshake(uint32_t capability_flags_, uint32_t connection_id_, String server_version_, String auth_plugin_name_, String auth_plugin_data_) : protocol_version(0xa) - , server_version(std::move(server_version)) - , connection_id(connection_id) - , capability_flags(capability_flags) + , server_version(std::move(server_version_)) + , connection_id(connection_id_) + , capability_flags(capability_flags_) , character_set(CharacterSet::utf8_general_ci) , status_flags(0) - , auth_plugin_name(std::move(auth_plugin_name)) - , auth_plugin_data(std::move(auth_plugin_data)) + , auth_plugin_name(std::move(auth_plugin_name_)) + , auth_plugin_data(std::move(auth_plugin_data_)) { } @@ -532,8 +532,8 @@ class AuthSwitchRequest : public WritePacket String plugin_name; String auth_plugin_data; public: - AuthSwitchRequest(String plugin_name, String auth_plugin_data) - : plugin_name(std::move(plugin_name)), auth_plugin_data(std::move(auth_plugin_data)) + AuthSwitchRequest(String plugin_name_, String auth_plugin_data_) + : plugin_name(std::move(plugin_name_)), auth_plugin_data(std::move(auth_plugin_data_)) { } @@ -566,7 +566,7 @@ class AuthMoreData : public WritePacket { String data; public: - explicit AuthMoreData(String data): data(std::move(data)) {} + explicit AuthMoreData(String data_): data(std::move(data_)) {} protected: size_t getPayloadSize() const override @@ -592,20 +592,20 @@ class OK_Packet : public WritePacket String session_state_changes; String info; public: - OK_Packet(uint8_t header, - uint32_t capabilities, - uint64_t affected_rows, - uint32_t status_flags, - int16_t warnings, - String session_state_changes = "", - String info = "") - : header(header) - , capabilities(capabilities) - , affected_rows(affected_rows) - , warnings(warnings) - , status_flags(status_flags) - , session_state_changes(std::move(session_state_changes)) - , 
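PacketPayloadWriteBuffer and PacketSender implement MySQL's wire framing: every packet is a 3-byte little-endian payload length, a 1-byte sequence id, then at most 0xFFFFFF (MAX_PACKET_LENGTH) payload bytes, with larger payloads split across consecutive packets. A standalone sketch of the framing (buffering, and the empty terminator packet required after a payload that is an exact multiple of the maximum, are simplified away):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    void write_packets(std::vector<uint8_t> & wire, const uint8_t * payload,
                       size_t len, uint8_t & sequence_id)
    {
        const size_t max_packet_length = 0xFFFFFF;
        do
        {
            size_t chunk = len < max_packet_length ? len : max_packet_length;
            wire.push_back(static_cast<uint8_t>(chunk & 0xff));          // length, little endian
            wire.push_back(static_cast<uint8_t>((chunk >> 8) & 0xff));
            wire.push_back(static_cast<uint8_t>((chunk >> 16) & 0xff));
            wire.push_back(sequence_id++);                               // per-connection counter
            wire.insert(wire.end(), payload, payload + chunk);
            payload += chunk;
            len -= chunk;
        } while (len > 0);
    }
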
info(std::move(info)) + OK_Packet(uint8_t header_, + uint32_t capabilities_, + uint64_t affected_rows_, + uint32_t status_flags_, + int16_t warnings_, + String session_state_changes_ = "", + String info_ = "") + : header(header_) + , capabilities(capabilities_) + , affected_rows(affected_rows_) + , warnings(warnings_) + , status_flags(status_flags_) + , session_state_changes(std::move(session_state_changes_)) + , info(std::move(info_)) { } @@ -671,7 +671,7 @@ class EOF_Packet : public WritePacket int warnings; int status_flags; public: - EOF_Packet(int warnings, int status_flags) : warnings(warnings), status_flags(status_flags) + EOF_Packet(int warnings_, int status_flags_) : warnings(warnings_), status_flags(status_flags_) {} protected: @@ -694,8 +694,8 @@ class ERR_Packet : public WritePacket String sql_state; String error_message; public: - ERR_Packet(int error_code, String sql_state, String error_message) - : error_code(error_code), sql_state(std::move(sql_state)), error_message(std::move(error_message)) + ERR_Packet(int error_code_, String sql_state_, String error_message_) + : error_code(error_code_), sql_state(std::move(sql_state_)), error_message(std::move(error_message_)) { } @@ -730,32 +730,32 @@ class ColumnDefinition : public WritePacket uint8_t decimals = 0x00; public: ColumnDefinition( - String schema, - String table, - String org_table, - String name, - String org_name, - uint16_t character_set, - uint32_t column_length, - ColumnType column_type, - uint16_t flags, - uint8_t decimals) + String schema_, + String table_, + String org_table_, + String name_, + String org_name_, + uint16_t character_set_, + uint32_t column_length_, + ColumnType column_type_, + uint16_t flags_, + uint8_t decimals_) - : schema(std::move(schema)), table(std::move(table)), org_table(std::move(org_table)), name(std::move(name)), - org_name(std::move(org_name)), character_set(character_set), column_length(column_length), column_type(column_type), flags(flags), - decimals(decimals) + : schema(std::move(schema_)), table(std::move(table_)), org_table(std::move(org_table_)), name(std::move(name_)), + org_name(std::move(org_name_)), character_set(character_set_), column_length(column_length_), column_type(column_type_), flags(flags_), + decimals(decimals_) { } /// Should be used when column metadata (original name, table, original table, database) is unknown. 
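The second ColumnDefinition constructor (next hunk) delegates to the full one, passing empty strings for the metadata fields it cannot know. A minimal sketch of the delegating-constructor pattern, with illustrative names:

    #include <cstdint>
    #include <string>
    #include <utility>

    struct ColumnDef
    {
        std::string schema;
        std::string table;
        std::string name;
        uint16_t character_set;

        ColumnDef(std::string schema_, std::string table_, std::string name_, uint16_t character_set_)
            : schema(std::move(schema_)), table(std::move(table_)),
              name(std::move(name_)), character_set(character_set_)
        {
        }

        // Metadata unknown: forward to the full constructor.
        ColumnDef(std::string name_, uint16_t character_set_)
            : ColumnDef("", "", std::move(name_), character_set_)
        {
        }
    };
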
ColumnDefinition( - String name, - uint16_t character_set, - uint32_t column_length, - ColumnType column_type, - uint16_t flags, - uint8_t decimals) - : ColumnDefinition("", "", "", std::move(name), "", character_set, column_length, column_type, flags, decimals) + String name_, + uint16_t character_set_, + uint32_t column_length_, + ColumnType column_type_, + uint16_t flags_, + uint8_t decimals_) + : ColumnDefinition("", "", "", std::move(name_), "", character_set_, column_length_, column_type_, flags_, decimals_) { } @@ -801,7 +801,7 @@ class LengthEncodedNumber : public WritePacket { uint64_t value; public: - explicit LengthEncodedNumber(uint64_t value): value(value) + explicit LengthEncodedNumber(uint64_t value_): value(value_) { } diff --git a/dbms/src/Core/Types.h b/dbms/src/Core/Types.h index 75c7cbaff66..81446180cdc 100644 --- a/dbms/src/Core/Types.h +++ b/dbms/src/Core/Types.h @@ -12,6 +12,41 @@ namespace DB struct Null {}; +enum class TypeIndex +{ + Nothing = 0, + UInt8, + UInt16, + UInt32, + UInt64, + UInt128, + Int8, + Int16, + Int32, + Int64, + Int128, + Float32, + Float64, + Date, + DateTime, + String, + FixedString, + Enum8, + Enum16, + Decimal32, + Decimal64, + Decimal128, + UUID, + Array, + Tuple, + Set, + Interval, + Nullable, + Function, + AggregateFunction, + LowCardinality, +}; + using UInt8 = uint8_t; using UInt16 = uint16_t; using UInt32 = uint32_t; @@ -57,41 +92,6 @@ template <> struct TypeName { static const char * get() { return "Float template <> struct TypeName { static const char * get() { return "Float64"; } }; template <> struct TypeName { static const char * get() { return "String"; } }; -enum class TypeIndex -{ - Nothing = 0, - UInt8, - UInt16, - UInt32, - UInt64, - UInt128, - Int8, - Int16, - Int32, - Int64, - Int128, - Float32, - Float64, - Date, - DateTime, - String, - FixedString, - Enum8, - Enum16, - Decimal32, - Decimal64, - Decimal128, - UUID, - Array, - Tuple, - Set, - Interval, - Nullable, - Function, - AggregateFunction, - LowCardinality, -}; - template struct TypeId; template <> struct TypeId { static constexpr const TypeIndex value = TypeIndex::UInt8; }; template <> struct TypeId { static constexpr const TypeIndex value = TypeIndex::UInt16; }; diff --git a/dbms/src/DataStreams/CollapsingFinalBlockInputStream.h b/dbms/src/DataStreams/CollapsingFinalBlockInputStream.h index af049cce7c0..8cfc4cbeab4 100644 --- a/dbms/src/DataStreams/CollapsingFinalBlockInputStream.h +++ b/dbms/src/DataStreams/CollapsingFinalBlockInputStream.h @@ -45,9 +45,9 @@ private: MergingBlock(const Block & block_, size_t stream_index_, const SortDescription & desc, - const String & sign_column_name, - BlockPlainPtrs * output_blocks) - : block(block_), stream_index(stream_index_), output_blocks(output_blocks) + const String & sign_column_name_, + BlockPlainPtrs * output_blocks_) + : block(block_), stream_index(stream_index_), output_blocks(output_blocks_) { sort_columns.resize(desc.size()); for (size_t i = 0; i < desc.size(); ++i) @@ -59,7 +59,7 @@ private: sort_columns[i] = block.safeGetByPosition(column_number).column.get(); } - const IColumn * sign_icolumn = block.getByName(sign_column_name).column.get(); + const IColumn * sign_icolumn = block.getByName(sign_column_name_).column.get(); sign_column = typeid_cast(sign_icolumn); diff --git a/dbms/src/DataStreams/DistinctBlockInputStream.cpp b/dbms/src/DataStreams/DistinctBlockInputStream.cpp index 77ea87f1be3..54d7134d0cd 100644 --- a/dbms/src/DataStreams/DistinctBlockInputStream.cpp +++ 
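LengthEncodedNumber serializes MySQL's length-encoded integer: values below 251 fit in one byte; larger ones get a 0xFC, 0xFD or 0xFE marker followed by 2, 3 or 8 little-endian bytes. Sketch:

    #include <cstdint>
    #include <vector>

    void write_lenenc(std::vector<uint8_t> & out, uint64_t value)
    {
        auto put_le = [&](uint64_t v, int bytes)
        {
            for (int i = 0; i < bytes; ++i)
                out.push_back(static_cast<uint8_t>((v >> (8 * i)) & 0xff));
        };

        if (value < 251)
            out.push_back(static_cast<uint8_t>(value));
        else if (value < (1ULL << 16)) { out.push_back(0xFC); put_le(value, 2); }
        else if (value < (1ULL << 24)) { out.push_back(0xFD); put_le(value, 3); }
        else                           { out.push_back(0xFE); put_le(value, 8); }
    }
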
b/dbms/src/DataStreams/DistinctBlockInputStream.cpp @@ -8,10 +8,10 @@ namespace ErrorCodes extern const int SET_SIZE_LIMIT_EXCEEDED; } -DistinctBlockInputStream::DistinctBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits, UInt64 limit_hint_, const Names & columns) - : columns_names(columns) +DistinctBlockInputStream::DistinctBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits_, UInt64 limit_hint_, const Names & columns_) + : columns_names(columns_) , limit_hint(limit_hint_) - , set_size_limits(set_size_limits) + , set_size_limits(set_size_limits_) { children.push_back(input); } diff --git a/dbms/src/DataStreams/DistinctBlockInputStream.h b/dbms/src/DataStreams/DistinctBlockInputStream.h index 3eb7c5ffcb0..4df0bf46070 100644 --- a/dbms/src/DataStreams/DistinctBlockInputStream.h +++ b/dbms/src/DataStreams/DistinctBlockInputStream.h @@ -17,7 +17,7 @@ class DistinctBlockInputStream : public IBlockInputStream { public: /// Empty columns_ means all collumns. - DistinctBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits, UInt64 limit_hint_, const Names & columns); + DistinctBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits_, UInt64 limit_hint_, const Names & columns_); String getName() const override { return "Distinct"; } diff --git a/dbms/src/DataStreams/DistinctSortedBlockInputStream.cpp b/dbms/src/DataStreams/DistinctSortedBlockInputStream.cpp index 2b715f64823..e8e9f7278aa 100644 --- a/dbms/src/DataStreams/DistinctSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/DistinctSortedBlockInputStream.cpp @@ -9,11 +9,11 @@ namespace ErrorCodes } DistinctSortedBlockInputStream::DistinctSortedBlockInputStream( - const BlockInputStreamPtr & input, const SizeLimits & set_size_limits, UInt64 limit_hint_, const Names & columns) + const BlockInputStreamPtr & input, const SizeLimits & set_size_limits_, UInt64 limit_hint_, const Names & columns) : description(input->getSortDescription()) , columns_names(columns) , limit_hint(limit_hint_) - , set_size_limits(set_size_limits) + , set_size_limits(set_size_limits_) { children.push_back(input); } diff --git a/dbms/src/DataStreams/DistinctSortedBlockInputStream.h b/dbms/src/DataStreams/DistinctSortedBlockInputStream.h index 9ecc053feb5..dbccb892b3f 100644 --- a/dbms/src/DataStreams/DistinctSortedBlockInputStream.h +++ b/dbms/src/DataStreams/DistinctSortedBlockInputStream.h @@ -21,7 +21,7 @@ class DistinctSortedBlockInputStream : public IBlockInputStream { public: /// Empty columns_ means all collumns. 
- DistinctSortedBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits, UInt64 limit_hint_, const Names & columns); + DistinctSortedBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits_, UInt64 limit_hint_, const Names & columns); String getName() const override { return "DistinctSorted"; } diff --git a/dbms/src/DataStreams/FilterBlockInputStream.cpp b/dbms/src/DataStreams/FilterBlockInputStream.cpp index 63782890331..8613bc8cf8f 100644 --- a/dbms/src/DataStreams/FilterBlockInputStream.cpp +++ b/dbms/src/DataStreams/FilterBlockInputStream.cpp @@ -18,8 +18,8 @@ namespace ErrorCodes FilterBlockInputStream::FilterBlockInputStream(const BlockInputStreamPtr & input, const ExpressionActionsPtr & expression_, - const String & filter_column_name, bool remove_filter) - : remove_filter(remove_filter), expression(expression_) + const String & filter_column_name, bool remove_filter_) + : remove_filter(remove_filter_), expression(expression_) { children.push_back(input); diff --git a/dbms/src/DataStreams/FilterBlockInputStream.h b/dbms/src/DataStreams/FilterBlockInputStream.h index 9bee8a50c8a..e287b69460c 100644 --- a/dbms/src/DataStreams/FilterBlockInputStream.h +++ b/dbms/src/DataStreams/FilterBlockInputStream.h @@ -21,7 +21,7 @@ private: public: FilterBlockInputStream(const BlockInputStreamPtr & input, const ExpressionActionsPtr & expression_, - const String & filter_column_name_, bool remove_filter = false); + const String & filter_column_name_, bool remove_filter_ = false); String getName() const override; Block getTotals() override; diff --git a/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp b/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp index fb24d8c37a4..456c43ca802 100644 --- a/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp @@ -14,9 +14,9 @@ namespace ErrorCodes GraphiteRollupSortedBlockInputStream::GraphiteRollupSortedBlockInputStream( const BlockInputStreams & inputs_, const SortDescription & description_, size_t max_block_size_, - const Graphite::Params & params, time_t time_of_merge) + const Graphite::Params & params_, time_t time_of_merge_) : MergingSortedBlockInputStream(inputs_, description_, max_block_size_), - params(params), time_of_merge(time_of_merge) + params(params_), time_of_merge(time_of_merge_) { size_t max_size_of_aggregate_state = 0; size_t max_alignment_of_aggregate_state = 1; diff --git a/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.h b/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.h index 00bd2f4b67e..560274f1dae 100644 --- a/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.h +++ b/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.h @@ -152,7 +152,7 @@ class GraphiteRollupSortedBlockInputStream : public MergingSortedBlockInputStrea public: GraphiteRollupSortedBlockInputStream( const BlockInputStreams & inputs_, const SortDescription & description_, size_t max_block_size_, - const Graphite::Params & params, time_t time_of_merge); + const Graphite::Params & params_, time_t time_of_merge_); String getName() const override { return "GraphiteRollupSorted"; } diff --git a/dbms/src/DataStreams/MaterializingBlockOutputStream.h b/dbms/src/DataStreams/MaterializingBlockOutputStream.h index 9e1efeb29d3..64c2bc12a57 100644 --- a/dbms/src/DataStreams/MaterializingBlockOutputStream.h +++ b/dbms/src/DataStreams/MaterializingBlockOutputStream.h @@ -12,8 +12,8 @@ 
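FilterBlockInputStream is the two-file case: the rename must land in both the header and the .cpp. Parameter names are not part of a function's type, so declaration and definition may legally differ, but keeping them in sync avoids exactly the confusion this series is removing; the default argument, meanwhile, belongs only on the declaration. A sketch with illustrative names:

    // Header: the declaration owns the default argument.
    struct FilterStream
    {
        explicit FilterStream(bool remove_filter_ = false);
        bool remove_filter;
    };

    // Source file: the definition must not repeat the default.
    FilterStream::FilterStream(bool remove_filter_)
        : remove_filter(remove_filter_)
    {
    }
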
namespace DB class MaterializingBlockOutputStream : public IBlockOutputStream { public: - MaterializingBlockOutputStream(const BlockOutputStreamPtr & output, const Block & header) - : output{output}, header(header) {} + MaterializingBlockOutputStream(const BlockOutputStreamPtr & output_, const Block & header_) + : output{output_}, header(header_) {} Block getHeader() const override { return header; } void write(const Block & block) override { output->write(materializeBlock(block)); } diff --git a/dbms/src/DataStreams/MergeSortingBlockInputStream.h b/dbms/src/DataStreams/MergeSortingBlockInputStream.h index e256e575a8e..9f257b82260 100644 --- a/dbms/src/DataStreams/MergeSortingBlockInputStream.h +++ b/dbms/src/DataStreams/MergeSortingBlockInputStream.h @@ -117,8 +117,8 @@ private: CompressedReadBuffer compressed_in; BlockInputStreamPtr block_in; - TemporaryFileStream(const std::string & path, const Block & header) - : file_in(path), compressed_in(file_in), block_in(std::make_shared(compressed_in, header, 0)) {} + TemporaryFileStream(const std::string & path, const Block & header_) + : file_in(path), compressed_in(file_in), block_in(std::make_shared(compressed_in, header_, 0)) {} }; std::vector> temporary_inputs; diff --git a/dbms/src/DataStreams/NullBlockOutputStream.h b/dbms/src/DataStreams/NullBlockOutputStream.h index 3d437527960..8b3e61d35a7 100644 --- a/dbms/src/DataStreams/NullBlockOutputStream.h +++ b/dbms/src/DataStreams/NullBlockOutputStream.h @@ -11,7 +11,7 @@ namespace DB class NullBlockOutputStream : public IBlockOutputStream { public: - NullBlockOutputStream(const Block & header) : header(header) {} + NullBlockOutputStream(const Block & header_) : header(header_) {} Block getHeader() const override { return header; } void write(const Block &) override {} diff --git a/dbms/src/DataStreams/OwningBlockInputStream.h b/dbms/src/DataStreams/OwningBlockInputStream.h index b7ea121814c..dac42028cd7 100644 --- a/dbms/src/DataStreams/OwningBlockInputStream.h +++ b/dbms/src/DataStreams/OwningBlockInputStream.h @@ -14,8 +14,8 @@ template class OwningBlockInputStream : public IBlockInputStream { public: - OwningBlockInputStream(const BlockInputStreamPtr & stream, std::unique_ptr own) - : stream{stream}, own{std::move(own)} + OwningBlockInputStream(const BlockInputStreamPtr & stream_, std::unique_ptr own_) + : stream{stream_}, own{std::move(own_)} { children.push_back(stream); } diff --git a/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h b/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h index 52852526935..5342c03e68f 100644 --- a/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h +++ b/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h @@ -84,11 +84,11 @@ private: ColumnRawPtrs key_columns; Aggregator::AggregateColumns aggregate_columns; - ThreadData(size_t keys_size, size_t aggregates_size) + ThreadData(size_t keys_size_, size_t aggregates_size_) { - key.resize(keys_size); - key_columns.resize(keys_size); - aggregate_columns.resize(aggregates_size); + key.resize(keys_size_); + key_columns.resize(keys_size_); + aggregate_columns.resize(aggregates_size_); } }; diff --git a/dbms/src/DataStreams/SizeLimits.h b/dbms/src/DataStreams/SizeLimits.h index 66373a179ab..41238087613 100644 --- a/dbms/src/DataStreams/SizeLimits.h +++ b/dbms/src/DataStreams/SizeLimits.h @@ -27,8 +27,8 @@ struct SizeLimits OverflowMode overflow_mode = OverflowMode::THROW; SizeLimits() {} - SizeLimits(UInt64 max_rows, UInt64 max_bytes, OverflowMode overflow_mode) - : max_rows(max_rows), 
max_bytes(max_bytes), overflow_mode(overflow_mode) {} + SizeLimits(UInt64 max_rows_, UInt64 max_bytes_, OverflowMode overflow_mode_) + : max_rows(max_rows_), max_bytes(max_bytes_), overflow_mode(overflow_mode_) {} /// Check limits. If exceeded, return false or throw an exception, depending on overflow_mode. bool check(UInt64 rows, UInt64 bytes, const char * what, int exception_code) const; diff --git a/dbms/src/DataStreams/SquashingBlockOutputStream.cpp b/dbms/src/DataStreams/SquashingBlockOutputStream.cpp index 48156ed090f..5d0638c1c57 100644 --- a/dbms/src/DataStreams/SquashingBlockOutputStream.cpp +++ b/dbms/src/DataStreams/SquashingBlockOutputStream.cpp @@ -4,8 +4,8 @@ namespace DB { -SquashingBlockOutputStream::SquashingBlockOutputStream(BlockOutputStreamPtr dst, Block header, size_t min_block_size_rows, size_t min_block_size_bytes) - : output(std::move(dst)), header(std::move(header)), transform(min_block_size_rows, min_block_size_bytes) +SquashingBlockOutputStream::SquashingBlockOutputStream(BlockOutputStreamPtr dst, Block header_, size_t min_block_size_rows, size_t min_block_size_bytes) + : output(std::move(dst)), header(std::move(header_)), transform(min_block_size_rows, min_block_size_bytes) { } diff --git a/dbms/src/DataStreams/SquashingBlockOutputStream.h b/dbms/src/DataStreams/SquashingBlockOutputStream.h index f255d18e331..7828ad7e96d 100644 --- a/dbms/src/DataStreams/SquashingBlockOutputStream.h +++ b/dbms/src/DataStreams/SquashingBlockOutputStream.h @@ -12,7 +12,7 @@ namespace DB class SquashingBlockOutputStream : public IBlockOutputStream { public: - SquashingBlockOutputStream(BlockOutputStreamPtr dst, Block header, size_t min_block_size_rows, size_t min_block_size_bytes); + SquashingBlockOutputStream(BlockOutputStreamPtr dst, Block header_, size_t min_block_size_rows, size_t min_block_size_bytes); Block getHeader() const override { return header; } void write(const Block & block) override; diff --git a/dbms/src/DataStreams/SquashingTransform.cpp b/dbms/src/DataStreams/SquashingTransform.cpp index abac72e79bd..00e3a51582c 100644 --- a/dbms/src/DataStreams/SquashingTransform.cpp +++ b/dbms/src/DataStreams/SquashingTransform.cpp @@ -4,8 +4,8 @@ namespace DB { -SquashingTransform::SquashingTransform(size_t min_block_size_rows, size_t min_block_size_bytes) - : min_block_size_rows(min_block_size_rows), min_block_size_bytes(min_block_size_bytes) +SquashingTransform::SquashingTransform(size_t min_block_size_rows_, size_t min_block_size_bytes_) + : min_block_size_rows(min_block_size_rows_), min_block_size_bytes(min_block_size_bytes_) { } diff --git a/dbms/src/DataStreams/SquashingTransform.h b/dbms/src/DataStreams/SquashingTransform.h index 519c0e1ae4b..f1681c57c8c 100644 --- a/dbms/src/DataStreams/SquashingTransform.h +++ b/dbms/src/DataStreams/SquashingTransform.h @@ -23,7 +23,7 @@ class SquashingTransform { public: /// Conditions on rows and bytes are OR-ed. If one of them is zero, then corresponding condition is ignored. - SquashingTransform(size_t min_block_size_rows, size_t min_block_size_bytes); + SquashingTransform(size_t min_block_size_rows_, size_t min_block_size_bytes_); /// When not ready, you need to pass more blocks to add function. struct Result @@ -32,7 +32,7 @@ public: MutableColumns columns; Result(bool ready_) : ready(ready_) {} - Result(MutableColumns && columns) : ready(true), columns(std::move(columns)) {} + Result(MutableColumns && columns_) : ready(true), columns(std::move(columns_)) {} }; /** Add next block and possibly returns squashed block. 
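SquashingBlockOutputStream shows why the trailing underscore matters most for by-value parameters that get moved: with the old shadowed name, any later use of header in the constructor body would have read the moved-from parameter rather than the member. An illustrative sketch, not ClickHouse code:

    #include <cstddef>
    #include <string>
    #include <utility>

    struct Output
    {
        std::string header;
        std::size_t header_size = 0;

        Output(std::string header_)
            : header(std::move(header_))
        {
            // Unambiguously the member. With a parameter also named 'header',
            // this line would have measured the moved-from, now-empty parameter.
            header_size = header.size();
        }
    };
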
diff --git a/dbms/src/DataTypes/DataTypeInterval.h b/dbms/src/DataTypes/DataTypeInterval.h index 22f088c01f8..fa99ac430b6 100644 --- a/dbms/src/DataTypes/DataTypeInterval.h +++ b/dbms/src/DataTypes/DataTypeInterval.h @@ -54,7 +54,7 @@ public: __builtin_unreachable(); } - DataTypeInterval(Kind kind) : kind(kind) {} + DataTypeInterval(Kind kind_) : kind(kind_) {} std::string doGetName() const override { return std::string("Interval") + kindToString(); } const char * getFamilyName() const override { return "Interval"; } diff --git a/dbms/src/DataTypes/DataTypeLowCardinality.cpp b/dbms/src/DataTypes/DataTypeLowCardinality.cpp index 33d3eb658d3..812c5d04032 100644 --- a/dbms/src/DataTypes/DataTypeLowCardinality.cpp +++ b/dbms/src/DataTypes/DataTypeLowCardinality.cpp @@ -140,11 +140,11 @@ struct IndexesSerializationType } IndexesSerializationType(const IColumn & column, - bool has_additional_keys, - bool need_global_dictionary, + bool has_additional_keys_, + bool need_global_dictionary_, bool enumerate_dictionaries) - : has_additional_keys(has_additional_keys) - , need_global_dictionary(need_global_dictionary) + : has_additional_keys(has_additional_keys_) + , need_global_dictionary(need_global_dictionary_) , need_update_dictionary(enumerate_dictionaries) { if (typeid_cast(&column)) @@ -182,7 +182,7 @@ struct SerializeStateLowCardinality : public IDataType::SerializeBinaryBulkState KeysSerializationVersion key_version; MutableColumnUniquePtr shared_dictionary; - explicit SerializeStateLowCardinality(UInt64 key_version) : key_version(key_version) {} + explicit SerializeStateLowCardinality(UInt64 key_version_) : key_version(key_version_) {} }; struct DeserializeStateLowCardinality : public IDataType::DeserializeBinaryBulkState @@ -201,7 +201,7 @@ struct DeserializeStateLowCardinality : public IDataType::DeserializeBinaryBulkS /// in case of long block of empty arrays we may not need read dictionary at first reading. bool need_update_dictionary = false; - explicit DeserializeStateLowCardinality(UInt64 key_version) : key_version(key_version) {} + explicit DeserializeStateLowCardinality(UInt64 key_version_) : key_version(key_version_) {} }; static SerializeStateLowCardinality * checkAndGetLowCardinalitySerializeState( @@ -791,8 +791,8 @@ namespace const IDataType & keys_type; const Creator & creator; - CreateColumnVector(MutableColumnUniquePtr & column, const IDataType & keys_type, const Creator & creator) - : column(column), keys_type(keys_type), creator(creator) + CreateColumnVector(MutableColumnUniquePtr & column_, const IDataType & keys_type_, const Creator & creator_) + : column(column_), keys_type(keys_type_), creator(creator_) { } diff --git a/dbms/src/DataTypes/IDataType.h b/dbms/src/DataTypes/IDataType.h index e5020fe19de..2812b48e8f0 100644 --- a/dbms/src/DataTypes/IDataType.h +++ b/dbms/src/DataTypes/IDataType.h @@ -98,7 +98,7 @@ public: /// Index of tuple element, starting at 1. 
String tuple_element_name; - Substream(Type type) : type(type) {} + Substream(Type type_) : type(type_) {} }; using SubstreamPath = std::vector; diff --git a/dbms/src/Dictionaries/CacheDictionary.cpp b/dbms/src/Dictionaries/CacheDictionary.cpp index 4d8cd03b3eb..53fc746e565 100644 --- a/dbms/src/Dictionaries/CacheDictionary.cpp +++ b/dbms/src/Dictionaries/CacheDictionary.cpp @@ -61,16 +61,16 @@ inline size_t CacheDictionary::getCellIdx(const Key id) const CacheDictionary::CacheDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - const size_t size) - : name{name} - , dict_struct(dict_struct) - , source_ptr{std::move(source_ptr)} - , dict_lifetime(dict_lifetime) - , size{roundUpToPowerOfTwoOrZero(std::max(size, size_t(max_collision_length)))} + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + const size_t size_) + : name{name_} + , dict_struct(dict_struct_) + , source_ptr{std::move(source_ptr_)} + , dict_lifetime(dict_lifetime_) + , size{roundUpToPowerOfTwoOrZero(std::max(size_, size_t(max_collision_length)))} , size_overlap_mask{this->size - 1} , cells{this->size} , rnd_engine(randomSeed()) @@ -207,7 +207,7 @@ void CacheDictionary::isInConstantVector(const Key child_id, const PaddedPODArra void CacheDictionary::getString(const std::string & attribute_name, const PaddedPODArray & ids, ColumnString * out) const { auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); const auto null_value = StringRef{std::get(attribute.null_values)}; @@ -218,7 +218,7 @@ void CacheDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const ColumnString * const def, ColumnString * const out) const { auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsString(attribute, ids, out, [&](const size_t row) { return def->getDataAt(row); }); } @@ -227,7 +227,7 @@ void CacheDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const String & def, ColumnString * const out) const { auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsString(attribute, ids, out, [&](const size_t) { return StringRef{def}; }); } @@ -354,7 +354,7 @@ void CacheDictionary::createAttributes() { hierarchical_attribute = &attributes.back(); - if (hierarchical_attribute->type != AttributeUnderlyingType::UInt64) + if (hierarchical_attribute->type != AttributeUnderlyingType::utUInt64) throw Exception{name + ": hierarchical attribute must be UInt64.", ErrorCodes::TYPE_MISMATCH}; } } @@ -367,7 +367,7 @@ CacheDictionary::Attribute CacheDictionary::createAttributeWithType(const Attrib switch (type) { #define DISPATCH(TYPE) \ - case AttributeUnderlyingType::TYPE: \ + case AttributeUnderlyingType::ut##TYPE: \ attr.null_values = TYPE(null_value.get>()); \ attr.arrays = std::make_unique>(size); \ bytes_allocated += size * 
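The CacheDictionary constructor above rounds the requested cell count up to a power of two and keeps size_overlap_mask = size - 1, so cell lookup can mask a hash instead of taking a modulo (hence this->size in the remaining initializers once the parameter became size_). A sketch of that indexing scheme:

    #include <cstddef>
    #include <cstdint>

    size_t round_up_to_power_of_two(size_t n)
    {
        size_t r = 1;
        while (r < n)
            r <<= 1;
        return r;
    }

    struct Cache
    {
        explicit Cache(size_t requested_size)
            : size(round_up_to_power_of_two(requested_size))
            , size_overlap_mask(size - 1)
        {
        }

        size_t cell_idx(uint64_t id_hash) const { return id_hash & size_overlap_mask; }

        const size_t size;
        const size_t size_overlap_mask;
    };
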
sizeof(TYPE); \ @@ -387,7 +387,7 @@ CacheDictionary::Attribute CacheDictionary::createAttributeWithType(const Attrib DISPATCH(Float32) DISPATCH(Float64) #undef DISPATCH - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: attr.null_values = null_value.get(); attr.arrays = std::make_unique>(size); bytes_allocated += size * sizeof(StringRef); @@ -403,51 +403,51 @@ void CacheDictionary::setDefaultAttributeValue(Attribute & attribute, const Key { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { const auto & null_value_ref = std::get(attribute.null_values); auto & string_ref = std::get>(attribute.arrays)[idx]; @@ -469,51 +469,51 @@ void CacheDictionary::setAttributeValue(Attribute & attribute, const Key idx, co { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: 
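The DISPATCH macro in createAttributeWithType shows where the new ut prefix pays off: one macro argument yields both the C++ type (TYPE) and the enum member (ut##TYPE) by token pasting, and the prefix presumably keeps the enum members from colliding with the like-named type aliases. A self-contained sketch of the technique:

    #include <cstddef>
    #include <cstdint>

    using UInt8 = uint8_t;
    using UInt16 = uint16_t;

    enum class AttributeUnderlyingType { utUInt8, utUInt16 };

    template <typename T>
    void allocate_array(size_t /*n*/) {}  // stand-in for the real allocation

    #define DISPATCH(TYPE) \
        case AttributeUnderlyingType::ut##TYPE: \
            allocate_array<TYPE>(n); \
            break;

    void create(AttributeUnderlyingType type, size_t n)
    {
        switch (type)
        {
            DISPATCH(UInt8)
            DISPATCH(UInt16)
        }
    }
    #undef DISPATCH
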
std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { const auto & string = value.get(); auto & string_ref = std::get>(attribute.arrays)[idx]; diff --git a/dbms/src/Dictionaries/CacheDictionary.h b/dbms/src/Dictionaries/CacheDictionary.h index cc613d0d96b..7e1cec6ffe9 100644 --- a/dbms/src/Dictionaries/CacheDictionary.h +++ b/dbms/src/Dictionaries/CacheDictionary.h @@ -24,11 +24,11 @@ class CacheDictionary final : public IDictionary { public: CacheDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - const size_t size); + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + const size_t size_); std::string getName() const override { return name; } diff --git a/dbms/src/Dictionaries/CacheDictionary_generate1.cpp.in b/dbms/src/Dictionaries/CacheDictionary_generate1.cpp.in index 6a545403aed..b870a0ed69b 100644 --- a/dbms/src/Dictionaries/CacheDictionary_generate1.cpp.in +++ b/dbms/src/Dictionaries/CacheDictionary_generate1.cpp.in @@ -12,7 +12,7 @@ using TYPE = @NAME@; void CacheDictionary::get@NAME@(const std::string & attribute_name, const PaddedPODArray & ids, ResultArrayType & out) const { auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::@NAME@); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut@NAME@); const auto null_value = std::get(attribute.null_values); diff --git a/dbms/src/Dictionaries/CacheDictionary_generate2.cpp.in b/dbms/src/Dictionaries/CacheDictionary_generate2.cpp.in index 787a0f267af..367e150c2cb 100644 --- a/dbms/src/Dictionaries/CacheDictionary_generate2.cpp.in +++ b/dbms/src/Dictionaries/CacheDictionary_generate2.cpp.in @@ -15,7 +15,7 @@ void CacheDictionary::get@NAME@(const std::string & attribute_name, ResultArrayType & out) const { auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, 
attribute_name, attribute.type, AttributeUnderlyingType::@NAME@); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut@NAME@); getItemsNumberImpl(attribute, ids, out, [&](const size_t row) { return def[row]; }); } diff --git a/dbms/src/Dictionaries/CacheDictionary_generate3.cpp.in b/dbms/src/Dictionaries/CacheDictionary_generate3.cpp.in index 7b1d08920f9..8e2c26302e8 100644 --- a/dbms/src/Dictionaries/CacheDictionary_generate3.cpp.in +++ b/dbms/src/Dictionaries/CacheDictionary_generate3.cpp.in @@ -12,7 +12,7 @@ using TYPE = @NAME@; void CacheDictionary::get@NAME@(const std::string & attribute_name, const PaddedPODArray & ids, const TYPE def, ResultArrayType & out) const { auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::@NAME@); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut@NAME@); getItemsNumberImpl(attribute, ids, out, [&](const size_t) { return def; }); } diff --git a/dbms/src/Dictionaries/ClickHouseDictionarySource.cpp b/dbms/src/Dictionaries/ClickHouseDictionarySource.cpp index 67124ff880c..a3b4e8c5cfb 100644 --- a/dbms/src/Dictionaries/ClickHouseDictionarySource.cpp +++ b/dbms/src/Dictionaries/ClickHouseDictionarySource.cpp @@ -51,7 +51,7 @@ ClickHouseDictionarySource::ClickHouseDictionarySource( const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - const Block & sample_block, + const Block & sample_block_, Context & context_) : update_time{std::chrono::system_clock::from_time_t(0)} , dict_struct{dict_struct_} @@ -66,7 +66,7 @@ ClickHouseDictionarySource::ClickHouseDictionarySource( , update_field{config.getString(config_prefix + ".update_field", "")} , invalidate_query{config.getString(config_prefix + ".invalidate_query", "")} , query_builder{dict_struct, db, table, where, IdentifierQuotingStyle::Backticks} - , sample_block{sample_block} + , sample_block{sample_block_} , context(context_) , is_local{isLocalAddress({host, port}, context.getTCPPort())} , pool{is_local ? 
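The _generate*.cpp.in files are build-time templates: @NAME@ is substituted once per supported type, so a single file yields getUInt8, getUInt16, and so on, which is why the enum rename shows up here as ut@NAME@. A compilable miniature of what one expansion (NAME=UInt64) boils down to, with heavily simplified names:

    #include <cstdint>
    #include <string>
    #include <vector>

    enum class AttributeUnderlyingType { utUInt64 };

    struct MiniCacheDictionary
    {
        using TYPE = uint64_t;  // the template's 'using TYPE = @NAME@;'

        void getUInt64(const std::string & attribute_name, std::vector<TYPE> & out) const
        {
            check_attribute_type(attribute_name, AttributeUnderlyingType::utUInt64);
            out.assign(out.size(), TYPE{});  // stand-in for getItemsNumberImpl
        }

        void check_attribute_type(const std::string &, AttributeUnderlyingType) const {}
    };
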
nullptr : createPool(host, port, secure, db, user, password)} diff --git a/dbms/src/Dictionaries/ClickHouseDictionarySource.h b/dbms/src/Dictionaries/ClickHouseDictionarySource.h index 991782b1549..3df962708bd 100644 --- a/dbms/src/Dictionaries/ClickHouseDictionarySource.h +++ b/dbms/src/Dictionaries/ClickHouseDictionarySource.h @@ -22,7 +22,7 @@ public: const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - const Block & sample_block, + const Block & sample_block_, Context & context); /// copy-constructor is provided in order to support cloneability diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp b/dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp index b9172746120..3478e631076 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp @@ -51,16 +51,16 @@ inline UInt64 ComplexKeyCacheDictionary::getCellIdx(const StringRef key) const ComplexKeyCacheDictionary::ComplexKeyCacheDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - const size_t size) - : name{name} - , dict_struct(dict_struct) - , source_ptr{std::move(source_ptr)} - , dict_lifetime(dict_lifetime) - , size{roundUpToPowerOfTwoOrZero(std::max(size, size_t(max_collision_length)))} + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + const size_t size_) + : name{name_} + , dict_struct(dict_struct_) + , source_ptr{std::move(source_ptr_)} + , dict_lifetime(dict_lifetime_) + , size{roundUpToPowerOfTwoOrZero(std::max(size_, size_t(max_collision_length)))} , size_overlap_mask{this->size - 1} , rnd_engine(randomSeed()) { @@ -77,7 +77,7 @@ void ComplexKeyCacheDictionary::getString( dict_struct.validateKeyTypes(key_types); auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); const auto null_value = StringRef{std::get(attribute.null_values)}; @@ -94,7 +94,7 @@ void ComplexKeyCacheDictionary::getString( dict_struct.validateKeyTypes(key_types); auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsString(attribute, key_columns, out, [&](const size_t row) { return def->getDataAt(row); }); } @@ -109,7 +109,7 @@ void ComplexKeyCacheDictionary::getString( dict_struct.validateKeyTypes(key_types); auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsString(attribute, key_columns, out, [&](const size_t) { return StringRef{def}; }); } @@ -290,7 +290,7 @@ StringRef ComplexKeyCacheDictionary::placeKeysInPool( { keys[j] = key_columns[j]->getDataAt(row); sum_keys_size += keys[j].size; - if (key_attributes[j].underlying_type == AttributeUnderlyingType::String) + if (key_attributes[j].underlying_type == AttributeUnderlyingType::utString) sum_keys_size += sizeof(size_t) + 1; } @@ -299,7 +299,7 @@ StringRef 
ComplexKeyCacheDictionary::placeKeysInPool( auto key_start = place; for (size_t j = 0; j < keys_size; ++j) { - if (key_attributes[j].underlying_type == AttributeUnderlyingType::String) + if (key_attributes[j].underlying_type == AttributeUnderlyingType::utString) { auto start = key_start; auto key_size = keys[j].size + 1; diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.h b/dbms/src/Dictionaries/ComplexKeyCacheDictionary.h index ffac807c04c..7c2ba75ba17 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.h +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary.h @@ -42,11 +42,11 @@ class ComplexKeyCacheDictionary final : public IDictionaryBase { public: ComplexKeyCacheDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - const size_t size); + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + const size_t size_); std::string getKeyDescription() const { return key_description; } diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp index aeb85881f86..e15a6fb3014 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp @@ -10,7 +10,7 @@ ComplexKeyCacheDictionary::createAttributeWithType(const AttributeUnderlyingType switch (type) { #define DISPATCH(TYPE) \ - case AttributeUnderlyingType::TYPE: \ + case AttributeUnderlyingType::ut##TYPE: \ attr.null_values = TYPE(null_value.get>()); \ attr.arrays = std::make_unique>(size); \ bytes_allocated += size * sizeof(TYPE); \ @@ -30,7 +30,7 @@ ComplexKeyCacheDictionary::createAttributeWithType(const AttributeUnderlyingType DISPATCH(Float32) DISPATCH(Float64) #undef DISPATCH - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: attr.null_values = null_value.get(); attr.arrays = std::make_unique>(size); bytes_allocated += size * sizeof(StringRef); diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in index fd6863e4669..5c0ed408a55 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in @@ -13,7 +13,7 @@ void ComplexKeyCacheDictionary::get@NAME@(const std::string & attribute_name, co dict_struct.validateKeyTypes(key_types); auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::@NAME@); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut@NAME@); const auto null_value = std::get(attribute.null_values); diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in index 6b94bee5700..b3233cd05e1 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in @@ -18,7 +18,7 @@ void ComplexKeyCacheDictionary::get@NAME@(const std::string & attribute_name, dict_struct.validateKeyTypes(key_types); auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, 
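placeKeysInPool packs one row's key columns into a single arena allocation so the composite key can be hashed and compared as one contiguous region; as the hunk above shows, string keys reserve sizeof(size_t) + 1 extra bytes for a length prefix and terminating zero. A simplified sketch with std::string standing in for the arena:

    #include <cstddef>
    #include <string>
    #include <vector>

    std::string pack_key(const std::vector<std::string> & key_parts)
    {
        size_t total = 0;
        for (const auto & part : key_parts)
            total += sizeof(size_t) + part.size() + 1;  // length prefix + bytes + '\0'

        std::string pool;
        pool.reserve(total);
        for (const auto & part : key_parts)
        {
            size_t len = part.size();
            pool.append(reinterpret_cast<const char *>(&len), sizeof(len));
            pool.append(part);
            pool.push_back('\0');
        }
        return pool;
    }
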
AttributeUnderlyingType::@NAME@); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut@NAME@); getItemsNumberImpl(attribute, key_columns, out, [&](const size_t row) { return def[row]; }); } diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in index 20e05efe399..02e77c01a4a 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in @@ -18,7 +18,7 @@ void ComplexKeyCacheDictionary::get@NAME@(const std::string & attribute_name, dict_struct.validateKeyTypes(key_types); auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::@NAME@); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut@NAME@); getItemsNumberImpl(attribute, key_columns, out, [&](const size_t) { return def; }); } diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp index 7b3a44214c5..cf2eef82347 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp @@ -6,51 +6,51 @@ void ComplexKeyCacheDictionary::setAttributeValue(Attribute & attribute, const s { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { const auto & string = value.get(); auto & string_ref = 
std::get<ContainerPtrType<StringRef>>(attribute.arrays)[idx];
diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp index 89cf1506f90..aa03cc88038 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp
@@ -6,51 +6,51 @@ void ComplexKeyCacheDictionary::setDefaultAttributeValue(Attribute & attribute, { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: std::get<ContainerPtrType<UInt8>>(attribute.arrays)[idx] = std::get<UInt8>(attribute.null_values); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: std::get<ContainerPtrType<UInt16>>(attribute.arrays)[idx] = std::get<UInt16>(attribute.null_values); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: std::get<ContainerPtrType<UInt32>>(attribute.arrays)[idx] = std::get<UInt32>(attribute.null_values); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: std::get<ContainerPtrType<UInt64>>(attribute.arrays)[idx] = std::get<UInt64>(attribute.null_values); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: std::get<ContainerPtrType<UInt128>>(attribute.arrays)[idx] = std::get<UInt128>(attribute.null_values); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: std::get<ContainerPtrType<Int8>>(attribute.arrays)[idx] = std::get<Int8>(attribute.null_values); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: std::get<ContainerPtrType<Int16>>(attribute.arrays)[idx] = std::get<Int16>(attribute.null_values); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: std::get<ContainerPtrType<Int32>>(attribute.arrays)[idx] = std::get<Int32>(attribute.null_values); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: std::get<ContainerPtrType<Int64>>(attribute.arrays)[idx] = std::get<Int64>(attribute.null_values); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: std::get<ContainerPtrType<Float32>>(attribute.arrays)[idx] = std::get<Float32>(attribute.null_values); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: std::get<ContainerPtrType<Float64>>(attribute.arrays)[idx] = std::get<Float64>(attribute.null_values); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: std::get<ContainerPtrType<Decimal32>>(attribute.arrays)[idx] = std::get<Decimal32>(attribute.null_values); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: std::get<ContainerPtrType<Decimal64>>(attribute.arrays)[idx] = std::get<Decimal64>(attribute.null_values); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: std::get<ContainerPtrType<Decimal128>>(attribute.arrays)[idx] = std::get<Decimal128>(attribute.null_values); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { const auto & null_value_ref = std::get<String>(attribute.null_values); auto & string_ref = std::get<ContainerPtrType<StringRef>>(attribute.arrays)[idx];
diff --git a/dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp b/dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp index 39ef9124061..586fc5e89f9 100644 --- a/dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp +++ b/dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp
@@ -15,18 +15,18 @@ namespace ErrorCodes } ComplexKeyHashedDictionary::ComplexKeyHashedDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty, - BlockPtr saved_block) - : name{name} - ,
dict_struct(dict_struct) - , source_ptr{std::move(source_ptr)} - , dict_lifetime(dict_lifetime) - , require_nonempty(require_nonempty) - , saved_block{std::move(saved_block)} + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_, + BlockPtr saved_block_) + : name{name_} + , dict_struct(dict_struct_) + , source_ptr{std::move(source_ptr_)} + , dict_lifetime(dict_lifetime_) + , require_nonempty(require_nonempty_) + , saved_block{std::move(saved_block_)} { createAttributes(); loadData();
@@ -40,7 +40,7 @@ ComplexKeyHashedDictionary::ComplexKeyHashedDictionary( dict_struct.validateKeyTypes(key_types); \ \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ const auto null_value = std::get<TYPE>(attribute.null_values); \ \
@@ -72,7 +72,7 @@ void ComplexKeyHashedDictionary::getString( dict_struct.validateKeyTypes(key_types); const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); const auto & null_value = StringRef{std::get<String>(attribute.null_values)};
@@ -94,7 +94,7 @@ void ComplexKeyHashedDictionary::getString( dict_struct.validateKeyTypes(key_types); \ \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl<TYPE, TYPE>( \ attribute, \
@@ -128,7 +128,7 @@ void ComplexKeyHashedDictionary::getString( dict_struct.validateKeyTypes(key_types); const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsImpl<StringRef, StringRef>( attribute,
@@ -148,7 +148,7 @@ void ComplexKeyHashedDictionary::getString( dict_struct.validateKeyTypes(key_types); \ \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl<TYPE, TYPE>( \ attribute, key_columns, [&](const size_t row, const auto value) { out[row] = value; }, [&](const size_t) { return def; }); \
@@ -179,7 +179,7 @@ void ComplexKeyHashedDictionary::getString( dict_struct.validateKeyTypes(key_types); const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsImpl<StringRef, StringRef>( attribute,
@@ -196,50 +196,50 @@ void ComplexKeyHashedDictionary::has(const Columns & key_columns, const DataType switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: has<UInt8>(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: has<UInt16>(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32:
has<UInt32>(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: has<UInt64>(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: has<UInt128>(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: has<Int8>(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: has<Int16>(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: has<Int32>(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: has<Int64>(attribute, key_columns, out); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: has<Float32>(attribute, key_columns, out); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: has<Float64>(attribute, key_columns, out); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: has<StringRef>(attribute, key_columns, out); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: has<Decimal32>(attribute, key_columns, out); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: has<Decimal64>(attribute, key_columns, out); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: has<Decimal128>(attribute, key_columns, out); break; }
@@ -416,51 +416,51 @@ void ComplexKeyHashedDictionary::calculateBytesAllocated() { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: addAttributeSize<UInt8>(attribute); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: addAttributeSize<UInt16>(attribute); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: addAttributeSize<UInt32>(attribute); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: addAttributeSize<UInt64>(attribute); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: addAttributeSize<UInt128>(attribute); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: addAttributeSize<Int8>(attribute); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: addAttributeSize<Int16>(attribute); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: addAttributeSize<Int32>(attribute); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: addAttributeSize<Int64>(attribute); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: addAttributeSize<Float32>(attribute); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: addAttributeSize<Float64>(attribute); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: addAttributeSize<Decimal32>(attribute); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: addAttributeSize<Decimal64>(attribute); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: addAttributeSize<Decimal128>(attribute); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { addAttributeSize<StringRef>(attribute); bytes_allocated += sizeof(Arena) + attribute.string_arena->size();
@@ -487,51 +487,51 @@
ComplexKeyHashedDictionary::createAttributeWithType(const AttributeUnderlyingTyp switch (type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: createAttributeImpl<UInt8>(attr, null_value); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: createAttributeImpl<UInt16>(attr, null_value); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: createAttributeImpl<UInt32>(attr, null_value); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: createAttributeImpl<UInt64>(attr, null_value); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: createAttributeImpl<UInt128>(attr, null_value); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: createAttributeImpl<Int8>(attr, null_value); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: createAttributeImpl<Int16>(attr, null_value); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: createAttributeImpl<Int32>(attr, null_value); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: createAttributeImpl<Int64>(attr, null_value); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: createAttributeImpl<Float32>(attr, null_value); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: createAttributeImpl<Float64>(attr, null_value); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: createAttributeImpl<Decimal32>(attr, null_value); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: createAttributeImpl<Decimal64>(attr, null_value); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: createAttributeImpl<Decimal128>(attr, null_value); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { attr.null_values = null_value.get<String>(); attr.maps.emplace<ContainerType<StringRef>>();
@@ -583,37 +583,37 @@ bool ComplexKeyHashedDictionary::setAttributeValue(Attribute & attribute, const { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: return setAttributeValueImpl<UInt8>(attribute, key, value.get<UInt64>()); - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: return setAttributeValueImpl<UInt16>(attribute, key, value.get<UInt64>()); - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: return setAttributeValueImpl<UInt32>(attribute, key, value.get<UInt64>()); - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: return setAttributeValueImpl<UInt64>(attribute, key, value.get<UInt64>()); - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: return setAttributeValueImpl<UInt128>(attribute, key, value.get<UInt128>()); - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: return setAttributeValueImpl<Int8>(attribute, key, value.get<Int64>()); - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: return setAttributeValueImpl<Int16>(attribute, key, value.get<Int64>()); - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: return setAttributeValueImpl<Int32>(attribute, key, value.get<Int64>()); - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: return setAttributeValueImpl<Int64>(attribute, key, value.get<Int64>()); - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: return setAttributeValueImpl<Float32>(attribute, key, value.get<Float64>()); - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: return setAttributeValueImpl<Float64>(attribute, key, value.get<Float64>()); - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: return setAttributeValueImpl<Decimal32>(attribute, key, value.get<Decimal32>()); - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: return setAttributeValueImpl<Decimal64>(attribute, key, value.get<Decimal64>()); - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: return setAttributeValueImpl<Decimal128>(attribute, key, value.get<Decimal128>()); - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { auto & map = std::get<ContainerType<StringRef>>(attribute.maps); const auto & string = value.get<String>();
@@ -687,36 +687,36 @@ std::vector<StringRef> ComplexKeyHashedDictionary::getKeys() const switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: return getKeys<UInt8>(attribute); - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: return getKeys<UInt16>(attribute); - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: return getKeys<UInt32>(attribute); - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: return getKeys<UInt64>(attribute); - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: return getKeys<UInt128>(attribute); - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: return getKeys<Int8>(attribute); - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: return getKeys<Int16>(attribute); - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: return getKeys<Int32>(attribute); - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: return getKeys<Int64>(attribute); - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: return getKeys<Float32>(attribute); - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: return getKeys<Float64>(attribute); - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: return getKeys<StringRef>(attribute); - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: return getKeys<Decimal32>(attribute); - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: return getKeys<Decimal64>(attribute); - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: return getKeys<Decimal128>(attribute); } return {};
diff --git a/dbms/src/Dictionaries/ComplexKeyHashedDictionary.h b/dbms/src/Dictionaries/ComplexKeyHashedDictionary.h index 54ee8627f9b..68b8d9d0d36 100644 --- a/dbms/src/Dictionaries/ComplexKeyHashedDictionary.h +++ b/dbms/src/Dictionaries/ComplexKeyHashedDictionary.h
@@ -23,12 +23,12 @@ class ComplexKeyHashedDictionary final : public IDictionaryBase { public: ComplexKeyHashedDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty, - BlockPtr saved_block = nullptr); + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_, + BlockPtr saved_block_ = nullptr); std::string getKeyDescription() const { return key_description; } diff --git a/dbms/src/Dictionaries/DictionaryBlockInputStream.h
b/dbms/src/Dictionaries/DictionaryBlockInputStream.h index b5af6e3b912..09b9ec8d4af 100644 --- a/dbms/src/Dictionaries/DictionaryBlockInputStream.h +++ b/dbms/src/Dictionaries/DictionaryBlockInputStream.h @@ -202,11 +202,11 @@ private: template DictionaryBlockInputStream::DictionaryBlockInputStream( - std::shared_ptr dictionary, UInt64 max_block_size, PaddedPODArray && ids, const Names & column_names) - : DictionaryBlockInputStreamBase(ids.size(), max_block_size) - , dictionary(std::static_pointer_cast(dictionary)) - , column_names(column_names) - , ids(std::move(ids)) + std::shared_ptr dictionary_, UInt64 max_block_size_, PaddedPODArray && ids_, const Names & column_names_) + : DictionaryBlockInputStreamBase(ids_.size(), max_block_size_) + , dictionary(std::static_pointer_cast(dictionary_)) + , column_names(column_names_) + , ids(std::move(ids_)) , logger(&Poco::Logger::get("DictionaryBlockInputStream")) , fill_block_function( &DictionaryBlockInputStream::fillBlock) @@ -216,13 +216,13 @@ DictionaryBlockInputStream::DictionaryBlockInputStream( template DictionaryBlockInputStream::DictionaryBlockInputStream( - std::shared_ptr dictionary, - UInt64 max_block_size, + std::shared_ptr dictionary_, + UInt64 max_block_size_, const std::vector & keys, - const Names & column_names) - : DictionaryBlockInputStreamBase(keys.size(), max_block_size) - , dictionary(std::static_pointer_cast(dictionary)) - , column_names(column_names) + const Names & column_names_) + : DictionaryBlockInputStreamBase(keys.size(), max_block_size_) + , dictionary(std::static_pointer_cast(dictionary_)) + , column_names(column_names_) , logger(&Poco::Logger::get("DictionaryBlockInputStream")) , fill_block_function(&DictionaryBlockInputStream::fillBlock) , key_type(DictionaryKeyType::ComplexKey) @@ -233,20 +233,20 @@ DictionaryBlockInputStream::DictionaryBlockInputStream( template DictionaryBlockInputStream::DictionaryBlockInputStream( - std::shared_ptr dictionary, - UInt64 max_block_size, - const Columns & data_columns, - const Names & column_names, - GetColumnsFunction && get_key_columns_function, - GetColumnsFunction && get_view_columns_function) - : DictionaryBlockInputStreamBase(data_columns.front()->size(), max_block_size) - , dictionary(std::static_pointer_cast(dictionary)) - , column_names(column_names) + std::shared_ptr dictionary_, + UInt64 max_block_size_, + const Columns & data_columns_, + const Names & column_names_, + GetColumnsFunction && get_key_columns_function_, + GetColumnsFunction && get_view_columns_function_) + : DictionaryBlockInputStreamBase(data_columns_.front()->size(), max_block_size_) + , dictionary(std::static_pointer_cast(dictionary_)) + , column_names(column_names_) , logger(&Poco::Logger::get("DictionaryBlockInputStream")) , fill_block_function(&DictionaryBlockInputStream::fillBlock) - , data_columns(data_columns) - , get_key_columns_function(get_key_columns_function) - , get_view_columns_function(get_view_columns_function) + , data_columns(data_columns_) + , get_key_columns_function(get_key_columns_function_) + , get_view_columns_function(get_view_columns_function_) , key_type(DictionaryKeyType::Callback) { } @@ -422,58 +422,58 @@ Block DictionaryBlockInputStream::fillBlock( column = getColumnFromAttribute>(&DictionaryType::get##TYPE, ids_to_fill, keys, data_types, attribute, *dictionary) switch (attribute.underlying_type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: GET_COLUMN_FORM_ATTRIBUTE(UInt8); break; - case AttributeUnderlyingType::UInt16: + case 
AttributeUnderlyingType::utUInt16: GET_COLUMN_FORM_ATTRIBUTE(UInt16); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: GET_COLUMN_FORM_ATTRIBUTE(UInt32); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: GET_COLUMN_FORM_ATTRIBUTE(UInt64); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: GET_COLUMN_FORM_ATTRIBUTE(UInt128); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: GET_COLUMN_FORM_ATTRIBUTE(Int8); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: GET_COLUMN_FORM_ATTRIBUTE(Int16); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: GET_COLUMN_FORM_ATTRIBUTE(Int32); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: GET_COLUMN_FORM_ATTRIBUTE(Int64); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: GET_COLUMN_FORM_ATTRIBUTE(Float32); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: GET_COLUMN_FORM_ATTRIBUTE(Float64); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: { column = getColumnFromAttribute>( &DictionaryType::getDecimal32, ids_to_fill, keys, data_types, attribute, *dictionary); break; } - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: { column = getColumnFromAttribute>( &DictionaryType::getDecimal64, ids_to_fill, keys, data_types, attribute, *dictionary); break; } - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: { column = getColumnFromAttribute>( &DictionaryType::getDecimal128, ids_to_fill, keys, data_types, attribute, *dictionary); break; } - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { column = getColumnFromStringAttribute( &DictionaryType::getString, ids_to_fill, keys, data_types, attribute, *dictionary); diff --git a/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.cpp b/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.cpp index 1a793d4705f..3a3fd09220f 100644 --- a/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.cpp +++ b/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.cpp @@ -2,8 +2,8 @@ namespace DB { -DictionaryBlockInputStreamBase::DictionaryBlockInputStreamBase(size_t rows_count, size_t max_block_size) - : rows_count(rows_count), max_block_size(max_block_size) +DictionaryBlockInputStreamBase::DictionaryBlockInputStreamBase(size_t rows_count_, size_t max_block_size_) + : rows_count(rows_count_), max_block_size(max_block_size_) { } diff --git a/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.h b/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.h index 571a0da3b50..fb99918aed8 100644 --- a/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.h +++ b/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.h @@ -7,7 +7,7 @@ namespace DB class DictionaryBlockInputStreamBase : public IBlockInputStream { protected: - DictionaryBlockInputStreamBase(size_t rows_count, size_t max_block_size); + DictionaryBlockInputStreamBase(size_t rows_count_, size_t max_block_size_); virtual Block getBlock(size_t start, size_t length) const = 0; diff --git a/dbms/src/Dictionaries/DictionaryStructure.cpp b/dbms/src/Dictionaries/DictionaryStructure.cpp index d43b749935f..925e9e01a82 100644 --- a/dbms/src/Dictionaries/DictionaryStructure.cpp +++ 
b/dbms/src/Dictionaries/DictionaryStructure.cpp @@ -43,20 +43,20 @@ namespace AttributeUnderlyingType getAttributeUnderlyingType(const std::string & type) { static const std::unordered_map dictionary{ - {"UInt8", AttributeUnderlyingType::UInt8}, - {"UInt16", AttributeUnderlyingType::UInt16}, - {"UInt32", AttributeUnderlyingType::UInt32}, - {"UInt64", AttributeUnderlyingType::UInt64}, - {"UUID", AttributeUnderlyingType::UInt128}, - {"Int8", AttributeUnderlyingType::Int8}, - {"Int16", AttributeUnderlyingType::Int16}, - {"Int32", AttributeUnderlyingType::Int32}, - {"Int64", AttributeUnderlyingType::Int64}, - {"Float32", AttributeUnderlyingType::Float32}, - {"Float64", AttributeUnderlyingType::Float64}, - {"String", AttributeUnderlyingType::String}, - {"Date", AttributeUnderlyingType::UInt16}, - {"DateTime", AttributeUnderlyingType::UInt32}, + {"UInt8", AttributeUnderlyingType::utUInt8}, + {"UInt16", AttributeUnderlyingType::utUInt16}, + {"UInt32", AttributeUnderlyingType::utUInt32}, + {"UInt64", AttributeUnderlyingType::utUInt64}, + {"UUID", AttributeUnderlyingType::utUInt128}, + {"Int8", AttributeUnderlyingType::utInt8}, + {"Int16", AttributeUnderlyingType::utInt16}, + {"Int32", AttributeUnderlyingType::utInt32}, + {"Int64", AttributeUnderlyingType::utInt64}, + {"Float32", AttributeUnderlyingType::utFloat32}, + {"Float64", AttributeUnderlyingType::utFloat64}, + {"String", AttributeUnderlyingType::utString}, + {"Date", AttributeUnderlyingType::utUInt16}, + {"DateTime", AttributeUnderlyingType::utUInt32}, }; const auto it = dictionary.find(type); @@ -67,11 +67,11 @@ AttributeUnderlyingType getAttributeUnderlyingType(const std::string & type) { size_t start = strlen("Decimal"); if (type.find("32", start) == start) - return AttributeUnderlyingType::Decimal32; + return AttributeUnderlyingType::utDecimal32; if (type.find("64", start) == start) - return AttributeUnderlyingType::Decimal64; + return AttributeUnderlyingType::utDecimal64; if (type.find("128", start) == start) - return AttributeUnderlyingType::Decimal128; + return AttributeUnderlyingType::utDecimal128; } throw Exception{"Unknown type " + type, ErrorCodes::UNKNOWN_TYPE}; @@ -82,35 +82,35 @@ std::string toString(const AttributeUnderlyingType type) { switch (type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: return "UInt8"; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: return "UInt16"; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: return "UInt32"; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: return "UInt64"; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: return "UUID"; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: return "Int8"; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: return "Int16"; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: return "Int32"; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: return "Int64"; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: return "Float32"; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: return "Float64"; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: return "Decimal32"; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: return "Decimal64"; - 
case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: return "Decimal128"; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: return "String"; }
@@ -243,7 +243,7 @@ bool DictionaryStructure::isKeySizeFixed() const return true; for (const auto & key_i : *key) - if (key_i.underlying_type == AttributeUnderlyingType::String) + if (key_i.underlying_type == AttributeUnderlyingType::utString) return false; return true;
diff --git a/dbms/src/Dictionaries/DictionaryStructure.h b/dbms/src/Dictionaries/DictionaryStructure.h index 4de712a012a..f39f59d90d2 100644 --- a/dbms/src/Dictionaries/DictionaryStructure.h +++ b/dbms/src/Dictionaries/DictionaryStructure.h
@@ -21,21 +21,21 @@ namespace ErrorCodes enum class AttributeUnderlyingType { - UInt8, - UInt16, - UInt32, - UInt64, - UInt128, - Int8, - Int16, - Int32, - Int64, - Float32, - Float64, - Decimal32, - Decimal64, - Decimal128, - String + utUInt8, + utUInt16, + utUInt32, + utUInt64, + utUInt128, + utInt8, + utInt16, + utInt32, + utInt64, + utFloat32, + utFloat64, + utDecimal32, + utDecimal64, + utDecimal128, + utString };
diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp b/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp index 67180921a93..94ee6d8b2f8 100644 --- a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp +++ b/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp
@@ -20,7 +20,7 @@ IRegionsHierarchyReaderPtr RegionsHierarchyDataSource::createReader() } -RegionsHierarchiesDataProvider::RegionsHierarchiesDataProvider(const std::string & path) : path(path) +RegionsHierarchiesDataProvider::RegionsHierarchiesDataProvider(const std::string & path_) : path(path_) { discoverFilesWithCustomHierarchies(); }
diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h b/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h index 1c7392cb98a..198f13e0f32 100644 --- a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h +++ b/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h
@@ -40,7 +40,7 @@ public: * For example, if /opt/geo/regions_hierarchy.txt is specified, * then the /opt/geo/regions_hierarchy_ua.txt file will also be loaded, if present, and will be accessible by the `ua` key.
*/ - RegionsHierarchiesDataProvider(const std::string & path); + RegionsHierarchiesDataProvider(const std::string & path_); std::vector listCustomHierarchies() const override; diff --git a/dbms/src/Dictionaries/ExecutableDictionarySource.cpp b/dbms/src/Dictionaries/ExecutableDictionarySource.cpp index b54894e043c..d76de3abe0e 100644 --- a/dbms/src/Dictionaries/ExecutableDictionarySource.cpp +++ b/dbms/src/Dictionaries/ExecutableDictionarySource.cpp @@ -46,15 +46,15 @@ ExecutableDictionarySource::ExecutableDictionarySource( const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - Block & sample_block, - const Context & context) + Block & sample_block_, + const Context & context_) : log(&Logger::get("ExecutableDictionarySource")) , dict_struct{dict_struct_} , command{config.getString(config_prefix + ".command")} , update_field{config.getString(config_prefix + ".update_field", "")} , format{config.getString(config_prefix + ".format")} - , sample_block{sample_block} - , context(context) + , sample_block{sample_block_} + , context(context_) { } diff --git a/dbms/src/Dictionaries/ExecutableDictionarySource.h b/dbms/src/Dictionaries/ExecutableDictionarySource.h index 9816161a70e..879248663dc 100644 --- a/dbms/src/Dictionaries/ExecutableDictionarySource.h +++ b/dbms/src/Dictionaries/ExecutableDictionarySource.h @@ -19,8 +19,8 @@ public: const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - Block & sample_block, - const Context & context); + Block & sample_block_, + const Context & context_); ExecutableDictionarySource(const ExecutableDictionarySource & other); ExecutableDictionarySource & operator=(const ExecutableDictionarySource &) = delete; diff --git a/dbms/src/Dictionaries/FileDictionarySource.cpp b/dbms/src/Dictionaries/FileDictionarySource.cpp index 2ac580d7bb3..1505c2629f6 100644 --- a/dbms/src/Dictionaries/FileDictionarySource.cpp +++ b/dbms/src/Dictionaries/FileDictionarySource.cpp @@ -13,8 +13,8 @@ static const UInt64 max_block_size = 8192; FileDictionarySource::FileDictionarySource( - const std::string & filename, const std::string & format, Block & sample_block, const Context & context) - : filename{filename}, format{format}, sample_block{sample_block}, context(context) + const std::string & filename_, const std::string & format_, Block & sample_block_, const Context & context_) + : filename{filename_}, format{format_}, sample_block{sample_block_}, context(context_) { } diff --git a/dbms/src/Dictionaries/FileDictionarySource.h b/dbms/src/Dictionaries/FileDictionarySource.h index 083c3c6a3a7..b7ed46a99e2 100644 --- a/dbms/src/Dictionaries/FileDictionarySource.h +++ b/dbms/src/Dictionaries/FileDictionarySource.h @@ -13,7 +13,7 @@ class Context; class FileDictionarySource final : public IDictionarySource { public: - FileDictionarySource(const std::string & filename, const std::string & format, Block & sample_block, const Context & context); + FileDictionarySource(const std::string & filename_, const std::string & format_, Block & sample_block_, const Context & context_); FileDictionarySource(const FileDictionarySource & other); diff --git a/dbms/src/Dictionaries/FlatDictionary.cpp b/dbms/src/Dictionaries/FlatDictionary.cpp index b7b70748c01..d1c6a138c89 100644 --- a/dbms/src/Dictionaries/FlatDictionary.cpp +++ b/dbms/src/Dictionaries/FlatDictionary.cpp @@ -21,19 +21,19 @@ static const auto max_array_size = 500000; 
FlatDictionary::FlatDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty, - BlockPtr saved_block) - : name{name} - , dict_struct(dict_struct) - , source_ptr{std::move(source_ptr)} - , dict_lifetime(dict_lifetime) - , require_nonempty(require_nonempty) + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_, + BlockPtr saved_block_) + : name{name_} + , dict_struct(dict_struct_) + , source_ptr{std::move(source_ptr_)} + , dict_lifetime(dict_lifetime_) + , require_nonempty(require_nonempty_) , loaded_ids(initial_array_size, false) - , saved_block{std::move(saved_block)} + , saved_block{std::move(saved_block_)} { createAttributes(); loadData(); @@ -107,7 +107,7 @@ void FlatDictionary::isInConstantVector(const Key child_id, const PaddedPODArray void FlatDictionary::get##TYPE(const std::string & attribute_name, const PaddedPODArray & ids, ResultArrayType & out) const \ { \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ const auto null_value = std::get(attribute.null_values); \ \ @@ -133,7 +133,7 @@ DECLARE(Decimal128) void FlatDictionary::getString(const std::string & attribute_name, const PaddedPODArray & ids, ColumnString * out) const { const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); const auto & null_value = std::get(attribute.null_values); @@ -152,7 +152,7 @@ void FlatDictionary::getString(const std::string & attribute_name, const PaddedP ResultArrayType & out) const \ { \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl( \ attribute, ids, [&](const size_t row, const auto value) { out[row] = value; }, [&](const size_t row) { return def[row]; }); \ @@ -177,7 +177,7 @@ void FlatDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const ColumnString * const def, ColumnString * const out) const { const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsImpl( attribute, @@ -191,7 +191,7 @@ void FlatDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const TYPE def, ResultArrayType & out) const \ { \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl( \ attribute, ids, [&](const size_t row, const auto value) { out[row] = value; }, [&](const size_t) { return def; }); \ @@ -216,7 +216,7 @@ void FlatDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const 
String & def, ColumnString * const out) const { const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); FlatDictionary::getItemsImpl( attribute, @@ -232,50 +232,50 @@ void FlatDictionary::has(const PaddedPODArray & ids, PaddedPODArray switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: has(attribute, ids, out); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: has(attribute, ids, out); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: has(attribute, ids, out); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: has(attribute, ids, out); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: has(attribute, ids, out); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: has(attribute, ids, out); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: has(attribute, ids, out); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: has(attribute, ids, out); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: has(attribute, ids, out); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: has(attribute, ids, out); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: has(attribute, ids, out); break; } @@ -296,7 +296,7 @@ void FlatDictionary::createAttributes() { hierarchical_attribute = &attributes.back(); - if (hierarchical_attribute->type != AttributeUnderlyingType::UInt64) + if (hierarchical_attribute->type != AttributeUnderlyingType::utUInt64) throw Exception{name + ": hierarchical attribute must be UInt64.", ErrorCodes::TYPE_MISMATCH}; } } @@ -425,51 +425,51 @@ void FlatDictionary::calculateBytesAllocated() { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: 
addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { addAttributeSize(attribute); bytes_allocated += sizeof(Arena) + attribute.string_arena->size(); @@ -506,50 +506,50 @@ FlatDictionary::Attribute FlatDictionary::createAttributeWithType(const Attribut switch (type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: createAttributeImpl(attr, null_value); break; } @@ -612,50 +612,50 @@ void FlatDictionary::setAttributeValue(Attribute & attribute, const Key id, cons { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: setAttributeValueImpl(attribute, id, value.get()); break; - 
case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: setAttributeValueImpl(attribute, id, value.get()); break; } diff --git a/dbms/src/Dictionaries/FlatDictionary.h b/dbms/src/Dictionaries/FlatDictionary.h index de14cc3dc1a..d9ea141de2e 100644 --- a/dbms/src/Dictionaries/FlatDictionary.h +++ b/dbms/src/Dictionaries/FlatDictionary.h @@ -22,12 +22,12 @@ class FlatDictionary final : public IDictionary { public: FlatDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty, - BlockPtr saved_block = nullptr); + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_, + BlockPtr saved_block_ = nullptr); std::string getName() const override { return name; } diff --git a/dbms/src/Dictionaries/HTTPDictionarySource.cpp b/dbms/src/Dictionaries/HTTPDictionarySource.cpp index bb64b13c103..fffbcc402b0 100644 --- a/dbms/src/Dictionaries/HTTPDictionarySource.cpp +++ b/dbms/src/Dictionaries/HTTPDictionarySource.cpp @@ -22,16 +22,16 @@ HTTPDictionarySource::HTTPDictionarySource( const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - Block & sample_block, - const Context & context) + Block & sample_block_, + const Context & context_) : log(&Logger::get("HTTPDictionarySource")) , update_time{std::chrono::system_clock::from_time_t(0)} , dict_struct{dict_struct_} , url{config.getString(config_prefix + ".url", "")} , update_field{config.getString(config_prefix + ".update_field", "")} , format{config.getString(config_prefix + ".format")} - , sample_block{sample_block} - , context(context) + , sample_block{sample_block_} + , context(context_) , timeouts(ConnectionTimeouts::getHTTPTimeouts(context)) { } diff --git 
a/dbms/src/Dictionaries/HTTPDictionarySource.h b/dbms/src/Dictionaries/HTTPDictionarySource.h index 78fe5193533..705095193d1 100644 --- a/dbms/src/Dictionaries/HTTPDictionarySource.h +++ b/dbms/src/Dictionaries/HTTPDictionarySource.h @@ -22,8 +22,8 @@ public: const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - Block & sample_block, - const Context & context); + Block & sample_block_, + const Context & context_); HTTPDictionarySource(const HTTPDictionarySource & other); HTTPDictionarySource & operator=(const HTTPDictionarySource &) = delete; diff --git a/dbms/src/Dictionaries/HashedDictionary.cpp b/dbms/src/Dictionaries/HashedDictionary.cpp index 413cfadec39..9b853ac2df5 100644 --- a/dbms/src/Dictionaries/HashedDictionary.cpp +++ b/dbms/src/Dictionaries/HashedDictionary.cpp @@ -16,18 +16,18 @@ namespace ErrorCodes HashedDictionary::HashedDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty, - BlockPtr saved_block) - : name{name} - , dict_struct(dict_struct) - , source_ptr{std::move(source_ptr)} - , dict_lifetime(dict_lifetime) - , require_nonempty(require_nonempty) - , saved_block{std::move(saved_block)} + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_, + BlockPtr saved_block_) + : name{name_} + , dict_struct(dict_struct_) + , source_ptr{std::move(source_ptr_)} + , dict_lifetime(dict_lifetime_) + , require_nonempty(require_nonempty_) + , saved_block{std::move(saved_block_)} { createAttributes(); loadData(); @@ -106,7 +106,7 @@ void HashedDictionary::isInConstantVector(const Key child_id, const PaddedPODArr const \ { \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ const auto null_value = std::get(attribute.null_values); \ \ @@ -132,7 +132,7 @@ DECLARE(Decimal128) void HashedDictionary::getString(const std::string & attribute_name, const PaddedPODArray & ids, ColumnString * out) const { const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); const auto & null_value = StringRef{std::get(attribute.null_values)}; @@ -151,7 +151,7 @@ void HashedDictionary::getString(const std::string & attribute_name, const Padde ResultArrayType & out) const \ { \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl( \ attribute, ids, [&](const size_t row, const auto value) { out[row] = value; }, [&](const size_t row) { return def[row]; }); \ @@ -176,7 +176,7 @@ void HashedDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const ColumnString * const def, ColumnString * const out) const { const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, 
attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsImpl( attribute, @@ -190,7 +190,7 @@ void HashedDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const TYPE & def, ResultArrayType & out) const \ { \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl( \ attribute, ids, [&](const size_t row, const auto value) { out[row] = value; }, [&](const size_t) { return def; }); \ @@ -215,7 +215,7 @@ void HashedDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const String & def, ColumnString * const out) const { const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsImpl( attribute, @@ -230,50 +230,50 @@ void HashedDictionary::has(const PaddedPODArray & ids, PaddedPODArray(attribute, ids, out); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: has(attribute, ids, out); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: has(attribute, ids, out); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: has(attribute, ids, out); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: has(attribute, ids, out); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: has(attribute, ids, out); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: has(attribute, ids, out); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: has(attribute, ids, out); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: has(attribute, ids, out); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: has(attribute, ids, out); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: has(attribute, ids, out); break; } @@ -293,7 +293,7 @@ void HashedDictionary::createAttributes() { hierarchical_attribute = &attributes.back(); - if (hierarchical_attribute->type != AttributeUnderlyingType::UInt64) + if (hierarchical_attribute->type != AttributeUnderlyingType::utUInt64) throw Exception{name + ": hierarchical attribute must be UInt64.", ErrorCodes::TYPE_MISMATCH}; } } @@ -420,51 +420,51 @@ void HashedDictionary::calculateBytesAllocated() { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: addAttributeSize(attribute); break; - case 
AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { addAttributeSize(attribute); bytes_allocated += sizeof(Arena) + attribute.string_arena->size(); @@ -488,51 +488,51 @@ HashedDictionary::Attribute HashedDictionary::createAttributeWithType(const Attr switch (type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: createAttributeImpl(attr, null_value); break; - case 
AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { attr.null_values = null_value.get(); attr.maps = std::make_unique>(); @@ -573,37 +573,37 @@ bool HashedDictionary::setAttributeValue(Attribute & attribute, const Key id, co { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { auto & map = *std::get>(attribute.maps); const auto & string = value.get(); @@ -655,36 +655,36 @@ PaddedPODArray HashedDictionary::getIds() const switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: return getIds(attribute); - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: return getIds(attribute); - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: return getIds(attribute); - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: return getIds(attribute); - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: return getIds(attribute); - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: return getIds(attribute); - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: return getIds(attribute); - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: return getIds(attribute); - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: return getIds(attribute); - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: 
return getIds(attribute); - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: return getIds(attribute); - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: return getIds(attribute); - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: return getIds(attribute); - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: return getIds(attribute); - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: return getIds(attribute); } return PaddedPODArray(); diff --git a/dbms/src/Dictionaries/HashedDictionary.h b/dbms/src/Dictionaries/HashedDictionary.h index 92875f27cf3..d1aa5a38d97 100644 --- a/dbms/src/Dictionaries/HashedDictionary.h +++ b/dbms/src/Dictionaries/HashedDictionary.h @@ -21,12 +21,12 @@ class HashedDictionary final : public IDictionary { public: HashedDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty, - BlockPtr saved_block = nullptr); + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_, + BlockPtr saved_block_ = nullptr); std::string getName() const override { return name; } diff --git a/dbms/src/Dictionaries/LibraryDictionarySource.cpp b/dbms/src/Dictionaries/LibraryDictionarySource.cpp index 1e11a2ed011..b4de6506db1 100644 --- a/dbms/src/Dictionaries/LibraryDictionarySource.cpp +++ b/dbms/src/Dictionaries/LibraryDictionarySource.cpp @@ -120,13 +120,13 @@ namespace LibraryDictionarySource::LibraryDictionarySource( const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, - const std::string & config_prefix, - Block & sample_block) + const std::string & config_prefix_, + Block & sample_block_) : log(&Logger::get("LibraryDictionarySource")) , dict_struct{dict_struct_} - , config_prefix{config_prefix} + , config_prefix{config_prefix_} , path{config.getString(config_prefix + ".path", "")} - , sample_block{sample_block} + , sample_block{sample_block_} { if (!Poco::File(path).exists()) throw Exception( diff --git a/dbms/src/Dictionaries/LibraryDictionarySource.h b/dbms/src/Dictionaries/LibraryDictionarySource.h index d09e5eee691..5d18f114177 100644 --- a/dbms/src/Dictionaries/LibraryDictionarySource.h +++ b/dbms/src/Dictionaries/LibraryDictionarySource.h @@ -31,8 +31,8 @@ public: LibraryDictionarySource( const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, - const std::string & config_prefix, - Block & sample_block); + const std::string & config_prefix_, + Block & sample_block_); LibraryDictionarySource(const LibraryDictionarySource & other); LibraryDictionarySource & operator=(const LibraryDictionarySource &) = delete; diff --git a/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp b/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp index f9199fa90bb..363147c484e 100644 --- a/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp +++ b/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp @@ -33,8 +33,8 @@ MongoDBBlockInputStream::MongoDBBlockInputStream( std::shared_ptr & connection_, std::unique_ptr cursor_, const Block & sample_block, - const UInt64 max_block_size) - : connection(connection_), cursor{std::move(cursor_)}, max_block_size{max_block_size} + const UInt64 max_block_size_) + : connection(connection_), 
cursor{std::move(cursor_)}, max_block_size{max_block_size_} { description.init(sample_block); } @@ -87,38 +87,38 @@ namespace { switch (type) { - case ValueType::UInt8: + case ValueType::vtUInt8: insertNumber(column, value, name); break; - case ValueType::UInt16: + case ValueType::vtUInt16: insertNumber(column, value, name); break; - case ValueType::UInt32: + case ValueType::vtUInt32: insertNumber(column, value, name); break; - case ValueType::UInt64: + case ValueType::vtUInt64: insertNumber(column, value, name); break; - case ValueType::Int8: + case ValueType::vtInt8: insertNumber(column, value, name); break; - case ValueType::Int16: + case ValueType::vtInt16: insertNumber(column, value, name); break; - case ValueType::Int32: + case ValueType::vtInt32: insertNumber(column, value, name); break; - case ValueType::Int64: + case ValueType::vtInt64: insertNumber(column, value, name); break; - case ValueType::Float32: + case ValueType::vtFloat32: insertNumber(column, value, name); break; - case ValueType::Float64: + case ValueType::vtFloat64: insertNumber(column, value, name); break; - case ValueType::String: + case ValueType::vtString: { if (value.type() == Poco::MongoDB::ElementTraits::TypeId) { @@ -137,7 +137,7 @@ namespace ErrorCodes::TYPE_MISMATCH}; } - case ValueType::Date: + case ValueType::vtDate: { if (value.type() != Poco::MongoDB::ElementTraits::TypeId) throw Exception{"Type mismatch, expected Timestamp, got type id = " + toString(value.type()) + " for column " + name, @@ -148,7 +148,7 @@ namespace break; } - case ValueType::DateTime: + case ValueType::vtDateTime: { if (value.type() != Poco::MongoDB::ElementTraits::TypeId) throw Exception{"Type mismatch, expected Timestamp, got type id = " + toString(value.type()) + " for column " + name, @@ -158,7 +158,7 @@ namespace static_cast &>(value).value().epochTime()); break; } - case ValueType::UUID: + case ValueType::vtUUID: { if (value.type() == Poco::MongoDB::ElementTraits::TypeId) { diff --git a/dbms/src/Dictionaries/MongoDBBlockInputStream.h b/dbms/src/Dictionaries/MongoDBBlockInputStream.h index cf759692e9f..d5d692c827c 100644 --- a/dbms/src/Dictionaries/MongoDBBlockInputStream.h +++ b/dbms/src/Dictionaries/MongoDBBlockInputStream.h @@ -25,7 +25,7 @@ public: std::shared_ptr & connection_, std::unique_ptr cursor_, const Block & sample_block, - const UInt64 max_block_size); + const UInt64 max_block_size_); ~MongoDBBlockInputStream() override; diff --git a/dbms/src/Dictionaries/MongoDBDictionarySource.cpp b/dbms/src/Dictionaries/MongoDBDictionarySource.cpp index 73ffd4727fa..18d9f840426 100644 --- a/dbms/src/Dictionaries/MongoDBDictionarySource.cpp +++ b/dbms/src/Dictionaries/MongoDBDictionarySource.cpp @@ -168,24 +168,24 @@ authenticate(Poco::MongoDB::Connection & connection, const std::string & databas MongoDBDictionarySource::MongoDBDictionarySource( - const DictionaryStructure & dict_struct, - const std::string & host, - UInt16 port, - const std::string & user, - const std::string & password, - const std::string & method, - const std::string & db, - const std::string & collection, - const Block & sample_block) - : dict_struct{dict_struct} - , host{host} - , port{port} - , user{user} - , password{password} - , method{method} - , db{db} - , collection{collection} - , sample_block{sample_block} + const DictionaryStructure & dict_struct_, + const std::string & host_, + UInt16 port_, + const std::string & user_, + const std::string & password_, + const std::string & method_, + const std::string & db_, + const std::string & collection_, + 
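// The `vt` prefix applied to ValueType above (UInt8 -> vtUInt8,
// Date -> vtDate, ...) is the same convention as `ut` for
// AttributeUnderlyingType: the constant names stop colliding with the
// UInt8/Date/... type names, so a dispatch switch can mention both sides
// without ambiguity. Sketch of the resulting pattern (simplified from the
// hunk above; the explicit template argument is an assumption, since this
// rendering drops angle brackets):
//
//     switch (type)
//     {
//         case ValueType::vtUInt8:
//             insertNumber<UInt8>(column, value, name);
//             break;
//         // ... one case per vt-prefixed value ...
//     }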
const Block & sample_block_) + : dict_struct{dict_struct_} + , host{host_} + , port{port_} + , user{user_} + , password{password_} + , method{method_} + , db{db_} + , collection{collection_} + , sample_block{sample_block_} , connection{std::make_shared(host, port)} { if (!user.empty()) @@ -202,12 +202,12 @@ MongoDBDictionarySource::MongoDBDictionarySource( MongoDBDictionarySource::MongoDBDictionarySource( - const DictionaryStructure & dict_struct, + const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - Block & sample_block) + Block & sample_block_) : MongoDBDictionarySource( - dict_struct, + dict_struct_, config.getString(config_prefix + ".host"), config.getUInt(config_prefix + ".port"), config.getString(config_prefix + ".user", ""), @@ -215,7 +215,7 @@ MongoDBDictionarySource::MongoDBDictionarySource( config.getString(config_prefix + ".method", ""), config.getString(config_prefix + ".db", ""), config.getString(config_prefix + ".collection"), - sample_block) + sample_block_) { } @@ -297,27 +297,27 @@ BlockInputStreamPtr MongoDBDictionarySource::loadKeys(const Columns & key_column { switch (attr.second.underlying_type) { - case AttributeUnderlyingType::UInt8: - case AttributeUnderlyingType::UInt16: - case AttributeUnderlyingType::UInt32: - case AttributeUnderlyingType::UInt64: - case AttributeUnderlyingType::UInt128: - case AttributeUnderlyingType::Int8: - case AttributeUnderlyingType::Int16: - case AttributeUnderlyingType::Int32: - case AttributeUnderlyingType::Int64: - case AttributeUnderlyingType::Decimal32: - case AttributeUnderlyingType::Decimal64: - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utUInt8: + case AttributeUnderlyingType::utUInt16: + case AttributeUnderlyingType::utUInt32: + case AttributeUnderlyingType::utUInt64: + case AttributeUnderlyingType::utUInt128: + case AttributeUnderlyingType::utInt8: + case AttributeUnderlyingType::utInt16: + case AttributeUnderlyingType::utInt32: + case AttributeUnderlyingType::utInt64: + case AttributeUnderlyingType::utDecimal32: + case AttributeUnderlyingType::utDecimal64: + case AttributeUnderlyingType::utDecimal128: key.add(attr.second.name, Int32(key_columns[attr.first]->get64(row_idx))); break; - case AttributeUnderlyingType::Float32: - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat32: + case AttributeUnderlyingType::utFloat64: key.add(attr.second.name, applyVisitor(FieldVisitorConvertToNumber(), (*key_columns[attr.first])[row_idx])); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: String _str(get((*key_columns[attr.first])[row_idx])); /// Convert string to ObjectID if (attr.second.is_object_id) diff --git a/dbms/src/Dictionaries/MongoDBDictionarySource.h b/dbms/src/Dictionaries/MongoDBDictionarySource.h index 6cb627ec7ed..dd53cca10c0 100644 --- a/dbms/src/Dictionaries/MongoDBDictionarySource.h +++ b/dbms/src/Dictionaries/MongoDBDictionarySource.h @@ -27,15 +27,15 @@ namespace DB class MongoDBDictionarySource final : public IDictionarySource { MongoDBDictionarySource( - const DictionaryStructure & dict_struct, - const std::string & host, - UInt16 port, - const std::string & user, - const std::string & password, - const std::string & method, - const std::string & db, - const std::string & collection, - const Block & sample_block); + const DictionaryStructure & dict_struct_, + const std::string & host_, + UInt16 port_, + const std::string & user_, + const 
std::string & password_, + const std::string & method_, + const std::string & db_, + const std::string & collection_, + const Block & sample_block_); public: MongoDBDictionarySource( diff --git a/dbms/src/Dictionaries/MySQLDictionarySource.cpp b/dbms/src/Dictionaries/MySQLDictionarySource.cpp index 73b9b089806..497448bf64c 100644 --- a/dbms/src/Dictionaries/MySQLDictionarySource.cpp +++ b/dbms/src/Dictionaries/MySQLDictionarySource.cpp @@ -57,7 +57,7 @@ MySQLDictionarySource::MySQLDictionarySource( const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - const Block & sample_block) + const Block & sample_block_) : log(&Logger::get("MySQLDictionarySource")) , update_time{std::chrono::system_clock::from_time_t(0)} , dict_struct{dict_struct_} @@ -66,7 +66,7 @@ MySQLDictionarySource::MySQLDictionarySource( , where{config.getString(config_prefix + ".where", "")} , update_field{config.getString(config_prefix + ".update_field", "")} , dont_check_update_time{config.getBool(config_prefix + ".dont_check_update_time", false)} - , sample_block{sample_block} + , sample_block{sample_block_} , pool{config, config_prefix} , query_builder{dict_struct, db, table, where, IdentifierQuotingStyle::Backticks} , load_all_query{query_builder.composeLoadAllQuery()} diff --git a/dbms/src/Dictionaries/MySQLDictionarySource.h b/dbms/src/Dictionaries/MySQLDictionarySource.h index cfc45f42bb3..047bd860ee1 100644 --- a/dbms/src/Dictionaries/MySQLDictionarySource.h +++ b/dbms/src/Dictionaries/MySQLDictionarySource.h @@ -33,7 +33,7 @@ public: const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - const Block & sample_block); + const Block & sample_block_); /// copy-constructor is provided in order to support cloneability MySQLDictionarySource(const MySQLDictionarySource & other); diff --git a/dbms/src/Dictionaries/RangeDictionaryBlockInputStream.h b/dbms/src/Dictionaries/RangeDictionaryBlockInputStream.h index 827667a7dc5..a2353051e5d 100644 --- a/dbms/src/Dictionaries/RangeDictionaryBlockInputStream.h +++ b/dbms/src/Dictionaries/RangeDictionaryBlockInputStream.h @@ -87,16 +87,16 @@ private: template RangeDictionaryBlockInputStream::RangeDictionaryBlockInputStream( - DictionaryPtr dictionary, - size_t max_block_size, - const Names & column_names, - PaddedPODArray && ids, + DictionaryPtr dictionary_, + size_t max_block_size_, + const Names & column_names_, + PaddedPODArray && ids_, PaddedPODArray && block_start_dates, PaddedPODArray && block_end_dates) - : DictionaryBlockInputStreamBase(ids.size(), max_block_size) - , dictionary(dictionary) - , column_names(column_names) - , ids(std::move(ids)) + : DictionaryBlockInputStreamBase(ids_.size(), max_block_size_) + , dictionary(dictionary_) + , column_names(column_names_) + , ids(std::move(ids_)) , start_dates(std::move(block_start_dates)) , end_dates(std::move(block_end_dates)) { @@ -231,49 +231,49 @@ Block RangeDictionaryBlockInputStream::fillBlock column = getColumnFromAttribute(&DictionaryType::get##TYPE, ids_to_fill, date_key, attribute, *dictionary) switch (attribute.underlying_type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: GET_COLUMN_FORM_ATTRIBUTE(UInt8); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: GET_COLUMN_FORM_ATTRIBUTE(UInt16); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: 
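// Note the asymmetry this switch relies on: GET_COLUMN_FORM_ATTRIBUTE(TYPE)
// pastes the bare type name into a getter pointer
// (&DictionaryType::get##TYPE -> &DictionaryType::getUInt32) while the case
// label is built from ut##TYPE (-> utUInt32). Renaming only the enum values
// therefore leaves getUInt8/.../getString, and every caller of them,
// untouched. Rough expansion for TYPE = UInt32:
//
//     case AttributeUnderlyingType::utUInt32:
//         column = getColumnFromAttribute(&DictionaryType::getUInt32,
//                                         ids_to_fill, date_key, attribute, *dictionary);
//         break;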
GET_COLUMN_FORM_ATTRIBUTE(UInt32); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: GET_COLUMN_FORM_ATTRIBUTE(UInt64); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: GET_COLUMN_FORM_ATTRIBUTE(UInt128); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: GET_COLUMN_FORM_ATTRIBUTE(Int8); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: GET_COLUMN_FORM_ATTRIBUTE(Int16); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: GET_COLUMN_FORM_ATTRIBUTE(Int32); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: GET_COLUMN_FORM_ATTRIBUTE(Int64); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: GET_COLUMN_FORM_ATTRIBUTE(Float32); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: GET_COLUMN_FORM_ATTRIBUTE(Float64); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: GET_COLUMN_FORM_ATTRIBUTE(Decimal32); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: GET_COLUMN_FORM_ATTRIBUTE(Decimal64); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: GET_COLUMN_FORM_ATTRIBUTE(Decimal128); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: column = getColumnFromAttributeString(ids_to_fill, date_key, attribute, *dictionary); break; } diff --git a/dbms/src/Dictionaries/RangeHashedDictionary.cpp b/dbms/src/Dictionaries/RangeHashedDictionary.cpp index 05f29e05c42..ab67ce59371 100644 --- a/dbms/src/Dictionaries/RangeHashedDictionary.cpp +++ b/dbms/src/Dictionaries/RangeHashedDictionary.cpp @@ -68,16 +68,16 @@ bool operator<(const RangeHashedDictionary::Range & left, const RangeHashedDicti RangeHashedDictionary::RangeHashedDictionary( - const std::string & dictionary_name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty) - : dictionary_name{dictionary_name} - , dict_struct(dict_struct) - , source_ptr{std::move(source_ptr)} - , dict_lifetime(dict_lifetime) - , require_nonempty(require_nonempty) + const std::string & dictionary_name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_) + : dictionary_name{dictionary_name_} + , dict_struct(dict_struct_) + , source_ptr{std::move(source_ptr_)} + , dict_lifetime(dict_lifetime_) + , require_nonempty(require_nonempty_) { createAttributes(); loadData(); @@ -92,7 +92,7 @@ RangeHashedDictionary::RangeHashedDictionary( const PaddedPODArray & dates, \ ResultArrayType & out) const \ { \ - const auto & attribute = getAttributeWithType(attribute_name, AttributeUnderlyingType::TYPE); \ + const auto & attribute = getAttributeWithType(attribute_name, AttributeUnderlyingType::ut##TYPE); \ getItems(attribute, ids, dates, out); \ } DECLARE_MULTIPLE_GETTER(UInt8) @@ -117,7 +117,7 @@ void RangeHashedDictionary::getString( const PaddedPODArray & dates, ColumnString * out) const { - const auto & attribute = getAttributeWithType(attribute_name, AttributeUnderlyingType::String); + const auto & attribute = getAttributeWithType(attribute_name, AttributeUnderlyingType::utString); const auto & attr = *std::get>(attribute.maps); const auto & 
null_value = std::get(attribute.null_values); @@ -227,51 +227,51 @@ void RangeHashedDictionary::calculateBytesAllocated() { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { addAttributeSize(attribute); bytes_allocated += sizeof(Arena) + attribute.string_arena->size(); @@ -296,51 +296,51 @@ RangeHashedDictionary::createAttributeWithType(const AttributeUnderlyingType typ switch (type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: createAttributeImpl(attr, null_value); break; - case 
AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { attr.null_values = null_value.get(); attr.maps = std::make_unique>(); @@ -363,7 +363,7 @@ void RangeHashedDictionary::getItems( if (false) { } -#define DISPATCH(TYPE) else if (attribute.type == AttributeUnderlyingType::TYPE) getItemsImpl(attribute, ids, dates, out); +#define DISPATCH(TYPE) else if (attribute.type == AttributeUnderlyingType::ut##TYPE) getItemsImpl(attribute, ids, dates, out); DISPATCH(UInt8) DISPATCH(UInt16) DISPATCH(UInt32) @@ -443,51 +443,51 @@ void RangeHashedDictionary::setAttributeValue(Attribute & attribute, const Key i { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { auto & map = *std::get>(attribute.maps); const auto & string = value.get(); @@ -544,50 +544,50 @@ void RangeHashedDictionary::getIdsAndDates( switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case 
AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: getIdsAndDates(attribute, ids, start_dates, end_dates); break; } diff --git a/dbms/src/Dictionaries/RangeHashedDictionary.h b/dbms/src/Dictionaries/RangeHashedDictionary.h index a02b1377db5..6e03fc30720 100644 --- a/dbms/src/Dictionaries/RangeHashedDictionary.h +++ b/dbms/src/Dictionaries/RangeHashedDictionary.h @@ -18,11 +18,11 @@ class RangeHashedDictionary final : public IDictionaryBase { public: RangeHashedDictionary( - const std::string & dictionary_name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty); + const std::string & dictionary_name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_); std::string getName() const override { return dictionary_name; } diff --git a/dbms/src/Dictionaries/TrieDictionary.cpp b/dbms/src/Dictionaries/TrieDictionary.cpp index ac11272145e..8ab7d2f34af 100644 --- a/dbms/src/Dictionaries/TrieDictionary.cpp +++ b/dbms/src/Dictionaries/TrieDictionary.cpp @@ -35,16 +35,16 @@ namespace ErrorCodes } TrieDictionary::TrieDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty) - : name{name} - , dict_struct(dict_struct) - , source_ptr{std::move(source_ptr)} - , dict_lifetime(dict_lifetime) - , require_nonempty(require_nonempty) + const std::string & name_, + const DictionaryStructure & dict_struct_, + 
DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_) + : name{name_} + , dict_struct(dict_struct_) + , source_ptr{std::move(source_ptr_)} + , dict_lifetime(dict_lifetime_) + , require_nonempty(require_nonempty_) , logger(&Poco::Logger::get("TrieDictionary")) { createAttributes(); @@ -75,7 +75,7 @@ TrieDictionary::~TrieDictionary() validateKeyTypes(key_types); \ \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ const auto null_value = std::get(attribute.null_values); \ \ @@ -107,7 +107,7 @@ void TrieDictionary::getString( validateKeyTypes(key_types); const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); const auto & null_value = StringRef{std::get(attribute.null_values)}; @@ -129,7 +129,7 @@ void TrieDictionary::getString( validateKeyTypes(key_types); \ \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl( \ attribute, \ @@ -163,7 +163,7 @@ void TrieDictionary::getString( validateKeyTypes(key_types); const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsImpl( attribute, @@ -183,7 +183,7 @@ void TrieDictionary::getString( validateKeyTypes(key_types); \ \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl( \ attribute, key_columns, [&](const size_t row, const auto value) { out[row] = value; }, [&](const size_t) { return def; }); \ @@ -214,7 +214,7 @@ void TrieDictionary::getString( validateKeyTypes(key_types); const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsImpl( attribute, @@ -231,50 +231,50 @@ void TrieDictionary::has(const Columns & key_columns, const DataTypes & key_type switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: has(attribute, key_columns, out); break; - case 
AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: has(attribute, key_columns, out); break; } @@ -356,51 +356,51 @@ void TrieDictionary::calculateBytesAllocated() { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { addAttributeSize(attribute); bytes_allocated += sizeof(Arena) + attribute.string_arena->size(); @@ -438,51 +438,51 @@ TrieDictionary::Attribute TrieDictionary::createAttributeWithType(const Attribut switch (type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: createAttributeImpl(attr, 
null_value); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { attr.null_values = null_value.get(); attr.maps.emplace>(); @@ -575,37 +575,37 @@ bool TrieDictionary::setAttributeValue(Attribute & attribute, const StringRef ke { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: return setAttributeValueImpl(attribute, key, 
value.get()); - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { const auto & string = value.get(); const auto string_in_arena = attribute.string_arena->insert(string.data(), string.size()); diff --git a/dbms/src/Dictionaries/TrieDictionary.h b/dbms/src/Dictionaries/TrieDictionary.h index a873f7bdd16..18b1b1c79b9 100644 --- a/dbms/src/Dictionaries/TrieDictionary.h +++ b/dbms/src/Dictionaries/TrieDictionary.h @@ -23,11 +23,11 @@ class TrieDictionary final : public IDictionaryBase { public: TrieDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty); + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_); ~TrieDictionary() override; diff --git a/dbms/src/Dictionaries/XDBCDictionarySource.cpp b/dbms/src/Dictionaries/XDBCDictionarySource.cpp index 243d8213f0b..627092844ec 100644 --- a/dbms/src/Dictionaries/XDBCDictionarySource.cpp +++ b/dbms/src/Dictionaries/XDBCDictionarySource.cpp @@ -40,8 +40,8 @@ namespace const Context & context, UInt64 max_block_size, const ConnectionTimeouts & timeouts, - const String name) - : name(name) + const String name_) + : name(name_) { read_buf = std::make_unique(uri, Poco::Net::HTTPRequest::HTTP_POST, callback, timeouts); reader diff --git a/dbms/src/Formats/BlockInputStreamFromRowInputStream.cpp b/dbms/src/Formats/BlockInputStreamFromRowInputStream.cpp index 2335363db70..fc38b476e0b 100644 --- a/dbms/src/Formats/BlockInputStreamFromRowInputStream.cpp +++ b/dbms/src/Formats/BlockInputStreamFromRowInputStream.cpp @@ -76,15 +76,15 @@ Block BlockInputStreamFromRowInputStream::readImpl() try { ++total_rows; - RowReadExtension info; - if (!row_input->read(columns, info)) + RowReadExtension info_; + if (!row_input->read(columns, info_)) break; if (read_virtual_columns_callback) read_virtual_columns_callback(); - for (size_t column_idx = 0; column_idx < info.read_columns.size(); ++column_idx) + for (size_t column_idx = 0; column_idx < info_.read_columns.size(); ++column_idx) { - if (!info.read_columns[column_idx]) + if (!info_.read_columns[column_idx]) { size_t column_size = columns[column_idx]->size(); if (column_size == 0) diff --git a/dbms/src/Formats/CSVRowInputStream.cpp b/dbms/src/Formats/CSVRowInputStream.cpp index 635fef82cd0..662e6306e25 100644 --- a/dbms/src/Formats/CSVRowInputStream.cpp +++ b/dbms/src/Formats/CSVRowInputStream.cpp @@ -92,8 +92,8 @@ static void skipRow(ReadBuffer & istr, const FormatSettings::CSV & settings, siz } -CSVRowInputStream::CSVRowInputStream(ReadBuffer & istr_, const Block & header_, bool with_names_, const FormatSettings & format_settings) - : istr(istr_), header(header_), with_names(with_names_), format_settings(format_settings) +CSVRowInputStream::CSVRowInputStream(ReadBuffer & istr_, const Block & header_, bool with_names_, const FormatSettings & format_settings_) + : istr(istr_), header(header_), with_names(with_names_), format_settings(format_settings_) { const auto num_columns = header.columns(); diff --git a/dbms/src/Formats/CSVRowInputStream.h b/dbms/src/Formats/CSVRowInputStream.h index 6cb0fe8e82f..b398858ee78 100644 --- a/dbms/src/Formats/CSVRowInputStream.h +++ 
b/dbms/src/Formats/CSVRowInputStream.h @@ -21,7 +21,7 @@ class CSVRowInputStream : public IRowInputStream public: /** with_names - in the first line the header with column names */ - CSVRowInputStream(ReadBuffer & istr_, const Block & header_, bool with_names_, const FormatSettings & format_settings); + CSVRowInputStream(ReadBuffer & istr_, const Block & header_, bool with_names_, const FormatSettings & format_settings_); bool read(MutableColumns & columns, RowReadExtension & ext) override; void readPrefix() override; diff --git a/dbms/src/Formats/MySQLBlockInputStream.cpp b/dbms/src/Formats/MySQLBlockInputStream.cpp index 1eeb0981afd..1896dbcc4b6 100644 --- a/dbms/src/Formats/MySQLBlockInputStream.cpp +++ b/dbms/src/Formats/MySQLBlockInputStream.cpp @@ -20,8 +20,8 @@ namespace ErrorCodes MySQLBlockInputStream::MySQLBlockInputStream( - const mysqlxx::PoolWithFailover::Entry & entry, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size, const bool auto_close) - : entry{entry}, query{this->entry->query(query_str)}, result{query.use()}, max_block_size{max_block_size}, auto_close{auto_close} + const mysqlxx::PoolWithFailover::Entry & entry_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_, const bool auto_close_) + : entry{entry_}, query{this->entry->query(query_str)}, result{query.use()}, max_block_size{max_block_size_}, auto_close{auto_close_} { if (sample_block.columns() != result.getNumFields()) throw Exception{"mysqlxx::UseQueryResult contains " + toString(result.getNumFields()) + " columns while " @@ -40,46 +40,46 @@ namespace { switch (type) { - case ValueType::UInt8: + case ValueType::vtUInt8: static_cast(column).insertValue(value.getUInt()); break; - case ValueType::UInt16: + case ValueType::vtUInt16: static_cast(column).insertValue(value.getUInt()); break; - case ValueType::UInt32: + case ValueType::vtUInt32: static_cast(column).insertValue(value.getUInt()); break; - case ValueType::UInt64: + case ValueType::vtUInt64: static_cast(column).insertValue(value.getUInt()); break; - case ValueType::Int8: + case ValueType::vtInt8: static_cast(column).insertValue(value.getInt()); break; - case ValueType::Int16: + case ValueType::vtInt16: static_cast(column).insertValue(value.getInt()); break; - case ValueType::Int32: + case ValueType::vtInt32: static_cast(column).insertValue(value.getInt()); break; - case ValueType::Int64: + case ValueType::vtInt64: static_cast(column).insertValue(value.getInt()); break; - case ValueType::Float32: + case ValueType::vtFloat32: static_cast(column).insertValue(value.getDouble()); break; - case ValueType::Float64: + case ValueType::vtFloat64: static_cast(column).insertValue(value.getDouble()); break; - case ValueType::String: + case ValueType::vtString: static_cast(column).insertData(value.data(), value.size()); break; - case ValueType::Date: + case ValueType::vtDate: static_cast(column).insertValue(UInt16(value.getDate().getDayNum())); break; - case ValueType::DateTime: + case ValueType::vtDateTime: static_cast(column).insertValue(UInt32(value.getDateTime())); break; - case ValueType::UUID: + case ValueType::vtUUID: static_cast(column).insert(parse(value.data(), value.size())); break; } diff --git a/dbms/src/Formats/MySQLBlockInputStream.h b/dbms/src/Formats/MySQLBlockInputStream.h index bba523ddab7..238994acbd8 100644 --- a/dbms/src/Formats/MySQLBlockInputStream.h +++ b/dbms/src/Formats/MySQLBlockInputStream.h @@ -15,11 +15,11 @@ class MySQLBlockInputStream final : public 
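// The info -> info_ rename inside BlockInputStreamFromRowInputStream::
// readImpl() a few hunks above is the local-variable flavor of the same
// cleanup: the stream base class already exposes an `info` member
// (presumably the BlockStreamProfileInfo of IBlockInputStream), so the
// local `RowReadExtension info` shadowed it. Minimal sketch of the hazard,
// with hypothetical names:
//
//     struct Base { int info = 0; };
//     struct Derived : Base
//     {
//         int f()
//         {
//             int info = 42;   // shadows Base::info; shadow warnings fire here
//             return info;
//         }
//     };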
IBlockInputStream { public: MySQLBlockInputStream( - const mysqlxx::PoolWithFailover::Entry & entry, + const mysqlxx::PoolWithFailover::Entry & entry_, const std::string & query_str, const Block & sample_block, - const UInt64 max_block_size, - const bool auto_close = false); + const UInt64 max_block_size_, + const bool auto_close_ = false); String getName() const override { return "MySQL"; } diff --git a/dbms/src/Formats/ProtobufWriter.h b/dbms/src/Formats/ProtobufWriter.h index f11fbcbc391..6e50e023532 100644 --- a/dbms/src/Formats/ProtobufWriter.h +++ b/dbms/src/Formats/ProtobufWriter.h @@ -108,7 +108,7 @@ private: { size_t start; size_t end; - Piece(size_t start, size_t end) : start(start), end(end) {} + Piece(size_t start_, size_t end_) : start(start_), end(end_) {} Piece() = default; }; @@ -116,8 +116,8 @@ private: { size_t num_pieces_at_start; size_t num_bytes_skipped_at_start; - NestedInfo(size_t num_pieces_at_start, size_t num_bytes_skipped_at_start) - : num_pieces_at_start(num_pieces_at_start), num_bytes_skipped_at_start(num_bytes_skipped_at_start) + NestedInfo(size_t num_pieces_at_start_, size_t num_bytes_skipped_at_start_) + : num_pieces_at_start(num_pieces_at_start_), num_bytes_skipped_at_start(num_bytes_skipped_at_start_) { } }; diff --git a/dbms/src/Formats/TabSeparatedRowInputStream.cpp b/dbms/src/Formats/TabSeparatedRowInputStream.cpp index 0c16c14e306..69850dbc455 100644 --- a/dbms/src/Formats/TabSeparatedRowInputStream.cpp +++ b/dbms/src/Formats/TabSeparatedRowInputStream.cpp @@ -48,8 +48,8 @@ static void checkForCarriageReturn(ReadBuffer & istr) TabSeparatedRowInputStream::TabSeparatedRowInputStream( - ReadBuffer & istr_, const Block & header_, bool with_names_, bool with_types_, const FormatSettings & format_settings) - : istr(istr_), header(header_), with_names(with_names_), with_types(with_types_), format_settings(format_settings) + ReadBuffer & istr_, const Block & header_, bool with_names_, bool with_types_, const FormatSettings & format_settings_) + : istr(istr_), header(header_), with_names(with_names_), with_types(with_types_), format_settings(format_settings_) { const auto num_columns = header.columns(); diff --git a/dbms/src/Formats/TabSeparatedRowInputStream.h b/dbms/src/Formats/TabSeparatedRowInputStream.h index 3a0ed13c1bd..f8ebebbdfe4 100644 --- a/dbms/src/Formats/TabSeparatedRowInputStream.h +++ b/dbms/src/Formats/TabSeparatedRowInputStream.h @@ -23,7 +23,7 @@ public: * with_types - on the next line header with type names */ TabSeparatedRowInputStream( - ReadBuffer & istr_, const Block & header_, bool with_names_, bool with_types_, const FormatSettings & format_settings); + ReadBuffer & istr_, const Block & header_, bool with_names_, bool with_types_, const FormatSettings & format_settings_); bool read(MutableColumns & columns, RowReadExtension & ext) override; void readPrefix() override; diff --git a/dbms/src/Functions/FunctionJoinGet.h b/dbms/src/Functions/FunctionJoinGet.h index 81f54b84333..9885b05657d 100644 --- a/dbms/src/Functions/FunctionJoinGet.h +++ b/dbms/src/Functions/FunctionJoinGet.h @@ -14,12 +14,12 @@ public: static constexpr auto name = "joinGet"; FunctionJoinGet( - TableStructureReadLockHolder table_lock, StoragePtr storage_join, JoinPtr join, const String & attr_name, DataTypePtr return_type) - : table_lock(std::move(table_lock)) - , storage_join(std::move(storage_join)) - , join(std::move(join)) - , attr_name(attr_name) - , return_type(std::move(return_type)) + TableStructureReadLockHolder table_lock_, StoragePtr storage_join_, JoinPtr 
join_, const String & attr_name_, DataTypePtr return_type_) + : table_lock(std::move(table_lock_)) + , storage_join(std::move(storage_join_)) + , join(std::move(join_)) + , attr_name(attr_name_) + , return_type(std::move(return_type_)) { } @@ -47,7 +47,7 @@ public: static constexpr auto name = "joinGet"; static FunctionBuilderPtr create(const Context & context) { return std::make_shared(context); } - FunctionBuilderJoinGet(const Context & context) : context(context) {} + FunctionBuilderJoinGet(const Context & context_) : context(context_) {} String getName() const override { return name; } diff --git a/dbms/src/Functions/FunctionsComparison.h b/dbms/src/Functions/FunctionsComparison.h index 2da3e0a8970..c3d29339d55 100644 --- a/dbms/src/Functions/FunctionsComparison.h +++ b/dbms/src/Functions/FunctionsComparison.h @@ -554,8 +554,8 @@ public: static constexpr auto name = Name::name; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionComparison(const Context & context) - : context(context), + FunctionComparison(const Context & context_) + : context(context_), check_decimal_overflow(decimalCheckComparisonOverflow(context)) {} diff --git a/dbms/src/Functions/FunctionsConversion.h b/dbms/src/Functions/FunctionsConversion.h index 30dbb380e25..f903210ef4f 100644 --- a/dbms/src/Functions/FunctionsConversion.h +++ b/dbms/src/Functions/FunctionsConversion.h @@ -1485,8 +1485,8 @@ class PreparedFunctionCast : public PreparedFunctionImpl public: using WrapperType = std::function; - explicit PreparedFunctionCast(WrapperType && wrapper_function, const char * name) - : wrapper_function(std::move(wrapper_function)), name(name) {} + explicit PreparedFunctionCast(WrapperType && wrapper_function_, const char * name_) + : wrapper_function(std::move(wrapper_function_)), name(name_) {} String getName() const override { return name; } @@ -1520,10 +1520,10 @@ public: using WrapperType = std::function; using MonotonicityForRange = std::function; - FunctionCast(const Context & context, const char * name, MonotonicityForRange && monotonicity_for_range - , const DataTypes & argument_types, const DataTypePtr & return_type) - : context(context), name(name), monotonicity_for_range(monotonicity_for_range) - , argument_types(argument_types), return_type(return_type) + FunctionCast(const Context & context_, const char * name_, MonotonicityForRange && monotonicity_for_range_ + , const DataTypes & argument_types_, const DataTypePtr & return_type_) + : context(context_), name(name_), monotonicity_for_range(monotonicity_for_range_) + , argument_types(argument_types_), return_type(return_type_) { } @@ -2164,7 +2164,7 @@ public: static constexpr auto name = "CAST"; static FunctionBuilderPtr create(const Context & context) { return std::make_shared(context); } - FunctionBuilderCast(const Context & context) : context(context) {} + FunctionBuilderCast(const Context & context_) : context(context_) {} String getName() const override { return name; } diff --git a/dbms/src/Functions/FunctionsExternalDictionaries.h b/dbms/src/Functions/FunctionsExternalDictionaries.h index 877d3e6f0f2..d86bc291212 100644 --- a/dbms/src/Functions/FunctionsExternalDictionaries.h +++ b/dbms/src/Functions/FunctionsExternalDictionaries.h @@ -74,7 +74,7 @@ public: return std::make_shared(context.getExternalDictionaries()); } - FunctionDictHas(const ExternalDictionaries & dictionaries) : dictionaries(dictionaries) {} + FunctionDictHas(const ExternalDictionaries & dictionaries_) : dictionaries(dictionaries_) {} 
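// Worth noticing: the trailing underscore is added only where a parameter
// actually shadows a member, not wholesale. In the hunks that follow,
// FunctionDictGet keeps `dec_scale` (its member is decimal_scale) and
// FunctionCapture keeps `captured` (its member is captured_names), e.g.:
//
//     FunctionCapture(const ExpressionActionsPtr & expression_actions_,
//                     const Names & captured,        // no underscore needed
//                     /* ... */)
//         : expression_actions(expression_actions_)
//         , captured_names(captured)                 // different member name, no shadowing
//         /* ... */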
String getName() const override { return name; } @@ -219,7 +219,7 @@ public: return std::make_shared(context.getExternalDictionaries()); } - FunctionDictGetString(const ExternalDictionaries & dictionaries) : dictionaries(dictionaries) {} + FunctionDictGetString(const ExternalDictionaries & dictionaries_) : dictionaries(dictionaries_) {} String getName() const override { return name; } @@ -414,7 +414,7 @@ public: return std::make_shared(context.getExternalDictionaries()); } - FunctionDictGetStringOrDefault(const ExternalDictionaries & dictionaries) : dictionaries(dictionaries) {} + FunctionDictGetStringOrDefault(const ExternalDictionaries & dictionaries_) : dictionaries(dictionaries_) {} String getName() const override { return name; } @@ -729,8 +729,8 @@ public: return std::make_shared(context.getExternalDictionaries(), dec_scale); } - FunctionDictGet(const ExternalDictionaries & dictionaries, UInt32 dec_scale = 0) - : dictionaries(dictionaries) + FunctionDictGet(const ExternalDictionaries & dictionaries_, UInt32 dec_scale = 0) + : dictionaries(dictionaries_) , decimal_scale(dec_scale) {} @@ -1000,8 +1000,8 @@ public: return std::make_shared(context.getExternalDictionaries(), dec_scale); } - FunctionDictGetOrDefault(const ExternalDictionaries & dictionaries, UInt32 dec_scale = 0) - : dictionaries(dictionaries) + FunctionDictGetOrDefault(const ExternalDictionaries & dictionaries_, UInt32 dec_scale = 0) + : dictionaries(dictionaries_) , decimal_scale(dec_scale) {} @@ -1290,7 +1290,7 @@ public: return std::make_shared(context.getExternalDictionaries(), context); } - FunctionDictGetNoType(const ExternalDictionaries & dictionaries, const Context & context) : dictionaries(dictionaries), context(context) {} + FunctionDictGetNoType(const ExternalDictionaries & dictionaries_, const Context & context_) : dictionaries(dictionaries_), context(context_) {} String getName() const override { return name; } @@ -1439,7 +1439,7 @@ public: return std::make_shared(context.getExternalDictionaries(), context); } - FunctionDictGetNoTypeOrDefault(const ExternalDictionaries & dictionaries, const Context & context) : dictionaries(dictionaries), context(context) {} + FunctionDictGetNoTypeOrDefault(const ExternalDictionaries & dictionaries_, const Context & context_) : dictionaries(dictionaries_), context(context_) {} String getName() const override { return name; } @@ -1582,7 +1582,7 @@ public: return std::make_shared(context.getExternalDictionaries()); } - FunctionDictGetHierarchy(const ExternalDictionaries & dictionaries) : dictionaries(dictionaries) {} + FunctionDictGetHierarchy(const ExternalDictionaries & dictionaries_) : dictionaries(dictionaries_) {} String getName() const override { return name; } @@ -1739,7 +1739,7 @@ public: return std::make_shared(context.getExternalDictionaries()); } - FunctionDictIsIn(const ExternalDictionaries & dictionaries) : dictionaries(dictionaries) {} + FunctionDictIsIn(const ExternalDictionaries & dictionaries_) : dictionaries(dictionaries_) {} String getName() const override { return name; } diff --git a/dbms/src/Functions/FunctionsExternalModels.h b/dbms/src/Functions/FunctionsExternalModels.h index ab193e0a2bc..210729db478 100644 --- a/dbms/src/Functions/FunctionsExternalModels.h +++ b/dbms/src/Functions/FunctionsExternalModels.h @@ -17,7 +17,7 @@ public: static FunctionPtr create(const Context & context); - explicit FunctionModelEvaluate(const ExternalModels & models) : models(models) {} + explicit FunctionModelEvaluate(const ExternalModels & models_) : models(models_) {} 
String getName() const override { return name; } diff --git a/dbms/src/Functions/FunctionsMiscellaneous.h b/dbms/src/Functions/FunctionsMiscellaneous.h index 6803e16abbe..96539f9559f 100644 --- a/dbms/src/Functions/FunctionsMiscellaneous.h +++ b/dbms/src/Functions/FunctionsMiscellaneous.h @@ -16,11 +16,11 @@ class FunctionExpression : public IFunctionBase, public IPreparedFunction, public std::enable_shared_from_this { public: - FunctionExpression(const ExpressionActionsPtr & expression_actions, - const DataTypes & argument_types, const Names & argument_names, - const DataTypePtr & return_type, const std::string & return_name) - : expression_actions(expression_actions), argument_types(argument_types), - argument_names(argument_names), return_type(return_type), return_name(return_name) + FunctionExpression(const ExpressionActionsPtr & expression_actions_, + const DataTypes & argument_types_, const Names & argument_names_, + const DataTypePtr & return_type_, const std::string & return_name_) + : expression_actions(expression_actions_), argument_types(argument_types_), + argument_names(argument_names_), return_type(return_type_), return_name(return_name_) { } @@ -65,11 +65,11 @@ class FunctionCapture : public IFunctionBase, public IPreparedFunction, public F public std::enable_shared_from_this { public: - FunctionCapture(const ExpressionActionsPtr & expression_actions, const Names & captured, - const NamesAndTypesList & lambda_arguments, - const DataTypePtr & function_return_type, const std::string & expression_return_name) - : expression_actions(expression_actions), captured_names(captured), lambda_arguments(lambda_arguments) - , function_return_type(function_return_type), expression_return_name(expression_return_name) + FunctionCapture(const ExpressionActionsPtr & expression_actions_, const Names & captured, + const NamesAndTypesList & lambda_arguments_, + const DataTypePtr & function_return_type_, const std::string & expression_return_name_) + : expression_actions(expression_actions_), captured_names(captured), lambda_arguments(lambda_arguments_) + , function_return_type(function_return_type_), expression_return_name(expression_return_name_) { const auto & all_arguments = expression_actions->getRequiredColumnsWithTypes(); diff --git a/dbms/src/Functions/FunctionsRound.h b/dbms/src/Functions/FunctionsRound.h index 38b72274f5a..bc276435cc8 100644 --- a/dbms/src/Functions/FunctionsRound.h +++ b/dbms/src/Functions/FunctionsRound.h @@ -574,7 +574,7 @@ class FunctionRoundDown : public IFunction public: static constexpr auto name = "roundDown"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionRoundDown(const Context & context) : context(context) {} + FunctionRoundDown(const Context & context_) : context(context_) {} public: String getName() const override { return name; } diff --git a/dbms/src/Functions/GatherUtils/Sinks.h b/dbms/src/Functions/GatherUtils/Sinks.h index cd454f7ca2e..c6925fab865 100644 --- a/dbms/src/Functions/GatherUtils/Sinks.h +++ b/dbms/src/Functions/GatherUtils/Sinks.h @@ -196,8 +196,8 @@ struct NullableArraySink : public ArraySink NullMap & null_map; - NullableArraySink(ColumnArray & arr, NullMap & null_map, size_t column_size) - : ArraySink(arr, column_size), null_map(null_map) + NullableArraySink(ColumnArray & arr, NullMap & null_map_, size_t column_size) + : ArraySink(arr, column_size), null_map(null_map_) { } diff --git a/dbms/src/Functions/GatherUtils/Sources.h b/dbms/src/Functions/GatherUtils/Sources.h index 
41e1a7a0b84..d43dc69b2b0 100644 --- a/dbms/src/Functions/GatherUtils/Sources.h +++ b/dbms/src/Functions/GatherUtils/Sources.h @@ -585,8 +585,8 @@ struct NullableArraySource : public ArraySource const NullMap & null_map; - NullableArraySource(const ColumnArray & arr, const NullMap & null_map) - : ArraySource(arr), null_map(null_map) + NullableArraySource(const ColumnArray & arr, const NullMap & null_map_) + : ArraySource(arr), null_map(null_map_) { } @@ -743,7 +743,7 @@ struct NullableValueSource : public ValueSource const NullMap & null_map; template - explicit NullableValueSource(const Column & col, const NullMap & null_map) : ValueSource(col), null_map(null_map) {} + explicit NullableValueSource(const Column & col, const NullMap & null_map_) : ValueSource(col), null_map(null_map_) {} void accept(ValueSourceVisitor & visitor) override { visitor.visit(*this); } diff --git a/dbms/src/Functions/GeoUtils.h b/dbms/src/Functions/GeoUtils.h index 9c5ebf98b16..2191290d858 100644 --- a/dbms/src/Functions/GeoUtils.h +++ b/dbms/src/Functions/GeoUtils.h @@ -91,8 +91,8 @@ public: using Box = boost::geometry::model::box; using Segment = boost::geometry::model::segment; - explicit PointInPolygonWithGrid(const Polygon & polygon, UInt16 grid_size = 8) - : grid_size(std::max(1, grid_size)), polygon(polygon) {} + explicit PointInPolygonWithGrid(const Polygon & polygon_, UInt16 grid_size_ = 8) + : grid_size(std::max(1, grid_size_)), polygon(polygon_) {} void init(); @@ -510,7 +510,7 @@ public: using Polygon = boost::geometry::model::polygon; using Box = boost::geometry::model::box; - explicit PointInPolygon(const Polygon & polygon) : polygon(polygon) {} + explicit PointInPolygon(const Polygon & polygon_) : polygon(polygon_) {} void init() { diff --git a/dbms/src/Functions/IFunction.h b/dbms/src/Functions/IFunction.h index ef7e882e700..287e7a84170 100644 --- a/dbms/src/Functions/IFunction.h +++ b/dbms/src/Functions/IFunction.h @@ -408,7 +408,7 @@ protected: class DefaultExecutable final : public PreparedFunctionImpl { public: - explicit DefaultExecutable(std::shared_ptr function) : function(std::move(function)) {} + explicit DefaultExecutable(std::shared_ptr function_) : function(std::move(function_)) {} String getName() const override { return function->getName(); } @@ -434,8 +434,8 @@ private: class DefaultFunction final : public IFunctionBase { public: - DefaultFunction(std::shared_ptr function, DataTypes arguments, DataTypePtr return_type) - : function(std::move(function)), arguments(std::move(arguments)), return_type(std::move(return_type)) {} + DefaultFunction(std::shared_ptr function_, DataTypes arguments_, DataTypePtr return_type_) + : function(std::move(function_)), arguments(std::move(arguments_)), return_type(std::move(return_type_)) {} String getName() const override { return function->getName(); } @@ -478,7 +478,7 @@ private: class DefaultFunctionBuilder : public FunctionBuilderImpl { public: - explicit DefaultFunctionBuilder(std::shared_ptr function) : function(std::move(function)) {} + explicit DefaultFunctionBuilder(std::shared_ptr function_) : function(std::move(function_)) {} void checkNumberOfArguments(size_t number_of_arguments) const override { diff --git a/dbms/src/Functions/RapidJSONParser.h b/dbms/src/Functions/RapidJSONParser.h index c88d61fb69d..ff4ecd506fd 100644 --- a/dbms/src/Functions/RapidJSONParser.h +++ b/dbms/src/Functions/RapidJSONParser.h @@ -32,7 +32,7 @@ struct RapidJSONParser { public: Iterator() {} - Iterator(const rapidjson::Document & document) : value(&document) {} + 
Iterator(const rapidjson::Document & document_) : value(&document_) {} Iterator(const Iterator & src) : value(src.value) , is_object_member(src.is_object_member) diff --git a/dbms/src/Functions/array/array.cpp b/dbms/src/Functions/array/array.cpp index 6d641d13a69..0aa1f6f9bae 100644 --- a/dbms/src/Functions/array/array.cpp +++ b/dbms/src/Functions/array/array.cpp @@ -19,8 +19,8 @@ public: return std::make_shared(context); } - FunctionArray(const Context & context) - : context(context) + FunctionArray(const Context & context_) + : context(context_) { } diff --git a/dbms/src/Functions/array/arrayConcat.cpp b/dbms/src/Functions/array/arrayConcat.cpp index 42b92116bf2..32ba791ac5f 100644 --- a/dbms/src/Functions/array/arrayConcat.cpp +++ b/dbms/src/Functions/array/arrayConcat.cpp @@ -27,7 +27,7 @@ class FunctionArrayConcat : public IFunction public: static constexpr auto name = "arrayConcat"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionArrayConcat(const Context & context) : context(context) {} + FunctionArrayConcat(const Context & context_) : context(context_) {} String getName() const override { return name; } diff --git a/dbms/src/Functions/array/arrayIntersect.cpp b/dbms/src/Functions/array/arrayIntersect.cpp index 4e88bbad920..b735d5497d6 100644 --- a/dbms/src/Functions/array/arrayIntersect.cpp +++ b/dbms/src/Functions/array/arrayIntersect.cpp @@ -35,7 +35,7 @@ class FunctionArrayIntersect : public IFunction public: static constexpr auto name = "arrayIntersect"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionArrayIntersect(const Context & context) : context(context) {} + FunctionArrayIntersect(const Context & context_) : context(context_) {} String getName() const override { return name; } @@ -81,8 +81,8 @@ private: const DataTypePtr & data_type; ColumnPtr & result; - NumberExecutor(const UnpackedArrays & arrays, const DataTypePtr & data_type, ColumnPtr & result) - : arrays(arrays), data_type(data_type), result(result) {} + NumberExecutor(const UnpackedArrays & arrays_, const DataTypePtr & data_type_, ColumnPtr & result_) + : arrays(arrays_), data_type(data_type_), result(result_) {} template void operator()(); diff --git a/dbms/src/Functions/array/arrayPop.h b/dbms/src/Functions/array/arrayPop.h index 0336d2e20eb..f860dd4eede 100644 --- a/dbms/src/Functions/array/arrayPop.h +++ b/dbms/src/Functions/array/arrayPop.h @@ -18,7 +18,7 @@ namespace ErrorCodes class FunctionArrayPop : public IFunction { public: - FunctionArrayPop(bool pop_front, const char * name) : pop_front(pop_front), name(name) {} + FunctionArrayPop(bool pop_front_, const char * name_) : pop_front(pop_front_), name(name_) {} String getName() const override { return name; } diff --git a/dbms/src/Functions/array/arrayPush.h b/dbms/src/Functions/array/arrayPush.h index 4d06571ea71..3b471987cb7 100644 --- a/dbms/src/Functions/array/arrayPush.h +++ b/dbms/src/Functions/array/arrayPush.h @@ -21,8 +21,8 @@ namespace ErrorCodes class FunctionArrayPush : public IFunction { public: - FunctionArrayPush(const Context & context, bool push_front, const char * name) - : context(context), push_front(push_front), name(name) {} + FunctionArrayPush(const Context & context_, bool push_front_, const char * name_) + : context(context_), push_front(push_front_), name(name_) {} String getName() const override { return name; } diff --git a/dbms/src/Functions/array/arrayPushBack.cpp b/dbms/src/Functions/array/arrayPushBack.cpp index 
c5677cd3072..a9c4ed88a7a 100644 --- a/dbms/src/Functions/array/arrayPushBack.cpp +++ b/dbms/src/Functions/array/arrayPushBack.cpp @@ -10,7 +10,7 @@ class FunctionArrayPushBack : public FunctionArrayPush public: static constexpr auto name = "arrayPushBack"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionArrayPushBack(const Context & context) : FunctionArrayPush(context, false, name) {} + FunctionArrayPushBack(const Context & context_) : FunctionArrayPush(context_, false, name) {} }; void registerFunctionArrayPushBack(FunctionFactory & factory) diff --git a/dbms/src/Functions/array/arrayPushFront.cpp b/dbms/src/Functions/array/arrayPushFront.cpp index 99172e0180c..e0cc56c8ae2 100644 --- a/dbms/src/Functions/array/arrayPushFront.cpp +++ b/dbms/src/Functions/array/arrayPushFront.cpp @@ -11,7 +11,7 @@ class FunctionArrayPushFront : public FunctionArrayPush public: static constexpr auto name = "arrayPushFront"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionArrayPushFront(const Context & context) : FunctionArrayPush(context, true, name) {} + FunctionArrayPushFront(const Context & context_) : FunctionArrayPush(context_, true, name) {} }; diff --git a/dbms/src/Functions/array/arrayResize.cpp b/dbms/src/Functions/array/arrayResize.cpp index d4f37823e0b..201ee967b76 100644 --- a/dbms/src/Functions/array/arrayResize.cpp +++ b/dbms/src/Functions/array/arrayResize.cpp @@ -27,7 +27,7 @@ class FunctionArrayResize : public IFunction public: static constexpr auto name = "arrayResize"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionArrayResize(const Context & context) : context(context) {} + FunctionArrayResize(const Context & context_) : context(context_) {} String getName() const override { return name; } diff --git a/dbms/src/Functions/array/arraySort.cpp b/dbms/src/Functions/array/arraySort.cpp index 59e65602539..17a711e8902 100644 --- a/dbms/src/Functions/array/arraySort.cpp +++ b/dbms/src/Functions/array/arraySort.cpp @@ -23,7 +23,7 @@ struct ArraySortImpl { const IColumn & column; - Less(const IColumn & column) : column(column) {} + Less(const IColumn & column_) : column(column_) {} bool operator()(size_t lhs, size_t rhs) const { diff --git a/dbms/src/Functions/array/hasAll.cpp b/dbms/src/Functions/array/hasAll.cpp index 19278e3e78e..6ae1640e382 100644 --- a/dbms/src/Functions/array/hasAll.cpp +++ b/dbms/src/Functions/array/hasAll.cpp @@ -10,7 +10,7 @@ class FunctionArrayHasAll : public FunctionArrayHasAllAny public: static constexpr auto name = "hasAll"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionArrayHasAll(const Context & context) : FunctionArrayHasAllAny(context, true, name) {} + FunctionArrayHasAll(const Context & context_) : FunctionArrayHasAllAny(context_, true, name) {} }; void registerFunctionHasAll(FunctionFactory & factory) diff --git a/dbms/src/Functions/array/hasAllAny.h b/dbms/src/Functions/array/hasAllAny.h index b688406fd91..ef69594d01c 100644 --- a/dbms/src/Functions/array/hasAllAny.h +++ b/dbms/src/Functions/array/hasAllAny.h @@ -27,8 +27,8 @@ namespace ErrorCodes class FunctionArrayHasAllAny : public IFunction { public: - FunctionArrayHasAllAny(const Context & context, bool all, const char * name) - : context(context), all(all), name(name) {} + FunctionArrayHasAllAny(const Context & context_, bool all_, const char * name_) + : context(context_), all(all_), name(name_) {} 
String getName() const override { return name; } diff --git a/dbms/src/Functions/array/hasAny.cpp b/dbms/src/Functions/array/hasAny.cpp index 08275e1ba8e..756e5311b50 100644 --- a/dbms/src/Functions/array/hasAny.cpp +++ b/dbms/src/Functions/array/hasAny.cpp @@ -10,7 +10,7 @@ class FunctionArrayHasAny : public FunctionArrayHasAllAny public: static constexpr auto name = "hasAny"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionArrayHasAny(const Context & context) : FunctionArrayHasAllAny(context, false, name) {} + FunctionArrayHasAny(const Context & context_) : FunctionArrayHasAllAny(context_, false, name) {} }; void registerFunctionHasAny(FunctionFactory & factory) diff --git a/dbms/src/Functions/coalesce.cpp b/dbms/src/Functions/coalesce.cpp index 7ef7eeadfdb..947ecd0e199 100644 --- a/dbms/src/Functions/coalesce.cpp +++ b/dbms/src/Functions/coalesce.cpp @@ -26,7 +26,7 @@ public: return std::make_shared(context); } - FunctionCoalesce(const Context & context) : context(context) {} + FunctionCoalesce(const Context & context_) : context(context_) {} std::string getName() const override { diff --git a/dbms/src/Functions/concat.cpp b/dbms/src/Functions/concat.cpp index b613f91f12c..c4fa2044321 100644 --- a/dbms/src/Functions/concat.cpp +++ b/dbms/src/Functions/concat.cpp @@ -32,7 +32,7 @@ class ConcatImpl : public IFunction { public: static constexpr auto name = Name::name; - ConcatImpl(const Context & context) : context(context) {} + ConcatImpl(const Context & context_) : context(context_) {} static FunctionPtr create(const Context & context) { return std::make_shared(context); } String getName() const override { return name; } @@ -190,7 +190,7 @@ public: static constexpr auto name = "concat"; static FunctionBuilderPtr create(const Context & context) { return std::make_shared(context); } - FunctionBuilderConcat(const Context & context) : context(context) {} + FunctionBuilderConcat(const Context & context_) : context(context_) {} String getName() const override { return name; } size_t getNumberOfArguments() const override { return 0; } diff --git a/dbms/src/Functions/currentDatabase.cpp b/dbms/src/Functions/currentDatabase.cpp index 6c5137fe2ee..b1276d8546f 100644 --- a/dbms/src/Functions/currentDatabase.cpp +++ b/dbms/src/Functions/currentDatabase.cpp @@ -18,7 +18,7 @@ public: return std::make_shared(context.getCurrentDatabase()); } - explicit FunctionCurrentDatabase(const String & db_name) : db_name{db_name} + explicit FunctionCurrentDatabase(const String & db_name_) : db_name{db_name_} { } diff --git a/dbms/src/Functions/evalMLMethod.cpp b/dbms/src/Functions/evalMLMethod.cpp index e49bd917d1d..9f7445ba863 100644 --- a/dbms/src/Functions/evalMLMethod.cpp +++ b/dbms/src/Functions/evalMLMethod.cpp @@ -34,7 +34,7 @@ public: { return std::make_shared(context); } - FunctionEvalMLMethod(const Context & context) : context(context) + FunctionEvalMLMethod(const Context & context_) : context(context_) {} String getName() const override diff --git a/dbms/src/Functions/formatDateTime.cpp b/dbms/src/Functions/formatDateTime.cpp index ffbf391db6f..8cecdb69717 100644 --- a/dbms/src/Functions/formatDateTime.cpp +++ b/dbms/src/Functions/formatDateTime.cpp @@ -79,7 +79,7 @@ private: Func func; size_t shift; - Action(Func func, size_t shift = 0) : func(func), shift(shift) {} + Action(Func func_, size_t shift_ = 0) : func(func_), shift(shift_) {} void perform(char *& target, Time source, const DateLUTImpl & timezone) { diff --git a/dbms/src/Functions/if.cpp 
b/dbms/src/Functions/if.cpp index 6676ad87d75..a406b63be8b 100644 --- a/dbms/src/Functions/if.cpp +++ b/dbms/src/Functions/if.cpp @@ -170,7 +170,7 @@ class FunctionIf : public FunctionIfBase public: static constexpr auto name = "if"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionIf(const Context & context) : context(context) {} + FunctionIf(const Context & context_) : context(context_) {} private: template diff --git a/dbms/src/Functions/ifNull.cpp b/dbms/src/Functions/ifNull.cpp index bf517293409..2c552d86ffe 100644 --- a/dbms/src/Functions/ifNull.cpp +++ b/dbms/src/Functions/ifNull.cpp @@ -19,7 +19,7 @@ class FunctionIfNull : public IFunction public: static constexpr auto name = "ifNull"; - FunctionIfNull(const Context & context) : context(context) {} + FunctionIfNull(const Context & context_) : context(context_) {} static FunctionPtr create(const Context & context) { diff --git a/dbms/src/Functions/multiIf.cpp b/dbms/src/Functions/multiIf.cpp index 7ee5f1c5e67..f56889f7a01 100644 --- a/dbms/src/Functions/multiIf.cpp +++ b/dbms/src/Functions/multiIf.cpp @@ -32,7 +32,7 @@ class FunctionMultiIf final : public FunctionIfBase public: static constexpr auto name = "multiIf"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionMultiIf(const Context & context) : context(context) {} + FunctionMultiIf(const Context & context_) : context(context_) {} public: String getName() const override { return name; } diff --git a/dbms/src/Functions/nullIf.cpp b/dbms/src/Functions/nullIf.cpp index 0039dbad2b1..59ce4b54c82 100644 --- a/dbms/src/Functions/nullIf.cpp +++ b/dbms/src/Functions/nullIf.cpp @@ -25,7 +25,7 @@ public: return std::make_shared(context); } - FunctionNullIf(const Context & context) : context(context) {} + FunctionNullIf(const Context & context_) : context(context_) {} std::string getName() const override { diff --git a/dbms/src/Functions/reverse.cpp b/dbms/src/Functions/reverse.cpp index d8e6c6de194..269a3e3f7c2 100644 --- a/dbms/src/Functions/reverse.cpp +++ b/dbms/src/Functions/reverse.cpp @@ -118,7 +118,7 @@ public: static constexpr auto name = "reverse"; static FunctionBuilderPtr create(const Context & context) { return std::make_shared(context); } - FunctionBuilderReverse(const Context & context) : context(context) {} + FunctionBuilderReverse(const Context & context_) : context(context_) {} String getName() const override { return name; } size_t getNumberOfArguments() const override { return 1; } diff --git a/dbms/src/IO/LimitReadBuffer.cpp b/dbms/src/IO/LimitReadBuffer.cpp index a6e4e7a7b16..f36facfdd99 100644 --- a/dbms/src/IO/LimitReadBuffer.cpp +++ b/dbms/src/IO/LimitReadBuffer.cpp @@ -36,8 +36,8 @@ bool LimitReadBuffer::nextImpl() } -LimitReadBuffer::LimitReadBuffer(ReadBuffer & in, UInt64 limit, bool throw_exception, std::string exception_message) - : ReadBuffer(in.position(), 0), in(in), limit(limit), throw_exception(throw_exception), exception_message(std::move(exception_message)) +LimitReadBuffer::LimitReadBuffer(ReadBuffer & in_, UInt64 limit_, bool throw_exception_, std::string exception_message_) + : ReadBuffer(in_.position(), 0), in(in_), limit(limit_), throw_exception(throw_exception_), exception_message(std::move(exception_message_)) { size_t remaining_bytes_in_buffer = in.buffer().end() - in.position(); if (remaining_bytes_in_buffer > limit) diff --git a/dbms/src/IO/LimitReadBuffer.h b/dbms/src/IO/LimitReadBuffer.h index d16579be7c5..545de6fd4a2 100644 --- 
a/dbms/src/IO/LimitReadBuffer.h +++ b/dbms/src/IO/LimitReadBuffer.h @@ -21,7 +21,7 @@ private: bool nextImpl() override; public: - LimitReadBuffer(ReadBuffer & in, UInt64 limit, bool throw_exception, std::string exception_message = {}); + LimitReadBuffer(ReadBuffer & in_, UInt64 limit_, bool throw_exception_, std::string exception_message_ = {}); ~LimitReadBuffer() override; }; diff --git a/dbms/src/IO/MMapReadBufferFromFile.cpp b/dbms/src/IO/MMapReadBufferFromFile.cpp index e478a11c16a..45558b540e5 100644 --- a/dbms/src/IO/MMapReadBufferFromFile.cpp +++ b/dbms/src/IO/MMapReadBufferFromFile.cpp @@ -34,10 +34,10 @@ void MMapReadBufferFromFile::open(const std::string & file_name) } -MMapReadBufferFromFile::MMapReadBufferFromFile(const std::string & file_name, size_t offset, size_t length) +MMapReadBufferFromFile::MMapReadBufferFromFile(const std::string & file_name, size_t offset, size_t length_) { open(file_name); - init(fd, offset, length); + init(fd, offset, length_); } diff --git a/dbms/src/IO/MMapReadBufferFromFile.h b/dbms/src/IO/MMapReadBufferFromFile.h index c1762bd54f5..6790f817b93 100644 --- a/dbms/src/IO/MMapReadBufferFromFile.h +++ b/dbms/src/IO/MMapReadBufferFromFile.h @@ -16,7 +16,7 @@ namespace DB class MMapReadBufferFromFile : public MMapReadBufferFromFileDescriptor { public: - MMapReadBufferFromFile(const std::string & file_name, size_t offset, size_t length); + MMapReadBufferFromFile(const std::string & file_name, size_t offset, size_t length_); /// Map till end of file. MMapReadBufferFromFile(const std::string & file_name, size_t offset); diff --git a/dbms/src/IO/MMapReadBufferFromFileDescriptor.cpp b/dbms/src/IO/MMapReadBufferFromFileDescriptor.cpp index 4643b9b626c..4852f9e57e9 100644 --- a/dbms/src/IO/MMapReadBufferFromFileDescriptor.cpp +++ b/dbms/src/IO/MMapReadBufferFromFileDescriptor.cpp @@ -57,17 +57,17 @@ void MMapReadBufferFromFileDescriptor::init(int fd_, size_t offset) } -MMapReadBufferFromFileDescriptor::MMapReadBufferFromFileDescriptor(int fd, size_t offset, size_t length) +MMapReadBufferFromFileDescriptor::MMapReadBufferFromFileDescriptor(int fd_, size_t offset_, size_t length_) : MMapReadBufferFromFileDescriptor() { - init(fd, offset, length); + init(fd_, offset_, length_); } -MMapReadBufferFromFileDescriptor::MMapReadBufferFromFileDescriptor(int fd, size_t offset) +MMapReadBufferFromFileDescriptor::MMapReadBufferFromFileDescriptor(int fd_, size_t offset_) : MMapReadBufferFromFileDescriptor() { - init(fd, offset); + init(fd_, offset_); } diff --git a/dbms/src/IO/MMapReadBufferFromFileDescriptor.h b/dbms/src/IO/MMapReadBufferFromFileDescriptor.h index f31aac0bbf9..aaef8c3212a 100644 --- a/dbms/src/IO/MMapReadBufferFromFileDescriptor.h +++ b/dbms/src/IO/MMapReadBufferFromFileDescriptor.h @@ -20,10 +20,10 @@ protected: void init(int fd_, size_t offset); public: - MMapReadBufferFromFileDescriptor(int fd, size_t offset, size_t length); + MMapReadBufferFromFileDescriptor(int fd_, size_t offset_, size_t length_); /// Map till end of file. 
- MMapReadBufferFromFileDescriptor(int fd, size_t offset); + MMapReadBufferFromFileDescriptor(int fd_, size_t offset_); ~MMapReadBufferFromFileDescriptor() override; diff --git a/dbms/src/IO/ReadWriteBufferFromHTTP.h b/dbms/src/IO/ReadWriteBufferFromHTTP.h index 62f2b0351f6..d36633220b4 100644 --- a/dbms/src/IO/ReadWriteBufferFromHTTP.h +++ b/dbms/src/IO/ReadWriteBufferFromHTTP.h @@ -43,14 +43,14 @@ namespace detail using OutStreamCallback = std::function; explicit ReadWriteBufferFromHTTPBase(SessionPtr session_, - Poco::URI uri, - const std::string & method = {}, + Poco::URI uri_, + const std::string & method_ = {}, OutStreamCallback out_stream_callback = {}, const Poco::Net::HTTPBasicCredentials & credentials = {}, size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE) : ReadBuffer(nullptr, 0) - , uri {uri} - , method {!method.empty() ? method : out_stream_callback ? Poco::Net::HTTPRequest::HTTP_POST : Poco::Net::HTTPRequest::HTTP_GET} + , uri {uri_} + , method {!method_.empty() ? method_ : out_stream_callback ? Poco::Net::HTTPRequest::HTTP_POST : Poco::Net::HTTPRequest::HTTP_GET} , session {session_} { // With empty path poco will send "POST HTTP/1.1" its bug. diff --git a/dbms/src/IO/WriteBufferValidUTF8.cpp b/dbms/src/IO/WriteBufferValidUTF8.cpp index 01e011982cd..edff9e5bcf4 100644 --- a/dbms/src/IO/WriteBufferValidUTF8.cpp +++ b/dbms/src/IO/WriteBufferValidUTF8.cpp @@ -32,9 +32,9 @@ extern const UInt8 length_of_utf8_sequence[256] = WriteBufferValidUTF8::WriteBufferValidUTF8( - WriteBuffer & output_buffer, bool group_replacements, const char * replacement, size_t size) - : BufferWithOwnMemory(std::max(static_cast(32), size)), output_buffer(output_buffer), - group_replacements(group_replacements), replacement(replacement) + WriteBuffer & output_buffer_, bool group_replacements_, const char * replacement_, size_t size) + : BufferWithOwnMemory(std::max(static_cast(32), size)), output_buffer(output_buffer_), + group_replacements(group_replacements_), replacement(replacement_) { } diff --git a/dbms/src/IO/WriteBufferValidUTF8.h b/dbms/src/IO/WriteBufferValidUTF8.h index 49243a1844f..31151eefbfb 100644 --- a/dbms/src/IO/WriteBufferValidUTF8.h +++ b/dbms/src/IO/WriteBufferValidUTF8.h @@ -30,9 +30,9 @@ public: static const size_t DEFAULT_SIZE; WriteBufferValidUTF8( - WriteBuffer & output_buffer, - bool group_replacements = true, - const char * replacement = "\xEF\xBF\xBD", + WriteBuffer & output_buffer_, + bool group_replacements_ = true, + const char * replacement_ = "\xEF\xBF\xBD", size_t size = DEFAULT_SIZE); virtual ~WriteBufferValidUTF8() override diff --git a/dbms/src/Interpreters/Aggregator.cpp b/dbms/src/Interpreters/Aggregator.cpp index b3c0aa87f8a..23a06ea58d6 100644 --- a/dbms/src/Interpreters/Aggregator.cpp +++ b/dbms/src/Interpreters/Aggregator.cpp @@ -1809,7 +1809,7 @@ private: std::condition_variable condvar; ThreadPool pool; - explicit ParallelMergeData(size_t threads) : pool(threads) {} + explicit ParallelMergeData(size_t threads_) : pool(threads_) {} }; std::unique_ptr parallel_merge_data; diff --git a/dbms/src/Interpreters/CatBoostModel.cpp b/dbms/src/Interpreters/CatBoostModel.cpp index 3e6e66b5c3f..b2e2a4c3d73 100644 --- a/dbms/src/Interpreters/CatBoostModel.cpp +++ b/dbms/src/Interpreters/CatBoostModel.cpp @@ -76,7 +76,7 @@ private: CatBoostWrapperAPI::ModelCalcerHandle * handle; const CatBoostWrapperAPI * api; public: - explicit CatBoostModelHolder(const CatBoostWrapperAPI * api) : api(api) { handle = api->ModelCalcerCreate(); } + explicit CatBoostModelHolder(const 
CatBoostWrapperAPI * api_) : api(api_) { handle = api->ModelCalcerCreate(); } ~CatBoostModelHolder() { api->ModelCalcerDelete(handle); } CatBoostWrapperAPI::ModelCalcerHandle * get() { return handle; } @@ -86,7 +86,7 @@ public: class CatBoostModelImpl : public ICatBoostModel { public: - CatBoostModelImpl(const CatBoostWrapperAPI * api, const std::string & model_path) : api(api) + CatBoostModelImpl(const CatBoostWrapperAPI * api_, const std::string & model_path) : api(api_) { auto handle_ = std::make_unique(api); if (!handle_) @@ -502,8 +502,8 @@ std::shared_ptr getCatBoostWrapperHolder(const std::string & CatBoostModel::CatBoostModel(std::string name_, std::string model_path_, std::string lib_path_, - const ExternalLoadableLifetime & lifetime) - : name(std::move(name_)), model_path(std::move(model_path_)), lib_path(std::move(lib_path_)), lifetime(lifetime) + const ExternalLoadableLifetime & lifetime_) + : name(std::move(name_)), model_path(std::move(model_path_)), lib_path(std::move(lib_path_)), lifetime(lifetime_) { api_provider = getCatBoostWrapperHolder(lib_path); api = &api_provider->getAPI(); diff --git a/dbms/src/Interpreters/Context.cpp b/dbms/src/Interpreters/Context.cpp index 992593d852c..83f3763bb11 100644 --- a/dbms/src/Interpreters/Context.cpp +++ b/dbms/src/Interpreters/Context.cpp @@ -1189,13 +1189,13 @@ void Context::setCurrentQueryId(const String & query_id) random.words.b = thread_local_rng(); /// Use protected constructor. - struct UUID : Poco::UUID + struct qUUID : Poco::UUID { - UUID(const char * bytes, Poco::UUID::Version version) + qUUID(const char * bytes, Poco::UUID::Version version) : Poco::UUID(bytes, version) {} }; - query_id_to_set = UUID(random.bytes, Poco::UUID::UUID_RANDOM).toString(); + query_id_to_set = qUUID(random.bytes, Poco::UUID::UUID_RANDOM).toString(); } client_info.current_query_id = query_id_to_set; diff --git a/dbms/src/Interpreters/DDLWorker.cpp b/dbms/src/Interpreters/DDLWorker.cpp index b9ab41e38dc..afa4fca79f8 100644 --- a/dbms/src/Interpreters/DDLWorker.cpp +++ b/dbms/src/Interpreters/DDLWorker.cpp @@ -1058,8 +1058,8 @@ class DDLQueryStatusInputStream : public IBlockInputStream { public: - DDLQueryStatusInputStream(const String & zk_node_path, const DDLLogEntry & entry, const Context & context) - : node_path(zk_node_path), context(context), watch(CLOCK_MONOTONIC_COARSE), log(&Logger::get("DDLQueryStatusInputStream")) + DDLQueryStatusInputStream(const String & zk_node_path, const DDLLogEntry & entry, const Context & context_) + : node_path(zk_node_path), context(context_), watch(CLOCK_MONOTONIC_COARSE), log(&Logger::get("DDLQueryStatusInputStream")) { sample = Block{ {std::make_shared(), "host"}, diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.h b/dbms/src/Interpreters/ExpressionAnalyzer.h index 644d10da1be..0a370593e90 100644 --- a/dbms/src/Interpreters/ExpressionAnalyzer.h +++ b/dbms/src/Interpreters/ExpressionAnalyzer.h @@ -88,12 +88,12 @@ private: const SizeLimits size_limits_for_join; const String join_default_strictness; - ExtractedSettings(const Settings & settings) - : use_index_for_in_with_subqueries(settings.use_index_for_in_with_subqueries), - join_use_nulls(settings.join_use_nulls), - size_limits_for_set(settings.max_rows_in_set, settings.max_bytes_in_set, settings.set_overflow_mode), - size_limits_for_join(settings.max_rows_in_join, settings.max_bytes_in_join, settings.join_overflow_mode), - join_default_strictness(settings.join_default_strictness.toString()) + ExtractedSettings(const Settings & settings_) + : 
use_index_for_in_with_subqueries(settings_.use_index_for_in_with_subqueries), + join_use_nulls(settings_.join_use_nulls), + size_limits_for_set(settings_.max_rows_in_set, settings_.max_bytes_in_set, settings_.set_overflow_mode), + size_limits_for_join(settings_.max_rows_in_join, settings_.max_bytes_in_join, settings_.join_overflow_mode), + join_default_strictness(settings_.join_default_strictness.toString()) {} }; diff --git a/dbms/src/Interpreters/ExternalDictionaries.cpp b/dbms/src/Interpreters/ExternalDictionaries.cpp index 9f20b492f51..e1cbd377978 100644 --- a/dbms/src/Interpreters/ExternalDictionaries.cpp +++ b/dbms/src/Interpreters/ExternalDictionaries.cpp @@ -9,11 +9,11 @@ namespace DB ExternalDictionaries::ExternalDictionaries( std::unique_ptr config_repository, const Poco::Util::AbstractConfiguration & config, - Context & context) + Context & context_) : ExternalLoader(config, "external dictionary", &Logger::get("ExternalDictionaries")), - context(context) + context(context_) { addConfigRepository(std::move(config_repository), {"dictionary", "name", "dictionaries_config"}); enableAsyncLoading(true); diff --git a/dbms/src/Interpreters/ExternalDictionaries.h b/dbms/src/Interpreters/ExternalDictionaries.h index e1ef53bbd3f..c071349cc97 100644 --- a/dbms/src/Interpreters/ExternalDictionaries.h +++ b/dbms/src/Interpreters/ExternalDictionaries.h @@ -21,7 +21,7 @@ public: ExternalDictionaries( std::unique_ptr config_repository, const Poco::Util::AbstractConfiguration & config, - Context & context); + Context & context_); DictPtr getDictionary(const std::string & name) const { diff --git a/dbms/src/Interpreters/ExternalLoader.h b/dbms/src/Interpreters/ExternalLoader.h index 4c94b8d69cd..8a52d991759 100644 --- a/dbms/src/Interpreters/ExternalLoader.h +++ b/dbms/src/Interpreters/ExternalLoader.h @@ -19,8 +19,8 @@ struct ExternalLoaderUpdateSettings UInt64 backoff_max_sec = 10 * 60; ExternalLoaderUpdateSettings() = default; - ExternalLoaderUpdateSettings(UInt64 check_period_sec, UInt64 backoff_initial_sec, UInt64 backoff_max_sec) - : check_period_sec(check_period_sec), backoff_initial_sec(backoff_initial_sec), backoff_max_sec(backoff_max_sec) {} + ExternalLoaderUpdateSettings(UInt64 check_period_sec_, UInt64 backoff_initial_sec_, UInt64 backoff_max_sec_) + : check_period_sec(check_period_sec_), backoff_initial_sec(backoff_initial_sec_), backoff_max_sec(backoff_max_sec_) {} }; diff --git a/dbms/src/Interpreters/ExternalModels.cpp b/dbms/src/Interpreters/ExternalModels.cpp index cb3e65d6150..f3c1310410b 100644 --- a/dbms/src/Interpreters/ExternalModels.cpp +++ b/dbms/src/Interpreters/ExternalModels.cpp @@ -12,11 +12,11 @@ namespace ErrorCodes ExternalModels::ExternalModels( std::unique_ptr config_repository, - Context & context) - : ExternalLoader(context.getConfigRef(), + Context & context_) + : ExternalLoader(context_.getConfigRef(), "external model", &Logger::get("ExternalModels")), - context(context) + context(context_) { addConfigRepository(std::move(config_repository), {"model", "name", "models_config"}); enablePeriodicUpdates(true); diff --git a/dbms/src/Interpreters/ExternalModels.h b/dbms/src/Interpreters/ExternalModels.h index ff5b9dffcfc..2c4706b0664 100644 --- a/dbms/src/Interpreters/ExternalModels.h +++ b/dbms/src/Interpreters/ExternalModels.h @@ -20,7 +20,7 @@ public: /// Models will be loaded immediately and then will be updated in separate thread, each 'reload_period' seconds. 
ExternalModels( std::unique_ptr config_repository, - Context & context); + Context & context_); ModelPtr getModel(const std::string & name) const { diff --git a/dbms/src/Interpreters/InJoinSubqueriesPreprocessor.h b/dbms/src/Interpreters/InJoinSubqueriesPreprocessor.h index c1d0afe7873..00984832ad6 100644 --- a/dbms/src/Interpreters/InJoinSubqueriesPreprocessor.h +++ b/dbms/src/Interpreters/InJoinSubqueriesPreprocessor.h @@ -46,8 +46,8 @@ public: virtual ~CheckShardsAndTables() {} }; - InJoinSubqueriesPreprocessor(const Context & context, CheckShardsAndTables::Ptr _checker = std::make_unique()) - : context(context) + InJoinSubqueriesPreprocessor(const Context & context_, CheckShardsAndTables::Ptr _checker = std::make_unique()) + : context(context_) , checker(std::move(_checker)) {} diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.cpp b/dbms/src/Interpreters/InterpreterSelectQuery.cpp index 9682d0e29e4..5fe473f5d2e 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.cpp +++ b/dbms/src/Interpreters/InterpreterSelectQuery.cpp @@ -156,9 +156,9 @@ String generateFilterActions(ExpressionActionsPtr & actions, const StoragePtr & InterpreterSelectQuery::InterpreterSelectQuery( const ASTPtr & query_ptr_, const Context & context_, - const SelectQueryOptions & options, - const Names & required_result_column_names) - : InterpreterSelectQuery(query_ptr_, context_, nullptr, nullptr, options, required_result_column_names) + const SelectQueryOptions & options_, + const Names & required_result_column_names_) + : InterpreterSelectQuery(query_ptr_, context_, nullptr, nullptr, options_, required_result_column_names_) { } @@ -166,16 +166,16 @@ InterpreterSelectQuery::InterpreterSelectQuery( const ASTPtr & query_ptr_, const Context & context_, const BlockInputStreamPtr & input_, - const SelectQueryOptions & options) - : InterpreterSelectQuery(query_ptr_, context_, input_, nullptr, options.copy().noSubquery()) + const SelectQueryOptions & options_) + : InterpreterSelectQuery(query_ptr_, context_, input_, nullptr, options_.copy().noSubquery()) {} InterpreterSelectQuery::InterpreterSelectQuery( const ASTPtr & query_ptr_, const Context & context_, const StoragePtr & storage_, - const SelectQueryOptions & options) - : InterpreterSelectQuery(query_ptr_, context_, nullptr, storage_, options.copy().noSubquery()) + const SelectQueryOptions & options_) + : InterpreterSelectQuery(query_ptr_, context_, nullptr, storage_, options_.copy().noSubquery()) {} InterpreterSelectQuery::~InterpreterSelectQuery() = default; diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.h b/dbms/src/Interpreters/InterpreterSelectQuery.h index f6f3c0baf19..0e2cfcd4c7b 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.h +++ b/dbms/src/Interpreters/InterpreterSelectQuery.h @@ -46,7 +46,7 @@ public: const ASTPtr & query_ptr_, const Context & context_, const SelectQueryOptions &, - const Names & required_result_column_names = Names{}); + const Names & required_result_column_names_ = Names{}); /// Read data not from the table specified in the query, but from the prepared source `input`. 
InterpreterSelectQuery( diff --git a/dbms/src/Interpreters/Join.cpp b/dbms/src/Interpreters/Join.cpp index 9f1a69fba70..0ee93122275 100644 --- a/dbms/src/Interpreters/Join.cpp +++ b/dbms/src/Interpreters/Join.cpp @@ -81,14 +81,14 @@ static ColumnWithTypeAndName correctNullability(ColumnWithTypeAndName && column, } -Join::Join(const Names & key_names_right_, bool use_nulls_, const SizeLimits & limits, +Join::Join(const Names & key_names_right_, bool use_nulls_, const SizeLimits & limits_, ASTTableJoin::Kind kind_, ASTTableJoin::Strictness strictness_, bool any_take_last_row_) : kind(kind_), strictness(strictness_), key_names_right(key_names_right_), use_nulls(use_nulls_), any_take_last_row(any_take_last_row_), log(&Logger::get("Join")), - limits(limits) + limits(limits_) { } diff --git a/dbms/src/Interpreters/Join.h b/dbms/src/Interpreters/Join.h index f57755fad91..fcff80aad62 100644 --- a/dbms/src/Interpreters/Join.h +++ b/dbms/src/Interpreters/Join.h @@ -121,7 +121,7 @@ using MappedAsof = WithFlags; class Join { public: - Join(const Names & key_names_right_, bool use_nulls_, const SizeLimits & limits, + Join(const Names & key_names_right_, bool use_nulls_, const SizeLimits & limits_, ASTTableJoin::Kind kind_, ASTTableJoin::Strictness strictness_, bool any_take_last_row_ = false); bool empty() { return type == Type::EMPTY; } diff --git a/dbms/src/Interpreters/MutationsInterpreter.h b/dbms/src/Interpreters/MutationsInterpreter.h index e49ef284028..9329c9848b6 100644 --- a/dbms/src/Interpreters/MutationsInterpreter.h +++ b/dbms/src/Interpreters/MutationsInterpreter.h @@ -59,7 +59,7 @@ private: struct Stage { - Stage(const Context & context) : expressions_chain(context) {} + Stage(const Context & context_) : expressions_chain(context_) {} ASTs filters; std::unordered_map column_to_updated; diff --git a/dbms/src/Interpreters/PredicateExpressionsOptimizer.h b/dbms/src/Interpreters/PredicateExpressionsOptimizer.h index f9df113abf2..4fa5cb20e2c 100644 --- a/dbms/src/Interpreters/PredicateExpressionsOptimizer.h +++ b/dbms/src/Interpreters/PredicateExpressionsOptimizer.h @@ -43,13 +43,13 @@ class PredicateExpressionsOptimizer const bool join_use_nulls; template - ExtractedSettings(const T & settings) - : max_ast_depth(settings.max_ast_depth), - max_expanded_ast_elements(settings.max_expanded_ast_elements), - count_distinct_implementation(settings.count_distinct_implementation), - enable_optimize_predicate_expression(settings.enable_optimize_predicate_expression), - enable_optimize_predicate_expression_to_final_subquery(settings.enable_optimize_predicate_expression_to_final_subquery), - join_use_nulls(settings.join_use_nulls) + ExtractedSettings(const T & settings_) + : max_ast_depth(settings_.max_ast_depth), + max_expanded_ast_elements(settings_.max_expanded_ast_elements), + count_distinct_implementation(settings_.count_distinct_implementation), + enable_optimize_predicate_expression(settings_.enable_optimize_predicate_expression), + enable_optimize_predicate_expression_to_final_subquery(settings_.enable_optimize_predicate_expression_to_final_subquery), + join_use_nulls(settings_.join_use_nulls) {} }; diff --git a/dbms/src/Interpreters/Set.h b/dbms/src/Interpreters/Set.h index 61314d3582e..aad10451de0 100644 --- a/dbms/src/Interpreters/Set.h +++ b/dbms/src/Interpreters/Set.h @@ -31,9 +31,9 @@ public: /// (that is useful only for checking that some value is in the set and may not store the original values), /// store all set elements in explicit form. /// This is needed for subsequent use for index. 
- Set(const SizeLimits & limits, bool fill_set_elements) + Set(const SizeLimits & limits_, bool fill_set_elements_) : log(&Logger::get("Set")), - limits(limits), fill_set_elements(fill_set_elements) + limits(limits_), fill_set_elements(fill_set_elements_) { } diff --git a/dbms/src/Interpreters/loadMetadata.cpp b/dbms/src/Interpreters/loadMetadata.cpp index 84a3adffe07..00090d1d309 100644 --- a/dbms/src/Interpreters/loadMetadata.cpp +++ b/dbms/src/Interpreters/loadMetadata.cpp @@ -107,8 +107,8 @@ void loadMetadata(Context & context) databases.emplace(unescapeForFileName(it.name()), it.path().toString()); } - for (const auto & [name, path] : databases) - loadDatabase(context, name, path, has_force_restore_data_flag); + for (const auto & [name, db_path] : databases) + loadDatabase(context, name, db_path, has_force_restore_data_flag); if (has_force_restore_data_flag) { diff --git a/dbms/src/Parsers/CommonParsers.h b/dbms/src/Parsers/CommonParsers.h index 44c8ab17fb7..60b35c33814 100644 --- a/dbms/src/Parsers/CommonParsers.h +++ b/dbms/src/Parsers/CommonParsers.h @@ -30,7 +30,7 @@ class ParserToken : public IParserBase private: TokenType token_type; public: - ParserToken(TokenType token_type) : token_type(token_type) {} + ParserToken(TokenType token_type_) : token_type(token_type_) {} protected: const char * getName() const override { return "token"; } diff --git a/dbms/src/Parsers/Lexer.h b/dbms/src/Parsers/Lexer.h index 3f2712bae08..f705bfcf2d2 100644 --- a/dbms/src/Parsers/Lexer.h +++ b/dbms/src/Parsers/Lexer.h @@ -85,7 +85,7 @@ struct Token size_t size() const { return end - begin; } Token() = default; - Token(TokenType type, const char * begin, const char * end) : type(type), begin(begin), end(end) {} + Token(TokenType type_, const char * begin_, const char * end_) : type(type_), begin(begin_), end(end_) {} bool isSignificant() const { return type != TokenType::Whitespace && type != TokenType::Comment; } bool isError() const { return type > TokenType::EndOfStream; } @@ -96,8 +96,8 @@ struct Token class Lexer { public: - Lexer(const char * begin, const char * end, size_t max_query_size = 0) - : begin(begin), pos(begin), end(end), max_query_size(max_query_size) {} + Lexer(const char * begin_, const char * end_, size_t max_query_size_ = 0) + : begin(begin_), pos(begin_), end(end_), max_query_size(max_query_size_) {} Token nextToken(); private: diff --git a/dbms/src/Parsers/ParserInsertQuery.h b/dbms/src/Parsers/ParserInsertQuery.h index 86198365edc..5669d48ffc6 100644 --- a/dbms/src/Parsers/ParserInsertQuery.h +++ b/dbms/src/Parsers/ParserInsertQuery.h @@ -30,7 +30,7 @@ private: const char * getName() const override { return "INSERT query"; } bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; public: - ParserInsertQuery(const char * end) : end(end) {} + ParserInsertQuery(const char * end_) : end(end_) {} }; } diff --git a/dbms/src/Parsers/ParserQuery.h b/dbms/src/Parsers/ParserQuery.h index cf8837cb7be..e9a2aae29a7 100644 --- a/dbms/src/Parsers/ParserQuery.h +++ b/dbms/src/Parsers/ParserQuery.h @@ -16,8 +16,8 @@ private: bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; public: - ParserQuery(const char * end, bool enable_explain_ = false) - : end(end), + ParserQuery(const char * end_, bool enable_explain_ = false) + : end(end_), enable_explain(enable_explain_) {} }; diff --git a/dbms/src/Parsers/ParserTablesInSelectQuery.h b/dbms/src/Parsers/ParserTablesInSelectQuery.h index da8bd19c382..9d46fc40fa1 100644 --- 
a/dbms/src/Parsers/ParserTablesInSelectQuery.h +++ b/dbms/src/Parsers/ParserTablesInSelectQuery.h @@ -19,7 +19,7 @@ protected: class ParserTablesInSelectQueryElement : public IParserBase { public: - ParserTablesInSelectQueryElement(bool is_first) : is_first(is_first) {} + ParserTablesInSelectQueryElement(bool is_first_) : is_first(is_first_) {} protected: const char * getName() const { return "table, table function, subquery or list of joined tables"; } diff --git a/dbms/src/Parsers/TokenIterator.h b/dbms/src/Parsers/TokenIterator.h index 09724cc46c0..078421c99c9 100644 --- a/dbms/src/Parsers/TokenIterator.h +++ b/dbms/src/Parsers/TokenIterator.h @@ -57,7 +57,7 @@ private: size_t index = 0; public: - explicit TokenIterator(Tokens & tokens) : tokens(&tokens) {} + explicit TokenIterator(Tokens & tokens_) : tokens(&tokens_) {} const Token & get() { return (*tokens)[index]; } const Token & operator*() { return get(); } diff --git a/dbms/src/Processors/Executors/PipelineExecutor.cpp b/dbms/src/Processors/Executors/PipelineExecutor.cpp index e45bded427c..31cca4e1a48 100644 --- a/dbms/src/Processors/Executors/PipelineExecutor.cpp +++ b/dbms/src/Processors/Executors/PipelineExecutor.cpp @@ -28,8 +28,8 @@ static bool checkCanAddAdditionalInfoToException(const DB::Exception & exception && exception.code() != ErrorCodes::QUERY_WAS_CANCELLED; } -PipelineExecutor::PipelineExecutor(Processors & processors) - : processors(processors) +PipelineExecutor::PipelineExecutor(Processors & processors_) + : processors(processors_) , cancelled(false) , finished(false) , num_processing_executors(0) diff --git a/dbms/src/Processors/Executors/PipelineExecutor.h b/dbms/src/Processors/Executors/PipelineExecutor.h index 02149cb042f..e448d5e00dd 100644 --- a/dbms/src/Processors/Executors/PipelineExecutor.h +++ b/dbms/src/Processors/Executors/PipelineExecutor.h @@ -24,7 +24,7 @@ public: /// During pipeline execution new processors can appear. They will be added to existing set. /// /// Explicit graph representation is built in constructor. Throws if graph is not correct. - explicit PipelineExecutor(Processors & processors); + explicit PipelineExecutor(Processors & processors_); /// Execute pipeline in multiple threads. Must be called once. /// In case of exception during execution throws any occurred. 
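
Every hunk in this patch applies the same mechanical fix: a constructor parameter that carries the same name as the member it initializes gets a trailing underscore. Below is a minimal sketch of why, assuming a build with gcc 9 and -Wshadow enabled (consistent with the "Fix more gcc9 warnings" commits in this series); the struct and names are illustrative, not taken from the ClickHouse sources:

struct Widget
{
    int size;

    // Before: the parameter 'size' shadows the member 'size'.
    // The mem-initializer 'size(size)' is well-defined (the initializer
    // expression names the parameter), but gcc reports
    // "declaration of 'size' shadows a member of 'Widget' [-Wshadow]",
    // and any use of 'size' in the body silently means the parameter.
    // explicit Widget(int size) : size(size) {}

    // After: the trailing underscore removes the shadowing, the warning
    // disappears, and the body can refer to the member unambiguously.
    explicit Widget(int size_) : size(size_) {}
};

int main()
{
    Widget w(42);
    return w.size == 42 ? 0 : 1;
}

Compiling the "before" constructor with g++-9 -Wshadow -Werror fails; the "after" form is clean. The same reasoning covers the std::move cases above (e.g. table_lock(std::move(table_lock)) in FunctionJoinGet moved from the parameter into the same-named member, which is legal but easy to misread), and the two renames that are not constructor parameters: the local struct UUID in Context.cpp becomes qUUID, and the structured binding [name, path] in loadMetadata.cpp becomes [name, db_path] — in both cases presumably because the local name shadowed another declaration in scope.
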
diff --git a/dbms/src/Processors/Formats/IInputFormat.h b/dbms/src/Processors/Formats/IInputFormat.h index ed26f60c058..424aed455a0 100644 --- a/dbms/src/Processors/Formats/IInputFormat.h +++ b/dbms/src/Processors/Formats/IInputFormat.h @@ -23,8 +23,8 @@ protected: #pragma GCC diagnostic pop public: - IInputFormat(Block header, ReadBuffer & in) - : ISource(std::move(header)), in(in) + IInputFormat(Block header, ReadBuffer & in_) + : ISource(std::move(header)), in(in_) { } diff --git a/dbms/src/Processors/Formats/IOutputFormat.cpp b/dbms/src/Processors/Formats/IOutputFormat.cpp index 63e846aa796..971ad95d946 100644 --- a/dbms/src/Processors/Formats/IOutputFormat.cpp +++ b/dbms/src/Processors/Formats/IOutputFormat.cpp @@ -5,8 +5,8 @@ namespace DB { -IOutputFormat::IOutputFormat(const Block & header, WriteBuffer & out) - : IProcessor({header, header, header}, {}), out(out) +IOutputFormat::IOutputFormat(const Block & header_, WriteBuffer & out_) + : IProcessor({header_, header_, header_}, {}), out(out_) { } diff --git a/dbms/src/Processors/Formats/IOutputFormat.h b/dbms/src/Processors/Formats/IOutputFormat.h index 53e5b9e2158..5200b897643 100644 --- a/dbms/src/Processors/Formats/IOutputFormat.h +++ b/dbms/src/Processors/Formats/IOutputFormat.h @@ -39,7 +39,7 @@ protected: virtual void finalize() {} public: - IOutputFormat(const Block & header, WriteBuffer & out); + IOutputFormat(const Block & header_, WriteBuffer & out_); Status prepare() override; void work() override; diff --git a/dbms/src/Processors/Formats/IRowInputFormat.h b/dbms/src/Processors/Formats/IRowInputFormat.h index 26d1a11a657..72a6c813701 100644 --- a/dbms/src/Processors/Formats/IRowInputFormat.h +++ b/dbms/src/Processors/Formats/IRowInputFormat.h @@ -45,8 +45,8 @@ public: IRowInputFormat( Block header, ReadBuffer & in_, - Params params) - : IInputFormat(std::move(header), in_), params(params) + Params params_) + : IInputFormat(std::move(header), in_), params(params_) { } diff --git a/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.cpp index 20f40fe1e41..53e00d295f1 100644 --- a/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.cpp @@ -7,8 +7,8 @@ namespace DB { -BinaryRowInputFormat::BinaryRowInputFormat(ReadBuffer & in_, Block header, Params params, bool with_names_, bool with_types_) - : IRowInputFormat(std::move(header), in_, params), with_names(with_names_), with_types(with_types_) +BinaryRowInputFormat::BinaryRowInputFormat(ReadBuffer & in_, Block header, Params params_, bool with_names_, bool with_types_) + : IRowInputFormat(std::move(header), in_, params_), with_names(with_names_), with_types(with_types_) { } diff --git a/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.h b/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.h index 9a5a3fe63e1..e96a516c1a7 100644 --- a/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.h @@ -15,7 +15,7 @@ class ReadBuffer; class BinaryRowInputFormat : public IRowInputFormat { public: - BinaryRowInputFormat(ReadBuffer & in_, Block header, Params params, bool with_names_, bool with_types_); + BinaryRowInputFormat(ReadBuffer & in_, Block header, Params params_, bool with_names_, bool with_types_); bool readRow(MutableColumns & columns, RowReadExtension &) override; void readPrefix() override; diff --git a/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.cpp 
b/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index 701878ff57b..b13436a6600 100644 --- a/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -18,10 +18,10 @@ namespace ErrorCodes CSVRowInputFormat::CSVRowInputFormat( - ReadBuffer & in_, Block header, Params params, bool with_names_, const FormatSettings & format_settings) - : IRowInputFormat(std::move(header), in_, std::move(params)) + ReadBuffer & in_, Block header_, Params params_, bool with_names_, const FormatSettings & format_settings_) + : IRowInputFormat(std::move(header_), in_, std::move(params_)) , with_names(with_names_) - , format_settings(format_settings) + , format_settings(format_settings_) { auto & sample = getPort().getHeader(); size_t num_columns = sample.columns(); @@ -40,7 +40,7 @@ CSVRowInputFormat::CSVRowInputFormat( /// If input_format_null_as_default=1 we need ColumnNullable of type DataTypeNullable(nested_type) /// to parse value as nullable before inserting it in corresponding column of not-nullable type. /// Constructing temporary column for each row is slow, so we prepare it here - if (format_settings.csv.null_as_default && !column_info.type->isNullable() && column_info.type->canBeInsideNullable()) + if (format_settings_.csv.null_as_default && !column_info.type->isNullable() && column_info.type->canBeInsideNullable()) { column_idx_to_nullable_column_idx[i] = nullable_columns.size(); nullable_types.emplace_back(std::make_shared(column_info.type)); diff --git a/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.h b/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.h index 6935325f01f..59b24ae0140 100644 --- a/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.h @@ -19,7 +19,7 @@ public: /** with_names - in the first line the header with column names * with_types - on the next line header with type names */ - CSVRowInputFormat(ReadBuffer & in_, Block header, Params params, bool with_names, const FormatSettings & format_settings); + CSVRowInputFormat(ReadBuffer & in_, Block header_, Params params_, bool with_names_, const FormatSettings & format_settings_); String getName() const override { return "CSVRowInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp index c5246dfb1cc..8f17b8f15c8 100644 --- a/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp @@ -8,8 +8,8 @@ namespace DB { -CSVRowOutputFormat::CSVRowOutputFormat(WriteBuffer & out_, const Block & header, bool with_names_, const FormatSettings & format_settings) - : IRowOutputFormat(header, out_), with_names(with_names_), format_settings(format_settings) +CSVRowOutputFormat::CSVRowOutputFormat(WriteBuffer & out_, const Block & header_, bool with_names_, const FormatSettings & format_settings_) + : IRowOutputFormat(header_, out_), with_names(with_names_), format_settings(format_settings_) { auto & sample = getPort(PortKind::Main).getHeader(); size_t columns = sample.columns(); diff --git a/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.h index 5593fc98455..803d3aa80a6 100644 --- a/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.h @@ -20,7 +20,7 @@ public: /** with_names - output in the first line a header with column names * with_types - output in 
the next line header with the names of the types */ - CSVRowOutputFormat(WriteBuffer & out_, const Block & header, bool with_names_, const FormatSettings & format_settings); + CSVRowOutputFormat(WriteBuffer & out_, const Block & header_, bool with_names_, const FormatSettings & format_settings_); String getName() const override { return "CSVRowOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp index 42ef04b64b1..2652304fcb0 100644 --- a/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp @@ -178,8 +178,8 @@ void CapnProtoRowInputFormat::createActions(const NestedFieldList & sorted_field } } -CapnProtoRowInputFormat::CapnProtoRowInputFormat(ReadBuffer & in_, Block header, Params params, const FormatSchemaInfo & info) - : IRowInputFormat(std::move(header), in_, std::move(params)), parser(std::make_shared<capnp::SchemaParser>()) +CapnProtoRowInputFormat::CapnProtoRowInputFormat(ReadBuffer & in_, Block header, Params params_, const FormatSchemaInfo & info) + : IRowInputFormat(std::move(header), in_, std::move(params_)), parser(std::make_shared<capnp::SchemaParser>()) { // Parse the schema and fetch the root object diff --git a/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h b/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h index b941ceb514d..b7021ea7db7 100644 --- a/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h @@ -33,7 +33,7 @@ public: * schema_file - location of the capnproto schema, e.g. "schema.capnp" * root_object - name to the root object, e.g. "Message" */ - CapnProtoRowInputFormat(ReadBuffer & in_, Block header, Params params, const FormatSchemaInfo & info); + CapnProtoRowInputFormat(ReadBuffer & in_, Block header, Params params_, const FormatSchemaInfo & info); String getName() const override { return "CapnProtoRowInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp index 100edb20f37..9730ae3f3cc 100644 --- a/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp @@ -27,8 +27,8 @@ enum JSONEachRowRowInputFormat::JSONEachRowRowInputFormat( - ReadBuffer & in_, const Block & header, Params params, const FormatSettings & format_settings) - : IRowInputFormat(header, in_, std::move(params)), format_settings(format_settings), name_map(header.columns()) + ReadBuffer & in_, const Block & header_, Params params_, const FormatSettings & format_settings_) + : IRowInputFormat(header_, in_, std::move(params_)), format_settings(format_settings_), name_map(header_.columns()) { /// In this format, BOM at beginning of stream cannot be confused with value, so it is safe to skip it. skipBOMIfExists(in); @@ -38,7 +38,7 @@ JSONEachRowRowInputFormat::JSONEachRowRowInputFormat( { const String & column_name = columnName(i); name_map[column_name] = i; /// NOTE You could place names more cache-locally. 
- if (format_settings.import_nested_json) + if (format_settings_.import_nested_json) { const auto splitted = Nested::splitName(column_name); if (!splitted.second.empty()) diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h b/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h index 1aed7c9dc49..17711b5f27d 100644 --- a/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h @@ -20,7 +20,7 @@ class ReadBuffer; class JSONEachRowRowInputFormat : public IRowInputFormat { public: - JSONEachRowRowInputFormat(ReadBuffer & in_, const Block & header, Params params, const FormatSettings & format_settings); + JSONEachRowRowInputFormat(ReadBuffer & in_, const Block & header_, Params params_, const FormatSettings & format_settings_); String getName() const override { return "JSONEachRowRowInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp index 112021dce42..cf23e06c9a6 100644 --- a/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp @@ -8,8 +8,8 @@ namespace DB { -JSONEachRowRowOutputFormat::JSONEachRowRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & settings) - : IRowOutputFormat(header, out_), settings(settings) +JSONEachRowRowOutputFormat::JSONEachRowRowOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & settings_) + : IRowOutputFormat(header_, out_), settings(settings_) { auto & sample = getPort(PortKind::Main).getHeader(); size_t columns = sample.columns(); diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h index a45f193ea39..66b3fa88652 100644 --- a/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h @@ -15,7 +15,7 @@ namespace DB class JSONEachRowRowOutputFormat : public IRowOutputFormat { public: - JSONEachRowRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & settings); + JSONEachRowRowOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & settings_); String getName() const override { return "JSONEachRowRowOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.cpp index 2e48d0643e9..f046c810fbd 100644 --- a/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.cpp @@ -14,13 +14,13 @@ namespace DB using namespace MySQLProtocol; -MySQLOutputFormat::MySQLOutputFormat(WriteBuffer & out_, const Block & header, const Context & context, const FormatSettings & settings) - : IOutputFormat(header, out_) - , context(context) - , packet_sender(out, const_cast<uint8_t &>(context.mysql.sequence_id)) /// TODO: fix it - , format_settings(settings) +MySQLOutputFormat::MySQLOutputFormat(WriteBuffer & out_, const Block & header_, const Context & context_, const FormatSettings & settings_) + : IOutputFormat(header_, out_) + , context(context_) + , packet_sender(out, const_cast<uint8_t &>(context_.mysql.sequence_id)) /// TODO: fix it + , format_settings(settings_) { - packet_sender.max_packet_size = context.mysql.max_packet_size; + packet_sender.max_packet_size = context_.mysql.max_packet_size; } void MySQLOutputFormat::initialize() diff 
--git a/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.h b/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.h index e6b319f659a..d5691936862 100644 --- a/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.h @@ -19,7 +19,7 @@ class Context; class MySQLOutputFormat: public IOutputFormat { public: - MySQLOutputFormat(WriteBuffer & out_, const Block & header, const Context & context, const FormatSettings & settings); + MySQLOutputFormat(WriteBuffer & out_, const Block & header_, const Context & context_, const FormatSettings & settings_); String getName() const override { return "MySQLOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp index d2a4842fa24..7f30c5bfdc4 100644 --- a/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp @@ -12,8 +12,8 @@ namespace DB { ODBCDriver2BlockOutputFormat::ODBCDriver2BlockOutputFormat( - WriteBuffer & out_, const Block & header, const FormatSettings & format_settings) - : IOutputFormat(header, out_), format_settings(format_settings) + WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_) + : IOutputFormat(header_, out_), format_settings(format_settings_) { } diff --git a/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h b/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h index 5a6ed4efc09..7510ce4640a 100644 --- a/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h @@ -20,7 +20,7 @@ class WriteBuffer; class ODBCDriver2BlockOutputFormat final : public IOutputFormat { public: - ODBCDriver2BlockOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings); + ODBCDriver2BlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "ODBCDriver2BlockOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp index fc8796c8799..3f84bacbfaf 100644 --- a/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp @@ -8,8 +8,8 @@ namespace DB { -ODBCDriverBlockOutputFormat::ODBCDriverBlockOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings) - : IOutputFormat(header, out_), format_settings(format_settings) +ODBCDriverBlockOutputFormat::ODBCDriverBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_) + : IOutputFormat(header_, out_), format_settings(format_settings_) { } diff --git a/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h b/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h index 3a0e6e29c40..768b8f2683d 100644 --- a/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h @@ -21,7 +21,7 @@ class WriteBuffer; class ODBCDriverBlockOutputFormat : public IOutputFormat { public: - ODBCDriverBlockOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings); + ODBCDriverBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & 
format_settings_); String getName() const override { return "ODBCDriverBlockOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp index e5f08c8d645..a2f2fd33e24 100644 --- a/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp @@ -45,8 +45,8 @@ namespace ErrorCodes extern const int THERE_IS_NO_COLUMN; } -ParquetBlockInputFormat::ParquetBlockInputFormat(ReadBuffer & in_, Block header, const Context & context) - : IInputFormat(std::move(header), in_), context{context} +ParquetBlockInputFormat::ParquetBlockInputFormat(ReadBuffer & in_, Block header_, const Context & context_) + : IInputFormat(std::move(header_), in_), context{context_} { } diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.h b/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.h index 8fa9013fbd1..172d3a365d4 100644 --- a/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.h @@ -16,7 +16,7 @@ class Context; class ParquetBlockInputFormat: public IInputFormat { public: - ParquetBlockInputFormat(ReadBuffer & in_, Block header, const Context & context); + ParquetBlockInputFormat(ReadBuffer & in_, Block header_, const Context & context_); String getName() const override { return "ParquetBlockInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp index 3a3540ede7e..e8196c5bf59 100644 --- a/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp @@ -35,8 +35,8 @@ namespace ErrorCodes extern const int UNKNOWN_TYPE; } -ParquetBlockOutputFormat::ParquetBlockOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings) - : IOutputFormat(header, out_), format_settings{format_settings} +ParquetBlockOutputFormat::ParquetBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_) + : IOutputFormat(header_, out_), format_settings{format_settings_} { } diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h b/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h index f7ca6f11f00..11d746a0a6d 100644 --- a/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h @@ -24,7 +24,7 @@ namespace DB class ParquetBlockOutputFormat : public IOutputFormat { public: - ParquetBlockOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings); + ParquetBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "ParquetBlockOutputFormat"; } void consume(Chunk) override; diff --git a/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp index 6868a3b2987..84c4fc7cbc3 100644 --- a/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp @@ -18,8 +18,8 @@ namespace ErrorCodes PrettyBlockOutputFormat::PrettyBlockOutputFormat( - WriteBuffer & out_, const Block & header, const FormatSettings & format_settings_) - : IOutputFormat(header, out_), format_settings(format_settings_) + WriteBuffer & out_, const Block 
& header_, const FormatSettings & format_settings_) + : IOutputFormat(header_, out_), format_settings(format_settings_) { struct winsize w; if (0 == ioctl(STDOUT_FILENO, TIOCGWINSZ, &w)) @@ -54,8 +54,8 @@ void PrettyBlockOutputFormat::calculateWidths( for (size_t j = 0; j < num_rows; ++j) { { - WriteBufferFromString out(serialized_value); - elem.type->serializeAsText(*column, j, out, format_settings); + WriteBufferFromString out_(serialized_value); + elem.type->serializeAsText(*column, j, out_, format_settings); } widths[i][j] = std::min(format_settings.pretty.max_column_pad_width, diff --git a/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h b/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h index 34bbbc3000c..eae1c3e9eb1 100644 --- a/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h @@ -18,7 +18,7 @@ class PrettyBlockOutputFormat : public IOutputFormat { public: /// no_escapes - do not use ANSI escape sequences - to display in the browser, not in the console. - PrettyBlockOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings_); + PrettyBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "PrettyBlockOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp index 4f535308b8f..09410a06c0c 100644 --- a/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp @@ -11,10 +11,10 @@ namespace DB { -ProtobufRowInputFormat::ProtobufRowInputFormat(ReadBuffer & in_, const Block & header, Params params, const FormatSchemaInfo & info) - : IRowInputFormat(header, in_, params) - , data_types(header.getDataTypes()) - , reader(in, ProtobufSchemas::instance().getMessageTypeForFormatSchema(info), header.getNames()) +ProtobufRowInputFormat::ProtobufRowInputFormat(ReadBuffer & in_, const Block & header_, Params params_, const FormatSchemaInfo & info_) + : IRowInputFormat(header_, in_, params_) + , data_types(header_.getDataTypes()) + , reader(in, ProtobufSchemas::instance().getMessageTypeForFormatSchema(info_), header_.getNames()) { } diff --git a/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.h b/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.h index 89ace7fec90..ebc2283d25c 100644 --- a/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.h @@ -24,7 +24,7 @@ class FormatSchemaInfo; class ProtobufRowInputFormat : public IRowInputFormat { public: - ProtobufRowInputFormat(ReadBuffer & in_, const Block & header, Params params, const FormatSchemaInfo & info); + ProtobufRowInputFormat(ReadBuffer & in_, const Block & header_, Params params_, const FormatSchemaInfo & info_); ~ProtobufRowInputFormat() override; String getName() const override { return "ProtobufRowInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp index b03480834c5..35a0b4b7a7c 100644 --- a/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp @@ -15,8 +15,8 @@ namespace ErrorCodes } -TSKVRowInputFormat::TSKVRowInputFormat(ReadBuffer & in_, Block header, Params params, const FormatSettings & format_settings) - : 
IRowInputFormat(std::move(header), in_, std::move(params)), format_settings(format_settings), name_map(header.columns()) +TSKVRowInputFormat::TSKVRowInputFormat(ReadBuffer & in_, Block header_, Params params_, const FormatSettings & format_settings_) + : IRowInputFormat(std::move(header_), in_, std::move(params_)), format_settings(format_settings_), name_map(header_.columns()) { /// In this format, we assume that column name cannot contain BOM, /// so BOM at beginning of stream cannot be confused with name of field, and it is safe to skip it. diff --git a/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.h b/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.h index 4d9c55f6efc..52330665395 100644 --- a/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.h @@ -23,7 +23,7 @@ class ReadBuffer; class TSKVRowInputFormat : public IRowInputFormat { public: - TSKVRowInputFormat(ReadBuffer & in_, Block header, Params params, const FormatSettings & format_settings); + TSKVRowInputFormat(ReadBuffer & in_, Block header_, Params params_, const FormatSettings & format_settings_); String getName() const override { return "TSKVRowInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h index 2d2d7cf4ad4..c2cf31fc196 100644 --- a/dbms/src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h @@ -13,8 +13,8 @@ namespace DB class TabSeparatedRawRowOutputFormat : public TabSeparatedRowOutputFormat { public: - TabSeparatedRawRowOutputFormat(WriteBuffer & out_, const Block & header, bool with_names_, bool with_types_, const FormatSettings & format_settings_) - : TabSeparatedRowOutputFormat(out_, header, with_names_, with_types_, format_settings_) {} + TabSeparatedRawRowOutputFormat(WriteBuffer & out_, const Block & header_, bool with_names_, bool with_types_, const FormatSettings & format_settings_) + : TabSeparatedRowOutputFormat(out_, header_, with_names_, with_types_, format_settings_) {} String getName() const override { return "TabSeparatedRawRowOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp index 5834d46b322..39c06c9441b 100644 --- a/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp @@ -42,8 +42,8 @@ static void checkForCarriageReturn(ReadBuffer & istr) TabSeparatedRowInputFormat::TabSeparatedRowInputFormat( - ReadBuffer & in_, Block header, bool with_names, bool with_types, Params params, const FormatSettings & format_settings) - : IRowInputFormat(std::move(header), in_, std::move(params)), with_names(with_names), with_types(with_types), format_settings(format_settings) + ReadBuffer & in_, Block header_, bool with_names_, bool with_types_, Params params_, const FormatSettings & format_settings_) + : IRowInputFormat(std::move(header_), in_, std::move(params_)), with_names(with_names_), with_types(with_types_), format_settings(format_settings_) { auto & sample = getPort().getHeader(); size_t num_columns = sample.columns(); diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h b/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h index 47256e0b9a7..076cbb60152 100644 --- a/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h +++ 
b/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h @@ -20,7 +20,7 @@ public: * with_types - on the next line header with type names */ TabSeparatedRowInputFormat( - ReadBuffer & in_, Block header, bool with_names, bool with_types, Params params, const FormatSettings & format_settings); + ReadBuffer & in_, Block header_, bool with_names_, bool with_types_, Params params_, const FormatSettings & format_settings_); String getName() const override { return "TabSeparatedRowInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp index 608f2e8b5d0..92058323102 100644 --- a/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp @@ -8,8 +8,8 @@ namespace DB { TabSeparatedRowOutputFormat::TabSeparatedRowOutputFormat( - WriteBuffer & out_, const Block & header, bool with_names, bool with_types, const FormatSettings & format_settings) - : IRowOutputFormat(header, out_), with_names(with_names), with_types(with_types), format_settings(format_settings) + WriteBuffer & out_, const Block & header_, bool with_names_, bool with_types_, const FormatSettings & format_settings_) + : IRowOutputFormat(header_, out_), with_names(with_names_), with_types(with_types_), format_settings(format_settings_) { } diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h index 7ebe12bc30d..a00cd2d8fdd 100644 --- a/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h @@ -18,7 +18,7 @@ public: /** with_names - output in the first line a header with column names * with_types - output the next line header with the names of the types */ - TabSeparatedRowOutputFormat(WriteBuffer & out_, const Block & header, bool with_names, bool with_types, const FormatSettings & format_settings); + TabSeparatedRowOutputFormat(WriteBuffer & out_, const Block & header_, bool with_names_, bool with_types_, const FormatSettings & format_settings_); String getName() const override { return "TabSeparatedRowOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.cpp index 337085198a3..5eeb29d91e7 100644 --- a/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.cpp @@ -29,9 +29,9 @@ namespace ErrorCodes ValuesRowInputFormat::ValuesRowInputFormat( - ReadBuffer & in_, Block header, Params params, const Context & context_, const FormatSettings & format_settings) - : IRowInputFormat(std::move(header), in_, params) - , context(std::make_unique<Context>(context_)), format_settings(format_settings) + ReadBuffer & in_, Block header_, Params params_, const Context & context_, const FormatSettings & format_settings_) + : IRowInputFormat(std::move(header_), in_, params_) + , context(std::make_unique<Context>(context_)), format_settings(format_settings_) { /// In this format, BOM at beginning of stream cannot be confused with value, so it is safe to skip it. 
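/// A minimal sketch of what the skip amounts to, assuming a ReadBuffer-style API (illustration only,
/// hypothetical code, not the helper actually called on the next line):
///
///     if (in.available() >= 3 && 0 == memcmp(in.position(), "\xEF\xBB\xBF", 3))
///         in.position() += 3;   /// drop the UTF-8 byte order mark before parsing the first field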
skipBOMIfExists(in); diff --git a/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.h b/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.h index f7ad3b470e6..81ad0c5319e 100644 --- a/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.h @@ -21,7 +21,7 @@ public: * If interpret_expressions is true, it will, in addition, try to use SQL parser and interpreter * in case when streaming parser could not parse field (this is very slow). */ - ValuesRowInputFormat(ReadBuffer & in_, Block header, Params params, const Context & context_, const FormatSettings & format_settings); + ValuesRowInputFormat(ReadBuffer & in_, Block header_, Params params_, const Context & context_, const FormatSettings & format_settings_); String getName() const override { return "ValuesRowInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp index 234a9da5c67..80c4135dfb3 100644 --- a/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp @@ -10,8 +10,8 @@ namespace DB { -ValuesRowOutputFormat::ValuesRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings) - : IRowOutputFormat(header, out_), format_settings(format_settings) +ValuesRowOutputFormat::ValuesRowOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_) + : IRowOutputFormat(header_, out_), format_settings(format_settings_) { } diff --git a/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.h index 5f82e78d3c0..f7a28002c92 100644 --- a/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.h @@ -15,7 +15,7 @@ class WriteBuffer; class ValuesRowOutputFormat : public IRowOutputFormat { public: - ValuesRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings); + ValuesRowOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "ValuesRowOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp index 55f04584c19..744ad2d0953 100644 --- a/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp @@ -11,8 +11,8 @@ namespace DB { VerticalRowOutputFormat::VerticalRowOutputFormat( - WriteBuffer & out_, const Block & header, const FormatSettings & format_settings) - : IRowOutputFormat(header, out_), format_settings(format_settings) + WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_) + : IRowOutputFormat(header_, out_), format_settings(format_settings_) { auto & sample = getPort(PortKind::Main).getHeader(); size_t columns = sample.columns(); diff --git a/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.h index a535d1e9c5b..2a0d248bab8 100644 --- a/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.h @@ -18,7 +18,7 @@ class Context; class VerticalRowOutputFormat : public IRowOutputFormat { public: - VerticalRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & 
format_settings); + VerticalRowOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "VerticalRowOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp index 5df58a5c733..545f80692cc 100644 --- a/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp @@ -7,8 +7,8 @@ namespace DB { -XMLRowOutputFormat::XMLRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings) - : IRowOutputFormat(header, out_), format_settings(format_settings) +XMLRowOutputFormat::XMLRowOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_) + : IRowOutputFormat(header_, out_), format_settings(format_settings_) { auto & sample = getPort(PortKind::Main).getHeader(); NamesAndTypesList columns(sample.getNamesAndTypesList()); diff --git a/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.h index 102b11490fe..b2370090c32 100644 --- a/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.h @@ -16,7 +16,7 @@ namespace DB class XMLRowOutputFormat : public IRowOutputFormat { public: - XMLRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings); + XMLRowOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "XMLRowOutputFormat"; } diff --git a/dbms/src/Processors/ISimpleTransform.cpp b/dbms/src/Processors/ISimpleTransform.cpp index 39736973a16..292ea24b0ef 100644 --- a/dbms/src/Processors/ISimpleTransform.cpp +++ b/dbms/src/Processors/ISimpleTransform.cpp @@ -4,11 +4,11 @@ namespace DB { -ISimpleTransform::ISimpleTransform(Block input_header, Block output_header, bool skip_empty_chunks) - : IProcessor({std::move(input_header)}, {std::move(output_header)}) +ISimpleTransform::ISimpleTransform(Block input_header_, Block output_header_, bool skip_empty_chunks_) + : IProcessor({std::move(input_header_)}, {std::move(output_header_)}) , input(inputs.front()) , output(outputs.front()) - , skip_empty_chunks(skip_empty_chunks) + , skip_empty_chunks(skip_empty_chunks_) { } diff --git a/dbms/src/Processors/ISimpleTransform.h b/dbms/src/Processors/ISimpleTransform.h index 82e383ceeb0..0a952840964 100644 --- a/dbms/src/Processors/ISimpleTransform.h +++ b/dbms/src/Processors/ISimpleTransform.h @@ -30,7 +30,7 @@ protected: void stopReading() { no_more_data_needed = true; } public: - ISimpleTransform(Block input_header, Block output_header, bool skip_empty_chunks); + ISimpleTransform(Block input_header_, Block output_header_, bool skip_empty_chunks_); Status prepare() override; void work() override; diff --git a/dbms/src/Processors/LimitTransform.cpp b/dbms/src/Processors/LimitTransform.cpp index f591ecfb046..1be10c405bb 100644 --- a/dbms/src/Processors/LimitTransform.cpp +++ b/dbms/src/Processors/LimitTransform.cpp @@ -5,12 +5,12 @@ namespace DB { LimitTransform::LimitTransform( - const Block & header, size_t limit, size_t offset, - bool always_read_till_end) - : IProcessor({header}, {header}) + const Block & header_, size_t limit_, size_t offset_, + bool always_read_till_end_) + : IProcessor({header_}, {header_}) , input(inputs.front()), output(outputs.front()) - , limit(limit), offset(offset) - , 
always_read_till_end(always_read_till_end) + , limit(limit_), offset(offset_) + , always_read_till_end(always_read_till_end_) { } diff --git a/dbms/src/Processors/LimitTransform.h b/dbms/src/Processors/LimitTransform.h index eb5a8fe8d5a..f80ca263c95 100644 --- a/dbms/src/Processors/LimitTransform.h +++ b/dbms/src/Processors/LimitTransform.h @@ -25,8 +25,8 @@ private: public: LimitTransform( - const Block & header, size_t limit, size_t offset, - bool always_read_till_end = false); + const Block & header_, size_t limit_, size_t offset_, + bool always_read_till_end_ = false); String getName() const override { return "Limit"; } diff --git a/dbms/src/Processors/Port.h b/dbms/src/Processors/Port.h index 99ad7df4b50..1758327e914 100644 --- a/dbms/src/Processors/Port.h +++ b/dbms/src/Processors/Port.h @@ -179,8 +179,8 @@ protected: public: using Data = State::Data; - Port(Block header) : header(std::move(header)) {} - Port(Block header, IProcessor * processor) : header(std::move(header)), processor(processor) {} + Port(Block header_) : header(std::move(header_)) {} + Port(Block header_, IProcessor * processor_) : header(std::move(header_)), processor(processor_) {} const Block & getHeader() const { return header; } bool ALWAYS_INLINE isConnected() const { return state != nullptr; } diff --git a/dbms/src/Processors/Sources/SourceFromInputStream.cpp b/dbms/src/Processors/Sources/SourceFromInputStream.cpp index d9d74a5cde6..f60bc703ec0 100644 --- a/dbms/src/Processors/Sources/SourceFromInputStream.cpp +++ b/dbms/src/Processors/Sources/SourceFromInputStream.cpp @@ -6,9 +6,9 @@ namespace DB { -SourceFromInputStream::SourceFromInputStream(BlockInputStreamPtr stream_, bool force_add_aggregating_info) +SourceFromInputStream::SourceFromInputStream(BlockInputStreamPtr stream_, bool force_add_aggregating_info_) : ISource(stream_->getHeader()) - , force_add_aggregating_info(force_add_aggregating_info) + , force_add_aggregating_info(force_add_aggregating_info_) , stream(std::move(stream_)) { auto & sample = getPort().getHeader(); diff --git a/dbms/src/Processors/Sources/SourceFromInputStream.h b/dbms/src/Processors/Sources/SourceFromInputStream.h index 46e0b3fb04b..0e6c698f260 100644 --- a/dbms/src/Processors/Sources/SourceFromInputStream.h +++ b/dbms/src/Processors/Sources/SourceFromInputStream.h @@ -10,7 +10,7 @@ using BlockInputStreamPtr = std::shared_ptr<IBlockInputStream>; class SourceFromInputStream : public ISource { public: - explicit SourceFromInputStream(BlockInputStreamPtr stream_, bool force_add_aggregating_info = false); + explicit SourceFromInputStream(BlockInputStreamPtr stream_, bool force_add_aggregating_info_ = false); String getName() const override { return "SourceFromInputStream"; } Status prepare() override; diff --git a/dbms/src/Processors/Transforms/AggregatingTransform.cpp b/dbms/src/Processors/Transforms/AggregatingTransform.cpp index 5993584f5c9..55fe66b7d9f 100644 --- a/dbms/src/Processors/Transforms/AggregatingTransform.cpp +++ b/dbms/src/Processors/Transforms/AggregatingTransform.cpp @@ -61,8 +61,8 @@ namespace class ConvertingAggregatedToBlocksTransform : public ISource { public: - ConvertingAggregatedToBlocksTransform(Block header, AggregatingTransformParamsPtr params_, BlockInputStreamPtr stream) - : ISource(std::move(header)), params(std::move(params_)), stream(std::move(stream)) {} + ConvertingAggregatedToBlocksTransform(Block header, AggregatingTransformParamsPtr params_, BlockInputStreamPtr stream_) + : ISource(std::move(header)), params(std::move(params_)), stream(std::move(stream_)) {} 
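/// The constructor just above shows the hazard this whole patch removes: before the rename, the parameter
/// `stream` shadowed the member `stream`, so `stream(std::move(stream))` only worked because name lookup in
/// a mem-initializer argument happens to find the parameter first, and gcc9's -Wshadow rightly flags it.
/// A minimal sketch of how the pattern goes wrong, using a hypothetical Example class (illustration only,
/// not code from this patch):
///
///     struct Example
///     {
///         Block header;
///         size_t num_columns;
///         Example(Block header)                   /// parameter shadows the member
///             : header(std::move(header))         /// member initialized; the parameter is now moved-from
///             , num_columns(header.columns())     /// reads the moved-from *parameter*, yielding 0
///         {}
///     };
///
/// With the parameter spelled header_, the second initializer has to name header_ (visibly moved-from) or
/// this->header, so the mistake can no longer hide behind the shadowing.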
String getName() const override { return "ConvertingAggregatedToBlocksTransform"; } @@ -99,15 +99,15 @@ AggregatingTransform::AggregatingTransform(Block header, AggregatingTransformPar AggregatingTransform::AggregatingTransform( Block header, AggregatingTransformParamsPtr params_, ManyAggregatedDataPtr many_data_, - size_t current_variant, size_t temporary_data_merge_threads, size_t max_threads) + size_t current_variant, size_t temporary_data_merge_threads_, size_t max_threads_) : IProcessor({std::move(header)}, {params_->getHeader()}), params(std::move(params_)) , key(params->params.keys_size) , key_columns(params->params.keys_size) , aggregate_columns(params->params.aggregates_size) , many_data(std::move(many_data_)) , variants(*many_data->variants[current_variant]) - , max_threads(std::min(many_data->variants.size(), max_threads)) - , temporary_data_merge_threads(temporary_data_merge_threads) + , max_threads(std::min(many_data->variants.size(), max_threads_)) + , temporary_data_merge_threads(temporary_data_merge_threads_) { } diff --git a/dbms/src/Processors/Transforms/AggregatingTransform.h b/dbms/src/Processors/Transforms/AggregatingTransform.h index 64ba10e1801..17786ccfa1a 100644 --- a/dbms/src/Processors/Transforms/AggregatingTransform.h +++ b/dbms/src/Processors/Transforms/AggregatingTransform.h @@ -24,8 +24,8 @@ struct AggregatingTransformParams Aggregator aggregator; bool final; - AggregatingTransformParams(const Aggregator::Params & params, bool final) - : params(params), aggregator(params), final(final) {} + AggregatingTransformParams(const Aggregator::Params & params_, bool final_) + : params(params_), aggregator(params), final(final_) {} Block getHeader() const { return aggregator.getHeader(final); } }; diff --git a/dbms/src/Processors/Transforms/ConvertingTransform.cpp b/dbms/src/Processors/Transforms/ConvertingTransform.cpp index 49dbb748591..8729b896084 100644 --- a/dbms/src/Processors/Transforms/ConvertingTransform.cpp +++ b/dbms/src/Processors/Transforms/ConvertingTransform.cpp @@ -33,12 +33,12 @@ static ColumnPtr castColumnWithDiagnostic( } ConvertingTransform::ConvertingTransform( - Block source_header, - Block result_header, - MatchColumnsMode mode, - const Context & context) - : ISimpleTransform(std::move(source_header), std::move(result_header), false) - , context(context) + Block source_header_, + Block result_header_, + MatchColumnsMode mode_, + const Context & context_) + : ISimpleTransform(std::move(source_header_), std::move(result_header_), false) + , context(context_) , conversion(getOutputPort().getHeader().columns()) { auto & source = getInputPort().getHeader(); @@ -47,14 +47,14 @@ ConvertingTransform::ConvertingTransform( size_t num_input_columns = source.columns(); size_t num_result_columns = result.columns(); - if (mode == MatchColumnsMode::Position && num_input_columns != num_result_columns) + if (mode_ == MatchColumnsMode::Position && num_input_columns != num_result_columns) throw Exception("Number of columns doesn't match", ErrorCodes::NUMBER_OF_COLUMNS_DOESNT_MATCH); for (size_t result_col_num = 0; result_col_num < num_result_columns; ++result_col_num) { const auto & res_elem = result.getByPosition(result_col_num); - switch (mode) + switch (mode_) { case MatchColumnsMode::Position: conversion[result_col_num] = result_col_num; diff --git a/dbms/src/Processors/Transforms/ConvertingTransform.h b/dbms/src/Processors/Transforms/ConvertingTransform.h index d6e6219316a..b2412802ed6 100644 --- a/dbms/src/Processors/Transforms/ConvertingTransform.h +++ 
b/dbms/src/Processors/Transforms/ConvertingTransform.h @@ -29,10 +29,10 @@ public: }; ConvertingTransform( - Block source_header, - Block result_header, - MatchColumnsMode mode, - const Context & context); + Block source_header_, + Block result_header_, + MatchColumnsMode mode_, + const Context & context_); String getName() const override { return "Converting"; } diff --git a/dbms/src/Processors/Transforms/CreatingSetsTransform.cpp b/dbms/src/Processors/Transforms/CreatingSetsTransform.cpp index 29bc4030b81..71fe743fd49 100644 --- a/dbms/src/Processors/Transforms/CreatingSetsTransform.cpp +++ b/dbms/src/Processors/Transforms/CreatingSetsTransform.cpp @@ -21,15 +21,15 @@ namespace ErrorCodes CreatingSetsTransform::CreatingSetsTransform( - Block out_header, + Block out_header_, const SubqueriesForSets & subqueries_for_sets_, - const SizeLimits & network_transfer_limits, - const Context & context) - : IProcessor({}, {std::move(out_header)}) + const SizeLimits & network_transfer_limits_, + const Context & context_) + : IProcessor({}, {std::move(out_header_)}) , subqueries_for_sets(subqueries_for_sets_) , cur_subquery(subqueries_for_sets.begin()) - , network_transfer_limits(network_transfer_limits) - , context(context) + , network_transfer_limits(network_transfer_limits_) + , context(context_) { } diff --git a/dbms/src/Processors/Transforms/CreatingSetsTransform.h b/dbms/src/Processors/Transforms/CreatingSetsTransform.h index b5f7ea63748..00f64440393 100644 --- a/dbms/src/Processors/Transforms/CreatingSetsTransform.h +++ b/dbms/src/Processors/Transforms/CreatingSetsTransform.h @@ -17,10 +17,10 @@ class CreatingSetsTransform : public IProcessor { public: CreatingSetsTransform( - Block out_header, + Block out_header_, const SubqueriesForSets & subqueries_for_sets_, - const SizeLimits & network_transfer_limits, - const Context & context); + const SizeLimits & network_transfer_limits_, + const Context & context_); String getName() const override { return "CreatingSetsTransform"; } Status prepare() override; diff --git a/dbms/src/Processors/Transforms/DistinctTransform.cpp b/dbms/src/Processors/Transforms/DistinctTransform.cpp index 7cd9a54e055..f9383f1a5e5 100644 --- a/dbms/src/Processors/Transforms/DistinctTransform.cpp +++ b/dbms/src/Processors/Transforms/DistinctTransform.cpp @@ -9,23 +9,23 @@ namespace ErrorCodes } DistinctTransform::DistinctTransform( - const Block & header, - const SizeLimits & set_size_limits, - UInt64 limit_hint, - const Names & columns) - : ISimpleTransform(header, header, true) - , limit_hint(limit_hint) - , set_size_limits(set_size_limits) + const Block & header_, + const SizeLimits & set_size_limits_, + UInt64 limit_hint_, + const Names & columns_) + : ISimpleTransform(header_, header_, true) + , limit_hint(limit_hint_) + , set_size_limits(set_size_limits_) { - size_t num_columns = columns.empty() ? header.columns() : columns.size(); + size_t num_columns = columns_.empty() ? header_.columns() : columns_.size(); - key_columns_pos.reserve(columns.size()); + key_columns_pos.reserve(columns_.size()); for (size_t i = 0; i < num_columns; ++i) { - auto pos = columns.empty() ? i - : header.getPositionByName(columns[i]); + auto pos = columns_.empty() ? 
i + : header_.getPositionByName(columns_[i]); - auto & col = header.getByPosition(pos).column; + auto & col = header_.getByPosition(pos).column; if (!(col && isColumnConst(*col))) key_columns_pos.emplace_back(pos); diff --git a/dbms/src/Processors/Transforms/DistinctTransform.h b/dbms/src/Processors/Transforms/DistinctTransform.h index 05ef9e9c334..236f9026c63 100644 --- a/dbms/src/Processors/Transforms/DistinctTransform.h +++ b/dbms/src/Processors/Transforms/DistinctTransform.h @@ -11,10 +11,10 @@ class DistinctTransform : public ISimpleTransform { public: DistinctTransform( - const Block & header, - const SizeLimits & set_size_limits, - UInt64 limit_hint, - const Names & columns); + const Block & header_, + const SizeLimits & set_size_limits_, + UInt64 limit_hint_, + const Names & columns_); String getName() const override { return "DistinctTransform"; } diff --git a/dbms/src/Processors/Transforms/ExpressionTransform.cpp b/dbms/src/Processors/Transforms/ExpressionTransform.cpp index c42ef92b085..2ae9dd6f57f 100644 --- a/dbms/src/Processors/Transforms/ExpressionTransform.cpp +++ b/dbms/src/Processors/Transforms/ExpressionTransform.cpp @@ -11,11 +11,11 @@ static Block transformHeader(Block header, const ExpressionActionsPtr & expressi } -ExpressionTransform::ExpressionTransform(const Block & header, ExpressionActionsPtr expression, bool on_totals, bool default_totals) - : ISimpleTransform(header, transformHeader(header, expression), on_totals) - , expression(std::move(expression)) - , on_totals(on_totals) - , default_totals(default_totals) +ExpressionTransform::ExpressionTransform(const Block & header_, ExpressionActionsPtr expression_, bool on_totals_, bool default_totals_) + : ISimpleTransform(header_, transformHeader(header_, expression_), on_totals_) + , expression(std::move(expression_)) + , on_totals(on_totals_) + , default_totals(default_totals_) { } diff --git a/dbms/src/Processors/Transforms/ExpressionTransform.h b/dbms/src/Processors/Transforms/ExpressionTransform.h index 8face634f96..6c6d474d872 100644 --- a/dbms/src/Processors/Transforms/ExpressionTransform.h +++ b/dbms/src/Processors/Transforms/ExpressionTransform.h @@ -10,7 +10,7 @@ using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>; class ExpressionTransform : public ISimpleTransform { public: - ExpressionTransform(const Block & header, ExpressionActionsPtr expression, bool on_totals = false, bool default_totals = false); + ExpressionTransform(const Block & header_, ExpressionActionsPtr expression_, bool on_totals_ = false, bool default_totals_ = false); String getName() const override { return "ExpressionTransform"; } diff --git a/dbms/src/Processors/Transforms/FilterTransform.cpp b/dbms/src/Processors/Transforms/FilterTransform.cpp index 725b5ceb01b..8d1fcfa0168 100644 --- a/dbms/src/Processors/Transforms/FilterTransform.cpp +++ b/dbms/src/Processors/Transforms/FilterTransform.cpp @@ -42,14 +42,14 @@ static Block transformHeader( } FilterTransform::FilterTransform( - const Block & header, + const Block & header_, ExpressionActionsPtr expression_, String filter_column_name_, - bool remove_filter_column) - : ISimpleTransform(header, transformHeader(header, expression_, filter_column_name_, remove_filter_column), true) + bool remove_filter_column_) + : ISimpleTransform(header_, transformHeader(header_, expression_, filter_column_name_, remove_filter_column_), true) , expression(std::move(expression_)) , filter_column_name(std::move(filter_column_name_)) - , remove_filter_column(remove_filter_column) + , 
remove_filter_column(remove_filter_column_) { transformed_header = getInputPort().getHeader(); expression->execute(transformed_header); diff --git a/dbms/src/Processors/Transforms/FilterTransform.h b/dbms/src/Processors/Transforms/FilterTransform.h index 32cdbb79d50..127eb5a8039 100644 --- a/dbms/src/Processors/Transforms/FilterTransform.h +++ b/dbms/src/Processors/Transforms/FilterTransform.h @@ -16,7 +16,7 @@ class FilterTransform : public ISimpleTransform { public: FilterTransform( - const Block & header, ExpressionActionsPtr expression, String filter_column_name, bool remove_filter_column); + const Block & header_, ExpressionActionsPtr expression_, String filter_column_name_, bool remove_filter_column_); String getName() const override { return "FilterTransform"; } diff --git a/dbms/src/Processors/Transforms/LimitsCheckingTransform.cpp b/dbms/src/Processors/Transforms/LimitsCheckingTransform.cpp index 092327a0d8e..5eee08efcfc 100644 --- a/dbms/src/Processors/Transforms/LimitsCheckingTransform.cpp +++ b/dbms/src/Processors/Transforms/LimitsCheckingTransform.cpp @@ -38,9 +38,9 @@ void ProcessorProfileInfo::update(const Chunk & block) bytes += block.bytes(); } -LimitsCheckingTransform::LimitsCheckingTransform(const Block & header, LocalLimits limits) - : ISimpleTransform(header, header, false) - , limits(std::move(limits)) +LimitsCheckingTransform::LimitsCheckingTransform(const Block & header_, LocalLimits limits_) + : ISimpleTransform(header_, header_, false) + , limits(std::move(limits_)) { } diff --git a/dbms/src/Processors/Transforms/LimitsCheckingTransform.h b/dbms/src/Processors/Transforms/LimitsCheckingTransform.h index a08e9ea9c67..53116446a75 100644 --- a/dbms/src/Processors/Transforms/LimitsCheckingTransform.h +++ b/dbms/src/Processors/Transforms/LimitsCheckingTransform.h @@ -30,7 +30,7 @@ public: using LimitsMode = IBlockInputStream::LimitsMode; /// LIMITS_CURRENT - LimitsCheckingTransform(const Block & header, LocalLimits limits); + LimitsCheckingTransform(const Block & header_, LocalLimits limits_); /// LIMITS_TOTAL /// LimitsCheckingTransform(const Block & header, LocalLimits limits, QueryStatus * process_list_elem); diff --git a/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp b/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp index a573a6270e9..b9067e955f4 100644 --- a/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp +++ b/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp @@ -15,10 +15,10 @@ struct ChunksToMerge : public ChunkInfo }; GroupingAggregatedTransform::GroupingAggregatedTransform( - const Block & header, size_t num_inputs, AggregatingTransformParamsPtr params) - : IProcessor(InputPorts(num_inputs, header), { Block() }) - , num_inputs(num_inputs) - , params(std::move(params)) + const Block & header_, size_t num_inputs_, AggregatingTransformParamsPtr params_) + : IProcessor(InputPorts(num_inputs_, header_), { Block() }) + , num_inputs(num_inputs_) + , params(std::move(params_)) , last_bucket_number(num_inputs, -1) , read_from_input(num_inputs, false) { @@ -285,8 +285,8 @@ void GroupingAggregatedTransform::work() } -MergingAggregatedBucketTransform::MergingAggregatedBucketTransform(AggregatingTransformParamsPtr params) - : ISimpleTransform({}, params->getHeader(), false), params(std::move(params)) +MergingAggregatedBucketTransform::MergingAggregatedBucketTransform(AggregatingTransformParamsPtr params_) + : ISimpleTransform({}, params_->getHeader(), false), 
params(std::move(params_)) { setInputNotNeededAfterRead(true); } @@ -333,10 +333,10 @@ void MergingAggregatedBucketTransform::transform(Chunk & chunk) } -SortingAggregatedTransform::SortingAggregatedTransform(size_t num_inputs, AggregatingTransformParamsPtr params) - : IProcessor(InputPorts(num_inputs, params->getHeader()), {params->getHeader()}) - , num_inputs(num_inputs) - , params(std::move(params)) +SortingAggregatedTransform::SortingAggregatedTransform(size_t num_inputs_, AggregatingTransformParamsPtr params_) + : IProcessor(InputPorts(num_inputs_, params_->getHeader()), {params_->getHeader()}) + , num_inputs(num_inputs_) + , params(std::move(params_)) , last_bucket_number(num_inputs, -1) , is_input_finished(num_inputs, false) { diff --git a/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h b/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h index eff71e954a9..0c5986c2156 100644 --- a/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h +++ b/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h @@ -14,7 +14,7 @@ namespace DB class GroupingAggregatedTransform : public IProcessor { public: - GroupingAggregatedTransform(const Block & header, size_t num_inputs, AggregatingTransformParamsPtr params); + GroupingAggregatedTransform(const Block & header_, size_t num_inputs_, AggregatingTransformParamsPtr params_); String getName() const override { return "GroupingAggregatedTransform"; } /// Special setting: in case if single source can return several chunks with same bucket. diff --git a/dbms/src/Processors/Transforms/MergingAggregatedTransform.cpp b/dbms/src/Processors/Transforms/MergingAggregatedTransform.cpp index 32b833044cd..48fda8d6b8c 100644 --- a/dbms/src/Processors/Transforms/MergingAggregatedTransform.cpp +++ b/dbms/src/Processors/Transforms/MergingAggregatedTransform.cpp @@ -5,9 +5,9 @@ namespace DB { MergingAggregatedTransform::MergingAggregatedTransform( - Block header, AggregatingTransformParamsPtr params, size_t max_threads) - : IAccumulatingTransform(std::move(header), params->getHeader()) - , params(std::move(params)), max_threads(max_threads) + Block header_, AggregatingTransformParamsPtr params_, size_t max_threads_) + : IAccumulatingTransform(std::move(header_), params_->getHeader()) + , params(std::move(params_)), max_threads(max_threads_) { } diff --git a/dbms/src/Processors/Transforms/MergingAggregatedTransform.h b/dbms/src/Processors/Transforms/MergingAggregatedTransform.h index a8c52f2b047..cb1ce01976c 100644 --- a/dbms/src/Processors/Transforms/MergingAggregatedTransform.h +++ b/dbms/src/Processors/Transforms/MergingAggregatedTransform.h @@ -12,7 +12,7 @@ namespace DB class MergingAggregatedTransform : public IAccumulatingTransform { public: - MergingAggregatedTransform(Block header, AggregatingTransformParamsPtr params, size_t max_threads); + MergingAggregatedTransform(Block header_, AggregatingTransformParamsPtr params_, size_t max_threads_); String getName() const override { return "MergingAggregatedTransform"; } protected: diff --git a/dbms/src/Processors/Transforms/MergingSortedTransform.cpp b/dbms/src/Processors/Transforms/MergingSortedTransform.cpp index b0283b0a56e..8857ec876d7 100644 --- a/dbms/src/Processors/Transforms/MergingSortedTransform.cpp +++ b/dbms/src/Processors/Transforms/MergingSortedTransform.cpp @@ -9,13 +9,13 @@ MergingSortedTransform::MergingSortedTransform( const Block & header, size_t num_inputs, const SortDescription & description_, - size_t 
max_block_size, - UInt64 limit, - bool quiet, - bool have_all_inputs) + size_t max_block_size_, + UInt64 limit_, + bool quiet_, + bool have_all_inputs_) : IProcessor(InputPorts(num_inputs, header), {header}) - , description(description_), max_block_size(max_block_size), limit(limit), quiet(quiet) - , have_all_inputs(have_all_inputs) + , description(description_), max_block_size(max_block_size_), limit(limit_), quiet(quiet_) + , have_all_inputs(have_all_inputs_) , merged_data(header), source_chunks(num_inputs), cursors(num_inputs) { auto & sample = outputs.front().getHeader(); diff --git a/dbms/src/Processors/Transforms/PartialSortingTransform.cpp b/dbms/src/Processors/Transforms/PartialSortingTransform.cpp index 0f15c34c7ff..7e217ea0a07 100644 --- a/dbms/src/Processors/Transforms/PartialSortingTransform.cpp +++ b/dbms/src/Processors/Transforms/PartialSortingTransform.cpp @@ -5,9 +5,9 @@ namespace DB { PartialSortingTransform::PartialSortingTransform( - const Block & header, SortDescription & description, UInt64 limit, bool do_count_rows) - : ISimpleTransform(header, header, false) - , description(description), limit(limit), do_count_rows(do_count_rows) + const Block & header_, SortDescription & description_, UInt64 limit_, bool do_count_rows_) + : ISimpleTransform(header_, header_, false) + , description(description_), limit(limit_), do_count_rows(do_count_rows_) { } diff --git a/dbms/src/Processors/Transforms/PartialSortingTransform.h b/dbms/src/Processors/Transforms/PartialSortingTransform.h index 645b4ebab07..905b294c0be 100644 --- a/dbms/src/Processors/Transforms/PartialSortingTransform.h +++ b/dbms/src/Processors/Transforms/PartialSortingTransform.h @@ -14,10 +14,10 @@ public: /// limit - if not 0, then you can sort each block not completely, but only `limit` first rows by order. /// When count_rows is false, getNumReadRows() will always return 0. 
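/// In other words (an illustrative sketch with hypothetical names, not code from this patch):
///
///     std::partial_sort(rows.begin(), rows.begin() + limit, rows.end(), less_by_description);
///
/// only the first `limit` rows of each chunk are guaranteed to come out ordered, which is all that the
/// downstream merge-and-limit stages of a sorting pipeline need to consume.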
PartialSortingTransform( - const Block & header, - SortDescription & description, - UInt64 limit = 0, - bool do_count_rows = true); + const Block & header_, + SortDescription & description_, + UInt64 limit_ = 0, + bool do_count_rows_ = true); String getName() const override { return "PartialSortingTransform"; } diff --git a/dbms/src/Processors/tests/processors_test.cpp b/dbms/src/Processors/tests/processors_test.cpp index b663cf319ad..519eb79e017 100644 --- a/dbms/src/Processors/tests/processors_test.cpp +++ b/dbms/src/Processors/tests/processors_test.cpp @@ -31,9 +31,9 @@ class NumbersSource : public ISource public: String getName() const override { return "Numbers"; } - NumbersSource(UInt64 start_number, unsigned sleep_useconds) + NumbersSource(UInt64 start_number, unsigned sleep_useconds_) : ISource(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared<DataTypeUInt64>(), "number" }})), - current_number(start_number), sleep_useconds(sleep_useconds) + current_number(start_number), sleep_useconds(sleep_useconds_) { } @@ -61,9 +61,9 @@ protected: public: String getName() const override { return "SleepyNumbers"; } - SleepyNumbersSource(UInt64 start_number, unsigned sleep_useconds) + SleepyNumbersSource(UInt64 start_number, unsigned sleep_useconds_) : IProcessor({}, {Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared<DataTypeUInt64>(), "number" }})}) - , output(outputs.front()), current_number(start_number), sleep_useconds(sleep_useconds) + , output(outputs.front()), current_number(start_number), sleep_useconds(sleep_useconds_) { } @@ -122,9 +122,9 @@ class PrintSink : public ISink public: String getName() const override { return "Print"; } - PrintSink(String prefix) + PrintSink(String prefix_) : ISink(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared<DataTypeUInt64>(), "number" }})), - prefix(std::move(prefix)) + prefix(std::move(prefix_)) { } diff --git a/dbms/src/Processors/tests/processors_test_aggregation.cpp b/dbms/src/Processors/tests/processors_test_aggregation.cpp index 116518391d6..a645804eba8 100644 --- a/dbms/src/Processors/tests/processors_test_aggregation.cpp +++ b/dbms/src/Processors/tests/processors_test_aggregation.cpp @@ -41,9 +41,9 @@ class NumbersSource : public ISource public: String getName() const override { return "Numbers"; } - NumbersSource(UInt64 start_number, UInt64 step, UInt64 block_size, unsigned sleep_useconds) + NumbersSource(UInt64 start_number, UInt64 step_, UInt64 block_size_, unsigned sleep_useconds_) : ISource(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared<DataTypeUInt64>(), "number" }})), - current_number(start_number), step(step), block_size(block_size), sleep_useconds(sleep_useconds) + current_number(start_number), step(step_), block_size(block_size_), sleep_useconds(sleep_useconds_) { } @@ -72,9 +72,9 @@ class PrintSink : public ISink public: String getName() const override { return "Print"; } - PrintSink(String prefix, Block header) + PrintSink(String prefix_, Block header) : ISink(std::move(header)), - prefix(std::move(prefix)) + prefix(std::move(prefix_)) { } diff --git a/dbms/src/Processors/tests/processors_test_chain.cpp b/dbms/src/Processors/tests/processors_test_chain.cpp index dfcd2c6b5ee..b6a4f0ad653 100644 --- a/dbms/src/Processors/tests/processors_test_chain.cpp +++ b/dbms/src/Processors/tests/processors_test_chain.cpp @@ -28,9 +28,9 @@ class NumbersSource : public ISource public: String getName() const override { return "Numbers"; } - NumbersSource(UInt64 start_number, unsigned sleep_useconds) + NumbersSource(UInt64 
start_number, unsigned sleep_useconds_) : ISource(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - current_number(start_number), sleep_useconds(sleep_useconds) + current_number(start_number), sleep_useconds(sleep_useconds_) { } @@ -52,12 +52,12 @@ private: class SleepyTransform : public ISimpleTransform { public: - explicit SleepyTransform(unsigned sleep_useconds) + explicit SleepyTransform(unsigned sleep_useconds_) : ISimpleTransform( Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }}), Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }}), /*skip_empty_chunks =*/ false) - , sleep_useconds(sleep_useconds) {} + , sleep_useconds(sleep_useconds_) {} String getName() const override { return "SleepyTransform"; } @@ -76,9 +76,9 @@ class PrintSink : public ISink public: String getName() const override { return "Print"; } - PrintSink(String prefix) + PrintSink(String prefix_) : ISink(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - prefix(std::move(prefix)) + prefix(std::move(prefix_)) { } diff --git a/dbms/src/Processors/tests/processors_test_expand_pipeline.cpp b/dbms/src/Processors/tests/processors_test_expand_pipeline.cpp index fa977dc7ba8..1d03d75c55d 100644 --- a/dbms/src/Processors/tests/processors_test_expand_pipeline.cpp +++ b/dbms/src/Processors/tests/processors_test_expand_pipeline.cpp @@ -26,9 +26,9 @@ class PrintSink : public ISink public: String getName() const override { return "Print"; } - PrintSink(String prefix) + PrintSink(String prefix_) : ISink(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - prefix(std::move(prefix)) + prefix(std::move(prefix_)) { } @@ -64,9 +64,9 @@ class OneNumberSource : public ISource public: String getName() const override { return "OneNumber"; } - OneNumberSource(UInt64 number) + OneNumberSource(UInt64 number_) : ISource(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - number(number) + number(number_) { } diff --git a/dbms/src/Processors/tests/processors_test_merge.cpp b/dbms/src/Processors/tests/processors_test_merge.cpp index 00e322430e5..3842286bc59 100644 --- a/dbms/src/Processors/tests/processors_test_merge.cpp +++ b/dbms/src/Processors/tests/processors_test_merge.cpp @@ -161,9 +161,9 @@ class NumbersSource : public ISource public: String getName() const override { return "Numbers"; } - NumbersSource(UInt64 start_number, UInt64 step, unsigned sleep_useconds) + NumbersSource(UInt64 start_number, UInt64 step_, unsigned sleep_useconds_) : ISource(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - current_number(start_number), step(step), sleep_useconds(sleep_useconds) + current_number(start_number), step(step_), sleep_useconds(sleep_useconds_) { } @@ -187,12 +187,12 @@ private: class SleepyTransform : public ISimpleTransform { public: - explicit SleepyTransform(unsigned sleep_useconds) + explicit SleepyTransform(unsigned sleep_useconds_) : ISimpleTransform( Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }}), Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }}), false) - , sleep_useconds(sleep_useconds) {} + , sleep_useconds(sleep_useconds_) {} String getName() const override { return "SleepyTransform"; } @@ -211,9 +211,9 @@ class PrintSink : public ISink public: String getName() const override { return "Print"; 
} - PrintSink(String prefix) + PrintSink(String prefix_) : ISink(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - prefix(std::move(prefix)) + prefix(std::move(prefix_)) { } diff --git a/dbms/src/Processors/tests/processors_test_merge_sorting_transform.cpp b/dbms/src/Processors/tests/processors_test_merge_sorting_transform.cpp index 258e89e67e7..a5059011e9b 100644 --- a/dbms/src/Processors/tests/processors_test_merge_sorting_transform.cpp +++ b/dbms/src/Processors/tests/processors_test_merge_sorting_transform.cpp @@ -31,9 +31,9 @@ class NumbersSource : public ISource public: String getName() const override { return "Numbers"; } - NumbersSource(UInt64 count, UInt64 block_size, unsigned sleep_useconds) + NumbersSource(UInt64 count_, UInt64 block_size_, unsigned sleep_useconds_) : ISource(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - count(count), block_size(block_size), sleep_useconds(sleep_useconds) + count(count_), block_size(block_size_), sleep_useconds(sleep_useconds_) { } diff --git a/dbms/src/Processors/tests/processors_test_merging_sorted_transform.cpp b/dbms/src/Processors/tests/processors_test_merging_sorted_transform.cpp index 214044dfd31..af27973e3fd 100644 --- a/dbms/src/Processors/tests/processors_test_merging_sorted_transform.cpp +++ b/dbms/src/Processors/tests/processors_test_merging_sorted_transform.cpp @@ -29,9 +29,9 @@ class NumbersSource : public ISource public: String getName() const override { return "Numbers"; } - NumbersSource(UInt64 start_number, UInt64 step, UInt64 block_size, unsigned sleep_useconds) + NumbersSource(UInt64 start_number, UInt64 step_, UInt64 block_size_, unsigned sleep_useconds_) : ISource(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - current_number(start_number), step(step), block_size(block_size), sleep_useconds(sleep_useconds) + current_number(start_number), step(step_), block_size(block_size_), sleep_useconds(sleep_useconds_) { } @@ -59,12 +59,12 @@ private: class SleepyTransform : public ISimpleTransform { public: - explicit SleepyTransform(unsigned sleep_useconds) + explicit SleepyTransform(unsigned sleep_useconds_) : ISimpleTransform( Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }}), Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }}), false) - , sleep_useconds(sleep_useconds) {} + , sleep_useconds(sleep_useconds_) {} String getName() const override { return "SleepyTransform"; } @@ -83,9 +83,9 @@ class PrintSink : public ISink public: String getName() const override { return "Print"; } - PrintSink(String prefix) + PrintSink(String prefix_) : ISink(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - prefix(std::move(prefix)) + prefix(std::move(prefix_)) { } diff --git a/dbms/src/Storages/AlterCommands.h b/dbms/src/Storages/AlterCommands.h index 4905b80f92f..130e11208cd 100644 --- a/dbms/src/Storages/AlterCommands.h +++ b/dbms/src/Storages/AlterCommands.h @@ -72,13 +72,13 @@ struct AlterCommand CompressionCodecPtr codec; AlterCommand() = default; - AlterCommand(const Type type, const String & column_name, const DataTypePtr & data_type, - const ColumnDefaultKind default_kind, const ASTPtr & default_expression, - const String & after_column, const String & comment, - const bool if_exists, const bool if_not_exists) - : type{type}, column_name{column_name}, data_type{data_type}, default_kind{default_kind}, - 
default_expression{default_expression}, comment(comment), after_column{after_column}, - if_exists(if_exists), if_not_exists(if_not_exists) + AlterCommand(const Type type_, const String & column_name_, const DataTypePtr & data_type_, + const ColumnDefaultKind default_kind_, const ASTPtr & default_expression_, + const String & after_column_, const String & comment_, + const bool if_exists_, const bool if_not_exists_) + : type{type_}, column_name{column_name_}, data_type{data_type_}, default_kind{default_kind_}, + default_expression{default_expression_}, comment(comment_), after_column{after_column_}, + if_exists(if_exists_), if_not_exists(if_not_exists_) {} static std::optional<AlterCommand> parse(const ASTAlterCommand * command); diff --git a/dbms/src/Storages/Distributed/DirectoryMonitor.cpp b/dbms/src/Storages/Distributed/DirectoryMonitor.cpp index ff780f6f7b7..7eefc68f3a8 100644 --- a/dbms/src/Storages/Distributed/DirectoryMonitor.cpp +++ b/dbms/src/Storages/Distributed/DirectoryMonitor.cpp @@ -60,13 +60,13 @@ namespace StorageDistributedDirectoryMonitor::StorageDistributedDirectoryMonitor( - StorageDistributed & storage, const std::string & name, const ConnectionPoolPtr & pool, ActionBlocker & monitor_blocker) - : storage(storage), pool{pool}, path{storage.path + name + '/'} + StorageDistributed & storage_, const std::string & name_, const ConnectionPoolPtr & pool_, ActionBlocker & monitor_blocker_) + : storage(storage_), pool{pool_}, path{storage.path + name_ + '/'} , current_batch_file_path{path + "current_batch.txt"} , default_sleep_time{storage.global_context.getSettingsRef().distributed_directory_monitor_sleep_time_ms.totalMilliseconds()} , sleep_time{default_sleep_time} , log{&Logger::get(getLoggerName())} - , monitor_blocker(monitor_blocker) + , monitor_blocker(monitor_blocker_) { const Settings & settings = storage.global_context.getSettingsRef(); should_batch_inserts = settings.distributed_directory_monitor_batch_inserts; diff --git a/dbms/src/Storages/Distributed/DirectoryMonitor.h b/dbms/src/Storages/Distributed/DirectoryMonitor.h index 9416db9be2c..f185d64b66f 100644 --- a/dbms/src/Storages/Distributed/DirectoryMonitor.h +++ b/dbms/src/Storages/Distributed/DirectoryMonitor.h @@ -20,7 +20,7 @@ class StorageDistributedDirectoryMonitor { public: StorageDistributedDirectoryMonitor( - StorageDistributed & storage, const std::string & name, const ConnectionPoolPtr & pool, ActionBlocker & monitor_blocker); + StorageDistributed & storage_, const std::string & name_, const ConnectionPoolPtr & pool_, ActionBlocker & monitor_blocker_); ~StorageDistributedDirectoryMonitor(); diff --git a/dbms/src/Storages/Distributed/DistributedBlockOutputStream.cpp b/dbms/src/Storages/Distributed/DistributedBlockOutputStream.cpp index 97104fc8f71..b1b63258f06 100644 --- a/dbms/src/Storages/Distributed/DistributedBlockOutputStream.cpp +++ b/dbms/src/Storages/Distributed/DistributedBlockOutputStream.cpp @@ -59,9 +59,9 @@ namespace ErrorCodes DistributedBlockOutputStream::DistributedBlockOutputStream( - const Context & context_, StorageDistributed & storage, const ASTPtr & query_ast, const ClusterPtr & cluster_, + const Context & context_, StorageDistributed & storage_, const ASTPtr & query_ast_, const ClusterPtr & cluster_, bool insert_sync_, UInt64 insert_timeout_) - : context(context_), storage(storage), query_ast(query_ast), query_string(queryToString(query_ast)), + : context(context_), storage(storage_), query_ast(query_ast_), query_string(queryToString(query_ast_)), cluster(cluster_), insert_sync(insert_sync_),
insert_timeout(insert_timeout_), log(&Logger::get("DistributedBlockOutputStream")) { } diff --git a/dbms/src/Storages/Distributed/DistributedBlockOutputStream.h b/dbms/src/Storages/Distributed/DistributedBlockOutputStream.h index f71585b8026..0d5a2e08b11 100644 --- a/dbms/src/Storages/Distributed/DistributedBlockOutputStream.h +++ b/dbms/src/Storages/Distributed/DistributedBlockOutputStream.h @@ -35,7 +35,7 @@ class StorageDistributed; class DistributedBlockOutputStream : public IBlockOutputStream { public: - DistributedBlockOutputStream(const Context & context_, StorageDistributed & storage, const ASTPtr & query_ast, + DistributedBlockOutputStream(const Context & context_, StorageDistributed & storage_, const ASTPtr & query_ast_, const ClusterPtr & cluster_, bool insert_sync_, UInt64 insert_timeout_); Block getHeader() const override; @@ -98,8 +98,8 @@ private: struct JobReplica { JobReplica() = default; - JobReplica(size_t shard_index, size_t replica_index, bool is_local_job, const Block & sample_block) - : shard_index(shard_index), replica_index(replica_index), is_local_job(is_local_job), current_shard_block(sample_block.cloneEmpty()) {} + JobReplica(size_t shard_index_, size_t replica_index_, bool is_local_job_, const Block & sample_block) - : shard_index(shard_index_), replica_index(replica_index_), is_local_job(is_local_job_), current_shard_block(sample_block.cloneEmpty()) {} size_t shard_index = 0; size_t replica_index = 0; diff --git a/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp b/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp index 19b496e0e60..624936c1626 100644 --- a/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp +++ b/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp @@ -51,9 +51,9 @@ void KafkaBlockInputStream::readPrefixImpl() buffer->subBufferAs<ReadBufferFromKafkaConsumer>()->subscribe(storage.getTopics()); - const auto & limits = getLimits(); + const auto & limits_ = getLimits(); const size_t poll_timeout = buffer->subBufferAs<ReadBufferFromKafkaConsumer>()->pollTimeout(); - size_t rows_portion_size = poll_timeout ? std::min(max_block_size, limits.max_execution_time.totalMilliseconds() / poll_timeout) : max_block_size; + size_t rows_portion_size = poll_timeout ? std::min(max_block_size, limits_.max_execution_time.totalMilliseconds() / poll_timeout) : max_block_size; rows_portion_size = std::max(rows_portion_size, 1ul); auto non_virtual_header = storage.getSampleBlockNonMaterialized(); /// FIXME: add materialized columns support @@ -67,7 +67,7 @@ void KafkaBlockInputStream::readPrefixImpl() auto child = FormatFactory::instance().getInput( storage.getFormatName(), *buffer, non_virtual_header, context, max_block_size, rows_portion_size, read_callback); - child->setLimits(limits); + child->setLimits(limits_); addChild(child); broken = true; diff --git a/dbms/src/Storages/MarkCache.h b/dbms/src/Storages/MarkCache.h index 3ddef1b3b46..9ce04c01e43 100644 --- a/dbms/src/Storages/MarkCache.h +++ b/dbms/src/Storages/MarkCache.h @@ -38,8 +38,8 @@ private: using Base = LRUCache<UInt128, MarksInCompressedFile, UInt128TrivialHash, MarksWeightFunction>; public: - MarkCache(size_t max_size_in_bytes, const Delay & expiration_delay) - : Base(max_size_in_bytes, expiration_delay) {} + MarkCache(size_t max_size_in_bytes, const Delay & expiration_delay_) + : Base(max_size_in_bytes, expiration_delay_) {} /// Calculate key from path to file and offset.
static UInt128 hash(const String & path_to_file) diff --git a/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.cpp b/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.cpp index fc19fbd6792..2bd0ebb61ea 100644 --- a/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.cpp +++ b/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.cpp @@ -309,20 +309,20 @@ void IMergedBlockOutputStream::writeFinalMark( IMergedBlockOutputStream::ColumnStream::ColumnStream( const String & escaped_column_name_, - const String & data_path, + const String & data_path_, const std::string & data_file_extension_, - const std::string & marks_path, + const std::string & marks_path_, const std::string & marks_file_extension_, - const CompressionCodecPtr & compression_codec, - size_t max_compress_block_size, - size_t estimated_size, - size_t aio_threshold) : + const CompressionCodecPtr & compression_codec_, + size_t max_compress_block_size_, + size_t estimated_size_, + size_t aio_threshold_) : escaped_column_name(escaped_column_name_), data_file_extension{data_file_extension_}, marks_file_extension{marks_file_extension_}, - plain_file(createWriteBufferFromFileBase(data_path + data_file_extension, estimated_size, aio_threshold, max_compress_block_size)), - plain_hashing(*plain_file), compressed_buf(plain_hashing, compression_codec), compressed(compressed_buf), - marks_file(marks_path + marks_file_extension, 4096, O_TRUNC | O_CREAT | O_WRONLY), marks(marks_file) + plain_file(createWriteBufferFromFileBase(data_path_ + data_file_extension, estimated_size_, aio_threshold_, max_compress_block_size_)), + plain_hashing(*plain_file), compressed_buf(plain_hashing, compression_codec_), compressed(compressed_buf), + marks_file(marks_path_ + marks_file_extension, 4096, O_TRUNC | O_CREAT | O_WRONLY), marks(marks_file) { } diff --git a/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.h b/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.h index b9d083f3a19..5f1cdc60107 100644 --- a/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.h +++ b/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.h @@ -33,14 +33,14 @@ protected: { ColumnStream( const String & escaped_column_name_, - const String & data_path, + const String & data_path_, const std::string & data_file_extension_, - const std::string & marks_path, + const std::string & marks_path_, const std::string & marks_file_extension_, - const CompressionCodecPtr & compression_codec, - size_t max_compress_block_size, - size_t estimated_size, - size_t aio_threshold); + const CompressionCodecPtr & compression_codec_, + size_t max_compress_block_size_, + size_t estimated_size_, + size_t aio_threshold_); String escaped_column_name; std::string data_file_extension; diff --git a/dbms/src/Storages/MergeTree/LevelMergeSelector.h b/dbms/src/Storages/MergeTree/LevelMergeSelector.h index fa35500dcae..4ce6624bea1 100644 --- a/dbms/src/Storages/MergeTree/LevelMergeSelector.h +++ b/dbms/src/Storages/MergeTree/LevelMergeSelector.h @@ -17,7 +17,7 @@ public: size_t parts_to_merge = 10; }; - explicit LevelMergeSelector(const Settings & settings) : settings(settings) {} + explicit LevelMergeSelector(const Settings & settings_) : settings(settings_) {} PartsInPartition select( const Partitions & partitions, diff --git a/dbms/src/Storages/MergeTree/MarkRange.h b/dbms/src/Storages/MergeTree/MarkRange.h index 8be0305b807..657ffe32f78 100644 --- a/dbms/src/Storages/MergeTree/MarkRange.h +++ b/dbms/src/Storages/MergeTree/MarkRange.h @@ -16,7 +16,7 @@ struct MarkRange size_t end; MarkRange() = 
default; - MarkRange(const size_t begin, const size_t end) : begin{begin}, end{end} {} + MarkRange(const size_t begin_, const size_t end_) : begin{begin_}, end{end_} {} }; using MarkRanges = std::vector<MarkRange>; diff --git a/dbms/src/Storages/MergeTree/MergeList.cpp b/dbms/src/Storages/MergeTree/MergeList.cpp index 7d4d7f92496..3e4537ad45c 100644 --- a/dbms/src/Storages/MergeTree/MergeList.cpp +++ b/dbms/src/Storages/MergeTree/MergeList.cpp @@ -14,8 +14,8 @@ namespace CurrentMetrics namespace DB { -MergeListElement::MergeListElement(const std::string & database, const std::string & table, const FutureMergedMutatedPart & future_part) - : database{database}, table{table}, partition_id{future_part.part_info.partition_id} +MergeListElement::MergeListElement(const std::string & database_, const std::string & table_, const FutureMergedMutatedPart & future_part) + : database{database_}, table{table_}, partition_id{future_part.part_info.partition_id} , result_part_name{future_part.name} , result_data_version{future_part.part_info.getDataVersion()} , num_parts{future_part.parts.size()} diff --git a/dbms/src/Storages/MergeTree/MergeList.h b/dbms/src/Storages/MergeTree/MergeList.h index dc2d1c80682..0a25277a6ed 100644 --- a/dbms/src/Storages/MergeTree/MergeList.h +++ b/dbms/src/Storages/MergeTree/MergeList.h @@ -110,7 +110,7 @@ public: MergeListEntry(const MergeListEntry &) = delete; MergeListEntry & operator=(const MergeListEntry &) = delete; - MergeListEntry(MergeList & list, const container_t::iterator it) : list(list), it{it} {} + MergeListEntry(MergeList & list_, const container_t::iterator it_) : list(list_), it{it_} {} ~MergeListEntry(); MergeListElement * operator->() { return &*it; } diff --git a/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp b/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp index 10d7e3750e4..0489182fe55 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp @@ -20,27 +20,27 @@ namespace ErrorCodes MergeTreeBaseSelectBlockInputStream::MergeTreeBaseSelectBlockInputStream( - const MergeTreeData & storage, - const PrewhereInfoPtr & prewhere_info, - UInt64 max_block_size_rows, - UInt64 preferred_block_size_bytes, - UInt64 preferred_max_column_in_block_size_bytes, - UInt64 min_bytes_to_use_direct_io, - UInt64 max_read_buffer_size, - bool use_uncompressed_cache, - bool save_marks_in_cache, - const Names & virt_column_names) + const MergeTreeData & storage_, + const PrewhereInfoPtr & prewhere_info_, + UInt64 max_block_size_rows_, + UInt64 preferred_block_size_bytes_, + UInt64 preferred_max_column_in_block_size_bytes_, + UInt64 min_bytes_to_use_direct_io_, + UInt64 max_read_buffer_size_, + bool use_uncompressed_cache_, + bool save_marks_in_cache_, + const Names & virt_column_names_) : - storage(storage), - prewhere_info(prewhere_info), - max_block_size_rows(max_block_size_rows), - preferred_block_size_bytes(preferred_block_size_bytes), - preferred_max_column_in_block_size_bytes(preferred_max_column_in_block_size_bytes), - min_bytes_to_use_direct_io(min_bytes_to_use_direct_io), - max_read_buffer_size(max_read_buffer_size), - use_uncompressed_cache(use_uncompressed_cache), - save_marks_in_cache(save_marks_in_cache), - virt_column_names(virt_column_names) + storage(storage_), + prewhere_info(prewhere_info_), + max_block_size_rows(max_block_size_rows_), + preferred_block_size_bytes(preferred_block_size_bytes_), +
preferred_max_column_in_block_size_bytes(preferred_max_column_in_block_size_bytes_), + min_bytes_to_use_direct_io(min_bytes_to_use_direct_io_), + max_read_buffer_size(max_read_buffer_size_), + use_uncompressed_cache(use_uncompressed_cache_), + save_marks_in_cache(save_marks_in_cache_), + virt_column_names(virt_column_names_) { } diff --git a/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.h b/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.h index a7e37f68f0c..640f73652e4 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.h +++ b/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.h @@ -18,16 +18,16 @@ class MergeTreeBaseSelectBlockInputStream : public IBlockInputStream { public: MergeTreeBaseSelectBlockInputStream( - const MergeTreeData & storage, - const PrewhereInfoPtr & prewhere_info, - UInt64 max_block_size_rows, - UInt64 preferred_block_size_bytes, - UInt64 preferred_max_column_in_block_size_bytes, - UInt64 min_bytes_to_use_direct_io, - UInt64 max_read_buffer_size, - bool use_uncompressed_cache, - bool save_marks_in_cache = true, - const Names & virt_column_names = {}); + const MergeTreeData & storage_, + const PrewhereInfoPtr & prewhere_info_, + UInt64 max_block_size_rows_, + UInt64 preferred_block_size_bytes_, + UInt64 preferred_max_column_in_block_size_bytes_, + UInt64 min_bytes_to_use_direct_io_, + UInt64 max_read_buffer_size_, + bool use_uncompressed_cache_, + bool save_marks_in_cache_ = true, + const Names & virt_column_names_ = {}); ~MergeTreeBaseSelectBlockInputStream() override; diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.h b/dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.h index ace24b474f1..8f957d631d3 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.h +++ b/dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.h @@ -13,8 +13,8 @@ class StorageMergeTree; class MergeTreeBlockOutputStream : public IBlockOutputStream { public: - MergeTreeBlockOutputStream(StorageMergeTree & storage_, size_t max_parts_per_block) - : storage(storage_), max_parts_per_block(max_parts_per_block) {} + MergeTreeBlockOutputStream(StorageMergeTree & storage_, size_t max_parts_per_block_) + : storage(storage_), max_parts_per_block(max_parts_per_block_) {} Block getHeader() const override; void write(const Block & block) override; diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp b/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp index 96ece027694..7dc9a40e89a 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp @@ -66,13 +66,13 @@ NameSet injectRequiredColumns(const MergeTreeData & storage, const MergeTreeData MergeTreeReadTask::MergeTreeReadTask( - const MergeTreeData::DataPartPtr & data_part, const MarkRanges & mark_ranges, const size_t part_index_in_query, - const Names & ordered_names, const NameSet & column_name_set, const NamesAndTypesList & columns, - const NamesAndTypesList & pre_columns, const bool remove_prewhere_column, const bool should_reorder, - MergeTreeBlockSizePredictorPtr && size_predictor) - : data_part{data_part}, mark_ranges{mark_ranges}, part_index_in_query{part_index_in_query}, - ordered_names{ordered_names}, column_name_set{column_name_set}, columns{columns}, pre_columns{pre_columns}, - remove_prewhere_column{remove_prewhere_column}, should_reorder{should_reorder}, size_predictor{std::move(size_predictor)} + const MergeTreeData::DataPartPtr & data_part_, 
const MarkRanges & mark_ranges_, const size_t part_index_in_query_, + const Names & ordered_names_, const NameSet & column_name_set_, const NamesAndTypesList & columns_, + const NamesAndTypesList & pre_columns_, const bool remove_prewhere_column_, const bool should_reorder_, + MergeTreeBlockSizePredictorPtr && size_predictor_) + : data_part{data_part_}, mark_ranges{mark_ranges_}, part_index_in_query{part_index_in_query_}, + ordered_names{ordered_names_}, column_name_set{column_name_set_}, columns{columns_}, pre_columns{pre_columns_}, + remove_prewhere_column{remove_prewhere_column_}, should_reorder{should_reorder_}, size_predictor{std::move(size_predictor_)} {} MergeTreeReadTask::~MergeTreeReadTask() = default; diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.h b/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.h index f0e24d96add..a031255b3ab 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.h +++ b/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.h @@ -56,10 +56,10 @@ struct MergeTreeReadTask bool isFinished() const { return mark_ranges.empty() && range_reader.isCurrentRangeFinished(); } MergeTreeReadTask( - const MergeTreeData::DataPartPtr & data_part, const MarkRanges & mark_ranges, const size_t part_index_in_query, - const Names & ordered_names, const NameSet & column_name_set, const NamesAndTypesList & columns, - const NamesAndTypesList & pre_columns, const bool remove_prewhere_column, const bool should_reorder, - MergeTreeBlockSizePredictorPtr && size_predictor); + const MergeTreeData::DataPartPtr & data_part_, const MarkRanges & mark_ranges_, const size_t part_index_in_query_, + const Names & ordered_names_, const NameSet & column_name_set_, const NamesAndTypesList & columns_, + const NamesAndTypesList & pre_columns_, const bool remove_prewhere_column_, const bool should_reorder_, + MergeTreeBlockSizePredictorPtr && size_predictor_); virtual ~MergeTreeReadTask(); }; diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPart.h b/dbms/src/Storages/MergeTree/MergeTreeDataPart.h index f41ea8af424..e46a64f0546 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataPart.h +++ b/dbms/src/Storages/MergeTree/MergeTreeDataPart.h @@ -153,7 +153,7 @@ struct MergeTreeDataPart struct StatesFilter { std::initializer_list<State> affordable_states; - StatesFilter(const std::initializer_list<State> & affordable_states) : affordable_states(affordable_states) {} + StatesFilter(const std::initializer_list<State> & affordable_states_) : affordable_states(affordable_states_) {} bool operator() (const std::shared_ptr<const MergeTreeDataPart> & part) const { diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp index b86da56649d..ba44053afa7 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp @@ -40,8 +40,8 @@ bool MergeTreeIndexBloomFilter::mayBenefitFromIndexForIn(const ASTPtr & node) co { const String & column_name = node->getColumnName(); - for (const auto & name : columns) - if (column_name == name) + for (const auto & cname : columns) + if (column_name == cname) return true; if (const auto * func = typeid_cast<const ASTFunction *>(node.get())) diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp index 9c8a9d4b41c..f2892fc51a7 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp @@
-76,11 +76,11 @@ bool maybeTrueOnBloomFilter(const IColumn * hash_column, const BloomFilterPtr & } MergeTreeIndexConditionBloomFilter::MergeTreeIndexConditionBloomFilter( - const SelectQueryInfo & info, const Context & context, const Block & header, size_t hash_functions) - : header(header), context(context), query_info(info), hash_functions(hash_functions) + const SelectQueryInfo & info_, const Context & context_, const Block & header_, size_t hash_functions_) + : header(header_), context(context_), query_info(info_), hash_functions(hash_functions_) { auto atomFromAST = [this](auto & node, auto &, auto & constants, auto & out) { return traverseAtomAST(node, constants, out); }; - rpn = std::move(RPNBuilder<RPNElement>(info, context, atomFromAST).extractRPN()); + rpn = std::move(RPNBuilder<RPNElement>(info_, context, atomFromAST).extractRPN()); } bool MergeTreeIndexConditionBloomFilter::alwaysUnknownOrTrue() const diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h b/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h index 6c268cadbb6..d3c62bbcaa7 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h @@ -37,7 +37,7 @@ public: std::vector<std::pair<size_t, ColumnPtr>> predicate; }; - MergeTreeIndexConditionBloomFilter(const SelectQueryInfo & info, const Context & context, const Block & header, size_t hash_functions); + MergeTreeIndexConditionBloomFilter(const SelectQueryInfo & info_, const Context & context_, const Block & header_, size_t hash_functions_); bool alwaysUnknownOrTrue() const override; diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp index be9994ece64..3625c6f1aa5 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp @@ -61,9 +61,9 @@ bool MergeTreeConditionFullText::createFunctionEqualsCondition(RPNElement & out, return true; } -MergeTreeIndexGranuleFullText::MergeTreeIndexGranuleFullText(const MergeTreeIndexFullText & index) +MergeTreeIndexGranuleFullText::MergeTreeIndexGranuleFullText(const MergeTreeIndexFullText & index_) : IMergeTreeIndexGranule() - , index(index) + , index(index_) , bloom_filters( index.columns.size(), BloomFilter(index.bloom_filter_size, index.bloom_filter_hashes, index.seed)) , has_elems(false) {} @@ -87,8 +87,8 @@ void MergeTreeIndexGranuleFullText::deserializeBinary(ReadBuffer & istr) } -MergeTreeIndexAggregatorFullText::MergeTreeIndexAggregatorFullText(const MergeTreeIndexFullText & index) - : index(index), granule(std::make_shared<MergeTreeIndexGranuleFullText>(index)) {} +MergeTreeIndexAggregatorFullText::MergeTreeIndexAggregatorFullText(const MergeTreeIndexFullText & index_) + : index(index_), granule(std::make_shared<MergeTreeIndexGranuleFullText>(index)) {} MergeTreeIndexGranulePtr MergeTreeIndexAggregatorFullText::getGranuleAndReset() { diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.h b/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.h index f6230134596..e276d811cd9 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.h +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.h @@ -16,7 +16,7 @@ class MergeTreeIndexFullText; struct MergeTreeIndexGranuleFullText : public IMergeTreeIndexGranule { explicit MergeTreeIndexGranuleFullText( - const MergeTreeIndexFullText & index); + const MergeTreeIndexFullText & index_); ~MergeTreeIndexGranuleFullText() override = default; diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp
b/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp index 4eee7309811..7d681825b0c 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp @@ -11,25 +11,25 @@ namespace DB { -MergeTreeIndexGranuleBloomFilter::MergeTreeIndexGranuleBloomFilter(size_t bits_per_row, size_t hash_functions, size_t index_columns) - : bits_per_row(bits_per_row), hash_functions(hash_functions) +MergeTreeIndexGranuleBloomFilter::MergeTreeIndexGranuleBloomFilter(size_t bits_per_row_, size_t hash_functions_, size_t index_columns_) + : bits_per_row(bits_per_row_), hash_functions(hash_functions_) { total_rows = 0; - bloom_filters.resize(index_columns); + bloom_filters.resize(index_columns_); } MergeTreeIndexGranuleBloomFilter::MergeTreeIndexGranuleBloomFilter( - size_t bits_per_row, size_t hash_functions, size_t total_rows, const Blocks & granule_index_blocks) - : total_rows(total_rows), bits_per_row(bits_per_row), hash_functions(hash_functions) + size_t bits_per_row_, size_t hash_functions_, size_t total_rows_, const Blocks & granule_index_blocks_) + : total_rows(total_rows_), bits_per_row(bits_per_row_), hash_functions(hash_functions_) { - if (granule_index_blocks.empty() || !total_rows) + if (granule_index_blocks_.empty() || !total_rows) throw Exception("LOGICAL ERROR: granule_index_blocks empty or total_rows is zero.", ErrorCodes::LOGICAL_ERROR); - assertGranuleBlocksStructure(granule_index_blocks); + assertGranuleBlocksStructure(granule_index_blocks_); - for (size_t index = 0; index < granule_index_blocks.size(); ++index) + for (size_t index = 0; index < granule_index_blocks_.size(); ++index) { - Block granule_index_block = granule_index_blocks[index]; + Block granule_index_block = granule_index_blocks_[index]; if (unlikely(!granule_index_block || !granule_index_block.rows())) throw Exception("LOGICAL ERROR: granule_index_block is empty.", ErrorCodes::LOGICAL_ERROR); diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h b/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h index 79670678e79..673c5ac4706 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h @@ -9,9 +9,9 @@ namespace DB class MergeTreeIndexGranuleBloomFilter : public IMergeTreeIndexGranule { public: - MergeTreeIndexGranuleBloomFilter(size_t bits_per_row, size_t hash_functions, size_t index_columns); + MergeTreeIndexGranuleBloomFilter(size_t bits_per_row_, size_t hash_functions_, size_t index_columns_); - MergeTreeIndexGranuleBloomFilter(size_t bits_per_row, size_t hash_functions, size_t total_rows, const Blocks & granule_index_blocks); + MergeTreeIndexGranuleBloomFilter(size_t bits_per_row_, size_t hash_functions_, size_t total_rows_, const Blocks & granule_index_blocks_); bool empty() const override; diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp index 0d9c4722a25..360e69eacc6 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp @@ -16,12 +16,12 @@ namespace ErrorCodes } -MergeTreeIndexGranuleMinMax::MergeTreeIndexGranuleMinMax(const MergeTreeIndexMinMax & index) - : IMergeTreeIndexGranule(), index(index), parallelogram() {} +MergeTreeIndexGranuleMinMax::MergeTreeIndexGranuleMinMax(const MergeTreeIndexMinMax & index_) + : IMergeTreeIndexGranule(), index(index_), parallelogram() 
{} MergeTreeIndexGranuleMinMax::MergeTreeIndexGranuleMinMax( - const MergeTreeIndexMinMax & index, std::vector<Range> && parallelogram) - : IMergeTreeIndexGranule(), index(index), parallelogram(std::move(parallelogram)) {} + const MergeTreeIndexMinMax & index_, std::vector<Range> && parallelogram_) + : IMergeTreeIndexGranule(), index(index_), parallelogram(std::move(parallelogram_)) {} void MergeTreeIndexGranuleMinMax::serializeBinary(WriteBuffer & ostr) const { @@ -83,8 +83,8 @@ void MergeTreeIndexGranuleMinMax::deserializeBinary(ReadBuffer & istr) } -MergeTreeIndexAggregatorMinMax::MergeTreeIndexAggregatorMinMax(const MergeTreeIndexMinMax & index) - : index(index) {} +MergeTreeIndexAggregatorMinMax::MergeTreeIndexAggregatorMinMax(const MergeTreeIndexMinMax & index_) + : index(index_) {} MergeTreeIndexGranulePtr MergeTreeIndexAggregatorMinMax::getGranuleAndReset() { @@ -125,8 +125,8 @@ void MergeTreeIndexAggregatorMinMax::update(const Block & block, size_t * pos, s MergeTreeIndexConditionMinMax::MergeTreeIndexConditionMinMax( const SelectQueryInfo &query, const Context &context, - const MergeTreeIndexMinMax &index) - : IMergeTreeIndexCondition(), index(index), condition(query, context, index.columns, index.expr) {} + const MergeTreeIndexMinMax &index_) + : IMergeTreeIndexCondition(), index(index_), condition(query, context, index.columns, index.expr) {} bool MergeTreeIndexConditionMinMax::alwaysUnknownOrTrue() const { @@ -169,8 +169,8 @@ bool MergeTreeIndexMinMax::mayBenefitFromIndexForIn(const ASTPtr & node) const { const String column_name = node->getColumnName(); - for (const auto & name : columns) - if (column_name == name) + for (const auto & cname : columns) + if (column_name == cname) return true; if (const auto * func = typeid_cast<const ASTFunction *>(node.get())) diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.h b/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.h index 5b514cdc738..873ea6ec98b 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.h +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.h @@ -15,8 +15,8 @@ class MergeTreeIndexMinMax; struct MergeTreeIndexGranuleMinMax : public IMergeTreeIndexGranule { - explicit MergeTreeIndexGranuleMinMax(const MergeTreeIndexMinMax & index); - MergeTreeIndexGranuleMinMax(const MergeTreeIndexMinMax & index, std::vector<Range> && parallelogram); + explicit MergeTreeIndexGranuleMinMax(const MergeTreeIndexMinMax & index_); + MergeTreeIndexGranuleMinMax(const MergeTreeIndexMinMax & index_, std::vector<Range> && parallelogram_); ~MergeTreeIndexGranuleMinMax() override = default; void serializeBinary(WriteBuffer & ostr) const override; diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexReader.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexReader.cpp index cd2725719a7..05f09041fed 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexReader.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexReader.cpp @@ -5,12 +5,12 @@ namespace DB { MergeTreeIndexReader::MergeTreeIndexReader( - MergeTreeIndexPtr index, MergeTreeData::DataPartPtr part, size_t marks_count, const MarkRanges & all_mark_ranges) - : index(index), stream( - part->getFullPath() + index->getFileName(), ".idx", marks_count, - all_mark_ranges, nullptr, false, nullptr, - part->getFileSizeOrZero(index->getFileName() + ".idx"), 0, DBMS_DEFAULT_BUFFER_SIZE, - &part->index_granularity_info, + MergeTreeIndexPtr index_, MergeTreeData::DataPartPtr part_, size_t marks_count_, const MarkRanges & all_mark_ranges_) + : index(index_), stream( + part_->getFullPath() + index->getFileName(), ".idx", marks_count_,
+ all_mark_ranges_, nullptr, false, nullptr, + part_->getFileSizeOrZero(index->getFileName() + ".idx"), 0, DBMS_DEFAULT_BUFFER_SIZE, + &part_->index_granularity_info, ReadBufferFromFileBase::ProfileCallback{}, CLOCK_MONOTONIC_COARSE) { stream.seekToStart(); diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexReader.h b/dbms/src/Storages/MergeTree/MergeTreeIndexReader.h index 38dbd69f6e6..9b5b1c7fcb2 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexReader.h +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexReader.h @@ -11,10 +11,10 @@ class MergeTreeIndexReader { public: MergeTreeIndexReader( - MergeTreeIndexPtr index, - MergeTreeData::DataPartPtr part, - size_t marks_count, - const MarkRanges & all_mark_ranges); + MergeTreeIndexPtr index_, + MergeTreeData::DataPartPtr part_, + size_t marks_count_, + const MarkRanges & all_mark_ranges_); void seek(size_t mark); diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexSet.cpp index 6d3f4dc9be7..40aba822353 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -21,16 +21,16 @@ namespace ErrorCodes const Field UNKNOWN_FIELD(3u); -MergeTreeIndexGranuleSet::MergeTreeIndexGranuleSet(const MergeTreeIndexSet & index) +MergeTreeIndexGranuleSet::MergeTreeIndexGranuleSet(const MergeTreeIndexSet & index_) : IMergeTreeIndexGranule() - , index(index) + , index(index_) , block(index.header.cloneEmpty()) {} MergeTreeIndexGranuleSet::MergeTreeIndexGranuleSet( - const MergeTreeIndexSet & index, MutableColumns && mutable_columns) + const MergeTreeIndexSet & index_, MutableColumns && mutable_columns_) : IMergeTreeIndexGranule() - , index(index) - , block(index.header.cloneWithColumns(std::move(mutable_columns))) {} + , index(index_) + , block(index.header.cloneWithColumns(std::move(mutable_columns_))) {} void MergeTreeIndexGranuleSet::serializeBinary(WriteBuffer & ostr) const { @@ -94,8 +94,8 @@ void MergeTreeIndexGranuleSet::deserializeBinary(ReadBuffer & istr) } -MergeTreeIndexAggregatorSet::MergeTreeIndexAggregatorSet(const MergeTreeIndexSet & index) - : index(index), columns(index.header.cloneEmptyColumns()) +MergeTreeIndexAggregatorSet::MergeTreeIndexAggregatorSet(const MergeTreeIndexSet & index_) + : index(index_), columns(index.header.cloneEmptyColumns()) { ColumnRawPtrs column_ptrs; column_ptrs.reserve(index.columns.size()); @@ -215,8 +215,8 @@ MergeTreeIndexGranulePtr MergeTreeIndexAggregatorSet::getGranuleAndReset() MergeTreeIndexConditionSet::MergeTreeIndexConditionSet( const SelectQueryInfo & query, const Context & context, - const MergeTreeIndexSet &index) - : IMergeTreeIndexCondition(), index(index) + const MergeTreeIndexSet &index_) + : IMergeTreeIndexCondition(), index(index_) { for (size_t i = 0, size = index.columns.size(); i < size; ++i) { diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexSet.h b/dbms/src/Storages/MergeTree/MergeTreeIndexSet.h index 04f4d2bec1e..b6c8c6cfa06 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexSet.h +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexSet.h @@ -16,8 +16,8 @@ class MergeTreeIndexSet; struct MergeTreeIndexGranuleSet : public IMergeTreeIndexGranule { - explicit MergeTreeIndexGranuleSet(const MergeTreeIndexSet & index); - MergeTreeIndexGranuleSet(const MergeTreeIndexSet & index, MutableColumns && columns); + explicit MergeTreeIndexGranuleSet(const MergeTreeIndexSet & index_); + MergeTreeIndexGranuleSet(const MergeTreeIndexSet & index_, MutableColumns && columns_); void 
serializeBinary(WriteBuffer & ostr) const override; void deserializeBinary(ReadBuffer & istr) override; diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndices.h b/dbms/src/Storages/MergeTree/MergeTreeIndices.h index 2a00c902810..c430d1e8135 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndices.h +++ b/dbms/src/Storages/MergeTree/MergeTreeIndices.h @@ -77,18 +77,18 @@ class IMergeTreeIndex { public: IMergeTreeIndex( - String name, - ExpressionActionsPtr expr, - const Names & columns, - const DataTypes & data_types, - const Block & header, - size_t granularity) - : name(name) - , expr(expr) - , columns(columns) - , data_types(data_types) - , header(header) - , granularity(granularity) {} + String name_, + ExpressionActionsPtr expr_, + const Names & columns_, + const DataTypes & data_types_, + const Block & header_, + size_t granularity_) + : name(name_) + , expr(expr_) + , columns(columns_) + , data_types(data_types_) + , header(header_) + , granularity(granularity_) {} virtual ~IMergeTreeIndex() = default; diff --git a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp index bc1468f2fb7..2aae847217e 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp @@ -13,9 +13,9 @@ namespace DB { MergeTreeRangeReader::DelayedStream::DelayedStream( - size_t from_mark, MergeTreeReader * merge_tree_reader) + size_t from_mark, MergeTreeReader * merge_tree_reader_) : current_mark(from_mark), current_offset(0), num_delayed_rows(0) - , merge_tree_reader(merge_tree_reader) + , merge_tree_reader(merge_tree_reader_) , index_granularity(&(merge_tree_reader->data_part->index_granularity)) , continue_reading(false), is_finished(false) { @@ -108,10 +108,10 @@ size_t MergeTreeRangeReader::DelayedStream::finalize(Block & block) MergeTreeRangeReader::Stream::Stream( - size_t from_mark, size_t to_mark, MergeTreeReader * merge_tree_reader) + size_t from_mark, size_t to_mark, MergeTreeReader * merge_tree_reader_) : current_mark(from_mark), offset_after_current_mark(0) , last_mark(to_mark) - , merge_tree_reader(merge_tree_reader) + , merge_tree_reader(merge_tree_reader_) , index_granularity(&(merge_tree_reader->data_part->index_granularity)) , current_mark_index_granularity(index_granularity->getMarkRows(from_mark)) , stream(from_mark, merge_tree_reader) @@ -406,15 +406,15 @@ void MergeTreeRangeReader::ReadResult::setFilter(const ColumnPtr & new_filter) MergeTreeRangeReader::MergeTreeRangeReader( - MergeTreeReader * merge_tree_reader, MergeTreeRangeReader * prev_reader, - ExpressionActionsPtr alias_actions, ExpressionActionsPtr prewhere_actions, - const String * prewhere_column_name, const Names * ordered_names, - bool always_reorder, bool remove_prewhere_column, bool last_reader_in_chain) - : merge_tree_reader(merge_tree_reader), index_granularity(&(merge_tree_reader->data_part->index_granularity)) - , prev_reader(prev_reader), prewhere_column_name(prewhere_column_name) - , ordered_names(ordered_names), alias_actions(alias_actions), prewhere_actions(std::move(prewhere_actions)) - , always_reorder(always_reorder), remove_prewhere_column(remove_prewhere_column) - , last_reader_in_chain(last_reader_in_chain), is_initialized(true) + MergeTreeReader * merge_tree_reader_, MergeTreeRangeReader * prev_reader_, + ExpressionActionsPtr alias_actions_, ExpressionActionsPtr prewhere_actions_, + const String * prewhere_column_name_, const Names * ordered_names_, + bool always_reorder_, bool 
remove_prewhere_column_, bool last_reader_in_chain_) + : merge_tree_reader(merge_tree_reader_), index_granularity(&(merge_tree_reader->data_part->index_granularity)) + , prev_reader(prev_reader_), prewhere_column_name(prewhere_column_name_) + , ordered_names(ordered_names_), alias_actions(alias_actions_), prewhere_actions(std::move(prewhere_actions_)) + , always_reorder(always_reorder_), remove_prewhere_column(remove_prewhere_column_) + , last_reader_in_chain(last_reader_in_chain_), is_initialized(true) { } diff --git a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.h b/dbms/src/Storages/MergeTree/MergeTreeRangeReader.h index c80fff31419..9552373901c 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.h +++ b/dbms/src/Storages/MergeTree/MergeTreeRangeReader.h @@ -20,10 +20,10 @@ class MergeTreeIndexGranularity; class MergeTreeRangeReader { public: - MergeTreeRangeReader(MergeTreeReader * merge_tree_reader, MergeTreeRangeReader * prev_reader, - ExpressionActionsPtr alias_actions, ExpressionActionsPtr prewhere_actions, - const String * prewhere_column_name, const Names * ordered_names, - bool always_reorder, bool remove_prewhere_column, bool last_reader_in_chain); + MergeTreeRangeReader(MergeTreeReader * merge_tree_reader_, MergeTreeRangeReader * prev_reader_, + ExpressionActionsPtr alias_actions_, ExpressionActionsPtr prewhere_actions_, + const String * prewhere_column_name_, const Names * ordered_names_, + bool always_reorder_, bool remove_prewhere_column_, bool last_reader_in_chain_); MergeTreeRangeReader() = default; diff --git a/dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp b/dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp index d98fb8ac87e..6298c098220 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -19,14 +19,14 @@ namespace DB MergeTreeReadPool::MergeTreeReadPool( - const size_t threads, const size_t sum_marks, const size_t min_marks_for_concurrent_read, - RangesInDataParts parts, const MergeTreeData & data, const PrewhereInfoPtr & prewhere_info, - const bool check_columns, const Names & column_names, - const BackoffSettings & backoff_settings, size_t preferred_block_size_bytes, - const bool do_not_steal_tasks) - : backoff_settings{backoff_settings}, backoff_state{threads}, data{data}, - column_names{column_names}, do_not_steal_tasks{do_not_steal_tasks}, - predict_block_size_bytes{preferred_block_size_bytes > 0}, prewhere_info{prewhere_info}, parts_ranges{parts} + const size_t threads_, const size_t sum_marks_, const size_t min_marks_for_concurrent_read_, + RangesInDataParts parts_, const MergeTreeData & data_, const PrewhereInfoPtr & prewhere_info_, + const bool check_columns_, const Names & column_names_, + const BackoffSettings & backoff_settings_, size_t preferred_block_size_bytes_, + const bool do_not_steal_tasks_) + : backoff_settings{backoff_settings_}, backoff_state{threads_}, data{data_}, + column_names{column_names_}, do_not_steal_tasks{do_not_steal_tasks_}, + predict_block_size_bytes{preferred_block_size_bytes_ > 0}, prewhere_info{prewhere_info_}, parts_ranges{parts_} { /// reverse from right-to-left to left-to-right /// because 'reverse' was done in MergeTreeDataSelectExecutor @@ -34,8 +34,8 @@ MergeTreeReadPool::MergeTreeReadPool( std::reverse(std::begin(part_ranges.ranges), std::end(part_ranges.ranges)); /// parts don't contain duplicate MergeTreeDataPart's. 
- const auto per_part_sum_marks = fillPerPartInfo(parts, check_columns); - fillPerThreadInfo(threads, sum_marks, per_part_sum_marks, parts, min_marks_for_concurrent_read); + const auto per_part_sum_marks = fillPerPartInfo(parts_, check_columns_); + fillPerThreadInfo(threads_, sum_marks_, per_part_sum_marks, parts_, min_marks_for_concurrent_read_); } diff --git a/dbms/src/Storages/MergeTree/MergeTreeReadPool.h b/dbms/src/Storages/MergeTree/MergeTreeReadPool.h index e7c8cb7c7da..2e9cb76f0cd 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeReadPool.h +++ b/dbms/src/Storages/MergeTree/MergeTreeReadPool.h @@ -66,11 +66,11 @@ private: public: MergeTreeReadPool( - const size_t threads, const size_t sum_marks, const size_t min_marks_for_concurrent_read, - RangesInDataParts parts, const MergeTreeData & data, const PrewhereInfoPtr & prewhere_info, - const bool check_columns, const Names & column_names, - const BackoffSettings & backoff_settings, size_t preferred_block_size_bytes, - const bool do_not_steal_tasks = false); + const size_t threads_, const size_t sum_marks_, const size_t min_marks_for_concurrent_read_, + RangesInDataParts parts_, const MergeTreeData & data_, const PrewhereInfoPtr & prewhere_info_, + const bool check_columns_, const Names & column_names_, + const BackoffSettings & backoff_settings_, size_t preferred_block_size_bytes_, + const bool do_not_steal_tasks_ = false); MergeTreeReadTaskPtr getTask(const size_t min_marks_to_read, const size_t thread, const Names & ordered_names); diff --git a/dbms/src/Storages/MergeTree/MergeTreeReader.cpp b/dbms/src/Storages/MergeTree/MergeTreeReader.cpp index 9f794f1a884..d9732c8ac6f 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeReader.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeReader.cpp @@ -31,21 +31,21 @@ namespace ErrorCodes MergeTreeReader::~MergeTreeReader() = default; -MergeTreeReader::MergeTreeReader(const String & path, - const MergeTreeData::DataPartPtr & data_part, const NamesAndTypesList & columns, - UncompressedCache * uncompressed_cache, MarkCache * mark_cache, bool save_marks_in_cache, - const MergeTreeData & storage, const MarkRanges & all_mark_ranges, - size_t aio_threshold, size_t max_read_buffer_size, const ValueSizeMap & avg_value_size_hints, - const ReadBufferFromFileBase::ProfileCallback & profile_callback, - clockid_t clock_type) - : data_part(data_part), avg_value_size_hints(avg_value_size_hints), path(path), columns(columns) - , uncompressed_cache(uncompressed_cache), mark_cache(mark_cache), save_marks_in_cache(save_marks_in_cache), storage(storage) - , all_mark_ranges(all_mark_ranges), aio_threshold(aio_threshold), max_read_buffer_size(max_read_buffer_size) +MergeTreeReader::MergeTreeReader(const String & path_, + const MergeTreeData::DataPartPtr & data_part_, const NamesAndTypesList & columns_, + UncompressedCache * uncompressed_cache_, MarkCache * mark_cache_, bool save_marks_in_cache_, + const MergeTreeData & storage_, const MarkRanges & all_mark_ranges_, + size_t aio_threshold_, size_t max_read_buffer_size_, const ValueSizeMap & avg_value_size_hints_, + const ReadBufferFromFileBase::ProfileCallback & profile_callback_, + clockid_t clock_type_) + : data_part(data_part_), avg_value_size_hints(avg_value_size_hints_), path(path_), columns(columns_) + , uncompressed_cache(uncompressed_cache_), mark_cache(mark_cache_), save_marks_in_cache(save_marks_in_cache_), storage(storage_) + , all_mark_ranges(all_mark_ranges_), aio_threshold(aio_threshold_), max_read_buffer_size(max_read_buffer_size_) { try { for (const 
NameAndTypePair & column : columns) - addStreams(column.name, *column.type, profile_callback, clock_type); + addStreams(column.name, *column.type, profile_callback_, clock_type_); } catch (...) { diff --git a/dbms/src/Storages/MergeTree/MergeTreeReader.h b/dbms/src/Storages/MergeTree/MergeTreeReader.h index 6d23d413cf4..25f4c9ddd32 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeReader.h +++ b/dbms/src/Storages/MergeTree/MergeTreeReader.h @@ -19,16 +19,16 @@ public: using ValueSizeMap = std::map<std::string, double>; using DeserializeBinaryBulkStateMap = std::map<std::string, IDataType::DeserializeBinaryBulkStatePtr>; - MergeTreeReader(const String & path, /// Path to the directory containing the part - const MergeTreeData::DataPartPtr & data_part, const NamesAndTypesList & columns, - UncompressedCache * uncompressed_cache, - MarkCache * mark_cache, - bool save_marks_in_cache, - const MergeTreeData & storage, const MarkRanges & all_mark_ranges, - size_t aio_threshold, size_t max_read_buffer_size, - const ValueSizeMap & avg_value_size_hints = ValueSizeMap{}, - const ReadBufferFromFileBase::ProfileCallback & profile_callback = ReadBufferFromFileBase::ProfileCallback{}, - clockid_t clock_type = CLOCK_MONOTONIC_COARSE); + MergeTreeReader(const String & path_, /// Path to the directory containing the part + const MergeTreeData::DataPartPtr & data_part_, const NamesAndTypesList & columns_, + UncompressedCache * uncompressed_cache_, + MarkCache * mark_cache_, + bool save_marks_in_cache_, + const MergeTreeData & storage_, const MarkRanges & all_mark_ranges_, + size_t aio_threshold_, size_t max_read_buffer_size_, + const ValueSizeMap & avg_value_size_hints_ = ValueSizeMap{}, + const ReadBufferFromFileBase::ProfileCallback & profile_callback_ = ReadBufferFromFileBase::ProfileCallback{}, + clockid_t clock_type_ = CLOCK_MONOTONIC_COARSE); ~MergeTreeReader(); diff --git a/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.cpp b/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.cpp index 2d48b362902..7a6e6f197dd 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.cpp @@ -23,7 +23,7 @@ MergeTreeSelectBlockInputStream::MergeTreeSelectBlockInputStream( const MarkRanges & mark_ranges_, bool use_uncompressed_cache_, const PrewhereInfoPtr & prewhere_info_, - bool check_columns, + bool check_columns_, size_t min_bytes_to_use_direct_io_, size_t max_read_buffer_size_, bool save_marks_in_cache_, @@ -39,7 +39,7 @@ MergeTreeSelectBlockInputStream::MergeTreeSelectBlockInputStream( part_columns_lock(data_part->columns_lock), all_mark_ranges(mark_ranges_), part_index_in_query(part_index_in_query_), - check_columns(check_columns), + check_columns(check_columns_), path(data_part->getFullPath()) { /// Let's estimate total number of rows for progress bar.
diff --git a/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.h b/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.h index 4faeaa0d397..0fc9830f5d0 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.h +++ b/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.h @@ -22,7 +22,7 @@ public: UInt64 max_block_size_rows, size_t preferred_block_size_bytes, size_t preferred_max_column_in_block_size_bytes, - Names column_names, + Names column_names_, const MarkRanges & mark_ranges, bool use_uncompressed_cache, const PrewhereInfoPtr & prewhere_info, diff --git a/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.cpp b/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.cpp index 1642b9da602..9c34782dec8 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.cpp @@ -8,8 +8,8 @@ namespace DB MergeTreeThreadSelectBlockInputStream::MergeTreeThreadSelectBlockInputStream( - const size_t thread, - const MergeTreeReadPoolPtr & pool, + const size_t thread_, + const MergeTreeReadPoolPtr & pool_, const size_t min_marks_to_read_, const UInt64 max_block_size_rows_, size_t preferred_block_size_bytes_, @@ -23,8 +23,8 @@ MergeTreeThreadSelectBlockInputStream::MergeTreeThreadSelectBlockInputStream( MergeTreeBaseSelectBlockInputStream{storage_, prewhere_info_, max_block_size_rows_, preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_, settings.min_bytes_to_use_direct_io, settings.max_read_buffer_size, use_uncompressed_cache_, true, virt_column_names_}, - thread{thread}, - pool{pool} + thread{thread_}, + pool{pool_} { /// round min_marks_to_read up to nearest multiple of block_size expressed in marks /// If granularity is adaptive it doesn't make sense @@ -70,7 +70,7 @@ bool MergeTreeThreadSelectBlockInputStream::getNewTask() const std::string path = task->data_part->getFullPath(); /// Allows pool to reduce number of threads in case of too slow reads. 
- auto profile_callback = [this](ReadBufferFromFileBase::ProfileInfo info) { pool->profileFeedback(info); }; + auto profile_callback = [this](ReadBufferFromFileBase::ProfileInfo info_) { pool->profileFeedback(info_); }; if (!reader) { diff --git a/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.h b/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.h index 592b6b71f73..3c7dfb7927d 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.h +++ b/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.h @@ -15,17 +15,17 @@ class MergeTreeThreadSelectBlockInputStream : public MergeTreeBaseSelectBlockInp { public: MergeTreeThreadSelectBlockInputStream( - const size_t thread, - const std::shared_ptr & pool, - const size_t min_marks_to_read, - const UInt64 max_block_size, - size_t preferred_block_size_bytes, - size_t preferred_max_column_in_block_size_bytes, - const MergeTreeData & storage, - const bool use_uncompressed_cache, - const PrewhereInfoPtr & prewhere_info, - const Settings & settings, - const Names & virt_column_names); + const size_t thread_, + const std::shared_ptr & pool_, + const size_t min_marks_to_read_, + const UInt64 max_block_size_, + size_t preferred_block_size_bytes_, + size_t preferred_max_column_in_block_size_bytes_, + const MergeTreeData & storage_, + const bool use_uncompressed_cache_, + const PrewhereInfoPtr & prewhere_info_, + const Settings & settings_, + const Names & virt_column_names_); String getName() const override { return "MergeTreeThread"; } diff --git a/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index 5b132c6d7d0..a772e0a204b 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -32,13 +32,13 @@ MergeTreeWhereOptimizer::MergeTreeWhereOptimizer( SelectQueryInfo & query_info, const Context & context, const MergeTreeData & data, - const Names & queried_columns, - Logger * log) + const Names & queried_columns_, + Logger * log_) : table_columns{ext::map(data.getColumns().getAllPhysical(), [] (const NameAndTypePair & col) { return col.name; })}, - queried_columns{queried_columns}, + queried_columns{queried_columns_}, block_with_constants{KeyCondition::getBlockWithConstants(query_info.query, query_info.syntax_analyzer_result, context)}, - log{log} + log{log_} { if (!data.primary_key_columns.empty()) first_primary_key_column = data.primary_key_columns[0]; diff --git a/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.h b/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.h index ccc1195cada..bdfcaad6a51 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.h +++ b/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.h @@ -31,8 +31,8 @@ public: SelectQueryInfo & query_info, const Context & context, const MergeTreeData & data, - const Names & queried_column_names, - Poco::Logger * log); + const Names & queried_column_names_, + Poco::Logger * log_); private: void optimize(ASTSelectQuery & select) const; diff --git a/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp index a5418cb036e..25d2e1c1d51 100644 --- a/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp +++ b/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp @@ -6,7 +6,7 @@ namespace DB MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( MergeTreeData & storage_, const Block & header_, 
String part_path_, bool sync_, CompressionCodecPtr default_codec_, bool skip_offsets_, - WrittenOffsetColumns & already_written_offset_columns, + WrittenOffsetColumns & already_written_offset_columns_, const MergeTreeIndexGranularity & index_granularity_) : IMergedBlockOutputStream( storage_, storage_.global_context.getSettings().min_compress_block_size, @@ -15,7 +15,7 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( false, index_granularity_), header(header_), part_path(part_path_), sync(sync_), skip_offsets(skip_offsets_), - already_written_offset_columns(already_written_offset_columns) + already_written_offset_columns(already_written_offset_columns_) { } diff --git a/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h b/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h index 8080f5658db..e4be57a64de 100644 --- a/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h +++ b/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h @@ -15,7 +15,7 @@ public: MergedColumnOnlyOutputStream( MergeTreeData & storage_, const Block & header_, String part_path_, bool sync_, CompressionCodecPtr default_codec_, bool skip_offsets_, - WrittenOffsetColumns & already_written_offset_columns, + WrittenOffsetColumns & already_written_offset_columns_, const MergeTreeIndexGranularity & index_granularity_); Block getHeader() const override { return header; } diff --git a/dbms/src/Storages/MergeTree/RangesInDataPart.h b/dbms/src/Storages/MergeTree/RangesInDataPart.h index a93a2103841..4f5d34e118d 100644 --- a/dbms/src/Storages/MergeTree/RangesInDataPart.h +++ b/dbms/src/Storages/MergeTree/RangesInDataPart.h @@ -16,9 +16,9 @@ struct RangesInDataPart RangesInDataPart() = default; - RangesInDataPart(const MergeTreeData::DataPartPtr & data_part, const size_t part_index_in_query, - const MarkRanges & ranges = MarkRanges{}) - : data_part{data_part}, part_index_in_query{part_index_in_query}, ranges{ranges} + RangesInDataPart(const MergeTreeData::DataPartPtr & data_part_, const size_t part_index_in_query_, + const MarkRanges & ranges_ = MarkRanges{}) + : data_part{data_part_}, part_index_in_query{part_index_in_query_}, ranges{ranges_} { } diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp index 335a88313b6..3bb7e04fe6e 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp @@ -33,8 +33,8 @@ namespace ErrorCodes ReplicatedMergeTreeBlockOutputStream::ReplicatedMergeTreeBlockOutputStream( - StorageReplicatedMergeTree & storage_, size_t quorum_, size_t quorum_timeout_ms_, size_t max_parts_per_block, bool deduplicate_) - : storage(storage_), quorum(quorum_), quorum_timeout_ms(quorum_timeout_ms_), max_parts_per_block(max_parts_per_block), deduplicate(deduplicate_), + StorageReplicatedMergeTree & storage_, size_t quorum_, size_t quorum_timeout_ms_, size_t max_parts_per_block_, bool deduplicate_) + : storage(storage_), quorum(quorum_), quorum_timeout_ms(quorum_timeout_ms_), max_parts_per_block(max_parts_per_block_), deduplicate(deduplicate_), log(&Logger::get(storage.getLogName() + " (Replicated OutputStream)")) { /// The quorum value `1` has the same meaning as if it is disabled. 
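On the quorum comment that closes this hunk: a quorum of one is satisfied by the replica that accepted the insert itself, so it guarantees nothing beyond a plain write. A plausible continuation of the constructor body (a sketch; the actual body is outside this hunk):

    /// Treat quorum == 1 as "no quorum": the accepting replica always
    /// provides the first - and in that case only - confirmation.
    if (quorum == 1)
        quorum = 0;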
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h index e20a36a9440..0f6fc1e7cee 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h @@ -23,7 +23,7 @@ class ReplicatedMergeTreeBlockOutputStream : public IBlockOutputStream { public: ReplicatedMergeTreeBlockOutputStream(StorageReplicatedMergeTree & storage_, - size_t quorum_, size_t quorum_timeout_ms_, size_t max_parts_per_block, + size_t quorum_, size_t quorum_timeout_ms_, size_t max_parts_per_block_, bool deduplicate_); Block getHeader() const override; diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h b/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h index 322ee593c46..198c9714f64 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h @@ -41,7 +41,7 @@ public: { ReplicatedMergeTreePartCheckThread * parent; - TemporarilyStop(ReplicatedMergeTreePartCheckThread * parent) : parent(parent) + TemporarilyStop(ReplicatedMergeTreePartCheckThread * parent_) : parent(parent_) { parent->stop(); } diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index a6be0c3b9a9..665e8c9bd5c 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -1014,8 +1014,8 @@ Int64 ReplicatedMergeTreeQueue::getCurrentMutationVersion(const String & partiti } -ReplicatedMergeTreeQueue::CurrentlyExecuting::CurrentlyExecuting(const ReplicatedMergeTreeQueue::LogEntryPtr & entry_, ReplicatedMergeTreeQueue & queue) - : entry(entry_), queue(queue) +ReplicatedMergeTreeQueue::CurrentlyExecuting::CurrentlyExecuting(const ReplicatedMergeTreeQueue::LogEntryPtr & entry_, ReplicatedMergeTreeQueue & queue_) + : entry(entry_), queue(queue_) { entry->currently_executing = true; ++entry->num_tries; @@ -1452,7 +1452,7 @@ ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate( for (const String & partition : partitions) lock_futures.push_back(zookeeper->asyncGetChildren(queue.zookeeper_path + "/block_numbers/" + partition)); - struct BlockInfo + struct BlockInfo_ { String partition; Int64 number; @@ -1460,7 +1460,7 @@ ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate( std::future<Coordination::GetResponse> contents_future; }; - std::vector<BlockInfo> block_infos; + std::vector<BlockInfo_> block_infos; for (size_t i = 0; i < partitions.size(); ++i) { Strings partition_block_numbers = lock_futures[i].get().names; @@ -1472,13 +1472,13 @@ ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate( { Int64 block_number = parse<Int64>(entry.substr(strlen("block-"))); String zk_path = queue.zookeeper_path + "/block_numbers/" + partitions[i] + "/" + entry; - block_infos.push_back( - BlockInfo{partitions[i], block_number, zk_path, zookeeper->asyncTryGet(zk_path)}); + block_infos.emplace_back( + BlockInfo_{partitions[i], block_number, zk_path, zookeeper->asyncTryGet(zk_path)}); } } } - for (BlockInfo & block : block_infos) + for (auto & block : block_infos) { Coordination::GetResponse resp = block.contents_future.get(); if (!resp.error && lock_holder_paths.count(resp.data)) diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index 0d439d7b610..47d82f4a9a2
100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -138,7 +138,7 @@ private: friend class SubscriberHandler; struct SubscriberHandler : public boost::noncopyable { - SubscriberHandler(SubscriberIterator it, ReplicatedMergeTreeQueue & queue) : it(it), queue(queue) {} + SubscriberHandler(SubscriberIterator it_, ReplicatedMergeTreeQueue & queue_) : it(it_), queue(queue_) {} ~SubscriberHandler(); private: @@ -215,7 +215,7 @@ private: friend class ReplicatedMergeTreeQueue; /// Created only in the selectEntryToProcess function. It is called under mutex. - CurrentlyExecuting(const ReplicatedMergeTreeQueue::LogEntryPtr & entry_, ReplicatedMergeTreeQueue & queue); + CurrentlyExecuting(const ReplicatedMergeTreeQueue::LogEntryPtr & entry_, ReplicatedMergeTreeQueue & queue_); /// In case of fetch, we determine actual part during the execution, so we need to update entry. It is called under state_mutex. static void setActualPartName(ReplicatedMergeTreeQueue::LogEntry & entry, const String & actual_part_name, diff --git a/dbms/src/Storages/MergeTree/SimpleMergeSelector.h b/dbms/src/Storages/MergeTree/SimpleMergeSelector.h index e699f605b8f..729eaa966e9 100644 --- a/dbms/src/Storages/MergeTree/SimpleMergeSelector.h +++ b/dbms/src/Storages/MergeTree/SimpleMergeSelector.h @@ -71,7 +71,7 @@ public: double heuristic_to_remove_small_parts_at_right_max_ratio = 0.01; }; - explicit SimpleMergeSelector(const Settings & settings) : settings(settings) {} + explicit SimpleMergeSelector(const Settings & settings_) : settings(settings_) {} PartsInPartition select( const Partitions & partitions, diff --git a/dbms/src/Storages/MergeTree/checkDataPart.cpp b/dbms/src/Storages/MergeTree/checkDataPart.cpp index 2ae83b5076a..2303ec38efa 100644 --- a/dbms/src/Storages/MergeTree/checkDataPart.cpp +++ b/dbms/src/Storages/MergeTree/checkDataPart.cpp @@ -72,11 +72,11 @@ public: Stream( const String & path, - const String & base_name, + const String & base_name_, const String & bin_file_extension_, const String & mrk_file_extension_, const MergeTreeIndexGranularity & index_granularity_) - : base_name(base_name) + : base_name(base_name_) , bin_file_extension(bin_file_extension_) , mrk_file_extension(mrk_file_extension_) , bin_file_path(path + base_name + bin_file_extension) diff --git a/dbms/src/Storages/StorageDistributed.cpp b/dbms/src/Storages/StorageDistributed.cpp index 2ecae5789dc..e862d27fdaa 100644 --- a/dbms/src/Storages/StorageDistributed.cpp +++ b/dbms/src/Storages/StorageDistributed.cpp @@ -190,8 +190,8 @@ static ExpressionActionsPtr buildShardingKeyExpression(const ASTPtr & sharding_k } StorageDistributed::StorageDistributed( - const String & database_name, - const String & table_name, + const String & database_name_, + const String & table_name_, const ColumnsDescription & columns_, const String & remote_database_, const String & remote_table_, @@ -199,8 +199,8 @@ StorageDistributed::StorageDistributed( const Context & context_, const ASTPtr & sharding_key_, const String & data_path_, - bool attach) - : IStorage{columns_}, table_name(table_name), database_name(database_name), + bool attach_) + : IStorage{columns_}, table_name(table_name_), database_name(database_name_), remote_database(remote_database_), remote_table(remote_table_), global_context(context_), cluster_name(global_context.getMacros()->expand(cluster_name_)), has_sharding_key(sharding_key_), sharding_key_expr(sharding_key_ ? 
buildShardingKeyExpression(sharding_key_, global_context, getColumns().getAllPhysical(), false) : nullptr), @@ -208,7 +208,7 @@ StorageDistributed::StorageDistributed( path(data_path_.empty() ? "" : (data_path_ + escapeForFileName(table_name) + '/')) { /// Sanity check. Skip check if the table is already created to allow the server to start. - if (!attach && !cluster_name.empty()) + if (!attach_ && !cluster_name.empty()) { size_t num_local_shards = global_context.getCluster(cluster_name)->getLocalShardCount(); if (num_local_shards && remote_database == database_name && remote_table == table_name) @@ -218,7 +218,7 @@ StorageDistributed::StorageDistributed( StorageDistributed::StorageDistributed( - const String & database_name, + const String & database_name_, const String & table_name_, const ColumnsDescription & columns_, ASTPtr remote_table_function_ptr_, @@ -227,7 +227,7 @@ StorageDistributed::StorageDistributed( const ASTPtr & sharding_key_, const String & data_path_, bool attach) - : StorageDistributed(database_name, table_name_, columns_, String{}, String{}, cluster_name_, context_, sharding_key_, data_path_, attach) + : StorageDistributed(database_name_, table_name_, columns_, String{}, String{}, cluster_name_, context_, sharding_key_, data_path_, attach) { remote_table_function_ptr = remote_table_function_ptr_; } diff --git a/dbms/src/Storages/StorageDistributed.h b/dbms/src/Storages/StorageDistributed.h index 86fe80f575f..6885a758e9e 100644 --- a/dbms/src/Storages/StorageDistributed.h +++ b/dbms/src/Storages/StorageDistributed.h @@ -158,7 +158,7 @@ public: protected: StorageDistributed( - const String & database_name, + const String & database_name_, const String & table_name_, const ColumnsDescription & columns_, const String & remote_database_, @@ -167,7 +167,7 @@ protected: const Context & context_, const ASTPtr & sharding_key_, const String & data_path_, - bool attach); + bool attach_); StorageDistributed( const String & database_name, diff --git a/dbms/src/Storages/StorageFile.cpp b/dbms/src/Storages/StorageFile.cpp index 5bc23d00975..5162e667133 100644 --- a/dbms/src/Storages/StorageFile.cpp +++ b/dbms/src/Storages/StorageFile.cpp @@ -192,8 +192,8 @@ BlockInputStreams StorageFile::read( unsigned /*num_streams*/) { BlockInputStreamPtr block_input = std::make_shared(*this, context, max_block_size); - const ColumnsDescription & columns = getColumns(); - auto column_defaults = columns.getDefaults(); + const ColumnsDescription & columns_ = getColumns(); + auto column_defaults = columns_.getDefaults(); if (column_defaults.empty()) return {block_input}; return {std::make_shared(block_input, column_defaults, context)}; diff --git a/dbms/src/Storages/StorageLog.cpp b/dbms/src/Storages/StorageLog.cpp index 12e45ca96bc..d17caeb0046 100644 --- a/dbms/src/Storages/StorageLog.cpp +++ b/dbms/src/Storages/StorageLog.cpp @@ -87,8 +87,8 @@ private: struct Stream { - Stream(const std::string & data_path, size_t offset, size_t max_read_buffer_size) - : plain(data_path, std::min(static_cast(max_read_buffer_size), Poco::File(data_path).getSize())), + Stream(const std::string & data_path, size_t offset, size_t max_read_buffer_size_) + : plain(data_path, std::min(static_cast(max_read_buffer_size_), Poco::File(data_path).getSize())), compressed(plain) { if (offset) diff --git a/dbms/src/Storages/StorageMergeTree.cpp b/dbms/src/Storages/StorageMergeTree.cpp index 9c4b6559f39..83bfadc482b 100644 --- a/dbms/src/Storages/StorageMergeTree.cpp +++ b/dbms/src/Storages/StorageMergeTree.cpp @@ -204,8 +204,8 
@@ std::vector StorageMergeTree::prepar const auto & columns_for_parts = new_columns.getAllPhysical(); - const Settings & settings = context.getSettingsRef(); - size_t thread_pool_size = std::min(parts.size(), settings.max_alter_threads); + const Settings & settings_ = context.getSettingsRef(); + size_t thread_pool_size = std::min(parts.size(), settings_.max_alter_threads); ThreadPool thread_pool(thread_pool_size); diff --git a/dbms/src/Storages/StorageMySQL.cpp b/dbms/src/Storages/StorageMySQL.cpp index 25a06a8bd4e..4ad00338793 100644 --- a/dbms/src/Storages/StorageMySQL.cpp +++ b/dbms/src/Storages/StorageMySQL.cpp @@ -39,61 +39,61 @@ String backQuoteMySQL(const String & x) StorageMySQL::StorageMySQL( const std::string & database_name_, const std::string & table_name_, - mysqlxx::Pool && pool, - const std::string & remote_database_name, - const std::string & remote_table_name, - const bool replace_query, - const std::string & on_duplicate_clause, + mysqlxx::Pool && pool_, + const std::string & remote_database_name_, + const std::string & remote_table_name_, + const bool replace_query_, + const std::string & on_duplicate_clause_, const ColumnsDescription & columns_, - const Context & context) + const Context & context_) : IStorage{columns_} , table_name(table_name_) , database_name(database_name_) - , remote_database_name(remote_database_name) - , remote_table_name(remote_table_name) - , replace_query{replace_query} - , on_duplicate_clause{on_duplicate_clause} - , pool(std::move(pool)) - , global_context(context) + , remote_database_name(remote_database_name_) + , remote_table_name(remote_table_name_) + , replace_query{replace_query_} + , on_duplicate_clause{on_duplicate_clause_} + , pool(std::move(pool_)) + , global_context(context_) { } BlockInputStreams StorageMySQL::read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, + const Names & column_names_, + const SelectQueryInfo & query_info_, + const Context & context_, QueryProcessingStage::Enum /*processed_stage*/, - size_t max_block_size, + size_t max_block_size_, unsigned) { - check(column_names); + check(column_names_); String query = transformQueryForExternalDatabase( - *query_info.query, getColumns().getOrdinary(), IdentifierQuotingStyle::BackticksMySQL, remote_database_name, remote_table_name, context); + *query_info_.query, getColumns().getOrdinary(), IdentifierQuotingStyle::BackticksMySQL, remote_database_name, remote_table_name, context_); Block sample_block; - for (const String & column_name : column_names) + for (const String & column_name : column_names_) { auto column_data = getColumn(column_name); sample_block.insert({ column_data.type, column_data.name }); } - return { std::make_shared(pool.Get(), query, sample_block, max_block_size) }; + return { std::make_shared(pool.Get(), query, sample_block, max_block_size_) }; } class StorageMySQLBlockOutputStream : public IBlockOutputStream { public: - explicit StorageMySQLBlockOutputStream(const StorageMySQL & storage, - const std::string & remote_database_name, - const std::string & remote_table_name , - const mysqlxx::PoolWithFailover::Entry & entry, + explicit StorageMySQLBlockOutputStream(const StorageMySQL & storage_, + const std::string & remote_database_name_, + const std::string & remote_table_name_, + const mysqlxx::PoolWithFailover::Entry & entry_, const size_t & mysql_max_rows_to_insert) - : storage{storage} - , remote_database_name{remote_database_name} - , remote_table_name{remote_table_name} - , entry{entry} + : 
storage{storage_} + , remote_database_name{remote_database_name_} + , remote_table_name{remote_table_name_} + , entry{entry_} , max_batch_rows{mysql_max_rows_to_insert} { } diff --git a/dbms/src/Storages/StorageMySQL.h b/dbms/src/Storages/StorageMySQL.h index f28bb2aaa55..3b891566ed0 100644 --- a/dbms/src/Storages/StorageMySQL.h +++ b/dbms/src/Storages/StorageMySQL.h @@ -22,13 +22,13 @@ public: StorageMySQL( const std::string & database_name_, const std::string & table_name_, - mysqlxx::Pool && pool, - const std::string & remote_database_name, - const std::string & remote_table_name, - const bool replace_query, - const std::string & on_duplicate_clause, - const ColumnsDescription & columns, - const Context & context); + mysqlxx::Pool && pool_, + const std::string & remote_database_name_, + const std::string & remote_table_name_, + const bool replace_query_, + const std::string & on_duplicate_clause_, + const ColumnsDescription & columns_, + const Context & context_); std::string getName() const override { return "MySQL"; } std::string getTableName() const override { return table_name; } diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp index 5184f448305..5addd26482c 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp +++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp @@ -2892,14 +2892,14 @@ BlockInputStreams StorageReplicatedMergeTree::read( const size_t max_block_size, const unsigned num_streams) { - const Settings & settings = context.getSettingsRef(); + const Settings & settings_ = context.getSettingsRef(); /** The `select_sequential_consistency` setting has two meanings: * 1. To throw an exception if on a replica there are not all parts which have been written down on quorum of remaining replicas. * 2. Do not read parts that have not yet been written to the quorum of the replicas. * For this you have to synchronously go to ZooKeeper. 
*/ - if (settings.select_sequential_consistency) + if (settings_.select_sequential_consistency) { ReplicatedMergeTreeQuorumAddedParts::PartitionIdToMaxBlock max_added_blocks; diff --git a/dbms/src/Storages/StorageTinyLog.cpp b/dbms/src/Storages/StorageTinyLog.cpp index 096fe5b76e5..778b07503a8 100644 --- a/dbms/src/Storages/StorageTinyLog.cpp +++ b/dbms/src/Storages/StorageTinyLog.cpp @@ -86,8 +86,8 @@ private: struct Stream { - Stream(const std::string & data_path, size_t max_read_buffer_size) - : plain(data_path, std::min(static_cast(max_read_buffer_size), Poco::File(data_path).getSize())), + Stream(const std::string & data_path, size_t max_read_buffer_size_) + : plain(data_path, std::min(static_cast(max_read_buffer_size_), Poco::File(data_path).getSize())), compressed(plain) { } diff --git a/dbms/src/Storages/StorageValues.cpp b/dbms/src/Storages/StorageValues.cpp index 79d1641f6c2..452d815e5ea 100644 --- a/dbms/src/Storages/StorageValues.cpp +++ b/dbms/src/Storages/StorageValues.cpp @@ -7,10 +7,10 @@ namespace DB { -StorageValues::StorageValues(const std::string & database_name_, const std::string & table_name_, const ColumnsDescription & columns, const Block & res_block_) +StorageValues::StorageValues(const std::string & database_name_, const std::string & table_name_, const ColumnsDescription & columns_, const Block & res_block_) : database_name(database_name_), table_name(table_name_), res_block(res_block_) { - setColumns(columns); + setColumns(columns_); } BlockInputStreams StorageValues::read( diff --git a/dbms/src/Storages/StorageValues.h b/dbms/src/Storages/StorageValues.h index f5c6881ae36..36c3bc15301 100644 --- a/dbms/src/Storages/StorageValues.h +++ b/dbms/src/Storages/StorageValues.h @@ -30,7 +30,7 @@ private: Block res_block; protected: - StorageValues(const std::string & database_name_, const std::string & table_name_, const ColumnsDescription & columns, const Block & res_block_); + StorageValues(const std::string & database_name_, const std::string & table_name_, const ColumnsDescription & columns_, const Block & res_block_); }; } diff --git a/dbms/src/Storages/System/StorageSystemColumns.cpp b/dbms/src/Storages/System/StorageSystemColumns.cpp index 288549f45c3..30b673ddbbb 100644 --- a/dbms/src/Storages/System/StorageSystemColumns.cpp +++ b/dbms/src/Storages/System/StorageSystemColumns.cpp @@ -54,15 +54,15 @@ class ColumnsBlockInputStream : public IBlockInputStream { public: ColumnsBlockInputStream( - const std::vector & columns_mask, - const Block & header, - UInt64 max_block_size, - ColumnPtr databases, - ColumnPtr tables, - Storages storages, + const std::vector & columns_mask_, + const Block & header_, + UInt64 max_block_size_, + ColumnPtr databases_, + ColumnPtr tables_, + Storages storages_, String query_id_) - : columns_mask(columns_mask), header(header), max_block_size(max_block_size) - , databases(databases), tables(tables), storages(std::move(storages)) + : columns_mask(columns_mask_), header(header_), max_block_size(max_block_size_) + , databases(databases_), tables(tables_), storages(std::move(storages_)) , query_id(std::move(query_id_)), total_tables(tables->size()) { } diff --git a/dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp b/dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp index 28fb2076c21..0957e8f8349 100644 --- a/dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp +++ b/dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp @@ -20,13 +20,13 @@ void StorageSystemDataTypeFamilies::fillData(MutableColumns & 
res_columns, const { const auto & factory = DataTypeFactory::instance(); auto names = factory.getAllRegisteredNames(); - for (const auto & name : names) + for (const auto & dtf_name : names) { - res_columns[0]->insert(name); - res_columns[1]->insert(factory.isCaseInsensitive(name)); + res_columns[0]->insert(dtf_name); + res_columns[1]->insert(factory.isCaseInsensitive(dtf_name)); - if (factory.isAlias(name)) - res_columns[2]->insert(factory.aliasTo(name)); + if (factory.isAlias(dtf_name)) + res_columns[2]->insert(factory.aliasTo(dtf_name)); else res_columns[2]->insertDefault(); } diff --git a/dbms/src/Storages/System/StorageSystemDetachedParts.cpp b/dbms/src/Storages/System/StorageSystemDetachedParts.cpp index dbad9d8b604..617b52d8e5f 100644 --- a/dbms/src/Storages/System/StorageSystemDetachedParts.cpp +++ b/dbms/src/Storages/System/StorageSystemDetachedParts.cpp @@ -52,7 +52,7 @@ protected: /// Create the result. Block block = getSampleBlock(); - MutableColumns columns = block.cloneEmptyColumns(); + MutableColumns new_columns = block.cloneEmptyColumns(); while (StoragesInfo info = stream.next()) { @@ -60,19 +60,19 @@ protected: for (auto & p : parts) { size_t i = 0; - columns[i++]->insert(info.database); - columns[i++]->insert(info.table); - columns[i++]->insert(p.partition_id); - columns[i++]->insert(p.getPartName()); - columns[i++]->insert(p.prefix); - columns[i++]->insert(p.min_block); - columns[i++]->insert(p.max_block); - columns[i++]->insert(p.level); + new_columns[i++]->insert(info.database); + new_columns[i++]->insert(info.table); + new_columns[i++]->insert(p.partition_id); + new_columns[i++]->insert(p.getPartName()); + new_columns[i++]->insert(p.prefix); + new_columns[i++]->insert(p.min_block); + new_columns[i++]->insert(p.max_block); + new_columns[i++]->insert(p.level); } } return BlockInputStreams(1, std::make_shared( - block.cloneWithColumns(std::move(columns)))); + block.cloneWithColumns(std::move(new_columns)))); } }; diff --git a/dbms/src/Storages/System/StorageSystemFormats.cpp b/dbms/src/Storages/System/StorageSystemFormats.cpp index f0d97db5a98..158d0a662f2 100644 --- a/dbms/src/Storages/System/StorageSystemFormats.cpp +++ b/dbms/src/Storages/System/StorageSystemFormats.cpp @@ -20,10 +20,10 @@ void StorageSystemFormats::fillData(MutableColumns & res_columns, const Context const auto & formats = FormatFactory::instance().getAllFormats(); for (const auto & pair : formats) { - const auto & [name, creators] = pair; + const auto & [format_name, creators] = pair; UInt64 has_input_format(creators.inout_creator != nullptr || creators.input_processor_creator != nullptr); UInt64 has_output_format(creators.output_creator != nullptr || creators.output_processor_creator != nullptr); - res_columns[0]->insert(name); + res_columns[0]->insert(format_name); res_columns[1]->insert(has_input_format); res_columns[2]->insert(has_output_format); } diff --git a/dbms/src/Storages/System/StorageSystemFunctions.cpp b/dbms/src/Storages/System/StorageSystemFunctions.cpp index c9601373df3..e46b7007dc2 100644 --- a/dbms/src/Storages/System/StorageSystemFunctions.cpp +++ b/dbms/src/Storages/System/StorageSystemFunctions.cpp @@ -38,16 +38,16 @@ void StorageSystemFunctions::fillData(MutableColumns & res_columns, const Contex { const auto & functions_factory = FunctionFactory::instance(); const auto & function_names = functions_factory.getAllRegisteredNames(); - for (const auto & name : function_names) + for (const auto & function_name : function_names) { - fillRow(res_columns, name, UInt64(0), 
functions_factory); + fillRow(res_columns, function_name, UInt64(0), functions_factory); } const auto & aggregate_functions_factory = AggregateFunctionFactory::instance(); const auto & aggregate_function_names = aggregate_functions_factory.getAllRegisteredNames(); - for (const auto & name : aggregate_function_names) + for (const auto & function_name : aggregate_function_names) { - fillRow(res_columns, name, UInt64(1), aggregate_functions_factory); + fillRow(res_columns, function_name, UInt64(1), aggregate_functions_factory); } } } diff --git a/dbms/src/Storages/System/StorageSystemModels.cpp b/dbms/src/Storages/System/StorageSystemModels.cpp index b595df1e1b2..2db690ea4c3 100644 --- a/dbms/src/Storages/System/StorageSystemModels.cpp +++ b/dbms/src/Storages/System/StorageSystemModels.cpp @@ -30,9 +30,9 @@ void StorageSystemModels::fillData(MutableColumns & res_columns, const Context & const auto & external_models = context.getExternalModels(); auto load_results = external_models.getCurrentLoadResults(); - for (const auto & [name, load_result] : load_results) + for (const auto & [model_name, load_result] : load_results) { - res_columns[0]->insert(name); + res_columns[0]->insert(model_name); res_columns[1]->insert(static_cast(load_result.status)); res_columns[2]->insert(load_result.origin); diff --git a/dbms/src/Storages/System/StorageSystemParts.cpp b/dbms/src/Storages/System/StorageSystemParts.cpp index f8fffd2d9c9..65d17f096c3 100644 --- a/dbms/src/Storages/System/StorageSystemParts.cpp +++ b/dbms/src/Storages/System/StorageSystemParts.cpp @@ -14,8 +14,8 @@ namespace DB { -StorageSystemParts::StorageSystemParts(const std::string & name) - : StorageSystemPartsBase(name, +StorageSystemParts::StorageSystemParts(const std::string & name_) + : StorageSystemPartsBase(name_, { {"partition", std::make_shared()}, {"name", std::make_shared()}, @@ -55,7 +55,7 @@ StorageSystemParts::StorageSystemParts(const std::string & name) { } -void StorageSystemParts::processNextStorage(MutableColumns & columns, const StoragesInfo & info, bool has_state_column) +void StorageSystemParts::processNextStorage(MutableColumns & columns_, const StoragesInfo & info, bool has_state_column) { using State = MergeTreeDataPart::State; MergeTreeData::DataPartStateVector all_parts_state; @@ -74,56 +74,56 @@ void StorageSystemParts::processNextStorage(MutableColumns & columns, const Stor { WriteBufferFromOwnString out; part->partition.serializeText(*info.data, out, format_settings); - columns[i++]->insert(out.str()); + columns_[i++]->insert(out.str()); } - columns[i++]->insert(part->name); - columns[i++]->insert(part_state == State::Committed); - columns[i++]->insert(part->getMarksCount()); - columns[i++]->insert(part->rows_count); - columns[i++]->insert(part->bytes_on_disk.load(std::memory_order_relaxed)); - columns[i++]->insert(columns_size.data_compressed); - columns[i++]->insert(columns_size.data_uncompressed); - columns[i++]->insert(columns_size.marks); - columns[i++]->insert(static_cast(part->modification_time)); + columns_[i++]->insert(part->name); + columns_[i++]->insert(part_state == State::Committed); + columns_[i++]->insert(part->getMarksCount()); + columns_[i++]->insert(part->rows_count); + columns_[i++]->insert(part->bytes_on_disk.load(std::memory_order_relaxed)); + columns_[i++]->insert(columns_size.data_compressed); + columns_[i++]->insert(columns_size.data_uncompressed); + columns_[i++]->insert(columns_size.marks); + columns_[i++]->insert(static_cast(part->modification_time)); time_t remove_time = 
part->remove_time.load(std::memory_order_relaxed); - columns[i++]->insert(static_cast(remove_time == std::numeric_limits::max() ? 0 : remove_time)); + columns_[i++]->insert(static_cast(remove_time == std::numeric_limits::max() ? 0 : remove_time)); /// For convenience, in returned refcount, don't add references that was due to local variables in this method: all_parts, active_parts. - columns[i++]->insert(static_cast(part.use_count() - 1)); + columns_[i++]->insert(static_cast(part.use_count() - 1)); - columns[i++]->insert(part->getMinDate()); - columns[i++]->insert(part->getMaxDate()); - columns[i++]->insert(part->getMinTime()); - columns[i++]->insert(part->getMaxTime()); - columns[i++]->insert(part->info.partition_id); - columns[i++]->insert(part->info.min_block); - columns[i++]->insert(part->info.max_block); - columns[i++]->insert(part->info.level); - columns[i++]->insert(static_cast(part->info.getDataVersion())); - columns[i++]->insert(part->getIndexSizeInBytes()); - columns[i++]->insert(part->getIndexSizeInAllocatedBytes()); - columns[i++]->insert(part->is_frozen); + columns_[i++]->insert(part->getMinDate()); + columns_[i++]->insert(part->getMaxDate()); + columns_[i++]->insert(part->getMinTime()); + columns_[i++]->insert(part->getMaxTime()); + columns_[i++]->insert(part->info.partition_id); + columns_[i++]->insert(part->info.min_block); + columns_[i++]->insert(part->info.max_block); + columns_[i++]->insert(part->info.level); + columns_[i++]->insert(static_cast(part->info.getDataVersion())); + columns_[i++]->insert(part->getIndexSizeInBytes()); + columns_[i++]->insert(part->getIndexSizeInAllocatedBytes()); + columns_[i++]->insert(part->is_frozen); - columns[i++]->insert(info.database); - columns[i++]->insert(info.table); - columns[i++]->insert(info.engine); - columns[i++]->insert(part->getFullPath()); + columns_[i++]->insert(info.database); + columns_[i++]->insert(info.table); + columns_[i++]->insert(info.engine); + columns_[i++]->insert(part->getFullPath()); if (has_state_column) - columns[i++]->insert(part->stateString()); + columns_[i++]->insert(part->stateString()); MinimalisticDataPartChecksums helper; helper.computeTotalChecksums(part->checksums); auto checksum = helper.hash_of_all_files; - columns[i++]->insert(getHexUIntLowercase(checksum.first) + getHexUIntLowercase(checksum.second)); + columns_[i++]->insert(getHexUIntLowercase(checksum.first) + getHexUIntLowercase(checksum.second)); checksum = helper.hash_of_uncompressed_files; - columns[i++]->insert(getHexUIntLowercase(checksum.first) + getHexUIntLowercase(checksum.second)); + columns_[i++]->insert(getHexUIntLowercase(checksum.first) + getHexUIntLowercase(checksum.second)); checksum = helper.uncompressed_hash_of_compressed_files; - columns[i++]->insert(getHexUIntLowercase(checksum.first) + getHexUIntLowercase(checksum.second)); + columns_[i++]->insert(getHexUIntLowercase(checksum.first) + getHexUIntLowercase(checksum.second)); } } diff --git a/dbms/src/Storages/System/StorageSystemParts.h b/dbms/src/Storages/System/StorageSystemParts.h index f0c5071b1c5..eb1ded1c5d6 100644 --- a/dbms/src/Storages/System/StorageSystemParts.h +++ b/dbms/src/Storages/System/StorageSystemParts.h @@ -18,7 +18,7 @@ public: std::string getName() const override { return "SystemParts"; } protected: - explicit StorageSystemParts(const std::string & name); + explicit StorageSystemParts(const std::string & name_); void processNextStorage(MutableColumns & columns, const StoragesInfo & info, bool has_state_column) override; }; diff --git 
a/dbms/src/Storages/System/StorageSystemPartsBase.cpp b/dbms/src/Storages/System/StorageSystemPartsBase.cpp index 303a8ddd939..7a242e74e75 100644 --- a/dbms/src/Storages/System/StorageSystemPartsBase.cpp +++ b/dbms/src/Storages/System/StorageSystemPartsBase.cpp @@ -253,21 +253,21 @@ bool StorageSystemPartsBase::hasColumn(const String & column_name) const StorageSystemPartsBase::StorageSystemPartsBase(std::string name_, NamesAndTypesList && columns_) : name(std::move(name_)) { - ColumnsDescription columns(std::move(columns_)); + ColumnsDescription tmp_columns(std::move(columns_)); auto add_alias = [&](const String & alias_name, const String & column_name) { - ColumnDescription column(alias_name, columns.get(column_name).type, false); + ColumnDescription column(alias_name, tmp_columns.get(column_name).type, false); column.default_desc.kind = ColumnDefaultKind::Alias; column.default_desc.expression = std::make_shared(column_name); - columns.add(column); + tmp_columns.add(column); }; /// Add aliases for old column names for backwards compatibility. add_alias("bytes", "bytes_on_disk"); add_alias("marks_size", "marks_bytes"); - setColumns(columns); + setColumns(tmp_columns); } } diff --git a/dbms/src/Storages/System/StorageSystemPartsColumns.cpp b/dbms/src/Storages/System/StorageSystemPartsColumns.cpp index ab688b514e7..09229d79665 100644 --- a/dbms/src/Storages/System/StorageSystemPartsColumns.cpp +++ b/dbms/src/Storages/System/StorageSystemPartsColumns.cpp @@ -15,8 +15,8 @@ namespace DB { -StorageSystemPartsColumns::StorageSystemPartsColumns(const std::string & name) - : StorageSystemPartsBase(name, +StorageSystemPartsColumns::StorageSystemPartsColumns(const std::string & name_) + : StorageSystemPartsBase(name_, { {"partition", std::make_shared()}, {"name", std::make_shared()}, @@ -58,7 +58,7 @@ StorageSystemPartsColumns::StorageSystemPartsColumns(const std::string & name) { } -void StorageSystemPartsColumns::processNextStorage(MutableColumns & columns, const StoragesInfo & info, bool has_state_column) +void StorageSystemPartsColumns::processNextStorage(MutableColumns & columns_, const StoragesInfo & info, bool has_state_column) { /// Prepare information about columns in storage. 
struct ColumnInfo @@ -106,59 +106,59 @@ void StorageSystemPartsColumns::processNextStorage(MutableColumns & columns, con { WriteBufferFromOwnString out; part->partition.serializeText(*info.data, out, format_settings); - columns[j++]->insert(out.str()); + columns_[j++]->insert(out.str()); } - columns[j++]->insert(part->name); - columns[j++]->insert(part_state == State::Committed); - columns[j++]->insert(part->getMarksCount()); + columns_[j++]->insert(part->name); + columns_[j++]->insert(part_state == State::Committed); + columns_[j++]->insert(part->getMarksCount()); - columns[j++]->insert(part->rows_count); - columns[j++]->insert(part->bytes_on_disk.load(std::memory_order_relaxed)); - columns[j++]->insert(columns_size.data_compressed); - columns[j++]->insert(columns_size.data_uncompressed); - columns[j++]->insert(columns_size.marks); - columns[j++]->insert(UInt64(part->modification_time)); - columns[j++]->insert(UInt64(part->remove_time.load(std::memory_order_relaxed))); + columns_[j++]->insert(part->rows_count); + columns_[j++]->insert(part->bytes_on_disk.load(std::memory_order_relaxed)); + columns_[j++]->insert(columns_size.data_compressed); + columns_[j++]->insert(columns_size.data_uncompressed); + columns_[j++]->insert(columns_size.marks); + columns_[j++]->insert(UInt64(part->modification_time)); + columns_[j++]->insert(UInt64(part->remove_time.load(std::memory_order_relaxed))); - columns[j++]->insert(UInt64(use_count)); + columns_[j++]->insert(UInt64(use_count)); - columns[j++]->insert(min_date); - columns[j++]->insert(max_date); - columns[j++]->insert(part->info.partition_id); - columns[j++]->insert(part->info.min_block); - columns[j++]->insert(part->info.max_block); - columns[j++]->insert(part->info.level); - columns[j++]->insert(UInt64(part->info.getDataVersion())); - columns[j++]->insert(index_size_in_bytes); - columns[j++]->insert(index_size_in_allocated_bytes); + columns_[j++]->insert(min_date); + columns_[j++]->insert(max_date); + columns_[j++]->insert(part->info.partition_id); + columns_[j++]->insert(part->info.min_block); + columns_[j++]->insert(part->info.max_block); + columns_[j++]->insert(part->info.level); + columns_[j++]->insert(UInt64(part->info.getDataVersion())); + columns_[j++]->insert(index_size_in_bytes); + columns_[j++]->insert(index_size_in_allocated_bytes); - columns[j++]->insert(info.database); - columns[j++]->insert(info.table); - columns[j++]->insert(info.engine); - columns[j++]->insert(part->getFullPath()); - columns[j++]->insert(column.name); - columns[j++]->insert(column.type->getName()); + columns_[j++]->insert(info.database); + columns_[j++]->insert(info.table); + columns_[j++]->insert(info.engine); + columns_[j++]->insert(part->getFullPath()); + columns_[j++]->insert(column.name); + columns_[j++]->insert(column.type->getName()); auto column_info_it = columns_info.find(column.name); if (column_info_it != columns_info.end()) { - columns[j++]->insert(column_info_it->second.default_kind); - columns[j++]->insert(column_info_it->second.default_expression); + columns_[j++]->insert(column_info_it->second.default_kind); + columns_[j++]->insert(column_info_it->second.default_expression); } else { - columns[j++]->insertDefault(); - columns[j++]->insertDefault(); + columns_[j++]->insertDefault(); + columns_[j++]->insertDefault(); } ColumnSize column_size = part->getColumnSize(column.name, *column.type); - columns[j++]->insert(column_size.data_compressed + column_size.marks); - columns[j++]->insert(column_size.data_compressed); - 
columns[j++]->insert(column_size.data_uncompressed); - columns[j++]->insert(column_size.marks); + columns_[j++]->insert(column_size.data_compressed + column_size.marks); + columns_[j++]->insert(column_size.data_compressed); + columns_[j++]->insert(column_size.data_uncompressed); + columns_[j++]->insert(column_size.marks); if (has_state_column) - columns[j++]->insert(part->stateString()); + columns_[j++]->insert(part->stateString()); } } } diff --git a/dbms/src/Storages/System/StorageSystemReplicationQueue.cpp b/dbms/src/Storages/System/StorageSystemReplicationQueue.cpp index 4ac81521b0b..55ca3274278 100644 --- a/dbms/src/Storages/System/StorageSystemReplicationQueue.cpp +++ b/dbms/src/Storages/System/StorageSystemReplicationQueue.cpp @@ -108,8 +108,8 @@ void StorageSystemReplicationQueue::fillData(MutableColumns & res_columns, const Array parts_to_merge; parts_to_merge.reserve(entry.source_parts.size()); - for (const auto & name : entry.source_parts) - parts_to_merge.push_back(name); + for (const auto & part_name : entry.source_parts) + parts_to_merge.push_back(part_name); size_t col_num = 0; res_columns[col_num++]->insert(database); diff --git a/dbms/src/Storages/System/StorageSystemTableFunctions.cpp b/dbms/src/Storages/System/StorageSystemTableFunctions.cpp index 367595e9742..65b1dc41879 100644 --- a/dbms/src/Storages/System/StorageSystemTableFunctions.cpp +++ b/dbms/src/Storages/System/StorageSystemTableFunctions.cpp @@ -12,9 +12,9 @@ NamesAndTypesList StorageSystemTableFunctions::getNamesAndTypes() void StorageSystemTableFunctions::fillData(MutableColumns & res_columns, const Context &, const SelectQueryInfo &) const { const auto & functions_names = TableFunctionFactory::instance().getAllRegisteredNames(); - for (const auto & name : functions_names) + for (const auto & function_name : functions_names) { - res_columns[0]->insert(name); + res_columns[0]->insert(function_name); } } diff --git a/dbms/src/Storages/System/StorageSystemTables.cpp b/dbms/src/Storages/System/StorageSystemTables.cpp index e962b9883b3..113c09165d9 100644 --- a/dbms/src/Storages/System/StorageSystemTables.cpp +++ b/dbms/src/Storages/System/StorageSystemTables.cpp @@ -65,12 +65,12 @@ class TablesBlockInputStream : public IBlockInputStream { public: TablesBlockInputStream( - std::vector columns_mask, - Block header, - UInt64 max_block_size, - ColumnPtr databases, - const Context & context) - : columns_mask(std::move(columns_mask)), header(std::move(header)), max_block_size(max_block_size), databases(std::move(databases)), context(context) {} + std::vector columns_mask_, + Block header_, + UInt64 max_block_size_, + ColumnPtr databases_, + const Context & context_) + : columns_mask(std::move(columns_mask_)), header(std::move(header_)), max_block_size(max_block_size_), databases(std::move(databases_)), context(context_) {} String getName() const override { return "Tables"; } Block getHeader() const override { return header; } diff --git a/dbms/src/TableFunctions/TableFunctionRemote.cpp b/dbms/src/TableFunctions/TableFunctionRemote.cpp index f6fa75bb2d2..9d0a8024c0e 100644 --- a/dbms/src/TableFunctions/TableFunctionRemote.cpp +++ b/dbms/src/TableFunctions/TableFunctionRemote.cpp @@ -180,8 +180,8 @@ StoragePtr TableFunctionRemote::executeImpl(const ASTPtr & ast_function, const C } -TableFunctionRemote::TableFunctionRemote(const std::string & name, bool secure) - : name{name}, secure{secure} +TableFunctionRemote::TableFunctionRemote(const std::string & name_, bool secure_) + : name{name_}, secure{secure_} { 
is_cluster_function = name == "cluster"; diff --git a/dbms/src/TableFunctions/TableFunctionRemote.h b/dbms/src/TableFunctions/TableFunctionRemote.h index c9a98cbbc16..ef2e5cf190c 100644 --- a/dbms/src/TableFunctions/TableFunctionRemote.h +++ b/dbms/src/TableFunctions/TableFunctionRemote.h @@ -16,7 +16,7 @@ namespace DB class TableFunctionRemote : public ITableFunction { public: - TableFunctionRemote(const std::string & name, bool secure = false); + TableFunctionRemote(const std::string & name_, bool secure_ = false); std::string getName() const override { return name; } diff --git a/libs/libcommon/include/ext/enumerate.h b/libs/libcommon/include/ext/enumerate.h index bb6f63ee148..9a55d853e35 100644 --- a/libs/libcommon/include/ext/enumerate.h +++ b/libs/libcommon/include/ext/enumerate.h @@ -26,7 +26,7 @@ namespace ext std::size_t idx; It it; - enumerate_iterator(const std::size_t idx, It it) : idx{idx}, it{it} {} + enumerate_iterator(const std::size_t idx_, It it_) : idx{idx_}, it{it_} {} auto operator*() const { return reference(idx, *it); } @@ -42,7 +42,7 @@ namespace ext Collection & collection; - enumerate_wrapper(Collection & collection) : collection(collection) {} + enumerate_wrapper(Collection & collection_) : collection(collection_) {} auto begin() { return iterator(0, std::begin(collection)); } auto end() { return iterator(ext::size(collection), std::end(collection)); } diff --git a/libs/libcommon/include/ext/scope_guard.h b/libs/libcommon/include/ext/scope_guard.h index 4162d80b77d..c2c7e5ec630 100644 --- a/libs/libcommon/include/ext/scope_guard.h +++ b/libs/libcommon/include/ext/scope_guard.h @@ -9,13 +9,13 @@ template class scope_guard { const F function; public: - constexpr scope_guard(const F & function) : function{function} {} - constexpr scope_guard(F && function) : function{std::move(function)} {} + constexpr scope_guard(const F & function_) : function{function_} {} + constexpr scope_guard(F && function_) : function{std::move(function_)} {} ~scope_guard() { function(); } }; template -inline scope_guard make_scope_guard(F && function) { return std::forward(function); } +inline scope_guard make_scope_guard(F && function_) { return std::forward(function_); } } diff --git a/libs/libloggers/loggers/ExtendedLogChannel.h b/libs/libloggers/loggers/ExtendedLogChannel.h index e70cd7b3094..5388c6aa467 100644 --- a/libs/libloggers/loggers/ExtendedLogChannel.h +++ b/libs/libloggers/loggers/ExtendedLogChannel.h @@ -13,7 +13,7 @@ namespace DB class ExtendedLogMessage { public: - explicit ExtendedLogMessage(const Poco::Message & base) : base(base) {} + explicit ExtendedLogMessage(const Poco::Message & base_) : base(base_) {} /// Attach additional data to the message static ExtendedLogMessage getFrom(const Poco::Message & base); From 7e6b1333a1c3046e518d2713ca2724382d8b7bb6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Aug 2019 00:58:17 +0300 Subject: [PATCH 60/84] Renamed Yandex CTO just in case --- cmake/find_fastops.cmake | 2 +- dbms/src/Functions/FunctionMathUnary.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/find_fastops.cmake b/cmake/find_fastops.cmake index c8ddbaf80a7..df1750b8bef 100644 --- a/cmake/find_fastops.cmake +++ b/cmake/find_fastops.cmake @@ -1,4 +1,4 @@ -option (ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Michael Parakhin" ${NOT_UNBUNDLED}) +option (ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Mikhail Parakhin" ${NOT_UNBUNDLED}) if (ENABLE_FASTOPS) if(NOT EXISTS 
"${ClickHouse_SOURCE_DIR}/contrib/fastops/fastops/fastops.h") diff --git a/dbms/src/Functions/FunctionMathUnary.h b/dbms/src/Functions/FunctionMathUnary.h index 363951510fe..caa61d76758 100644 --- a/dbms/src/Functions/FunctionMathUnary.h +++ b/dbms/src/Functions/FunctionMathUnary.h @@ -31,7 +31,7 @@ #endif -/** FastOps is a fast vector math library from Michael Parakhin (former Yandex CTO), +/** FastOps is a fast vector math library from Mikhail Parakhin (former Yandex CTO), * Enabled by default. */ #if USE_FASTOPS From 0a9787c7f3eb295ed94c7a6d35fe702ad59648e3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Aug 2019 19:38:25 +0300 Subject: [PATCH 61/84] Fixed possible stack overflow in parser --- dbms/programs/client/Client.cpp | 2 +- dbms/src/Parsers/IParser.h | 30 ++++++++++++++++++- dbms/src/Parsers/IParserBase.cpp | 2 ++ dbms/src/Parsers/parseQuery.cpp | 2 +- .../Formats/Impl/ValuesRowInputFormat.cpp | 3 +- .../parseColumnsListForTableFunction.cpp | 2 +- 6 files changed, 35 insertions(+), 6 deletions(-) diff --git a/dbms/programs/client/Client.cpp b/dbms/programs/client/Client.cpp index 091a1ac063f..cf72d7a87c3 100644 --- a/dbms/programs/client/Client.cpp +++ b/dbms/programs/client/Client.cpp @@ -711,7 +711,7 @@ private: if (ignore_error) { Tokens tokens(begin, end); - TokenIterator token_iterator(tokens); + IParser::Pos token_iterator(tokens); while (token_iterator->type != TokenType::Semicolon && token_iterator.isValid()) ++token_iterator; begin = token_iterator->end; diff --git a/dbms/src/Parsers/IParser.h b/dbms/src/Parsers/IParser.h index b300e99c4cf..0d39b29ae96 100644 --- a/dbms/src/Parsers/IParser.h +++ b/dbms/src/Parsers/IParser.h @@ -12,6 +12,13 @@ namespace DB { +namespace ErrorCodes +{ + extern const int TOO_DEEP_RECURSION; + extern const int LOGICAL_ERROR; +} + + /** Collects variants, how parser could proceed further at rightmost position. */ struct Expected @@ -44,7 +51,28 @@ struct Expected class IParser { public: - using Pos = TokenIterator; + /// Token iterator augmented with depth information. This allows to control recursion depth. + struct Pos : TokenIterator + { + using TokenIterator::TokenIterator; + + uint32_t depth = 0; + uint32_t max_depth = 1000; + + void increaseDepth() + { + ++depth; + if (depth > max_depth) + throw Exception("Maximum parse depth exceeded", ErrorCodes::TOO_DEEP_RECURSION); + } + + void decreaseDepth() + { + if (depth == 0) + throw Exception("Logical error in parser: incorrect calculation of parse depth", ErrorCodes::LOGICAL_ERROR); + --depth; + } + }; /** Get the text of this parser parses. 
*/ virtual const char * getName() const = 0; diff --git a/dbms/src/Parsers/IParserBase.cpp b/dbms/src/Parsers/IParserBase.cpp index ddbac8e92ee..64162a595c9 100644 --- a/dbms/src/Parsers/IParserBase.cpp +++ b/dbms/src/Parsers/IParserBase.cpp @@ -15,7 +15,9 @@ bool IParserBase::parse(Pos & pos, ASTPtr & node, Expected & expected) Pos begin = pos; expected.add(pos, getName()); + pos.increaseDepth(); bool res = parseImpl(pos, node, expected); + pos.decreaseDepth(); if (!res) { diff --git a/dbms/src/Parsers/parseQuery.cpp b/dbms/src/Parsers/parseQuery.cpp index 3d761d09b13..da779ae83ba 100644 --- a/dbms/src/Parsers/parseQuery.cpp +++ b/dbms/src/Parsers/parseQuery.cpp @@ -218,7 +218,7 @@ ASTPtr tryParseQuery( size_t max_query_size) { Tokens tokens(pos, end, max_query_size); - TokenIterator token_iterator(tokens); + IParser::Pos token_iterator(tokens); if (token_iterator->isEnd() || token_iterator->type == TokenType::Semicolon) diff --git a/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.cpp index 337085198a3..7174da6c1a9 100644 --- a/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.cpp @@ -2,7 +2,6 @@ #include #include #include -#include #include #include #include @@ -105,7 +104,7 @@ bool ValuesRowInputFormat::readRow(MutableColumns & columns, RowReadExtension &) Expected expected; Tokens tokens(prev_in_position, in.buffer().end()); - TokenIterator token_iterator(tokens); + IParser::Pos token_iterator(tokens); ASTPtr ast; if (!parser.parse(token_iterator, ast, expected)) diff --git a/dbms/src/TableFunctions/parseColumnsListForTableFunction.cpp b/dbms/src/TableFunctions/parseColumnsListForTableFunction.cpp index be7f2efb7ac..cdc9063d415 100644 --- a/dbms/src/TableFunctions/parseColumnsListForTableFunction.cpp +++ b/dbms/src/TableFunctions/parseColumnsListForTableFunction.cpp @@ -17,7 +17,7 @@ ColumnsDescription parseColumnsListFromString(const std::string & structure, con Expected expected; Tokens tokens(structure.c_str(), structure.c_str() + structure.size()); - TokenIterator token_iterator(tokens); + IParser::Pos token_iterator(tokens); ParserColumnDeclarationList parser; ASTPtr columns_list_raw; From 02a6b2c1ab8f05d158871fadf08c5825b67f0326 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Aug 2019 19:40:57 +0300 Subject: [PATCH 62/84] Added a test --- .../0_stateless/00984_parser_stack_overflow.reference | 1 + .../queries/0_stateless/00984_parser_stack_overflow.sh | 6 ++++++ 2 files changed, 7 insertions(+) create mode 100644 dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference create mode 100755 dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh diff --git a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference new file mode 100644 index 00000000000..a4009b07e58 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference @@ -0,0 +1 @@ +exceeded diff --git a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh new file mode 100755 index 00000000000..674515a5b80 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
$CURDIR/../shell_config.sh + +perl -e 'print "(" x 10000 ' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -oF 'exceeded' From e11ba9ded90c0bff46f136b4f6dc8a8f1e5239ec Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Aug 2019 19:47:59 +0300 Subject: [PATCH 63/84] One more test --- .../queries/0_stateless/00984_parser_stack_overflow.reference | 1 + dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference index a4009b07e58..bc791c0e6fa 100644 --- a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference +++ b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference @@ -1 +1,2 @@ exceeded +20002 diff --git a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh index 674515a5b80..2c593845371 100755 --- a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh +++ b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh @@ -3,4 +3,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . $CURDIR/../shell_config.sh +# Too deep recursion perl -e 'print "(" x 10000 ' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -oF 'exceeded' + +# But this is Ok +perl -e 'print "SELECT 1" . (",1" x 10000) ' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | wc -c From b133ea85b456f3255a6d030024fa5452da8f51c0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Aug 2019 19:51:09 +0300 Subject: [PATCH 64/84] More tests --- .../0_stateless/00984_parser_stack_overflow.reference | 2 ++ .../queries/0_stateless/00984_parser_stack_overflow.sh | 6 ++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference index bc791c0e6fa..50d27ab5d84 100644 --- a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference +++ b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference @@ -1,2 +1,4 @@ exceeded +exceeded +exceeded 20002 diff --git a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh index 2c593845371..cfb08d30cdf 100755 --- a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh +++ b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh @@ -4,7 +4,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . $CURDIR/../shell_config.sh # Too deep recursion -perl -e 'print "(" x 10000 ' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -oF 'exceeded' +perl -e 'print "(" x 10000' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -oF 'exceeded' +perl -e 'print "SELECT " . ("[" x 10000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -oF 'exceeded' +perl -e 'print "SELECT " . ("([" x 5000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -oF 'exceeded' # But this is Ok -perl -e 'print "SELECT 1" . (",1" x 10000) ' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | wc -c +perl -e 'print "SELECT 1" . 
(",1" x 10000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | wc -c From c80aeb0ef1b04fda55b4613ac4fc270a82f59e41 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Aug 2019 20:08:14 +0300 Subject: [PATCH 65/84] Fixed another case; added a test --- dbms/src/Parsers/ExpressionListParsers.cpp | 5 +++++ .../0_stateless/00984_parser_stack_overflow.reference | 2 ++ .../tests/queries/0_stateless/00984_parser_stack_overflow.sh | 2 ++ 3 files changed, 9 insertions(+) diff --git a/dbms/src/Parsers/ExpressionListParsers.cpp b/dbms/src/Parsers/ExpressionListParsers.cpp index 4c8ef64b804..6f2e3f103c6 100644 --- a/dbms/src/Parsers/ExpressionListParsers.cpp +++ b/dbms/src/Parsers/ExpressionListParsers.cpp @@ -139,6 +139,7 @@ bool ParserLeftAssociativeBinaryOperatorList::parseImpl(Pos & pos, ASTPtr & node { bool first = true; + auto current_depth = pos.depth; while (1) { if (first) @@ -190,10 +191,14 @@ bool ParserLeftAssociativeBinaryOperatorList::parseImpl(Pos & pos, ASTPtr & node ++pos; } + /// Left associative operator chain is parsed as a tree: ((((1 + 1) + 1) + 1) + 1)... + /// We must account it's depth - otherwise we may end up with stack overflow later - on destruction of AST. + pos.increaseDepth(); node = function; } } + pos.depth = current_depth; return true; } diff --git a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference index 50d27ab5d84..a46c80e9233 100644 --- a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference +++ b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference @@ -1,4 +1,6 @@ exceeded exceeded exceeded +exceeded 20002 +1 diff --git a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh index cfb08d30cdf..64fae3fb0f9 100755 --- a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh +++ b/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh @@ -7,6 +7,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) perl -e 'print "(" x 10000' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -oF 'exceeded' perl -e 'print "SELECT " . ("[" x 10000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -oF 'exceeded' perl -e 'print "SELECT " . ("([" x 5000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -oF 'exceeded' +perl -e 'print "SELECT 1" . ("+1" x 10000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -oF 'exceeded' # But this is Ok perl -e 'print "SELECT 1" . (",1" x 10000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | wc -c +perl -e 'print "SELECT 1" . 
(" OR 1" x 10000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- From afef5c6c70888f45f82f0abea448d6199e350f9b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Aug 2019 20:51:47 +0300 Subject: [PATCH 66/84] Added stack protection; added a test --- dbms/src/Common/ErrorCodes.cpp | 1 + dbms/src/Common/checkStackSize.cpp | 62 +++++++++++++++++++ dbms/src/Common/checkStackSize.h | 7 +++ .../ClusterProxy/SelectStreamFactory.cpp | 3 + .../Interpreters/InterpreterInsertQuery.cpp | 2 + .../Interpreters/InterpreterSelectQuery.cpp | 3 + dbms/src/Storages/StorageMerge.cpp | 2 + .../00985_merge_stack_overflow.reference | 0 .../00985_merge_stack_overflow.sql | 11 ++++ 9 files changed, 91 insertions(+) create mode 100644 dbms/src/Common/checkStackSize.cpp create mode 100644 dbms/src/Common/checkStackSize.h create mode 100644 dbms/tests/queries/0_stateless/00985_merge_stack_overflow.reference create mode 100644 dbms/tests/queries/0_stateless/00985_merge_stack_overflow.sql diff --git a/dbms/src/Common/ErrorCodes.cpp b/dbms/src/Common/ErrorCodes.cpp index cd4601e5b3d..4128ddb8edc 100644 --- a/dbms/src/Common/ErrorCodes.cpp +++ b/dbms/src/Common/ErrorCodes.cpp @@ -442,6 +442,7 @@ namespace ErrorCodes extern const int CANNOT_PARSE_DWARF = 465; extern const int INSECURE_PATH = 466; extern const int CANNOT_PARSE_BOOL = 467; + extern const int CANNOT_PTHREAD_ATTR = 468; extern const int KEEPER_EXCEPTION = 999; extern const int POCO_EXCEPTION = 1000; diff --git a/dbms/src/Common/checkStackSize.cpp b/dbms/src/Common/checkStackSize.cpp new file mode 100644 index 00000000000..e7f91bc3330 --- /dev/null +++ b/dbms/src/Common/checkStackSize.cpp @@ -0,0 +1,62 @@ +#include +#include +#include + +#include +#include +#include + + +namespace DB +{ + namespace ErrorCodes + { + extern const int CANNOT_PTHREAD_ATTR; + extern const int LOGICAL_ERROR; + extern const int TOO_DEEP_RECURSION; + } +} + + +static thread_local void * stack_address = nullptr; +static thread_local size_t max_stack_size = 0; + +void checkStackSize() +{ + using namespace DB; + + if (!stack_address) + { + pthread_attr_t attr; + if (0 != pthread_getattr_np(pthread_self(), &attr)) + throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR); + + SCOPE_EXIT({ pthread_attr_destroy(&attr); }); + + if (0 != pthread_attr_getstack(&attr, &stack_address, &max_stack_size)) + throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR); + } + + const void * frame_address = __builtin_frame_address(0); + uintptr_t int_frame_address = reinterpret_cast(frame_address); + uintptr_t int_stack_address = reinterpret_cast(stack_address); + + /// We assume that stack grows towards lower addresses. And that it starts to grow from the end of a chunk of memory of max_stack_size. + if (int_frame_address > int_stack_address + max_stack_size) + throw Exception("Logical error: frame address is greater than stack begin address", ErrorCodes::LOGICAL_ERROR); + + size_t stack_size = int_stack_address + max_stack_size - int_frame_address; + + /// Just check if we have already eat more than a half of stack size. It's a bit overkill (a half of stack size is wasted). + /// It's safe to assume that overflow in multiplying by two cannot occur. + if (stack_size * 2 > max_stack_size) + { + std::stringstream message; + message << "Stack size too large" + << ". 
Stack address: " << stack_address + << ", frame address: " << frame_address + << ", stack size: " << stack_size + << ", maximum stack size: " << max_stack_size; + throw Exception(message.str(), ErrorCodes::TOO_DEEP_RECURSION); + } +} diff --git a/dbms/src/Common/checkStackSize.h b/dbms/src/Common/checkStackSize.h new file mode 100644 index 00000000000..355ceed430b --- /dev/null +++ b/dbms/src/Common/checkStackSize.h @@ -0,0 +1,7 @@ +#pragma once + +/** If the stack is large enough and is near its size, throw an exception. + * You can call this function in "heavy" functions that may be called recursively + * to prevent possible stack overflows. + */ +void checkStackSize(); diff --git a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index ba0571d1863..9e49d302100 100644 --- a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -58,6 +59,8 @@ namespace BlockInputStreamPtr createLocalStream(const ASTPtr & query_ast, const Context & context, QueryProcessingStage::Enum processed_stage) { + checkStackSize(); + InterpreterSelectQuery interpreter{query_ast, context, SelectQueryOptions(processed_stage)}; BlockInputStreamPtr stream = interpreter.execute().in; diff --git a/dbms/src/Interpreters/InterpreterInsertQuery.cpp b/dbms/src/Interpreters/InterpreterInsertQuery.cpp index dbb90028316..648f13bec62 100644 --- a/dbms/src/Interpreters/InterpreterInsertQuery.cpp +++ b/dbms/src/Interpreters/InterpreterInsertQuery.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -39,6 +40,7 @@ InterpreterInsertQuery::InterpreterInsertQuery( const ASTPtr & query_ptr_, const Context & context_, bool allow_materialized_) : query_ptr(query_ptr_), context(context_), allow_materialized(allow_materialized_) { + checkStackSize(); } diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.cpp b/dbms/src/Interpreters/InterpreterSelectQuery.cpp index 9682d0e29e4..aea37c7fa36 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.cpp +++ b/dbms/src/Interpreters/InterpreterSelectQuery.cpp @@ -58,6 +58,7 @@ #include #include #include +#include #include #include #include @@ -211,6 +212,8 @@ InterpreterSelectQuery::InterpreterSelectQuery( , input(input_) , log(&Logger::get("InterpreterSelectQuery")) { + checkStackSize(); + initSettings(); const Settings & settings = context.getSettingsRef(); diff --git a/dbms/src/Storages/StorageMerge.cpp b/dbms/src/Storages/StorageMerge.cpp index 3487a1becf5..913ab6af4ea 100644 --- a/dbms/src/Storages/StorageMerge.cpp +++ b/dbms/src/Storages/StorageMerge.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -387,6 +388,7 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const ASTPtr DatabaseIteratorPtr StorageMerge::getDatabaseIterator(const Context & context) const { + checkStackSize(); auto database = context.getDatabase(source_database); auto table_name_match = [this](const String & table_name_) { return table_name_regexp.match(table_name_); }; return database->getIterator(global_context, table_name_match); diff --git a/dbms/tests/queries/0_stateless/00985_merge_stack_overflow.reference b/dbms/tests/queries/0_stateless/00985_merge_stack_overflow.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00985_merge_stack_overflow.sql 
b/dbms/tests/queries/0_stateless/00985_merge_stack_overflow.sql new file mode 100644 index 00000000000..3a3e5640a38 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00985_merge_stack_overflow.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS merge1; +DROP TABLE IF EXISTS merge2; + +CREATE TABLE IF NOT EXISTS merge1 (x UInt64) ENGINE = Merge(currentDatabase(), '^merge\\d$'); +CREATE TABLE IF NOT EXISTS merge2 (x UInt64) ENGINE = Merge(currentDatabase(), '^merge\\d$'); + +SELECT * FROM merge1; -- { serverError 306 } +SELECT * FROM merge2; -- { serverError 306 } + +DROP TABLE merge1; +DROP TABLE merge2; From 692ce0f6c59b9078da182c05d4b60d4d824bf9de Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Aug 2019 21:01:35 +0300 Subject: [PATCH 67/84] Added two more tests --- ...6_materialized_view_stack_overflow.reference | 0 .../00986_materialized_view_stack_overflow.sql | 17 +++++++++++++++++ .../00987_distributed_stack_overflow.reference | 0 .../00987_distributed_stack_overflow.sql | 13 +++++++++++++ 4 files changed, 30 insertions(+) create mode 100644 dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.reference create mode 100644 dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.sql create mode 100644 dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.reference create mode 100644 dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.sql diff --git a/dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.reference b/dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.sql b/dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.sql new file mode 100644 index 00000000000..a39688d81a7 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; +DROP TABLE IF EXISTS mv1; +DROP TABLE IF EXISTS mv2; + +CREATE TABLE test1 (a UInt8) ENGINE MergeTree ORDER BY a; +CREATE TABLE test2 (a UInt8) ENGINE MergeTree ORDER BY a; + +CREATE MATERIALIZED VIEW mv1 TO test1 AS SELECT a FROM test2; +CREATE MATERIALIZED VIEW mv2 TO test2 AS SELECT a FROM test1; + +insert into test1 values (1); -- { serverError 306 } + +DROP TABLE test1; +DROP TABLE test2; +DROP TABLE mv1; +DROP TABLE mv2; diff --git a/dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.reference b/dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.sql b/dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.sql new file mode 100644 index 00000000000..8bc7b6963b9 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS distr1; +DROP TABLE IF EXISTS distr2; + +CREATE TABLE distr (x UInt8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), distr); -- { serverError 269 } + +CREATE TABLE distr1 (x UInt8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), distr2); +CREATE TABLE distr2 (x UInt8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), distr1); + +SELECT * FROM distr1; -- { serverError 306 } +SELECT * FROM distr2; -- { serverError 306 } + +DROP TABLE distr1; +DROP TABLE distr2; From 
cce3ab08bb2cc45bae7f13d49a79e7ebba4f006b Mon Sep 17 00:00:00 2001 From: Ivan <5627721+abyss7@users.noreply.github.com> Date: Sun, 11 Aug 2019 00:00:13 +0300 Subject: [PATCH 68/84] Do not convert columns by position when pushing to materialized views (#6415) * Do not convert columns by position * Update 00984_materialized_view_to_columns.reference --- .../PushingToViewsBlockOutputStream.cpp | 2 +- .../00984_materialized_view_to_columns.reference | 1 + .../00984_materialized_view_to_columns.sql | 15 +++++++++++++++ 3 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.reference create mode 100644 dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.sql diff --git a/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp b/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp index 7a71384c8dd..0879642c8a9 100644 --- a/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp +++ b/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp @@ -193,7 +193,7 @@ void PushingToViewsBlockOutputStream::process(const Block & block, size_t view_n /// and two-level aggregation is triggered). in = std::make_shared( in, context.getSettingsRef().min_insert_block_size_rows, context.getSettingsRef().min_insert_block_size_bytes); - in = std::make_shared(context, in, view.out->getHeader(), ConvertingBlockInputStream::MatchColumnsMode::Position); + in = std::make_shared(context, in, view.out->getHeader(), ConvertingBlockInputStream::MatchColumnsMode::Name); in->readPrefix(); diff --git a/dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.reference b/dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.reference new file mode 100644 index 00000000000..2ad35ec5445 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.reference @@ -0,0 +1 @@ +1 test diff --git a/dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.sql b/dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.sql new file mode 100644 index 00000000000..948b32fe27b --- /dev/null +++ b/dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; +DROP TABLE IF EXISTS mv; + +CREATE TABLE test1 (a UInt8, b String) ENGINE MergeTree ORDER BY a; +CREATE TABLE test2 (c UInt8, d String) ENGINE MergeTree ORDER BY c; +CREATE MATERIALIZED VIEW mv TO test1 (b String, a UInt8) AS SELECT d AS b, c AS a FROM test2; + +INSERT INTO test2 VALUES (1, 'test'); + +SELECT * FROM test1; + +DROP TABLE test1; +DROP TABLE test2; +DROP TABLE mv; From efa51a6cd9007ec853feae5fffe1e1610b26a6d6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Aug 2019 01:36:55 +0300 Subject: [PATCH 69/84] Batch aggregation (experimental) --- .../AggregateFunctions/IAggregateFunction.cpp | 17 +++++++ .../AggregateFunctions/IAggregateFunction.h | 5 ++ dbms/src/Interpreters/Aggregator.cpp | 50 ++++++++++++++++--- dbms/src/Interpreters/Aggregator.h | 11 ++-- 4 files changed, 74 insertions(+), 9 deletions(-) create mode 100644 dbms/src/AggregateFunctions/IAggregateFunction.cpp diff --git a/dbms/src/AggregateFunctions/IAggregateFunction.cpp b/dbms/src/AggregateFunctions/IAggregateFunction.cpp new file mode 100644 index 00000000000..135f6c2662f --- /dev/null +++ b/dbms/src/AggregateFunctions/IAggregateFunction.cpp @@ -0,0 +1,17 @@ +#include + +namespace DB +{ + +void IAggregateFunction::addBatch( + size_t batch_size, + 
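+    /// ("places" is expected to hold one aggregation-state block per batch row, and
+    /// "place_offset" locates this particular function's state inside each block.)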
AggregateDataPtr * places, + size_t place_offset, + const IColumn ** columns, + Arena * arena) const +{ + for (size_t i = 0; i < batch_size; ++i) + add(places[i] + place_offset, columns, i, arena); +} + +} diff --git a/dbms/src/AggregateFunctions/IAggregateFunction.h b/dbms/src/AggregateFunctions/IAggregateFunction.h index 94feb2456cf..ae6937a04bd 100644 --- a/dbms/src/AggregateFunctions/IAggregateFunction.h +++ b/dbms/src/AggregateFunctions/IAggregateFunction.h @@ -128,6 +128,11 @@ public: using AddFunc = void (*)(const IAggregateFunction *, AggregateDataPtr, const IColumn **, size_t, Arena *); virtual AddFunc getAddressOfAddFunction() const = 0; + /** Contains a loop with calls to "add" function. You can collect arguments into array "places" + * and do a single call to "addBatch" for devirtualization and inlining. + */ + void addBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, Arena * arena) const; + /** This is used for runtime code generation to determine, which header files to include in generated source. * Always implement it as * const char * getHeaderFilePath() const override { return __FILE__; } diff --git a/dbms/src/Interpreters/Aggregator.cpp b/dbms/src/Interpreters/Aggregator.cpp index 23a06ea58d6..f3b30d51d0d 100644 --- a/dbms/src/Interpreters/Aggregator.cpp +++ b/dbms/src/Interpreters/Aggregator.cpp @@ -583,16 +583,16 @@ void NO_INLINE Aggregator::executeImpl( size_t rows, ColumnRawPtrs & key_columns, AggregateFunctionInstruction * aggregate_instructions, - StringRefs & keys, bool no_more_keys, AggregateDataPtr overflow_row) const { typename Method::State state(key_columns, key_sizes, aggregation_state_cache); if (!no_more_keys) - executeImplCase(method, state, aggregates_pool, rows, key_columns, aggregate_instructions, keys, overflow_row); + //executeImplCase(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); + executeImplBatch(method, state, aggregates_pool, rows, aggregate_instructions); else - executeImplCase(method, state, aggregates_pool, rows, key_columns, aggregate_instructions, keys, overflow_row); + executeImplCase(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); } @@ -602,9 +602,7 @@ void NO_INLINE Aggregator::executeImplCase( typename Method::State & state, Arena * aggregates_pool, size_t rows, - ColumnRawPtrs & /*key_columns*/, AggregateFunctionInstruction * aggregate_instructions, - StringRefs & /*keys*/, AggregateDataPtr overflow_row) const { /// NOTE When editing this code, also pay attention to SpecializedAggregator.h. @@ -655,6 +653,46 @@ void NO_INLINE Aggregator::executeImplCase( } +template +void NO_INLINE Aggregator::executeImplBatch( + Method & method, + typename Method::State & state, + Arena * aggregates_pool, + size_t rows, + AggregateFunctionInstruction * aggregate_instructions) const +{ + PODArray places(rows); + + /// For all rows. + for (size_t i = 0; i < rows; ++i) + { + AggregateDataPtr aggregate_data = nullptr; + + auto emplace_result = state.emplaceKey(method.data, i, *aggregates_pool); + + /// If a new key is inserted, initialize the states of the aggregate functions, and possibly something related to the key. + if (emplace_result.isInserted()) + { + /// exception-safety - if you can not allocate memory or create states, then destructors will not be called. 
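+                /// (The mapping is first published as nullptr and only replaced with the
+                /// real pointer after createAggregateStates has succeeded, so a throwing
+                /// state constructor cannot leave a pointer to a half-built state behind.)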
+ emplace_result.setMapped(nullptr); + + aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); + createAggregateStates(aggregate_data); + + emplace_result.setMapped(aggregate_data); + } + else + aggregate_data = emplace_result.getMapped(); + + places[i] = aggregate_data; + } + + /// Add values to the aggregate functions. + for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) + inst->that->addBatch(rows, places.data(), inst->state_offset, inst->arguments, aggregates_pool); +} + + void NO_INLINE Aggregator::executeWithoutKeyImpl( AggregatedDataWithoutKey & res, size_t rows, @@ -826,7 +864,7 @@ bool Aggregator::executeOnBlock(const Block & block, AggregatedDataVariants & re #define M(NAME, IS_TWO_LEVEL) \ else if (result.type == AggregatedDataVariants::Type::NAME) \ executeImpl(*result.NAME, result.aggregates_pool, rows, key_columns, aggregate_functions_instructions.data(), \ - key, no_more_keys, overflow_row_ptr); + no_more_keys, overflow_row_ptr); if (false) {} APPLY_FOR_AGGREGATED_VARIANTS(M) diff --git a/dbms/src/Interpreters/Aggregator.h b/dbms/src/Interpreters/Aggregator.h index cf4b590258f..e089f4707d2 100644 --- a/dbms/src/Interpreters/Aggregator.h +++ b/dbms/src/Interpreters/Aggregator.h @@ -1003,7 +1003,6 @@ protected: size_t rows, ColumnRawPtrs & key_columns, AggregateFunctionInstruction * aggregate_instructions, - StringRefs & keys, bool no_more_keys, AggregateDataPtr overflow_row) const; @@ -1014,11 +1013,17 @@ protected: typename Method::State & state, Arena * aggregates_pool, size_t rows, - ColumnRawPtrs & key_columns, AggregateFunctionInstruction * aggregate_instructions, - StringRefs & keys, AggregateDataPtr overflow_row) const; + template + void executeImplBatch( + Method & method, + typename Method::State & state, + Arena * aggregates_pool, + size_t rows, + AggregateFunctionInstruction * aggregate_instructions) const; + /// For case when there are no keys (all aggregate into one row). void executeWithoutKeyImpl( AggregatedDataWithoutKey & res, From c98d2fe6e13a6089f32f5d215ab8462844755cf6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Aug 2019 01:54:33 +0300 Subject: [PATCH 70/84] Addition to prev. revision --- .../AggregateFunctions/IAggregateFunction.cpp | 17 ----------------- .../src/AggregateFunctions/IAggregateFunction.h | 9 ++++++++- 2 files changed, 8 insertions(+), 18 deletions(-) delete mode 100644 dbms/src/AggregateFunctions/IAggregateFunction.cpp diff --git a/dbms/src/AggregateFunctions/IAggregateFunction.cpp b/dbms/src/AggregateFunctions/IAggregateFunction.cpp deleted file mode 100644 index 135f6c2662f..00000000000 --- a/dbms/src/AggregateFunctions/IAggregateFunction.cpp +++ /dev/null @@ -1,17 +0,0 @@ -#include - -namespace DB -{ - -void IAggregateFunction::addBatch( - size_t batch_size, - AggregateDataPtr * places, - size_t place_offset, - const IColumn ** columns, - Arena * arena) const -{ - for (size_t i = 0; i < batch_size; ++i) - add(places[i] + place_offset, columns, i, arena); -} - -} diff --git a/dbms/src/AggregateFunctions/IAggregateFunction.h b/dbms/src/AggregateFunctions/IAggregateFunction.h index ae6937a04bd..101b194227b 100644 --- a/dbms/src/AggregateFunctions/IAggregateFunction.h +++ b/dbms/src/AggregateFunctions/IAggregateFunction.h @@ -131,7 +131,7 @@ public: /** Contains a loop with calls to "add" function. You can collect arguments into array "places" * and do a single call to "addBatch" for devirtualization and inlining. 
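     * (IAggregateFunctionHelper implements it as a loop that calls the Derived "add"
     * through static_cast, so a batch costs one virtual dispatch instead of one per row
     * and the compiler is free to inline the per-row work.)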
*/ - void addBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, Arena * arena) const; + virtual void addBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, Arena * arena) const = 0; /** This is used for runtime code generation to determine, which header files to include in generated source. * Always implement it as @@ -161,7 +161,14 @@ private: public: IAggregateFunctionHelper(const DataTypes & argument_types_, const Array & parameters_) : IAggregateFunction(argument_types_, parameters_) {} + AddFunc getAddressOfAddFunction() const override { return &addFree; } + + void addBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, Arena * arena) const override + { + for (size_t i = 0; i < batch_size; ++i) + static_cast(this)->add(places[i] + place_offset, columns, i, arena); + } }; From 3885cc20b86215ddb7674047f55a439a6dd6a898 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Aug 2019 02:22:23 +0300 Subject: [PATCH 71/84] Improved performance of aggregation without key --- dbms/src/AggregateFunctions/IAggregateFunction.h | 10 ++++++++++ dbms/src/Interpreters/Aggregator.cpp | 9 +++------ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/dbms/src/AggregateFunctions/IAggregateFunction.h b/dbms/src/AggregateFunctions/IAggregateFunction.h index 101b194227b..ed535d91ece 100644 --- a/dbms/src/AggregateFunctions/IAggregateFunction.h +++ b/dbms/src/AggregateFunctions/IAggregateFunction.h @@ -133,6 +133,10 @@ public: */ virtual void addBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, Arena * arena) const = 0; + /** The same for single place. + */ + virtual void addBatchSinglePlace(size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena) const = 0; + /** This is used for runtime code generation to determine, which header files to include in generated source. 
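     * (Each concrete function returning its own __FILE__ lets the generated source for
     * compiled aggregation emit the matching #include lines.)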
* Always implement it as * const char * getHeaderFilePath() const override { return __FILE__; } @@ -169,6 +173,12 @@ public: for (size_t i = 0; i < batch_size; ++i) static_cast(this)->add(places[i] + place_offset, columns, i, arena); } + + void addBatchSinglePlace(size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena) const override + { + for (size_t i = 0; i < batch_size; ++i) + static_cast(this)->add(place, columns, i, arena); + } }; diff --git a/dbms/src/Interpreters/Aggregator.cpp b/dbms/src/Interpreters/Aggregator.cpp index f3b30d51d0d..2498bcec7fd 100644 --- a/dbms/src/Interpreters/Aggregator.cpp +++ b/dbms/src/Interpreters/Aggregator.cpp @@ -708,12 +708,9 @@ void NO_INLINE Aggregator::executeWithoutKeyImpl( agg_count->addDelta(res, rows); else { - for (size_t i = 0; i < rows; ++i) - { - /// Adding values - for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) - (*inst->func)(inst->that, res + inst->state_offset, inst->arguments, i, arena); - } + /// Adding values + for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) + inst->that->addBatchSinglePlace(rows, res + inst->state_offset, inst->arguments, arena); } } From 011e50cae6b15d6ae1bf51621c5fdc505a84ae27 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Aug 2019 02:27:25 +0300 Subject: [PATCH 72/84] Removed useless function arguments --- dbms/src/Interpreters/Aggregator.cpp | 5 ++--- dbms/src/Interpreters/Aggregator.h | 3 --- dbms/src/Interpreters/SpecializedAggregator.h | 7 ++----- 3 files changed, 4 insertions(+), 11 deletions(-) diff --git a/dbms/src/Interpreters/Aggregator.cpp b/dbms/src/Interpreters/Aggregator.cpp index 2498bcec7fd..c19a54a07af 100644 --- a/dbms/src/Interpreters/Aggregator.cpp +++ b/dbms/src/Interpreters/Aggregator.cpp @@ -289,7 +289,7 @@ void Aggregator::compileIfPossible(AggregatedDataVariants::Type type) "template void Aggregator::executeSpecialized<\n" " " << method_typename << ", TypeList<" << aggregate_functions_typenames << ">>(\n" " " << method_typename << " &, Arena *, size_t, ColumnRawPtrs &,\n" - " AggregateColumns &, StringRefs &, bool, AggregateDataPtr) const;\n" + " AggregateColumns &, bool, AggregateDataPtr) const;\n" "\n" "static void wrapper" << suffix << "(\n" " const Aggregator & aggregator,\n" @@ -298,13 +298,12 @@ void Aggregator::compileIfPossible(AggregatedDataVariants::Type type) " size_t rows,\n" " ColumnRawPtrs & key_columns,\n" " Aggregator::AggregateColumns & aggregate_columns,\n" - " StringRefs & keys,\n" " bool no_more_keys,\n" " AggregateDataPtr overflow_row)\n" "{\n" " aggregator.executeSpecialized<\n" " " << method_typename << ", TypeList<" << aggregate_functions_typenames << ">>(\n" - " method, arena, rows, key_columns, aggregate_columns, keys, no_more_keys, overflow_row);\n" + " method, arena, rows, key_columns, aggregate_columns, no_more_keys, overflow_row);\n" "}\n" "\n" "void * getPtr" << suffix << "() __attribute__((__visibility__(\"default\")));\n" diff --git a/dbms/src/Interpreters/Aggregator.h b/dbms/src/Interpreters/Aggregator.h index e089f4707d2..4b6f7d16b90 100644 --- a/dbms/src/Interpreters/Aggregator.h +++ b/dbms/src/Interpreters/Aggregator.h @@ -1047,7 +1047,6 @@ public: size_t rows, ColumnRawPtrs & key_columns, AggregateColumns & aggregate_columns, - StringRefs & keys, bool no_more_keys, AggregateDataPtr overflow_row) const; @@ -1057,9 +1056,7 @@ public: typename Method::State & state, Arena * aggregates_pool, size_t rows, - ColumnRawPtrs & key_columns, 
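    /// (key_columns and keys had become dead parameters here - key materialization lives
    /// in Method::State::emplaceKey - so they are dropped; note that the generated wrapper
    /// strings in Aggregator.cpp above must be kept in lockstep with these signatures.)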
AggregateColumns & aggregate_columns, - StringRefs & keys, AggregateDataPtr overflow_row) const; template diff --git a/dbms/src/Interpreters/SpecializedAggregator.h b/dbms/src/Interpreters/SpecializedAggregator.h index 8ec6b297111..4136f2162ac 100644 --- a/dbms/src/Interpreters/SpecializedAggregator.h +++ b/dbms/src/Interpreters/SpecializedAggregator.h @@ -103,7 +103,6 @@ void NO_INLINE Aggregator::executeSpecialized( size_t rows, ColumnRawPtrs & key_columns, AggregateColumns & aggregate_columns, - StringRefs & keys, bool no_more_keys, AggregateDataPtr overflow_row) const { @@ -111,10 +110,10 @@ void NO_INLINE Aggregator::executeSpecialized( if (!no_more_keys) executeSpecializedCase( - method, state, aggregates_pool, rows, key_columns, aggregate_columns, keys, overflow_row); + method, state, aggregates_pool, rows, aggregate_columns, overflow_row); else executeSpecializedCase( - method, state, aggregates_pool, rows, key_columns, aggregate_columns, keys, overflow_row); + method, state, aggregates_pool, rows, aggregate_columns, overflow_row); } #pragma GCC diagnostic push @@ -126,9 +125,7 @@ void NO_INLINE Aggregator::executeSpecializedCase( typename Method::State & state, Arena * aggregates_pool, size_t rows, - ColumnRawPtrs & /*key_columns*/, AggregateColumns & aggregate_columns, - StringRefs & /*keys*/, AggregateDataPtr overflow_row) const { /// For all rows. From f00fa640ba4c7098e2ca3eaf4ee633ca4ccf82a0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Aug 2019 02:40:15 +0300 Subject: [PATCH 73/84] Removed old optimization that is now dominated by the new optimization --- .../AggregateFunctions/AggregateFunctionCount.h | 6 ------ dbms/src/Interpreters/Aggregator.cpp | 17 +++-------------- 2 files changed, 3 insertions(+), 20 deletions(-) diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCount.h b/dbms/src/AggregateFunctions/AggregateFunctionCount.h index e0371a78644..c1b96a4fe4a 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionCount.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionCount.h @@ -62,12 +62,6 @@ public: static_cast(to).getData().push_back(data(place).count); } - /// May be used for optimization. - void addDelta(AggregateDataPtr place, UInt64 x) const - { - data(place).count += x; - } - const char * getHeaderFilePath() const override { return __FILE__; } }; diff --git a/dbms/src/Interpreters/Aggregator.cpp b/dbms/src/Interpreters/Aggregator.cpp index c19a54a07af..3985ec94c66 100644 --- a/dbms/src/Interpreters/Aggregator.cpp +++ b/dbms/src/Interpreters/Aggregator.cpp @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -698,19 +697,9 @@ void NO_INLINE Aggregator::executeWithoutKeyImpl( AggregateFunctionInstruction * aggregate_instructions, Arena * arena) const { - /// Optimization in the case of a single aggregate function `count`. - AggregateFunctionCount * agg_count = params.aggregates_size == 1 - ? 
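    /// (Removed because the generic addBatchSinglePlace loop of trivial increments is
    /// something the compiler can usually collapse into a single "count += rows", so the
    /// hand-written addDelta fast path no longer earned its keep.)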
typeid_cast(aggregate_functions[0]) - : nullptr; - - if (agg_count) - agg_count->addDelta(res, rows); - else - { - /// Adding values - for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) - inst->that->addBatchSinglePlace(rows, res + inst->state_offset, inst->arguments, arena); - } + /// Adding values + for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) + inst->that->addBatchSinglePlace(rows, res + inst->state_offset, inst->arguments, arena); } From cc5b34d778b7bbd1cb9123f0c67a2bafedbfff56 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Aug 2019 04:00:54 +0300 Subject: [PATCH 74/84] Addition to prev. revision --- dbms/src/Interpreters/SpecializedAggregator.h | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/dbms/src/Interpreters/SpecializedAggregator.h b/dbms/src/Interpreters/SpecializedAggregator.h index 4136f2162ac..9a238c77032 100644 --- a/dbms/src/Interpreters/SpecializedAggregator.h +++ b/dbms/src/Interpreters/SpecializedAggregator.h @@ -181,20 +181,10 @@ void NO_INLINE Aggregator::executeSpecializedWithoutKey( AggregateColumns & aggregate_columns, Arena * arena) const { - /// Optimization in the case of a single aggregate function `count`. - AggregateFunctionCount * agg_count = params.aggregates_size == 1 - ? typeid_cast(aggregate_functions[0]) - : nullptr; - - if (agg_count) - agg_count->addDelta(res, rows); - else + for (size_t i = 0; i < rows; ++i) { - for (size_t i = 0; i < rows; ++i) - { - AggregateFunctionsList::forEach(AggregateFunctionsUpdater( - aggregate_functions, offsets_of_aggregate_states, aggregate_columns, res, i, arena)); - } + AggregateFunctionsList::forEach(AggregateFunctionsUpdater( + aggregate_functions, offsets_of_aggregate_states, aggregate_columns, res, i, arena)); } } From ed7db76c949493913e174c83d051f4fbaee1d97c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Aug 2019 22:14:42 +0300 Subject: [PATCH 75/84] Speed-up parts removal #6372 --- dbms/src/Storages/MergeTree/MergeTreeData.cpp | 62 ++++++++++++++++--- dbms/src/Storages/MergeTree/MergeTreeData.h | 1 + .../Storages/MergeTree/MergeTreeSettings.h | 4 +- dbms/src/Storages/StorageMergeTree.cpp | 2 +- .../00988_parallel_parts_removal.reference | 2 + .../00988_parallel_parts_removal.sql | 18 ++++++ 6 files changed, 77 insertions(+), 12 deletions(-) create mode 100644 dbms/tests/queries/0_stateless/00988_parallel_parts_removal.reference create mode 100644 dbms/tests/queries/0_stateless/00988_parallel_parts_removal.sql diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp index dfc59654629..6840ea3b869 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp @@ -1064,17 +1064,58 @@ void MergeTreeData::removePartsFinally(const MergeTreeData::DataPartsVector & pa void MergeTreeData::clearOldPartsFromFilesystem() { - auto parts_to_remove = grabOldParts(); - - for (const DataPartPtr & part : parts_to_remove) - { - LOG_DEBUG(log, "Removing part from filesystem " << part->name); - part->remove(); - } - + DataPartsVector parts_to_remove = grabOldParts(); + clearPartsFromFilesystem(parts_to_remove); removePartsFinally(parts_to_remove); } +void MergeTreeData::clearPartsFromFilesystem(const DataPartsVector & parts_to_remove) +{ + if (parts_to_remove.size() > 1 && settings.max_part_removal_threads > 1 && parts_to_remove.size() > settings.concurrent_part_removal_threshold) + { + /// 
Parallel parts removal. + + size_t num_threads = std::min(size_t(settings.max_part_removal_threads), parts_to_remove.size()); + + std::mutex mutex; + ThreadPool pool(num_threads); + DataPartsVector parts_to_process = parts_to_remove; + + /// NOTE: Under heavy system load you may get "Cannot schedule a task" from ThreadPool. + + for (size_t i = 0; i < num_threads; ++i) + { + pool.schedule([&] + { + for (auto & part : parts_to_process) + { + /// Take out a part to remove. + DataPartPtr part_to_remove; + { + std::lock_guard lock(mutex); + if (!part) + continue; + std::swap(part_to_remove, part); + } + + LOG_DEBUG(log, "Removing part from filesystem " << part_to_remove->name); + part_to_remove->remove(); + } + }); + } + + pool.wait(); + } + else + { + for (const DataPartPtr & part : parts_to_remove) + { + LOG_DEBUG(log, "Removing part from filesystem " << part->name); + part->remove(); + } + } +} + void MergeTreeData::setPath(const String & new_full_path) { if (Poco::File{new_full_path}.exists()) @@ -1094,6 +1135,8 @@ void MergeTreeData::dropAllData() LOG_TRACE(log, "dropAllData: removing data from memory."); + DataPartsVector all_parts(data_parts_by_info.begin(), data_parts_by_info.end()); + data_parts_indexes.clear(); column_sizes.clear(); @@ -1102,8 +1145,7 @@ void MergeTreeData::dropAllData() LOG_TRACE(log, "dropAllData: removing data from filesystem."); /// Removing of each data part before recursive removal of directory is to speed-up removal, because there will be less number of syscalls. - for (DataPartPtr part : data_parts_by_info) /// a copy intended - part->remove(); + clearPartsFromFilesystem(all_parts); Poco::File(full_path).remove(true); diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.h b/dbms/src/Storages/MergeTree/MergeTreeData.h index c6b85e6d98f..b43851eb7d9 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.h +++ b/dbms/src/Storages/MergeTree/MergeTreeData.h @@ -477,6 +477,7 @@ public: /// Delete irrelevant parts from memory and disk. void clearOldPartsFromFilesystem(); + void clearPartsFromFilesystem(const DataPartsVector & parts); /// Delete all directories which names begin with "tmp" /// Set non-negative parameter value to override MergeTreeSettings temporary_directories_lifetime diff --git a/dbms/src/Storages/MergeTree/MergeTreeSettings.h b/dbms/src/Storages/MergeTree/MergeTreeSettings.h index 9bd58e77f9c..abe8e995d8d 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeSettings.h +++ b/dbms/src/Storages/MergeTree/MergeTreeSettings.h @@ -80,7 +80,9 @@ struct MergeTreeSettings : public SettingsCollection M(SettingUInt64, index_granularity_bytes, 10 * 1024 * 1024, "Approximate amount of bytes in single granule (0 - disabled).") \ M(SettingInt64, merge_with_ttl_timeout, 3600 * 24, "Minimal time in seconds, when merge with TTL can be repeated.") \ M(SettingBool, write_final_mark, 1, "Write final mark after end of column (0 - disabled, do nothing if index_granularity_bytes=0)") \ - M(SettingBool, enable_mixed_granularity_parts, 0, "Enable parts with adaptive and non adaptive granularity") + M(SettingBool, enable_mixed_granularity_parts, 0, "Enable parts with adaptive and non adaptive granularity") \ + M(SettingMaxThreads, max_part_removal_threads, 0, "The number of theads for concurrent removal of inactive data parts. 
One is usually enough, but in 'Google Compute Environment SSD Persistent Disks' file removal (unlink) operation is extraordinarily slow and you probably have to increase this number (recommended is up to 16).") \ + M(SettingUInt64, concurrent_part_removal_threshold, 100, "Activate concurrent part removal (see 'max_part_removal_threads') only if the number of inactive data parts is at least this.") \ DECLARE_SETTINGS_COLLECTION(LIST_OF_MERGE_TREE_SETTINGS) diff --git a/dbms/src/Storages/StorageMergeTree.cpp b/dbms/src/Storages/StorageMergeTree.cpp index 83bfadc482b..bb5e4c7f869 100644 --- a/dbms/src/Storages/StorageMergeTree.cpp +++ b/dbms/src/Storages/StorageMergeTree.cpp @@ -761,7 +761,7 @@ BackgroundProcessingPoolTaskResult StorageMergeTree::backgroundTask() else return BackgroundProcessingPoolTaskResult::ERROR; } - catch (Exception & e) + catch (const Exception & e) { if (e.code() == ErrorCodes::ABORTED) { diff --git a/dbms/tests/queries/0_stateless/00988_parallel_parts_removal.reference b/dbms/tests/queries/0_stateless/00988_parallel_parts_removal.reference new file mode 100644 index 00000000000..aa6663eb9c7 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00988_parallel_parts_removal.reference @@ -0,0 +1,2 @@ +1000 499500 +1000 499500 diff --git a/dbms/tests/queries/0_stateless/00988_parallel_parts_removal.sql b/dbms/tests/queries/0_stateless/00988_parallel_parts_removal.sql new file mode 100644 index 00000000000..0dccd3df048 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00988_parallel_parts_removal.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS mt; + +CREATE TABLE mt (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS max_part_removal_threads = 16, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, old_parts_lifetime = 1, parts_to_delay_insert = 100000, parts_to_throw_insert = 100000; + +SYSTEM STOP MERGES; + +SET max_block_size = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +INSERT INTO mt SELECT * FROM numbers(1000); +SET max_block_size = 65536; + +SELECT count(), sum(x) FROM mt; + +SYSTEM START MERGES; +OPTIMIZE TABLE mt FINAL; + +SELECT count(), sum(x) FROM mt; + +DROP TABLE mt; From 2f73b72007fbd1fad61d62dad974d6e1fa9dddec Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Aug 2019 22:24:42 +0300 Subject: [PATCH 76/84] Style --- dbms/src/Storages/MergeTree/MergeTreeData.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp index dfc59654629..d06b74ded6a 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp @@ -743,7 +743,8 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks) auto lock = lockParts(); data_parts_indexes.clear(); - bool has_adaptive_parts = false, has_non_adaptive_parts = false; + bool has_adaptive_parts = false; + bool has_non_adaptive_parts = false; for (const String & file_name : part_file_names) { MergeTreePartInfo part_info; From 0b1a4420cb0ddab6e7307c7f9c711622b9db5594 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Aug 2019 22:30:39 +0300 Subject: [PATCH 77/84] More simple --- dbms/src/Storages/MergeTree/MergeTreeData.cpp | 22 +++---------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp index 658dd107d62..f5fa48682ed 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp +++ 
b/dbms/src/Storages/MergeTree/MergeTreeData.cpp @@ -1075,31 +1075,15 @@ void MergeTreeData::clearPartsFromFilesystem(const DataPartsVector & parts_to_re /// Parallel parts removal. size_t num_threads = std::min(size_t(settings.max_part_removal_threads), parts_to_remove.size()); - - std::mutex mutex; ThreadPool pool(num_threads); - DataPartsVector parts_to_process = parts_to_remove; /// NOTE: Under heavy system load you may get "Cannot schedule a task" from ThreadPool. - - for (size_t i = 0; i < num_threads; ++i) + for (const DataPartPtr & part : parts_to_remove) { pool.schedule([&] { - for (auto & part : parts_to_process) - { - /// Take out a part to remove. - DataPartPtr part_to_remove; - { - std::lock_guard lock(mutex); - if (!part) - continue; - std::swap(part_to_remove, part); - } - - LOG_DEBUG(log, "Removing part from filesystem " << part_to_remove->name); - part_to_remove->remove(); - } + LOG_DEBUG(log, "Removing part from filesystem " << part->name); + part->remove(); }); } From fbaf6b9a4b59d8bf60946c90375bf6fe37f8e344 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Aug 2019 23:02:51 +0300 Subject: [PATCH 78/84] Parallel loading of data parts #6074 --- dbms/src/Storages/MergeTree/MergeTreeData.cpp | 183 ++++++++++-------- .../Storages/MergeTree/MergeTreeSettings.h | 1 + 2 files changed, 104 insertions(+), 80 deletions(-) diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp index f5fa48682ed..44e610b105c 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp @@ -734,112 +734,135 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks) part_file_names.push_back(it.name()); } + auto part_lock = lockParts(); + data_parts_indexes.clear(); + + if (part_file_names.empty()) + { + LOG_DEBUG(log, "There is no data parts"); + return; + } + + /// Parallel loading of data parts. + size_t num_threads = std::min(size_t(settings.max_part_loading_threads), part_file_names.size()); + + std::mutex mutex; + DataPartsVector broken_parts_to_remove; DataPartsVector broken_parts_to_detach; size_t suspicious_broken_parts = 0; - auto lock = lockParts(); - data_parts_indexes.clear(); + std::atomic has_adaptive_parts = false; + std::atomic has_non_adaptive_parts = false; + + ThreadPool pool(num_threads); - bool has_adaptive_parts = false; - bool has_non_adaptive_parts = false; for (const String & file_name : part_file_names) { - MergeTreePartInfo part_info; - if (!MergeTreePartInfo::tryParsePartName(file_name, &part_info, format_version)) - continue; - - MutableDataPartPtr part = std::make_shared(*this, file_name, part_info); - part->relative_path = file_name; - bool broken = false; - - try + pool.schedule([&] { - part->loadColumnsChecksumsIndexes(require_part_metadata, true); - } - catch (const Exception & e) - { - /// Don't count the part as broken if there is not enough memory to load it. - /// In fact, there can be many similar situations. - /// But it is OK, because there is a safety guard against deleting too many parts. - if (e.code() == ErrorCodes::MEMORY_LIMIT_EXCEEDED - || e.code() == ErrorCodes::CANNOT_ALLOCATE_MEMORY - || e.code() == ErrorCodes::CANNOT_MUNMAP - || e.code() == ErrorCodes::CANNOT_MREMAP) - throw; + MergeTreePartInfo part_info; + if (!MergeTreePartInfo::tryParsePartName(file_name, &part_info, format_version)) + return; - broken = true; - tryLogCurrentException(__PRETTY_FUNCTION__); - } - catch (...) 
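// (Editor's sketch, not part of any patch: the parallel part removal of patches 75/77 and
// the parallel part loading of patch 78 share one fan-out shape - schedule a task per item
// on a bounded ThreadPool, keep the heavy work lock-free, publish results under a mutex,
// and wait. "process_part", "items" and "results" are illustrative names, not ClickHouse
// APIs, and everything captured by reference must outlive pool.wait():
//
//     ThreadPool pool(num_threads);
//     std::mutex mutex;
//     for (const auto & item : items)
//         pool.schedule([&]
//         {
//             auto result = process_part(item);     /// heavy work, done without the lock
//             std::lock_guard lock(mutex);          /// short critical section
//             results.push_back(std::move(result)); /// publish under the lock
//         });
//     pool.wait();                                  /// block until every task has finished
// )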
- { - broken = true; - tryLogCurrentException(__PRETTY_FUNCTION__); - } + MutableDataPartPtr part = std::make_shared(*this, file_name, part_info); + part->relative_path = file_name; + bool broken = false; - /// Ignore and possibly delete broken parts that can appear as a result of hard server restart. - if (broken) - { - if (part->info.level == 0) + try { - /// It is impossible to restore level 0 parts. - LOG_ERROR(log, "Considering to remove broken part " << full_path + file_name << " because it's impossible to repair."); - broken_parts_to_remove.push_back(part); + part->loadColumnsChecksumsIndexes(require_part_metadata, true); } - else + catch (const Exception & e) { - /// Count the number of parts covered by the broken part. If it is at least two, assume that - /// the broken part was created as a result of merging them and we won't lose data if we - /// delete it. - size_t contained_parts = 0; + /// Don't count the part as broken if there is not enough memory to load it. + /// In fact, there can be many similar situations. + /// But it is OK, because there is a safety guard against deleting too many parts. + if (e.code() == ErrorCodes::MEMORY_LIMIT_EXCEEDED + || e.code() == ErrorCodes::CANNOT_ALLOCATE_MEMORY + || e.code() == ErrorCodes::CANNOT_MUNMAP + || e.code() == ErrorCodes::CANNOT_MREMAP) + throw; - LOG_ERROR(log, "Part " << full_path + file_name << " is broken. Looking for parts to replace it."); + broken = true; + tryLogCurrentException(__PRETTY_FUNCTION__); + } + catch (...) + { + broken = true; + tryLogCurrentException(__PRETTY_FUNCTION__); + } - for (const String & contained_name : part_file_names) + /// Ignore and possibly delete broken parts that can appear as a result of hard server restart. + if (broken) + { + if (part->info.level == 0) { - if (contained_name == file_name) - continue; - - MergeTreePartInfo contained_part_info; - if (!MergeTreePartInfo::tryParsePartName(contained_name, &contained_part_info, format_version)) - continue; - - if (part->info.contains(contained_part_info)) - { - LOG_ERROR(log, "Found part " << full_path + contained_name); - ++contained_parts; - } - } - - if (contained_parts >= 2) - { - LOG_ERROR(log, "Considering to remove broken part " << full_path + file_name << " because it covers at least 2 other parts"); + /// It is impossible to restore level 0 parts. + LOG_ERROR(log, "Considering to remove broken part " << full_path << file_name << " because it's impossible to repair."); + std::lock_guard loading_lock(mutex); broken_parts_to_remove.push_back(part); } else { - LOG_ERROR(log, "Detaching broken part " << full_path + file_name - << " because it covers less than 2 parts. You need to resolve this manually"); - broken_parts_to_detach.push_back(part); - ++suspicious_broken_parts; + /// Count the number of parts covered by the broken part. If it is at least two, assume that + /// the broken part was created as a result of merging them and we won't lose data if we + /// delete it. + size_t contained_parts = 0; + + LOG_ERROR(log, "Part " << full_path << file_name << " is broken. 
Looking for parts to replace it."); + + for (const String & contained_name : part_file_names) + { + if (contained_name == file_name) + continue; + + MergeTreePartInfo contained_part_info; + if (!MergeTreePartInfo::tryParsePartName(contained_name, &contained_part_info, format_version)) + continue; + + if (part->info.contains(contained_part_info)) + { + LOG_ERROR(log, "Found part " << full_path << contained_name); + ++contained_parts; + } + } + + if (contained_parts >= 2) + { + LOG_ERROR(log, "Considering to remove broken part " << full_path << file_name << " because it covers at least 2 other parts"); + std::lock_guard loading_lock(mutex); + broken_parts_to_remove.push_back(part); + } + else + { + LOG_ERROR(log, "Detaching broken part " << full_path << file_name + << " because it covers less than 2 parts. You need to resolve this manually"); + std::lock_guard loading_lock(mutex); + broken_parts_to_detach.push_back(part); + ++suspicious_broken_parts; + } } + + return; } + if (!part->index_granularity_info.is_adaptive) + has_non_adaptive_parts.store(true, std::memory_order_relaxed); + else + has_adaptive_parts.store(true, std::memory_order_relaxed); - continue; - } - if (!part->index_granularity_info.is_adaptive) - has_non_adaptive_parts = true; - else - has_adaptive_parts = true; + part->modification_time = Poco::File(full_path + file_name).getLastModified().epochTime(); + /// Assume that all parts are Committed, covered parts will be detected and marked as Outdated later + part->state = DataPartState::Committed; - part->modification_time = Poco::File(full_path + file_name).getLastModified().epochTime(); - /// Assume that all parts are Committed, covered parts will be detected and marked as Outdated later - part->state = DataPartState::Committed; - - if (!data_parts_indexes.insert(part).second) - throw Exception("Part " + part->name + " already exists", ErrorCodes::DUPLICATE_DATA_PART); + std::lock_guard loading_lock(mutex); + if (!data_parts_indexes.insert(part).second) + throw Exception("Part " + part->name + " already exists", ErrorCodes::DUPLICATE_DATA_PART); + }); } + pool.wait(); + if (has_non_adaptive_parts && has_adaptive_parts && !settings.enable_mixed_granularity_parts) throw Exception("Table contains parts with adaptive and non adaptive marks, but `setting enable_mixed_granularity_parts` is disabled", ErrorCodes::LOGICAL_ERROR); diff --git a/dbms/src/Storages/MergeTree/MergeTreeSettings.h b/dbms/src/Storages/MergeTree/MergeTreeSettings.h index abe8e995d8d..e670000ecc5 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeSettings.h +++ b/dbms/src/Storages/MergeTree/MergeTreeSettings.h @@ -81,6 +81,7 @@ struct MergeTreeSettings : public SettingsCollection M(SettingInt64, merge_with_ttl_timeout, 3600 * 24, "Minimal time in seconds, when merge with TTL can be repeated.") \ M(SettingBool, write_final_mark, 1, "Write final mark after end of column (0 - disabled, do nothing if index_granularity_bytes=0)") \ M(SettingBool, enable_mixed_granularity_parts, 0, "Enable parts with adaptive and non adaptive granularity") \ + M(SettingMaxThreads, max_part_loading_threads, 0, "The number of theads to load data parts at startup.") \ M(SettingMaxThreads, max_part_removal_threads, 0, "The number of theads for concurrent removal of inactive data parts. 
One is usually enough, but in 'Google Compute Environment SSD Persistent Disks' file removal (unlink) operation is extraordinarily slow and you probably have to increase this number (recommended is up to 16).") \ M(SettingUInt64, concurrent_part_removal_threshold, 100, "Activate concurrent part removal (see 'max_part_removal_threads') only if the number of inactive data parts is at least this.") \ From 28e0e64638b2487282bb0f5de85ee530dc1e55c0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Aug 2019 23:04:14 +0300 Subject: [PATCH 79/84] Added a test --- .../00989_parallel_parts_loading.reference | 2 ++ .../00989_parallel_parts_loading.sql | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 dbms/tests/queries/0_stateless/00989_parallel_parts_loading.reference create mode 100644 dbms/tests/queries/0_stateless/00989_parallel_parts_loading.sql diff --git a/dbms/tests/queries/0_stateless/00989_parallel_parts_loading.reference b/dbms/tests/queries/0_stateless/00989_parallel_parts_loading.reference new file mode 100644 index 00000000000..aa6663eb9c7 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00989_parallel_parts_loading.reference @@ -0,0 +1,2 @@ +1000 499500 +1000 499500 diff --git a/dbms/tests/queries/0_stateless/00989_parallel_parts_loading.sql b/dbms/tests/queries/0_stateless/00989_parallel_parts_loading.sql new file mode 100644 index 00000000000..5e0011483b3 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00989_parallel_parts_loading.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS mt; + +CREATE TABLE mt (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS max_part_loading_threads = 16, parts_to_delay_insert = 100000, parts_to_throw_insert = 100000; + +SYSTEM STOP MERGES; + +SET max_block_size = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +INSERT INTO mt SELECT * FROM numbers(1000); +SET max_block_size = 65536; + +SELECT count(), sum(x) FROM mt; + +DETACH TABLE mt; +ATTACH TABLE mt; + +SELECT count(), sum(x) FROM mt; + +SYSTEM START MERGES; +DROP TABLE mt; From 5830526fa164b904f4cc392fc922865d1d542d1d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Aug 2019 23:37:53 +0300 Subject: [PATCH 80/84] Slightly better information messages --- dbms/programs/server/Server.cpp | 11 ++++++++--- dbms/src/Common/getExecutablePath.cpp | 13 +++++++++++++ dbms/src/Common/getExecutablePath.h | 11 +++++++++++ libs/libcommon/include/common/logger_useful.h | 1 + libs/libdaemon/src/BaseDaemon.cpp | 10 +++++----- 5 files changed, 38 insertions(+), 8 deletions(-) create mode 100644 dbms/src/Common/getExecutablePath.cpp create mode 100644 dbms/src/Common/getExecutablePath.h diff --git a/dbms/programs/server/Server.cpp b/dbms/programs/server/Server.cpp index 464de1c7066..ef61537e38d 100644 --- a/dbms/programs/server/Server.cpp +++ b/dbms/programs/server/Server.cpp @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -212,6 +213,10 @@ int Server::main(const std::vector & /*args*/) const auto memory_amount = getMemoryAmount(); #if defined(__linux__) + std::string executable_path = getExecutablePath(); + if (executable_path.empty()) + executable_path = "/usr/bin/clickhouse"; /// It is used for information messages. + /// After full config loaded { if (config().getBool("mlock_executable", false)) @@ -228,7 +233,7 @@ int Server::main(const std::vector & /*args*/) { LOG_INFO(log, "It looks like the process has no CAP_IPC_LOCK capability, binary mlock will be disabled." 
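                /// (executable_path is resolved from /proc/self/exe at startup and falls
                /// back to /usr/bin/clickhouse only if that lookup fails, so these setcap
                /// hints now name the binary that is actually running.)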
" It could happen due to incorrect ClickHouse package installation." - " You could resolve the problem manually with 'sudo setcap cap_ipc_lock=+ep /usr/bin/clickhouse'." + " You could resolve the problem manually with 'sudo setcap cap_ipc_lock=+ep " << executable_path << "'." " Note that it will not work on 'nosuid' mounted filesystems."); } } @@ -547,7 +552,7 @@ int Server::main(const std::vector & /*args*/) { LOG_INFO(log, "It looks like the process has no CAP_NET_ADMIN capability, 'taskstats' performance statistics will be disabled." " It could happen due to incorrect ClickHouse package installation." - " You could resolve the problem manually with 'sudo setcap cap_net_admin=+ep /usr/bin/clickhouse'." + " You could resolve the problem manually with 'sudo setcap cap_net_admin=+ep " << executable_path << "'." " Note that it will not work on 'nosuid' mounted filesystems." " It also doesn't work if you run clickhouse-server inside network namespace as it happens in some containers."); } @@ -556,7 +561,7 @@ int Server::main(const std::vector & /*args*/) { LOG_INFO(log, "It looks like the process has no CAP_SYS_NICE capability, the setting 'os_thread_nice' will have no effect." " It could happen due to incorrect ClickHouse package installation." - " You could resolve the problem manually with 'sudo setcap cap_sys_nice=+ep /usr/bin/clickhouse'." + " You could resolve the problem manually with 'sudo setcap cap_sys_nice=+ep " << executable_path << "'." " Note that it will not work on 'nosuid' mounted filesystems."); } #else diff --git a/dbms/src/Common/getExecutablePath.cpp b/dbms/src/Common/getExecutablePath.cpp new file mode 100644 index 00000000000..d1ce3b18b8a --- /dev/null +++ b/dbms/src/Common/getExecutablePath.cpp @@ -0,0 +1,13 @@ +#include +#include + + +std::string getExecutablePath() +{ + std::error_code ec; + std::filesystem::path canonical_path = std::filesystem::canonical("/proc/self/exe", ec); + + if (ec) + return {}; + return canonical_path; +} diff --git a/dbms/src/Common/getExecutablePath.h b/dbms/src/Common/getExecutablePath.h new file mode 100644 index 00000000000..78adaff4828 --- /dev/null +++ b/dbms/src/Common/getExecutablePath.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +/** Get path to the running executable if possible. + * It is possible when: + * - procfs exists; + * - there is a /proc/self/exe file; + * Otherwise return empty string. 
+ */ +std::string getExecutablePath(); diff --git a/libs/libcommon/include/common/logger_useful.h b/libs/libcommon/include/common/logger_useful.h index d7466273320..b4693115cb3 100644 --- a/libs/libcommon/include/common/logger_useful.h +++ b/libs/libcommon/include/common/logger_useful.h @@ -47,4 +47,5 @@ using DB::CurrentThread; #define LOG_INFO(logger, message) LOG_SIMPLE(logger, message, LogsLevel::information, Message::PRIO_INFORMATION) #define LOG_WARNING(logger, message) LOG_SIMPLE(logger, message, LogsLevel::warning, Message::PRIO_WARNING) #define LOG_ERROR(logger, message) LOG_SIMPLE(logger, message, LogsLevel::error, Message::PRIO_ERROR) +#define LOG_FATAL(logger, message) LOG_SIMPLE(logger, message, LogsLevel::error, Message::PRIO_FATAL) diff --git a/libs/libdaemon/src/BaseDaemon.cpp b/libs/libdaemon/src/BaseDaemon.cpp index 16bcb132d37..ed262cc6984 100644 --- a/libs/libdaemon/src/BaseDaemon.cpp +++ b/libs/libdaemon/src/BaseDaemon.cpp @@ -204,17 +204,17 @@ private: private: void onTerminate(const std::string & message, ThreadNumber thread_num) const { - LOG_ERROR(log, "(version " << VERSION_STRING << VERSION_OFFICIAL << ") (from thread " << thread_num << ") " << message); + LOG_FATAL(log, "(version " << VERSION_STRING << VERSION_OFFICIAL << ") (from thread " << thread_num << ") " << message); } void onFault(int sig, siginfo_t & info, ucontext_t & context, const StackTrace & stack_trace, ThreadNumber thread_num) const { - LOG_ERROR(log, "########################################"); - LOG_ERROR(log, "(version " << VERSION_STRING << VERSION_OFFICIAL << ") (from thread " << thread_num << ") " + LOG_FATAL(log, "########################################"); + LOG_FATAL(log, "(version " << VERSION_STRING << VERSION_OFFICIAL << ") (from thread " << thread_num << ") " << "Received signal " << strsignal(sig) << " (" << sig << ")" << "."); - LOG_ERROR(log, signalToErrorMessage(sig, info, context)); - LOG_ERROR(log, stack_trace.toString()); + LOG_FATAL(log, signalToErrorMessage(sig, info, context)); + LOG_FATAL(log, stack_trace.toString()); } }; From e5ff049b910c2e36e38a7f806bc3ba9ae92ec004 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2019 00:03:49 +0300 Subject: [PATCH 81/84] Added a test just in case --- .../queries/0_stateless/00988_expansion_aliases_limit.reference | 0 dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.sql | 1 + 2 files changed, 1 insertion(+) create mode 100644 dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.reference create mode 100644 dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.sql diff --git a/dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.reference b/dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.sql b/dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.sql new file mode 100644 index 00000000000..15c9f82da6f --- /dev/null +++ b/dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.sql @@ -0,0 +1 @@ +SELECT 1 AS a, a + a AS b, b + b AS c, c + c AS d, d + d AS e, e + e AS f, f + f AS g, g + g AS h, h + h AS i, i + i AS j, j + j AS k, k + k AS l, l + l AS m, m + m AS n, n + n AS o, o + o AS p, p + p AS q, q + q AS r, r + r AS s, s + s AS t, t + t AS u, u + u AS v, v + v AS w, w + w AS x, x + x AS y, y + y AS z; -- { serverError 168 } From 2570907f4402e2dda05c2cb002b11c052105cf8c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 
12 Aug 2019 00:45:18 +0300 Subject: [PATCH 82/84] Removed useless parameter --- .../ParallelAggregatingBlockInputStream.cpp | 6 ++---- .../ParallelAggregatingBlockInputStream.h | 2 -- dbms/src/Interpreters/Aggregator.cpp | 20 ++++++++----------- dbms/src/Interpreters/Aggregator.h | 2 -- .../Transforms/AggregatingTransform.cpp | 5 ++--- .../Transforms/AggregatingTransform.h | 1 - 6 files changed, 12 insertions(+), 24 deletions(-) diff --git a/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.cpp b/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.cpp index 9fee2996a7b..6c75ec726d4 100644 --- a/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.cpp +++ b/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.cpp @@ -110,8 +110,7 @@ ParallelAggregatingBlockInputStream::TemporaryFileStream::TemporaryFileStream(co void ParallelAggregatingBlockInputStream::Handler::onBlock(Block & block, size_t thread_num) { parent.aggregator.executeOnBlock(block, *parent.many_data[thread_num], - parent.threads_data[thread_num].key_columns, parent.threads_data[thread_num].aggregate_columns, - parent.threads_data[thread_num].key, parent.no_more_keys); + parent.threads_data[thread_num].key_columns, parent.threads_data[thread_num].aggregate_columns, parent.no_more_keys); parent.threads_data[thread_num].src_rows += block.rows(); parent.threads_data[thread_num].src_bytes += block.bytes(); @@ -205,8 +204,7 @@ void ParallelAggregatingBlockInputStream::execute() /// To do this, we pass a block with zero rows to aggregate. if (total_src_rows == 0 && params.keys_size == 0 && !params.empty_result_for_aggregation_by_empty_set) aggregator.executeOnBlock(children.at(0)->getHeader(), *many_data[0], - threads_data[0].key_columns, threads_data[0].aggregate_columns, - threads_data[0].key, no_more_keys); + threads_data[0].key_columns, threads_data[0].aggregate_columns, no_more_keys); } } diff --git a/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h b/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h index 5342c03e68f..0c93f5d1161 100644 --- a/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h +++ b/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h @@ -80,13 +80,11 @@ private: size_t src_rows = 0; size_t src_bytes = 0; - StringRefs key; ColumnRawPtrs key_columns; Aggregator::AggregateColumns aggregate_columns; ThreadData(size_t keys_size_, size_t aggregates_size_) { - key.resize(keys_size_); key_columns.resize(keys_size_); aggregate_columns.resize(aggregates_size_); } diff --git a/dbms/src/Interpreters/Aggregator.cpp b/dbms/src/Interpreters/Aggregator.cpp index 3985ec94c66..f367908a0a5 100644 --- a/dbms/src/Interpreters/Aggregator.cpp +++ b/dbms/src/Interpreters/Aggregator.cpp @@ -704,8 +704,7 @@ void NO_INLINE Aggregator::executeWithoutKeyImpl( bool Aggregator::executeOnBlock(const Block & block, AggregatedDataVariants & result, - ColumnRawPtrs & key_columns, AggregateColumns & aggregate_columns, StringRefs & key, - bool & no_more_keys) + ColumnRawPtrs & key_columns, AggregateColumns & aggregate_columns, bool & no_more_keys) { if (isCancelled()) return true; @@ -819,9 +818,9 @@ bool Aggregator::executeOnBlock(const Block & block, AggregatedDataVariants & re reinterpret_cast(compiled_data->compiled_method_ptr) \ + bool, AggregateDataPtr)>(compiled_data->compiled_method_ptr) \ (*this, *result.NAME, result.aggregates_pool, rows, key_columns, aggregate_columns, \ - key, no_more_keys, overflow_row_ptr); + no_more_keys, overflow_row_ptr); if (false) {} 
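    /// The 'if (false) {}' above gives the dispatch chain a head: each branch that the
    /// M macro expands to (see the '#define M(NAME)' blocks in this file) begins with 'else if'.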
APPLY_FOR_AGGREGATED_VARIANTS(M) @@ -835,9 +834,9 @@ bool Aggregator::executeOnBlock(const Block & block, AggregatedDataVariants & re reinterpret_cast(compiled_data->compiled_two_level_method_ptr) \ + bool, AggregateDataPtr)>(compiled_data->compiled_two_level_method_ptr) \ (*this, *result.NAME, result.aggregates_pool, rows, key_columns, aggregate_columns, \ - key, no_more_keys, overflow_row_ptr); + no_more_keys, overflow_row_ptr); if (false) {} APPLY_FOR_VARIANTS_TWO_LEVEL(M) @@ -1055,7 +1054,6 @@ void Aggregator::execute(const BlockInputStreamPtr & stream, AggregatedDataVaria if (isCancelled()) return; - StringRefs key(params.keys_size); ColumnRawPtrs key_columns(params.keys_size); AggregateColumns aggregate_columns(params.aggregates_size); @@ -1082,14 +1080,14 @@ void Aggregator::execute(const BlockInputStreamPtr & stream, AggregatedDataVaria src_rows += block.rows(); src_bytes += block.bytes(); - if (!executeOnBlock(block, result, key_columns, aggregate_columns, key, no_more_keys)) + if (!executeOnBlock(block, result, key_columns, aggregate_columns, no_more_keys)) break; } /// If there was no data, and we aggregate without keys, and we must return single row with the result of empty aggregation. /// To do this, we pass a block with zero rows to aggregate. if (result.empty() && params.keys_size == 0 && !params.empty_result_for_aggregation_by_empty_set) - executeOnBlock(stream->getHeader(), result, key_columns, aggregate_columns, key, no_more_keys); + executeOnBlock(stream->getHeader(), result, key_columns, aggregate_columns, no_more_keys); double elapsed_seconds = watch.elapsedSeconds(); size_t rows = result.sizeWithoutOverflowRow(); @@ -2344,7 +2342,6 @@ void NO_INLINE Aggregator::convertBlockToTwoLevelImpl( Method & method, Arena * pool, ColumnRawPtrs & key_columns, - StringRefs & keys [[maybe_unused]], const Block & source, std::vector & destinations) const { @@ -2406,7 +2403,6 @@ std::vector Aggregator::convertBlockToTwoLevel(const Block & block) AggregatedDataVariants data; - StringRefs key(params.keys_size); ColumnRawPtrs key_columns(params.keys_size); /// Remember the columns we will work with @@ -2446,7 +2442,7 @@ std::vector Aggregator::convertBlockToTwoLevel(const Block & block) #define M(NAME) \ else if (data.type == AggregatedDataVariants::Type::NAME) \ convertBlockToTwoLevelImpl(*data.NAME, data.aggregates_pool, \ - key_columns, key, block, splitted_blocks); + key_columns, block, splitted_blocks); if (false) {} APPLY_FOR_VARIANTS_TWO_LEVEL(M) diff --git a/dbms/src/Interpreters/Aggregator.h b/dbms/src/Interpreters/Aggregator.h index 4b6f7d16b90..df7354d8294 100644 --- a/dbms/src/Interpreters/Aggregator.h +++ b/dbms/src/Interpreters/Aggregator.h @@ -842,7 +842,6 @@ public: /// Process one block. Return false if the processing should be aborted (with group_by_overflow_mode = 'break'). bool executeOnBlock(const Block & block, AggregatedDataVariants & result, ColumnRawPtrs & key_columns, AggregateColumns & aggregate_columns, /// Passed to not create them anew for each block - StringRefs & keys, /// - pass the corresponding objects that are initially empty. bool & no_more_keys); /** Convert the aggregation data structure into a block. 
@@ -1181,7 +1180,6 @@ protected: Method & method, Arena * pool, ColumnRawPtrs & key_columns, - StringRefs & keys, const Block & source, std::vector & destinations) const; diff --git a/dbms/src/Processors/Transforms/AggregatingTransform.cpp b/dbms/src/Processors/Transforms/AggregatingTransform.cpp index 55fe66b7d9f..5575b628344 100644 --- a/dbms/src/Processors/Transforms/AggregatingTransform.cpp +++ b/dbms/src/Processors/Transforms/AggregatingTransform.cpp @@ -101,7 +101,6 @@ AggregatingTransform::AggregatingTransform( Block header, AggregatingTransformParamsPtr params_, ManyAggregatedDataPtr many_data_, size_t current_variant, size_t temporary_data_merge_threads_, size_t max_threads_) : IProcessor({std::move(header)}, {params_->getHeader()}), params(std::move(params_)) - , key(params->params.keys_size) , key_columns(params->params.keys_size) , aggregate_columns(params->params.aggregates_size) , many_data(std::move(many_data_)) @@ -212,7 +211,7 @@ void AggregatingTransform::consume(Chunk chunk) auto block = getInputs().front().getHeader().cloneWithColumns(chunk.detachColumns()); - if (!params->aggregator.executeOnBlock(block, variants, key_columns, aggregate_columns, key, no_more_keys)) + if (!params->aggregator.executeOnBlock(block, variants, key_columns, aggregate_columns, no_more_keys)) is_consume_finished = true; } @@ -226,7 +225,7 @@ void AggregatingTransform::initGenerate() /// If there was no data, and we aggregate without keys, and we must return single row with the result of empty aggregation. /// To do this, we pass a block with zero rows to aggregate. if (variants.empty() && params->params.keys_size == 0 && !params->params.empty_result_for_aggregation_by_empty_set) - params->aggregator.executeOnBlock(getInputs().front().getHeader(), variants, key_columns, aggregate_columns, key, no_more_keys); + params->aggregator.executeOnBlock(getInputs().front().getHeader(), variants, key_columns, aggregate_columns, no_more_keys); double elapsed_seconds = watch.elapsedSeconds(); size_t rows = variants.sizeWithoutOverflowRow(); diff --git a/dbms/src/Processors/Transforms/AggregatingTransform.h b/dbms/src/Processors/Transforms/AggregatingTransform.h index 17786ccfa1a..3621a60517c 100644 --- a/dbms/src/Processors/Transforms/AggregatingTransform.h +++ b/dbms/src/Processors/Transforms/AggregatingTransform.h @@ -71,7 +71,6 @@ private: AggregatingTransformParamsPtr params; Logger * log = &Logger::get("AggregatingTransform"); - StringRefs key; ColumnRawPtrs key_columns; Aggregator::AggregateColumns aggregate_columns; bool no_more_keys = false; From 8cd759565d2737795df896a04638a8305121d16d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2019 06:06:44 +0300 Subject: [PATCH 83/84] Updated test (the previous test was a wrong reference) --- .../queries/0_stateless/00909_arrayEnumerateUniq.reference | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dbms/tests/queries/0_stateless/00909_arrayEnumerateUniq.reference b/dbms/tests/queries/0_stateless/00909_arrayEnumerateUniq.reference index 5065b782f54..f97d393cc32 100644 --- a/dbms/tests/queries/0_stateless/00909_arrayEnumerateUniq.reference +++ b/dbms/tests/queries/0_stateless/00909_arrayEnumerateUniq.reference @@ -180,9 +180,9 @@ a3,a4 1..n [[1,1]] a3,a4 1..1 [[]] a3,a4 1..1 [[]] a3,a4 1..1 [[1,2]] -a3,a4 1..1 [[3,4]] a3,a4 1..1 [[1,2]] -a3,a4 1..1 [[3,4]] +a3,a4 1..1 [[1,2]] +a3,a4 1..1 [[1,2]] ---------BAD [] [] From fe8e9124a07375205f2e1e3f57ff30cd446dacd6 Mon Sep 17 00:00:00 2001 From: BayoNet Date: Mon, 12 Aug 2019 
12:52:53 +0300 Subject: [PATCH 84/84] =?UTF-8?q?DOCAPI-5765=20=D0=B8=205766:=20system.tab?= =?UTF-8?q?les=20docs.=20EN=20review.=20RU=20translation=20(#6221)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * DOCAPI-5765: system.tables docs EN review. * DOCAPI-5765. RU translation. --- docs/en/operations/system_tables.md | 45 ++++++++++++------------- docs/ru/operations/system_tables.md | 52 +++++++++++++++++++++-------- 2 files changed, 60 insertions(+), 37 deletions(-) diff --git a/docs/en/operations/system_tables.md b/docs/en/operations/system_tables.md index 14fa1ace01d..cdcd3ce0049 100644 --- a/docs/en/operations/system_tables.md +++ b/docs/en/operations/system_tables.md @@ -60,11 +60,11 @@ user String — The name of the user for connecting to the server. ## system.columns -Contains information about the columns in all the tables. +Contains information about columns in all the tables. You can use this table to get information similar to the [DESCRIBE TABLE](../query_language/misc.md#misc-describe-table) query, but for multiple tables at once. -The `system.columns` table contains the following columns (the type of the corresponding column is shown in brackets): +The `system.columns` table contains the following columns (the column type is shown in brackets): - `database` (String) — Database name. - `table` (String) — Table name. @@ -75,11 +75,11 @@ The `system.columns` table contains the following columns (the type of the corre - `data_compressed_bytes` (UInt64) — The size of compressed data, in bytes. - `data_uncompressed_bytes` (UInt64) — The size of decompressed data, in bytes. - `marks_bytes` (UInt64) — The size of marks, in bytes. -- `comment` (String) — The comment about column, or an empty string if it is not defined. -- `is_in_partition_key` (UInt8) — Flag that indicates whether the column is in partition expression. -- `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in sorting key expression. -- `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in primary key expression. -- `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in sampling key expression. +- `comment` (String) — Comment on the column, or an empty string if it is not defined. +- `is_in_partition_key` (UInt8) — Flag that indicates whether the column is in the partition expression. +- `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in the sorting key expression. +- `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression. +- `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression. ## system.databases @@ -89,11 +89,8 @@ This system table is used for implementing the `SHOW DATABASES` query. ## system.detached_parts -Contains information about detached parts of -[MergeTree](table_engines/mergetree.md) tables. The `reason` column specifies -why the part was detached. For user-detached parts, the reason is empty. Such -parts can be attached with [ALTER TABLE ATTACH PARTITION|PART](../query_language/query_language/alter/#alter_attach-partition) -command. For the description of other columns, see [system.parts](#system_tables-parts). +Contains information about detached parts of [MergeTree](table_engines/mergetree.md) tables. The `reason` column specifies why the part was detached. For user-detached parts, the reason is empty. 
Such parts can be attached with [ALTER TABLE ATTACH PARTITION|PART](../query_language/query_language/alter/#alter_attach-partition) command. For the description of other columns, see [system.parts](#system_tables-parts). + ## system.dictionaries @@ -110,7 +107,7 @@ Columns: - `bytes_allocated UInt64` — The amount of RAM the dictionary uses. - `hit_rate Float64` — For cache dictionaries, the percentage of uses for which the value was in the cache. - `element_count UInt64` — The number of items stored in the dictionary. -- `load_factor Float64` — The percentage full of the dictionary (for a hashed dictionary, the percentage filled in the hash table). +- `load_factor Float64` — The percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table). - `creation_time DateTime` — The time when the dictionary was created or last successfully reloaded. - `last_exception String` — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn't be created. - `source String` — Text describing the data source for the dictionary. @@ -160,7 +157,7 @@ Columns: ## system.graphite_retentions -Contains information about parameters [graphite_rollup](server_settings/settings.md#server_settings-graphite_rollup) which use in tables with [\*GraphiteMergeTree](table_engines/graphitemergetree.md) engines. +Contains information about parameters [graphite_rollup](server_settings/settings.md#server_settings-graphite_rollup) which are used in tables with [*GraphiteMergeTree](table_engines/graphitemergetree.md) engines. Columns: @@ -170,9 +167,9 @@ Columns: - `age` (UInt64) - The minimum age of the data in seconds. - `precision` (UInt64) - How precisely to define the age of the data in seconds. - `priority` (UInt16) - Pattern priority. -- `is_default` (UInt8) - Is pattern default or not. -- `Tables.database` (Array(String)) - Array of databases names of tables, which use `config_name` parameter. -- `Tables.table` (Array(String)) - Array of tables names, which use `config_name` parameter. +- `is_default` (UInt8) - Whether the pattern is the default. +- `Tables.database` (Array(String)) - Array of names of database tables that use the `config_name` parameter. +- `Tables.table` (Array(String)) - Array of table names that use the `config_name` parameter. ## system.merges @@ -193,7 +190,7 @@ Columns: - `bytes_read_uncompressed UInt64` — Number of bytes read, uncompressed. - `rows_read UInt64` — Number of rows read. - `bytes_written_uncompressed UInt64` — Number of bytes written, uncompressed. -- `rows_written UInt64` — Number of lines rows written. +- `rows_written UInt64` — Number of rows written. ## system.metrics {#system_tables-metrics} @@ -603,9 +600,9 @@ WHERE changed Contains metadata of each table that the server knows about. Detached tables are not shown in `system.tables`. -This table contains the following columns (the type of the corresponding column is shown in brackets): +This table contains the following columns (the column type is shown in brackets): -- `database` (String) — The name of database the table is in. +- `database` (String) — The name of the database the table is in. - `name` (String) — Table name. - `engine` (String) — Table engine name (without parameters). - `is_temporary` (UInt8) - Flag that indicates whether the table is temporary. @@ -621,7 +618,7 @@ This table contains the following columns (the type of the corresponding column - `primary_key` (String) - The primary key expression specified in the table. 
- `sampling_key` (String) - The sampling key expression specified in the table.
 
-The `system.tables` is used in `SHOW TABLES` query implementation.
+The `system.tables` table is used in the `SHOW TABLES` query implementation.
 
 ## system.zookeeper
 
@@ -647,7 +644,7 @@ Columns:
 - `version Int32` — Node version: the number of times the node was changed.
 - `cversion Int32` — Number of added or removed descendants.
 - `aversion Int32` — Number of changes to the ACL.
-- `ephemeralOwner Int64` — For ephemeral nodes, the ID of hte session that owns this node.
+- `ephemeralOwner Int64` — For ephemeral nodes, the ID of the session that owns this node.
 
 Example:
 
@@ -706,13 +703,13 @@ The table contains information about [mutations](../query_language/alter.md#alte
 
 **create_time** - When this mutation command was submitted for execution.
 
-**block_numbers.partition_id**, **block_numbers.number** - A Nested column. For mutations of replicated tables contains one record for each partition: the partition ID and the block number that was acquired by the mutation (in each partition only parts that contain blocks with numbers less than the block number acquired by the mutation in that partition will be mutated). Because in non-replicated tables blocks numbers in all partitions form a single sequence, for mutatations of non-replicated tables the column will contain one record with a single block number acquired by the mutation.
+**block_numbers.partition_id**, **block_numbers.number** - A nested column. For mutations of replicated tables, it contains one record for each partition: the partition ID and the block number that was acquired by the mutation (in each partition, only parts that contain blocks with numbers less than the block number acquired by the mutation in that partition will be mutated). In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.
 
 **parts_to_do** - The number of data parts that need to be mutated for the mutation to finish.
 
 **is_done** - Is the mutation done? Note that even if `parts_to_do = 0` it is possible that a mutation of a replicated table is not done yet because of a long-running INSERT that will create a new data part that will need to be mutated.
 
-If there were problems with mutating some parts the following columns contain additional information:
+If there were problems with mutating some parts, the following columns contain additional information:
 
 **latest_failed_part** - The name of the most recent part that could not be mutated.
 
diff --git a/docs/ru/operations/system_tables.md b/docs/ru/operations/system_tables.md
index e6e511caa93..707f74c0b22 100644
--- a/docs/ru/operations/system_tables.md
+++ b/docs/ru/operations/system_tables.md
@@ -61,16 +61,26 @@ user String — имя пользователя, которого использ
 ## system.columns
 
 Содержит информацию о столбцах всех таблиц.
-С помощью этой таблицы можно получить информацию аналогично запросу `DESCRIBE TABLE`, но для многих таблиц сразу.
-```
-database String - имя базы данных, в которой находится таблица
-table String - имя таблицы
-name String - имя столбца
-type String - тип столбца
-default_type String - тип (DEFAULT, MATERIALIZED, ALIAS) выражения для значения по умолчанию, или пустая строка, если оно не описано
-default_expression String - выражение для значения по умолчанию, или пустая строка, если оно не описано
-```
+С помощью этой таблицы можно получить информацию аналогично запросу [DESCRIBE TABLE](../query_language/misc.md#misc-describe-table), но для многих таблиц сразу.
+
+Таблица `system.columns` содержит столбцы (тип столбца указан в скобках):
+
+- `database` (String) — имя базы данных.
+- `table` (String) — имя таблицы.
+- `name` (String) — имя столбца.
+- `type` (String) — тип столбца.
+- `default_kind` (String) — тип выражения (`DEFAULT`, `MATERIALIZED`, `ALIAS`) значения по умолчанию, или пустая строка.
+- `default_expression` (String) — выражение для значения по умолчанию или пустая строка.
+- `data_compressed_bytes` (UInt64) — размер сжатых данных в байтах.
+- `data_uncompressed_bytes` (UInt64) — размер распакованных данных в байтах.
+- `marks_bytes` (UInt64) — размер засечек в байтах.
+- `comment` (String) — комментарий к столбцу или пустая строка.
+- `is_in_partition_key` (UInt8) — флаг, показывающий включение столбца в ключ партиционирования.
+- `is_in_sorting_key` (UInt8) — флаг, показывающий включение столбца в ключ сортировки.
+- `is_in_primary_key` (UInt8) — флаг, показывающий включение столбца в первичный ключ.
+- `is_in_sampling_key` (UInt8) — флаг, показывающий включение столбца в ключ выборки.
+
 ## system.databases
 
 Таблица содержит один столбец name типа String - имя базы данных.
@@ -566,11 +576,27 @@ WHERE changed
 
 ## system.tables
 
-Таблица содержит столбцы database, name, engine типа String.
-Также таблица содержит три виртуальных столбца: metadata_modification_time типа DateTime, create_table_query и engine_full типа String.
-Для каждой таблицы, о которой знает сервер, будет присутствовать соответствующая запись в таблице system.tables.
-Эта системная таблица используется для реализации запросов SHOW TABLES.
+Содержит метаданные каждой таблицы, о которой знает сервер. Отсоединённые таблицы не отображаются в `system.tables`.
+Эта таблица содержит следующие столбцы (тип столбца показан в скобках):
+
+- `database` (String) — имя базы данных, в которой находится таблица.
+- `name` (String) — имя таблицы.
+- `engine` (String) — движок таблицы (без параметров).
+- `is_temporary` (UInt8) — флаг, указывающий на то, временная это таблица или нет.
+- `data_path` (String) — путь к данным таблицы в файловой системе.
+- `metadata_path` (String) — путь к табличным метаданным в файловой системе.
+- `metadata_modification_time` (DateTime) — время последней модификации табличных метаданных.
+- `dependencies_database` (Array(String)) — зависимости базы данных.
+- `dependencies_table` (Array(String)) — табличные зависимости (таблицы [MaterializedView](table_engines/materializedview.md), созданные на базе текущей таблицы).
+- `create_table_query` (String) — запрос, которым создавалась таблица.
+- `engine_full` (String) — параметры табличного движка.
+- `partition_key` (String) — ключ партиционирования таблицы.
+- `sorting_key` (String) — ключ сортировки таблицы.
+- `primary_key` (String) — первичный ключ таблицы.
+- `sampling_key` (String) — ключ сэмплирования таблицы.
+
+Таблица `system.tables` используется при выполнении запроса `SHOW TABLES`.
 
 ## system.zookeeper