From 402815977fe32976385d0ce74382179c9004c250 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 8 Aug 2018 21:01:25 +0300 Subject: [PATCH 01/22] Add new error in fetchPartition op --- dbms/src/Interpreters/Settings.h | 1 + dbms/src/Storages/StorageReplicatedMergeTree.cpp | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/dbms/src/Interpreters/Settings.h b/dbms/src/Interpreters/Settings.h index d0753ea3c9e..08522929179 100644 --- a/dbms/src/Interpreters/Settings.h +++ b/dbms/src/Interpreters/Settings.h @@ -268,6 +268,7 @@ struct Settings M(SettingUInt64, enable_conditional_computation, 0, "Enable conditional computations") \ \ M(SettingDateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic' and 'best_effort'.") \ + M(SettingUInt64, max_fetch_partition_retries_count, 5, "Amount of retries while fetching partition from another host.") \ #define DECLARE(TYPE, NAME, DEFAULT, DESCRIPTION) \ TYPE NAME {DEFAULT}; diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp index 75f6b045d73..734ce8cd1a8 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp +++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp @@ -3942,7 +3942,7 @@ void StorageReplicatedMergeTree::fetchPartition(const ASTPtr & partition, const if (try_no) LOG_INFO(log, "Some of parts (" << missing_parts.size() << ") are missing. Will try to fetch covering parts."); - if (try_no >= 5) + if (try_no >= context.getSettings().max_fetch_partition_retries_count) throw Exception("Too many retries to fetch parts from " + best_replica_path, ErrorCodes::TOO_MANY_RETRIES_TO_FETCH_PARTS); Strings parts = getZooKeeper()->getChildren(best_replica_path + "/parts"); @@ -3989,7 +3989,8 @@ void StorageReplicatedMergeTree::fetchPartition(const ASTPtr & partition, const } catch (const DB::Exception & e) { - if (e.code() != ErrorCodes::RECEIVED_ERROR_FROM_REMOTE_IO_SERVER && e.code() != ErrorCodes::RECEIVED_ERROR_TOO_MANY_REQUESTS) + if (e.code() != ErrorCodes::RECEIVED_ERROR_FROM_REMOTE_IO_SERVER && e.code() != ErrorCodes::RECEIVED_ERROR_TOO_MANY_REQUESTS + && e.code() != ErrorCodes::CANNOT_READ_ALL_DATA) throw; LOG_INFO(log, e.displayText()); From abbbd26e71ccade82141a193cd6a05df99f24c73 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 9 Aug 2018 04:42:46 +0300 Subject: [PATCH 02/22] Auto version update to [18.10.0] [54405] --- dbms/cmake/version.cmake | 10 +++++----- debian/changelog | 4 ++-- docker/client/Dockerfile | 2 +- docker/server/Dockerfile | 2 +- docker/test/Dockerfile | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/dbms/cmake/version.cmake b/dbms/cmake/version.cmake index 4e1fb44caa4..719a722d439 100644 --- a/dbms/cmake/version.cmake +++ b/dbms/cmake/version.cmake @@ -1,11 +1,11 @@ # This strings autochanged from release_lib.sh: -set(VERSION_REVISION 54404 CACHE STRING "") +set(VERSION_REVISION 54405 CACHE STRING "") set(VERSION_MAJOR 18 CACHE STRING "") -set(VERSION_MINOR 9 CACHE STRING "") +set(VERSION_MINOR 10 CACHE STRING "") set(VERSION_PATCH 0 CACHE STRING "") -set(VERSION_GITHASH c83721a02db002eef7ff864f82d53ca89d47f9e6 CACHE STRING "") -set(VERSION_DESCRIBE v18.9.0-testing CACHE STRING "") -set(VERSION_STRING 18.9.0 CACHE STRING "") +set(VERSION_GITHASH cd7b254b7bff5bdfeadcd3eff0df20648870c939 CACHE STRING "") +set(VERSION_DESCRIBE v18.10.0-testing CACHE STRING "") +set(VERSION_STRING 18.10.0 CACHE STRING "") # 
end of autochange set(VERSION_EXTRA "" CACHE STRING "") diff --git a/debian/changelog b/debian/changelog index 1b7b9165ed5..978b9cf4a8d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,5 @@ -clickhouse (18.9.0) unstable; urgency=low +clickhouse (18.10.0) unstable; urgency=low * Modified source code - -- Alexey Milovidov Fri, 03 Aug 2018 19:17:05 +0300 + -- Thu, 09 Aug 2018 04:42:46 +0300 diff --git a/docker/client/Dockerfile b/docker/client/Dockerfile index 5f3b6ad9d42..073ff28ee43 100644 --- a/docker/client/Dockerfile +++ b/docker/client/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=18.9.0 +ARG version=18.10.0 RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile index af1c42c85e7..a8a2d066258 100644 --- a/docker/server/Dockerfile +++ b/docker/server/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=18.9.0 +ARG version=18.10.0 RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/docker/test/Dockerfile b/docker/test/Dockerfile index f03e21e082b..c362def2f01 100644 --- a/docker/test/Dockerfile +++ b/docker/test/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=18.9.0 +ARG version=18.10.0 RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ From 71a093fa483af3f8fcf96d8b3ebd8ae5413823be Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 9 Aug 2018 21:18:29 +0300 Subject: [PATCH 03/22] Removed obsolete variable [#CLICKHOUSE-2] --- CMakeLists.txt | 2 +- cmake/find_capnp.cmake | 2 +- cmake/find_rdkafka.cmake | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f5aee27ddab..c664b2f2945 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -61,7 +61,7 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") endif () option (TEST_COVERAGE "Enables flags for test coverage" OFF) -option (ENABLE_TESTS "Enables tests" ${NOT_MSVC}) +option (ENABLE_TESTS "Enables tests" ON) option (USE_STATIC_LIBRARIES "Set to FALSE to use shared libraries" ON) option (MAKE_STATIC_LIBRARIES "Set to FALSE to make shared libraries" ${USE_STATIC_LIBRARIES}) diff --git a/cmake/find_capnp.cmake b/cmake/find_capnp.cmake index 03ecadda6a1..6c064112686 100644 --- a/cmake/find_capnp.cmake +++ b/cmake/find_capnp.cmake @@ -1,4 +1,4 @@ -option (ENABLE_CAPNP "Enable Cap'n Proto" ${NOT_MSVC}) +option (ENABLE_CAPNP "Enable Cap'n Proto" ON) if (ENABLE_CAPNP) # cmake 3.5.1 bug: diff --git a/cmake/find_rdkafka.cmake b/cmake/find_rdkafka.cmake index f05ced94707..c62c32bb64f 100644 --- a/cmake/find_rdkafka.cmake +++ b/cmake/find_rdkafka.cmake @@ -1,4 +1,4 @@ -option (ENABLE_RDKAFKA "Enable kafka" ${NOT_MSVC}) +option (ENABLE_RDKAFKA "Enable kafka" ON) if (ENABLE_RDKAFKA) From 7c08cae609aa1009c1a0630df45c8777662f8b0c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 9 Aug 2018 22:11:46 +0300 Subject: [PATCH 04/22] Fixed link order #2807 --- contrib/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 3c0e284e3b9..045352b7164 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -127,6 +127,7 @@ if (USE_INTERNAL_RDKAFKA_LIBRARY) if (USE_INTERNAL_SSL_LIBRARY AND MAKE_STATIC_LIBRARIES) 
target_include_directories(rdkafka PRIVATE BEFORE ${OPENSSL_INCLUDE_DIR})
+        target_link_libraries(rdkafka PRIVATE ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY})
     endif ()
     target_include_directories(rdkafka PRIVATE BEFORE ${ZLIB_INCLUDE_DIR})
 endif ()

From 998c754207ed8302cc70624543dbb29a3c154e77 Mon Sep 17 00:00:00 2001
From: alexey-milovidov
Date: Thu, 9 Aug 2018 22:19:12 +0300
Subject: [PATCH 05/22] Update CHANGELOG.md

---
 CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4f26f565e8e..99994b0621d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -36,6 +36,10 @@
 * Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/yandex/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee))
 * Fixed server crash when using the `countArray()` aggregate function.
 
+### Backward incompatible changes:
+
+* Parameters for the `Kafka` engine were changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql` and add the `kafka_row_delimiter` parameter with the value `''`.
+
 ## ClickHouse release 18.1.0, 2018-07-23
 
 ### New features:

From 25024fe91fad64ef7e4ce3cfcd75715f4c0ef4dc Mon Sep 17 00:00:00 2001
From: alexey-milovidov
Date: Thu, 9 Aug 2018 22:21:31 +0300
Subject: [PATCH 06/22] Update CHANGELOG_RU.md

---
 CHANGELOG_RU.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGELOG_RU.md b/CHANGELOG_RU.md
index 494f76379c2..7988e7850c6 100644
--- a/CHANGELOG_RU.md
+++ b/CHANGELOG_RU.md
@@ -43,6 +43,10 @@
 * Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/yandex/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee))
 * Fixed a server crash when using the `countArray()` function.
 
+### Backward incompatible changes:
+
+* The parameter list for `Kafka` tables was changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If you used the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql`, adding the `kafka_row_delimiter` parameter with the value `''` in the appropriate place.
+
 
 ## ClickHouse release 18.1.0, 2018-07-23

From bbb3e240c0313c2585fd912f5c7b81a9f3713d69 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Thu, 9 Aug 2018 22:46:20 +0300
Subject: [PATCH 07/22] Revert.
Fixed link order #2807 --- contrib/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 045352b7164..3c0e284e3b9 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -127,7 +127,6 @@ if (USE_INTERNAL_RDKAFKA_LIBRARY) if (USE_INTERNAL_SSL_LIBRARY AND MAKE_STATIC_LIBRARIES) target_include_directories(rdkafka PRIVATE BEFORE ${OPENSSL_INCLUDE_DIR}) - target_link_libraries(rdkafka PRIVATE ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY}) endif () target_include_directories(rdkafka PRIVATE BEFORE ${ZLIB_INCLUDE_DIR}) endif () From 2f2fafb74a72a517c44592f2985c0739c7f1ae91 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 9 Aug 2018 23:43:28 +0300 Subject: [PATCH 08/22] Fixed link order #2807 --- dbms/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt index 91d5b7676a2..b3bb1a65c23 100644 --- a/dbms/CMakeLists.txt +++ b/dbms/CMakeLists.txt @@ -213,7 +213,7 @@ if (USE_CAPNP) endif () if (USE_RDKAFKA) - target_link_libraries (dbms ${RDKAFKA_LIBRARY}) + target_link_libraries (dbms ${RDKAFKA_LIBRARY} ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY}) if (NOT USE_INTERNAL_RDKAFKA_LIBRARY) target_include_directories (dbms SYSTEM BEFORE PRIVATE ${RDKAFKA_INCLUDE_DIR}) endif () From b6d6b57cec4d0385c08a809437e8c149f59ec4c6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 10 Aug 2018 00:01:59 +0300 Subject: [PATCH 09/22] Revert "Fixed link order #2807" This reverts commit 2f2fafb74a72a517c44592f2985c0739c7f1ae91. --- dbms/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt index b3bb1a65c23..91d5b7676a2 100644 --- a/dbms/CMakeLists.txt +++ b/dbms/CMakeLists.txt @@ -213,7 +213,7 @@ if (USE_CAPNP) endif () if (USE_RDKAFKA) - target_link_libraries (dbms ${RDKAFKA_LIBRARY} ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY}) + target_link_libraries (dbms ${RDKAFKA_LIBRARY}) if (NOT USE_INTERNAL_RDKAFKA_LIBRARY) target_include_directories (dbms SYSTEM BEFORE PRIVATE ${RDKAFKA_INCLUDE_DIR}) endif () From 497c8fb7426c8d57f494e3bb8a6da6c3709d1d4e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 10 Aug 2018 02:02:52 +0300 Subject: [PATCH 10/22] Better CMakeLists for rdkafka library [#CLICKHOUSE-2] --- contrib/CMakeLists.txt | 21 +------ contrib/librdkafka-cmake/CMakeLists.txt | 59 ++++++++++++++++++++ contrib/librdkafka-cmake/config.h | 74 +++++++++++++++++++++++++ contrib/librdkafka-cmake/include/README | 1 + 4 files changed, 136 insertions(+), 19 deletions(-) create mode 100644 contrib/librdkafka-cmake/CMakeLists.txt create mode 100644 contrib/librdkafka-cmake/config.h create mode 100644 contrib/librdkafka-cmake/include/README diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 3c0e284e3b9..134b46af4ca 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -109,26 +109,9 @@ if (ENABLE_MYSQL AND USE_INTERNAL_MYSQL_LIBRARY) endif () if (USE_INTERNAL_RDKAFKA_LIBRARY) - set (RDKAFKA_BUILD_EXAMPLES OFF CACHE INTERNAL "") - set (RDKAFKA_BUILD_TESTS OFF CACHE INTERNAL "") - set (RDKAFKA_BUILD_STATIC ${MAKE_STATIC_LIBRARIES} CACHE INTERNAL "") - mark_as_advanced (ZLIB_INCLUDE_DIR) - - if (USE_INTERNAL_SSL_LIBRARY) - if (MAKE_STATIC_LIBRARIES) - add_library(bundled-ssl ALIAS ${OPENSSL_SSL_LIBRARY}) - set (WITH_BUNDLED_SSL 1 CACHE INTERNAL "") - else () - set (WITH_SSL 0 CACHE INTERNAL "") - endif () - endif () - - add_subdirectory (librdkafka) - - if 
(USE_INTERNAL_SSL_LIBRARY AND MAKE_STATIC_LIBRARIES) - target_include_directories(rdkafka PRIVATE BEFORE ${OPENSSL_INCLUDE_DIR}) - endif () + add_subdirectory (librdkafka-cmake) target_include_directories(rdkafka PRIVATE BEFORE ${ZLIB_INCLUDE_DIR}) + target_include_directories(rdkafka PRIVATE BEFORE ${OPENSSL_INCLUDE_DIR}) endif () if (ENABLE_ODBC AND USE_INTERNAL_ODBC_LIBRARY) diff --git a/contrib/librdkafka-cmake/CMakeLists.txt b/contrib/librdkafka-cmake/CMakeLists.txt new file mode 100644 index 00000000000..96701c4f8a7 --- /dev/null +++ b/contrib/librdkafka-cmake/CMakeLists.txt @@ -0,0 +1,59 @@ +set(RDKAFKA_SOURCE_DIR ${CMAKE_SOURCE_DIR}/contrib/librdkafka/src) + +set(SRCS +${RDKAFKA_SOURCE_DIR}/crc32c.c +${RDKAFKA_SOURCE_DIR}/rdaddr.c +${RDKAFKA_SOURCE_DIR}/rdavl.c +${RDKAFKA_SOURCE_DIR}/rdbuf.c +${RDKAFKA_SOURCE_DIR}/rdcrc32.c +${RDKAFKA_SOURCE_DIR}/rdkafka.c +${RDKAFKA_SOURCE_DIR}/rdkafka_assignor.c +${RDKAFKA_SOURCE_DIR}/rdkafka_broker.c +${RDKAFKA_SOURCE_DIR}/rdkafka_buf.c +${RDKAFKA_SOURCE_DIR}/rdkafka_cgrp.c +${RDKAFKA_SOURCE_DIR}/rdkafka_conf.c +${RDKAFKA_SOURCE_DIR}/rdkafka_event.c +${RDKAFKA_SOURCE_DIR}/rdkafka_feature.c +${RDKAFKA_SOURCE_DIR}/rdkafka_lz4.c +${RDKAFKA_SOURCE_DIR}/rdkafka_metadata.c +${RDKAFKA_SOURCE_DIR}/rdkafka_metadata_cache.c +${RDKAFKA_SOURCE_DIR}/rdkafka_msg.c +${RDKAFKA_SOURCE_DIR}/rdkafka_msgset_reader.c +${RDKAFKA_SOURCE_DIR}/rdkafka_msgset_writer.c +${RDKAFKA_SOURCE_DIR}/rdkafka_offset.c +${RDKAFKA_SOURCE_DIR}/rdkafka_op.c +${RDKAFKA_SOURCE_DIR}/rdkafka_partition.c +${RDKAFKA_SOURCE_DIR}/rdkafka_pattern.c +${RDKAFKA_SOURCE_DIR}/rdkafka_queue.c +${RDKAFKA_SOURCE_DIR}/rdkafka_range_assignor.c +${RDKAFKA_SOURCE_DIR}/rdkafka_request.c +${RDKAFKA_SOURCE_DIR}/rdkafka_roundrobin_assignor.c +${RDKAFKA_SOURCE_DIR}/rdkafka_sasl.c +${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_plain.c +${RDKAFKA_SOURCE_DIR}/rdkafka_subscription.c +${RDKAFKA_SOURCE_DIR}/rdkafka_timer.c +${RDKAFKA_SOURCE_DIR}/rdkafka_topic.c +${RDKAFKA_SOURCE_DIR}/rdkafka_transport.c +${RDKAFKA_SOURCE_DIR}/rdkafka_interceptor.c +${RDKAFKA_SOURCE_DIR}/rdkafka_header.c +${RDKAFKA_SOURCE_DIR}/rdlist.c +${RDKAFKA_SOURCE_DIR}/rdlog.c +${RDKAFKA_SOURCE_DIR}/rdmurmur2.c +${RDKAFKA_SOURCE_DIR}/rdports.c +${RDKAFKA_SOURCE_DIR}/rdrand.c +${RDKAFKA_SOURCE_DIR}/rdregex.c +${RDKAFKA_SOURCE_DIR}/rdstring.c +${RDKAFKA_SOURCE_DIR}/rdunittest.c +${RDKAFKA_SOURCE_DIR}/rdvarint.c +${RDKAFKA_SOURCE_DIR}/snappy.c +${RDKAFKA_SOURCE_DIR}/tinycthread.c +${RDKAFKA_SOURCE_DIR}/xxhash.c +${RDKAFKA_SOURCE_DIR}/lz4.c +${RDKAFKA_SOURCE_DIR}/lz4frame.c +${RDKAFKA_SOURCE_DIR}/lz4hc.c +${RDKAFKA_SOURCE_DIR}/rdgz.c +) + +add_library(rdkafka STATIC ${SRCS}) +target_include_directories(rdkafka PRIVATE include) +target_link_libraries(rdkafka PUBLIC ${ZLIB_LIBRARIES} ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY}) diff --git a/contrib/librdkafka-cmake/config.h b/contrib/librdkafka-cmake/config.h new file mode 100644 index 00000000000..68e93a10ff1 --- /dev/null +++ b/contrib/librdkafka-cmake/config.h @@ -0,0 +1,74 @@ +// Automatically generated by ./configure +#ifndef _CONFIG_H_ +#define _CONFIG_H_ +#define ARCH "x86_64" +#define CPU "generic" +#define WITHOUT_OPTIMIZATION 0 +#define ENABLE_DEVEL 0 +#define ENABLE_VALGRIND 0 +#define ENABLE_REFCNT_DEBUG 0 +#define ENABLE_SHAREDPTR_DEBUG 0 +#define ENABLE_LZ4_EXT 1 +#define ENABLE_SSL 1 +//#define ENABLE_SASL 1 +#define MKL_APP_NAME "librdkafka" +#define MKL_APP_DESC_ONELINE "The Apache Kafka C/C++ library" +// distro +//#define SOLIB_EXT ".so" +// gcc +//#define WITH_GCC 1 +// gxx +//#define 
WITH_GXX 1 +// pkgconfig +//#define WITH_PKGCONFIG 1 +// install +//#define WITH_INSTALL 1 +// PIC +//#define HAVE_PIC 1 +// gnulib +//#define WITH_GNULD 1 +// __atomic_32 +#define HAVE_ATOMICS_32 1 +// __atomic_32 +#define HAVE_ATOMICS_32_ATOMIC 1 +// atomic_32 +#define ATOMIC_OP32(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST) +// __atomic_64 +#define HAVE_ATOMICS_64 1 +// __atomic_64 +#define HAVE_ATOMICS_64_ATOMIC 1 +// atomic_64 +#define ATOMIC_OP64(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST) +// atomic_64 +#define ATOMIC_OP(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST) +// parseversion +#define RDKAFKA_VERSION_STR "0.11.4" +// parseversion +#define MKL_APP_VERSION "0.11.4" +// libdl +//#define WITH_LIBDL 1 +// WITH_PLUGINS +//#define WITH_PLUGINS 1 +// zlib +#define WITH_ZLIB 1 +// WITH_SNAPPY +#define WITH_SNAPPY 1 +// WITH_SOCKEM +#define WITH_SOCKEM 1 +// libssl +#define WITH_SSL 1 +// WITH_SASL_SCRAM +//#define WITH_SASL_SCRAM 1 +// crc32chw +#define WITH_CRC32C_HW 1 +// regex +#define HAVE_REGEX 1 +// strndup +#define HAVE_STRNDUP 1 +// strerror_r +#define HAVE_STRERROR_R 1 +// pthread_setname_gnu +#define HAVE_PTHREAD_SETNAME_GNU 1 +// python +//#define HAVE_PYTHON 1 +#endif /* _CONFIG_H_ */ diff --git a/contrib/librdkafka-cmake/include/README b/contrib/librdkafka-cmake/include/README new file mode 100644 index 00000000000..58fa024e68a --- /dev/null +++ b/contrib/librdkafka-cmake/include/README @@ -0,0 +1 @@ +This directory is needed because rdkafka files have #include "../config.h" From a82aee19d250fcf9ef479838b8cc87a27d50bd93 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 10 Aug 2018 02:14:31 +0300 Subject: [PATCH 11/22] Better CMakeLists for rdkafka library [#CLICKHOUSE-2] --- contrib/librdkafka-cmake/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/librdkafka-cmake/CMakeLists.txt b/contrib/librdkafka-cmake/CMakeLists.txt index 96701c4f8a7..34f2315c33c 100644 --- a/contrib/librdkafka-cmake/CMakeLists.txt +++ b/contrib/librdkafka-cmake/CMakeLists.txt @@ -56,4 +56,5 @@ ${RDKAFKA_SOURCE_DIR}/rdgz.c add_library(rdkafka STATIC ${SRCS}) target_include_directories(rdkafka PRIVATE include) +target_include_directories(rdkafka PUBLIC ${RDKAFKA_SOURCE_DIR}) target_link_libraries(rdkafka PUBLIC ${ZLIB_LIBRARIES} ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY}) From 973bdab77ff6a380347030fab2727edf7ca5fc98 Mon Sep 17 00:00:00 2001 From: proller Date: Fri, 10 Aug 2018 04:20:10 +0300 Subject: [PATCH 12/22] Format ODBCDriver2 with NULL support (#2834) * Format ODBCDriver2 with NULL support * Fix comment * Update ODBCDriver2BlockOutputStream.cpp * clean --- dbms/src/Formats/FormatFactory.cpp | 2 + .../Formats/ODBCDriver2BlockOutputStream.cpp | 95 +++++++++++++++++++ .../Formats/ODBCDriver2BlockOutputStream.h | 44 +++++++++ 3 files changed, 141 insertions(+) create mode 100644 dbms/src/Formats/ODBCDriver2BlockOutputStream.cpp create mode 100644 dbms/src/Formats/ODBCDriver2BlockOutputStream.h diff --git a/dbms/src/Formats/FormatFactory.cpp b/dbms/src/Formats/FormatFactory.cpp index a1910492afd..068454c8681 100644 --- a/dbms/src/Formats/FormatFactory.cpp +++ b/dbms/src/Formats/FormatFactory.cpp @@ -120,6 +120,7 @@ void registerOutputFormatJSON(FormatFactory & factory); void registerOutputFormatJSONCompact(FormatFactory & factory); void registerOutputFormatXML(FormatFactory & factory); void registerOutputFormatODBCDriver(FormatFactory & factory); +void 
registerOutputFormatODBCDriver2(FormatFactory & factory); void registerOutputFormatNull(FormatFactory & factory); /// Input only formats. @@ -153,6 +154,7 @@ FormatFactory::FormatFactory() registerOutputFormatJSONCompact(*this); registerOutputFormatXML(*this); registerOutputFormatODBCDriver(*this); + registerOutputFormatODBCDriver2(*this); registerOutputFormatNull(*this); } diff --git a/dbms/src/Formats/ODBCDriver2BlockOutputStream.cpp b/dbms/src/Formats/ODBCDriver2BlockOutputStream.cpp new file mode 100644 index 00000000000..85af97903ae --- /dev/null +++ b/dbms/src/Formats/ODBCDriver2BlockOutputStream.cpp @@ -0,0 +1,95 @@ +#include +#include +#include +#include +#include + + +#include + + +namespace DB +{ +ODBCDriver2BlockOutputStream::ODBCDriver2BlockOutputStream( + WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings) + : out(out_), header(header_), format_settings(format_settings) +{ +} + +void ODBCDriver2BlockOutputStream::flush() +{ + out.next(); +} + +void writeODBCString(WriteBuffer & out, const std::string & str) +{ + writeIntBinary(Int32(str.size()), out); + out.write(str.data(), str.size()); +} + +void ODBCDriver2BlockOutputStream::write(const Block & block) +{ + const size_t rows = block.rows(); + const size_t columns = block.columns(); + String text_value; + + for (size_t i = 0; i < rows; ++i) + { + for (size_t j = 0; j < columns; ++j) + { + text_value.resize(0); + const ColumnWithTypeAndName & col = block.getByPosition(j); + + if (col.column->isNullAt(i)) + { + writeIntBinary(Int32(-1), out); + } + else + { + { + WriteBufferFromString text_out(text_value); + col.type->serializeText(*col.column, i, text_out, format_settings); + } + writeODBCString(out, text_value); + } + } + } +} + +void ODBCDriver2BlockOutputStream::writePrefix() +{ + const size_t columns = header.columns(); + + /// Number of header rows. + writeIntBinary(Int32(2), out); + + /// Names of columns. + /// Number of columns + 1 for first name column. + writeIntBinary(Int32(columns + 1), out); + writeODBCString(out, "name"); + for (size_t i = 0; i < columns; ++i) + { + const ColumnWithTypeAndName & col = header.getByPosition(i); + writeODBCString(out, col.name); + } + + /// Types of columns. + writeIntBinary(Int32(columns + 1), out); + writeODBCString(out, "type"); + for (size_t i = 0; i < columns; ++i) + { + const ColumnWithTypeAndName & col = header.getByPosition(i); + writeODBCString(out, col.type->getName()); + } +} + + +void registerOutputFormatODBCDriver2(FormatFactory & factory) +{ + factory.registerOutputFormat( + "ODBCDriver2", [](WriteBuffer & buf, const Block & sample, const Context &, const FormatSettings & format_settings) { + return std::make_shared(buf, sample, format_settings); + }); +} + +} diff --git a/dbms/src/Formats/ODBCDriver2BlockOutputStream.h b/dbms/src/Formats/ODBCDriver2BlockOutputStream.h new file mode 100644 index 00000000000..7e295b52d60 --- /dev/null +++ b/dbms/src/Formats/ODBCDriver2BlockOutputStream.h @@ -0,0 +1,44 @@ +#pragma once + +#include +#include +#include +#include + + +namespace DB +{ +class WriteBuffer; + + +/** A data format designed to simplify the implementation of the ODBC driver. + * ODBC driver is designed to be build for different platforms without dependencies from the main code, + * so the format is made that way so that it can be as easy as possible to parse it. + * A header is displayed with the required information. + * The data is then output in the order of the rows. 
Each value is displayed as follows: length in Int32 format (-1 for NULL), then data in text form. + */ +class ODBCDriver2BlockOutputStream : public IBlockOutputStream +{ +public: + ODBCDriver2BlockOutputStream(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings); + + Block getHeader() const override + { + return header; + } + void write(const Block & block) override; + void writePrefix() override; + + void flush() override; + std::string getContentType() const override + { + return "application/octet-stream"; + } + +private: + WriteBuffer & out; + const Block header; + const FormatSettings format_settings; +}; + +} From c019d732c5afc96b0062c5a5e1f80c32af5dacdb Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Fri, 10 Aug 2018 04:27:54 +0300 Subject: [PATCH 13/22] ClickHouse 3419 Add setting prefer_localhost_replica (#2832) * add setting prefer_localhost_replica * add prefer_localhost_replica setting * fix bugs * update setting comment * Add test for prefer_localhost_replica * fix bug --- dbms/src/Interpreters/Cluster.cpp | 77 +++++++------------ dbms/src/Interpreters/Cluster.h | 2 +- .../ClusterProxy/SelectStreamFactory.cpp | 21 +++-- dbms/src/Interpreters/Settings.h | 2 + .../configs/remote_servers.xml | 12 +++ .../test_insert_into_distributed/test.py | 50 +++++++++++- 6 files changed, 99 insertions(+), 65 deletions(-) diff --git a/dbms/src/Interpreters/Cluster.cpp b/dbms/src/Interpreters/Cluster.cpp index cd1a3a2da11..a27dcdf6265 100644 --- a/dbms/src/Interpreters/Cluster.cpp +++ b/dbms/src/Interpreters/Cluster.cpp @@ -201,23 +201,18 @@ Cluster::Cluster(Poco::Util::AbstractConfiguration & config, const Settings & se info.weight = weight; if (address.is_local) - { info.local_addresses.push_back(address); - info.per_replica_pools = {nullptr}; - } - else - { - ConnectionPoolPtr pool = std::make_shared( - settings.distributed_connections_pool_size, - address.host_name, address.port, - address.default_database, address.user, address.password, - ConnectionTimeouts::getTCPTimeoutsWithoutFailover(settings).getSaturated(settings.max_execution_time), - "server", address.compression, address.secure); - info.pool = std::make_shared( - ConnectionPoolPtrs{pool}, settings.load_balancing, settings.connections_with_failover_max_tries); - info.per_replica_pools = {std::move(pool)}; - } + ConnectionPoolPtr pool = std::make_shared( + settings.distributed_connections_pool_size, + address.host_name, address.port, + address.default_database, address.user, address.password, + ConnectionTimeouts::getTCPTimeoutsWithoutFailover(settings).getSaturated(settings.max_execution_time), + "server", address.compression, address.secure); + + info.pool = std::make_shared( + ConnectionPoolPtrs{pool}, settings.load_balancing, settings.connections_with_failover_max_tries); + info.per_replica_pools = {std::move(pool)}; if (weight) slot_to_shard.insert(std::end(slot_to_shard), weight, shards_info.size()); @@ -276,36 +271,25 @@ Cluster::Cluster(Poco::Util::AbstractConfiguration & config, const Settings & se Addresses shard_local_addresses; - ConnectionPoolPtrs remote_replicas_pools; ConnectionPoolPtrs all_replicas_pools; - remote_replicas_pools.reserve(replica_addresses.size()); all_replicas_pools.reserve(replica_addresses.size()); for (const auto & replica : replica_addresses) { - if (replica.is_local) - { - shard_local_addresses.push_back(replica); - all_replicas_pools.emplace_back(nullptr); - } - else - { - auto replica_pool = std::make_shared( - settings.distributed_connections_pool_size, - 
replica.host_name, replica.port, - replica.default_database, replica.user, replica.password, - ConnectionTimeouts::getTCPTimeoutsWithFailover(settings).getSaturated(settings.max_execution_time), - "server", replica.compression, replica.secure); + auto replica_pool = std::make_shared( + settings.distributed_connections_pool_size, + replica.host_name, replica.port, + replica.default_database, replica.user, replica.password, + ConnectionTimeouts::getTCPTimeoutsWithFailover(settings).getSaturated(settings.max_execution_time), + "server", replica.compression, replica.secure); - remote_replicas_pools.emplace_back(replica_pool); - all_replicas_pools.emplace_back(replica_pool); - } + all_replicas_pools.emplace_back(replica_pool); + if (replica.is_local) + shard_local_addresses.push_back(replica); } - ConnectionPoolWithFailoverPtr shard_pool; - if (!remote_replicas_pools.empty()) - shard_pool = std::make_shared( - std::move(remote_replicas_pools), settings.load_balancing, settings.connections_with_failover_max_tries); + ConnectionPoolWithFailoverPtr shard_pool = std::make_shared( + all_replicas_pools, settings.load_balancing, settings.connections_with_failover_max_tries); if (weight) slot_to_shard.insert(std::end(slot_to_shard), weight, shards_info.size()); @@ -341,32 +325,23 @@ Cluster::Cluster(const Settings & settings, const std::vector( + auto replica_pool = std::make_shared( settings.distributed_connections_pool_size, replica.host_name, replica.port, replica.default_database, replica.user, replica.password, ConnectionTimeouts::getTCPTimeoutsWithFailover(settings).getSaturated(settings.max_execution_time), "server", replica.compression, replica.secure); - all_replicas.emplace_back(replica_pool); - remote_replicas.emplace_back(replica_pool); - } + all_replicas.emplace_back(replica_pool); + if (replica.is_local && !treat_local_as_remote) + shard_local_addresses.push_back(replica); } ConnectionPoolWithFailoverPtr shard_pool = std::make_shared( - std::move(remote_replicas), settings.load_balancing, settings.connections_with_failover_max_tries); + all_replicas, settings.load_balancing, settings.connections_with_failover_max_tries); slot_to_shard.insert(std::end(slot_to_shard), default_weight, shards_info.size()); shards_info.push_back({{}, current_shard_num, default_weight, std::move(shard_local_addresses), std::move(shard_pool), diff --git a/dbms/src/Interpreters/Cluster.h b/dbms/src/Interpreters/Cluster.h index cc1f43a05ca..945e594663b 100644 --- a/dbms/src/Interpreters/Cluster.h +++ b/dbms/src/Interpreters/Cluster.h @@ -99,7 +99,7 @@ public: { public: bool isLocal() const { return !local_addresses.empty(); } - bool hasRemoteConnections() const { return pool != nullptr; } + bool hasRemoteConnections() const { return local_addresses.size() != per_replica_pools.size(); } size_t getLocalNodeCount() const { return local_addresses.size(); } bool hasInternalReplication() const { return has_internal_replication; } diff --git a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index cd6bed0c1d7..7b97a9dd218 100644 --- a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -90,7 +90,9 @@ void SelectStreamFactory::createForShard( res.emplace_back(std::move(stream)); }; - if (shard_info.isLocal()) + const auto & settings = context.getSettingsRef(); + + if (settings.prefer_localhost_replica && shard_info.isLocal()) { StoragePtr main_table_storage; @@ -106,22 +108,18 
@@ void SelectStreamFactory::createForShard( if (!main_table_storage) /// Table is absent on a local server. { ProfileEvents::increment(ProfileEvents::DistributedConnectionMissingTable); - if (shard_info.pool) + if (shard_info.hasRemoteConnections()) { LOG_WARNING( &Logger::get("ClusterProxy::SelectStreamFactory"), "There is no table " << main_table.database << "." << main_table.table << " on local replica of shard " << shard_info.shard_num << ", will try remote replicas."); - emplace_remote_stream(); - return; } else - { - /// Let it fail the usual way. - emplace_local_stream(); - return; - } + emplace_local_stream(); /// Let it fail the usual way. + + return; } const auto * replicated_storage = dynamic_cast(main_table_storage.get()); @@ -133,7 +131,6 @@ void SelectStreamFactory::createForShard( return; } - const Settings & settings = context.getSettingsRef(); UInt64 max_allowed_delay = settings.max_replica_delay_for_distributed_queries; if (!max_allowed_delay) @@ -158,7 +155,7 @@ void SelectStreamFactory::createForShard( if (!settings.fallback_to_stale_replicas_for_distributed_queries) { - if (shard_info.pool) + if (shard_info.hasRemoteConnections()) { /// If we cannot fallback, then we cannot use local replica. Try our luck with remote replicas. emplace_remote_stream(); @@ -171,7 +168,7 @@ void SelectStreamFactory::createForShard( ErrorCodes::ALL_REPLICAS_ARE_STALE); } - if (!shard_info.pool) + if (!shard_info.hasRemoteConnections()) { /// There are no remote replicas but we are allowed to fall back to stale local replica. emplace_local_stream(); diff --git a/dbms/src/Interpreters/Settings.h b/dbms/src/Interpreters/Settings.h index 08522929179..6a60485b4ca 100644 --- a/dbms/src/Interpreters/Settings.h +++ b/dbms/src/Interpreters/Settings.h @@ -268,8 +268,10 @@ struct Settings M(SettingUInt64, enable_conditional_computation, 0, "Enable conditional computations") \ \ M(SettingDateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic' and 'best_effort'.") \ + M(SettingBool, prefer_localhost_replica, 1, "1 - always send query to local replica, if it exists. 
0 - choose replica to send query between local and remote ones according to load_balancing") \
     M(SettingUInt64, max_fetch_partition_retries_count, 5, "Amount of retries while fetching partition from another host.") \
+
 
 #define DECLARE(TYPE, NAME, DEFAULT, DESCRIPTION) \
     TYPE NAME {DEFAULT};
diff --git a/dbms/tests/integration/test_insert_into_distributed/configs/remote_servers.xml b/dbms/tests/integration/test_insert_into_distributed/configs/remote_servers.xml
index 84b98cc0223..d596982ea97 100644
--- a/dbms/tests/integration/test_insert_into_distributed/configs/remote_servers.xml
+++ b/dbms/tests/integration/test_insert_into_distributed/configs/remote_servers.xml
@@ -16,5 +16,17 @@
             </replica>
         </shard>
     </test_local_cluster>
+    <shard_with_local_replica>
+        <shard>
+            <replica>
+                <host>node1</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>node2</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+    </shard_with_local_replica>
 </remote_servers>
 </yandex>
diff --git a/dbms/tests/integration/test_insert_into_distributed/test.py b/dbms/tests/integration/test_insert_into_distributed/test.py
index 414ac2babc8..bc90d33cdb0 100644
--- a/dbms/tests/integration/test_insert_into_distributed/test.py
+++ b/dbms/tests/integration/test_insert_into_distributed/test.py
@@ -18,6 +18,9 @@ instance_test_inserts_local_cluster = cluster.add_instance(
     'instance_test_inserts_local_cluster',
     main_configs=['configs/remote_servers.xml'])
 
+node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
+node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
+
 
 @pytest.fixture(scope="module")
 def started_cluster():
@@ -39,6 +42,20 @@
 CREATE TABLE distributed (d Date, x UInt32) ENGINE = Distributed('test_cluster', 'default', 'local')
 ''')
 
 CREATE TABLE distributed_on_local (d Date, x UInt32) ENGINE = Distributed('test_local_cluster', 'default', 'local')
 ''')
 
+    node1.query('''
+CREATE TABLE replicated(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/replicated', 'node1', date, id, 8192)
+''')
+    node2.query('''
+CREATE TABLE replicated(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/replicated', 'node2', date, id, 8192)
+''')
+
+    node1.query('''
+CREATE TABLE distributed (date Date, id UInt32) ENGINE = Distributed('shard_with_local_replica', 'default', 'replicated')
+''')
+
+    node2.query('''
+CREATE TABLE distributed (date Date, id UInt32) ENGINE = Distributed('shard_with_local_replica', 'default', 'replicated')
+''')
 
         yield cluster
 
     finally:
@@ -108,7 +125,7 @@ def test_inserts_batching(started_cluster):
 # 4. Full batch of inserts after ALTER (that have different block structure).
 # 5. What was left to insert with (d, x) order before ALTER.
expected = '''\ -20000101_20000101_1_1_0 [1] +20000101_20000101_1_1_0 [1] 20000101_20000101_2_2_0 [3,4,5] 20000101_20000101_3_3_0 [2,7,8] 20000101_20000101_4_4_0 [10,11,12] @@ -122,3 +139,34 @@ def test_inserts_local(started_cluster): instance.query("INSERT INTO distributed_on_local VALUES ('2000-01-01', 1)") time.sleep(0.5) assert instance.query("SELECT count(*) FROM local").strip() == '1' + +def test_prefer_localhost_replica(started_cluster): + test_query = "SELECT * FROM distributed ORDER BY id;" + node1.query("INSERT INTO distributed VALUES (toDate('2017-06-17'), 11)") + node2.query("INSERT INTO distributed VALUES (toDate('2017-06-17'), 22)") + time.sleep(1.0) + expected_distributed = '''\ +2017-06-17 11 +2017-06-17 22 +''' + assert TSV(node1.query(test_query)) == TSV(expected_distributed) + assert TSV(node2.query(test_query)) == TSV(expected_distributed) + with PartitionManager() as pm: + pm.partition_instances(node1, node2, action='REJECT --reject-with tcp-reset') + node1.query("INSERT INTO replicated VALUES (toDate('2017-06-17'), 33)") + node2.query("INSERT INTO replicated VALUES (toDate('2017-06-17'), 44)") + time.sleep(1.0) + expected_from_node2 = '''\ +2017-06-17 11 +2017-06-17 22 +2017-06-17 44 +''' + # Query is sent to node2, as it local and prefer_localhost_replica=1 + assert TSV(node2.query(test_query)) == TSV(expected_from_node2) + expected_from_node1 = '''\ +2017-06-17 11 +2017-06-17 22 +2017-06-17 33 +''' + # Now query is sent to node1, as it higher in order + assert TSV(node2.query("SET load_balancing='in_order'; SET prefer_localhost_replica=0;" + test_query)) == TSV(expected_from_node1) \ No newline at end of file From 29e40f0087bec57b01829da11b199f39c6eabd67 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 10 Aug 2018 04:31:55 +0300 Subject: [PATCH 14/22] Fixed potential error #2832 --- .../test_insert_into_distributed/test.py | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/dbms/tests/integration/test_insert_into_distributed/test.py b/dbms/tests/integration/test_insert_into_distributed/test.py index bc90d33cdb0..9da48ec55ee 100644 --- a/dbms/tests/integration/test_insert_into_distributed/test.py +++ b/dbms/tests/integration/test_insert_into_distributed/test.py @@ -125,11 +125,11 @@ def test_inserts_batching(started_cluster): # 4. Full batch of inserts after ALTER (that have different block structure). # 5. What was left to insert with (d, x) order before ALTER. 
expected = '''\ -20000101_20000101_1_1_0 [1] -20000101_20000101_2_2_0 [3,4,5] -20000101_20000101_3_3_0 [2,7,8] -20000101_20000101_4_4_0 [10,11,12] -20000101_20000101_5_5_0 [6,9] +20000101_20000101_1_1_0\t[1] +20000101_20000101_2_2_0\t[3,4,5] +20000101_20000101_3_3_0\t[2,7,8] +20000101_20000101_4_4_0\t[10,11,12] +20000101_20000101_5_5_0\t[6,9] ''' assert TSV(result) == TSV(expected) @@ -146,8 +146,8 @@ def test_prefer_localhost_replica(started_cluster): node2.query("INSERT INTO distributed VALUES (toDate('2017-06-17'), 22)") time.sleep(1.0) expected_distributed = '''\ -2017-06-17 11 -2017-06-17 22 +2017-06-17\t11 +2017-06-17\t22 ''' assert TSV(node1.query(test_query)) == TSV(expected_distributed) assert TSV(node2.query(test_query)) == TSV(expected_distributed) @@ -157,16 +157,16 @@ def test_prefer_localhost_replica(started_cluster): node2.query("INSERT INTO replicated VALUES (toDate('2017-06-17'), 44)") time.sleep(1.0) expected_from_node2 = '''\ -2017-06-17 11 -2017-06-17 22 -2017-06-17 44 +2017-06-17\t11 +2017-06-17\t22 +2017-06-17\t44 ''' # Query is sent to node2, as it local and prefer_localhost_replica=1 assert TSV(node2.query(test_query)) == TSV(expected_from_node2) expected_from_node1 = '''\ -2017-06-17 11 -2017-06-17 22 -2017-06-17 33 +2017-06-17\t11 +2017-06-17\t22 +2017-06-17\t33 ''' # Now query is sent to node1, as it higher in order - assert TSV(node2.query("SET load_balancing='in_order'; SET prefer_localhost_replica=0;" + test_query)) == TSV(expected_from_node1) \ No newline at end of file + assert TSV(node2.query("SET load_balancing='in_order'; SET prefer_localhost_replica=0;" + test_query)) == TSV(expected_from_node1) From d8ca4345b62afd2be166a430c5c136960eca5baf Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 10 Aug 2018 04:41:54 +0300 Subject: [PATCH 15/22] Fixed style violations [#CLICKHOUSE-2] --- dbms/src/Formats/ODBCDriver2BlockOutputStream.cpp | 3 ++- dbms/src/Functions/FunctionsHashing.h | 2 +- .../src/Interpreters/ClusterProxy/SelectStreamFactory.cpp | 2 +- dbms/src/Interpreters/ExternalLoader.cpp | 8 ++++---- dbms/src/Storages/StorageFactory.cpp | 2 +- dbms/src/Storages/StorageMaterializedView.h | 1 - dbms/src/Storages/StorageMergeTree.cpp | 2 +- dbms/src/Storages/StorageReplicatedMergeTree.cpp | 4 ++-- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/dbms/src/Formats/ODBCDriver2BlockOutputStream.cpp b/dbms/src/Formats/ODBCDriver2BlockOutputStream.cpp index 85af97903ae..3d02bd92e07 100644 --- a/dbms/src/Formats/ODBCDriver2BlockOutputStream.cpp +++ b/dbms/src/Formats/ODBCDriver2BlockOutputStream.cpp @@ -87,7 +87,8 @@ void ODBCDriver2BlockOutputStream::writePrefix() void registerOutputFormatODBCDriver2(FormatFactory & factory) { factory.registerOutputFormat( - "ODBCDriver2", [](WriteBuffer & buf, const Block & sample, const Context &, const FormatSettings & format_settings) { + "ODBCDriver2", [](WriteBuffer & buf, const Block & sample, const Context &, const FormatSettings & format_settings) + { return std::make_shared(buf, sample, format_settings); }); } diff --git a/dbms/src/Functions/FunctionsHashing.h b/dbms/src/Functions/FunctionsHashing.h index dce5a51baf1..3014d4205a2 100644 --- a/dbms/src/Functions/FunctionsHashing.h +++ b/dbms/src/Functions/FunctionsHashing.h @@ -583,7 +583,7 @@ public: size_t getNumberOfArguments() const override { return 1; } - DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments */) const override + DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments */) const override { return std::make_shared>(); } 
bool useDefaultImplementationForConstants() const override { return true; } diff --git a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index 7b97a9dd218..8a304f7701e 100644 --- a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -118,7 +118,7 @@ void SelectStreamFactory::createForShard( } else emplace_local_stream(); /// Let it fail the usual way. - + return; } diff --git a/dbms/src/Interpreters/ExternalLoader.cpp b/dbms/src/Interpreters/ExternalLoader.cpp index 751af361d0f..816d4798e30 100644 --- a/dbms/src/Interpreters/ExternalLoader.cpp +++ b/dbms/src/Interpreters/ExternalLoader.cpp @@ -228,8 +228,8 @@ void ExternalLoader::reloadFromConfigFiles(const bool throw_on_error, const bool throw; } } - - /// erase removed from config loadable objects + + /// erase removed from config loadable objects std::list removed_loadable_objects; for (const auto & loadable : loadable_objects) { @@ -261,7 +261,7 @@ void ExternalLoader::reloadFromConfigFile(const std::string & config_path, const if (force_reload || last_modified > config_last_modified) { auto config = config_repository->load(config_path); - + loadable_objects_defined_in_config[config_path].clear(); /// Definitions of loadable objects may have changed, recreate all of them @@ -295,7 +295,7 @@ void ExternalLoader::reloadFromConfigFile(const std::string & config_path, const LOG_WARNING(log, config_path << ": " + config_settings.external_name + " name cannot be empty"); continue; } - + loadable_objects_defined_in_config[config_path].emplace(name); if (!loadable_name.empty() && name != loadable_name) continue; diff --git a/dbms/src/Storages/StorageFactory.cpp b/dbms/src/Storages/StorageFactory.cpp index 9ceb59abbcb..05a61343108 100644 --- a/dbms/src/Storages/StorageFactory.cpp +++ b/dbms/src/Storages/StorageFactory.cpp @@ -91,7 +91,7 @@ StoragePtr StorageFactory::get( { throw Exception( "Engine " + name + " doesn't support SETTINGS clause. 
" - "Currently only the MergeTree family of engines and Kafka engine supports it", + "Currently only the MergeTree family of engines and Kafka engine supports it", ErrorCodes::BAD_ARGUMENTS); } diff --git a/dbms/src/Storages/StorageMaterializedView.h b/dbms/src/Storages/StorageMaterializedView.h index 1f0c4c7af47..8ef047dba67 100644 --- a/dbms/src/Storages/StorageMaterializedView.h +++ b/dbms/src/Storages/StorageMaterializedView.h @@ -43,7 +43,6 @@ public: void shutdown() override; void checkTableCanBeDropped() const override; - void checkPartitionCanBeDropped(const ASTPtr & partition) override; BlockInputStreams read( diff --git a/dbms/src/Storages/StorageMergeTree.cpp b/dbms/src/Storages/StorageMergeTree.cpp index c4949345470..46c6f0b2600 100644 --- a/dbms/src/Storages/StorageMergeTree.cpp +++ b/dbms/src/Storages/StorageMergeTree.cpp @@ -131,7 +131,7 @@ void StorageMergeTree::checkTableCanBeDropped() const void StorageMergeTree::checkPartitionCanBeDropped(const ASTPtr & partition) { const_cast(getData()).recalculateColumnSizes(); - + const String partition_id = data.getPartitionIDFromQuery(partition, context); auto parts_to_remove = data.getDataPartsVectorInPartition(MergeTreeDataPartState::Committed, partition_id); diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp index 734ce8cd1a8..1292aaab8f7 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp +++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp @@ -3356,12 +3356,12 @@ void StorageReplicatedMergeTree::checkTableCanBeDropped() const void StorageReplicatedMergeTree::checkPartitionCanBeDropped(const ASTPtr & partition) { const_cast(getData()).recalculateColumnSizes(); - + const String partition_id = data.getPartitionIDFromQuery(partition, context); auto parts_to_remove = data.getDataPartsVectorInPartition(MergeTreeDataPartState::Committed, partition_id); UInt64 partition_size = 0; - + for (const auto & part : parts_to_remove) { partition_size += part->bytes_on_disk; From 1ca5607f0a3b72e6824e15090571b35d2af4cd52 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 10 Aug 2018 05:23:02 +0300 Subject: [PATCH 16/22] Mark Poco headers as system (this is intended to enable more warnings in application code) [#CLICKHOUSE-2] --- contrib/poco | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/poco b/contrib/poco index 4ab45bc3bb0..3df947389e6 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit 4ab45bc3bb0d2c476ea5385ec2d398c6bfc9f089 +Subproject commit 3df947389e6d9654919002797bdd86ed190b3963 From 419bc587c0079b51a906a65af9a10da3300ddaf2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 10 Aug 2018 05:24:10 +0300 Subject: [PATCH 17/22] Mark library headers as system (this is intended to enable more warnings in application code) [#CLICKHOUSE-2] --- CMakeLists.txt | 2 +- contrib/CMakeLists.txt | 6 +++--- contrib/boost-cmake/CMakeLists.txt | 6 +++--- contrib/re2_st/CMakeLists.txt | 3 ++- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c664b2f2945..8fff4641e24 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -174,7 +174,7 @@ if (OS_LINUX AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang") endif () if (LIBCXX_PATH) -# include_directories (BEFORE SYSTEM "${LIBCXX_PATH}/include" "${LIBCXX_PATH}/include/c++/v1") +# include_directories (SYSTEM BEFORE "${LIBCXX_PATH}/include" "${LIBCXX_PATH}/include/c++/v1") link_directories ("${LIBCXX_PATH}/lib") endif () endif () diff --git 
a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 134b46af4ca..15213d980ab 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -98,8 +98,8 @@ if (USE_INTERNAL_SSL_LIBRARY) set (USE_SHARED ${USE_STATIC_LIBRARIES}) set (LIBRESSL_SKIP_INSTALL 1) add_subdirectory (ssl) - target_include_directories(${OPENSSL_CRYPTO_LIBRARY} PUBLIC ${OPENSSL_INCLUDE_DIR}) - target_include_directories(${OPENSSL_SSL_LIBRARY} PUBLIC ${OPENSSL_INCLUDE_DIR}) + target_include_directories(${OPENSSL_CRYPTO_LIBRARY} SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR}) + target_include_directories(${OPENSSL_SSL_LIBRARY} SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR}) endif () if (ENABLE_MYSQL AND USE_INTERNAL_MYSQL_LIBRARY) @@ -145,7 +145,7 @@ if (USE_INTERNAL_POCO_LIBRARY) if (OPENSSL_FOUND AND TARGET Crypto AND (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL)) # Bug in poco https://github.com/pocoproject/poco/pull/2100 found on macos - target_include_directories(Crypto PUBLIC ${OPENSSL_INCLUDE_DIR}) + target_include_directories(Crypto SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR}) endif () endif () diff --git a/contrib/boost-cmake/CMakeLists.txt b/contrib/boost-cmake/CMakeLists.txt index 2a89293c902..7e2379c5738 100644 --- a/contrib/boost-cmake/CMakeLists.txt +++ b/contrib/boost-cmake/CMakeLists.txt @@ -42,9 +42,9 @@ ${LIBRARY_DIR}/libs/filesystem/src/windows_file_codecvt.cpp) add_library(boost_system_internal ${LIBRARY_DIR}/libs/system/src/error_code.cpp) -target_include_directories (boost_program_options_internal BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) -target_include_directories (boost_filesystem_internal BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) -target_include_directories (boost_system_internal BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) +target_include_directories (boost_program_options_internal SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) +target_include_directories (boost_filesystem_internal SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) +target_include_directories (boost_system_internal SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) target_compile_definitions (boost_program_options_internal PUBLIC BOOST_SYSTEM_NO_DEPRECATED) target_compile_definitions (boost_filesystem_internal PUBLIC BOOST_SYSTEM_NO_DEPRECATED) diff --git a/contrib/re2_st/CMakeLists.txt b/contrib/re2_st/CMakeLists.txt index 79362f4bb56..6bc7fd8f343 100644 --- a/contrib/re2_st/CMakeLists.txt +++ b/contrib/re2_st/CMakeLists.txt @@ -12,7 +12,8 @@ endforeach () add_library (re2_st ${RE2_ST_SOURCES}) target_compile_definitions (re2_st PRIVATE NDEBUG NO_THREADS re2=re2_st) -target_include_directories (re2_st PRIVATE . PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ${RE2_SOURCE_DIR}) +target_include_directories (re2_st PRIVATE .) 
+target_include_directories (re2_st SYSTEM PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ${RE2_SOURCE_DIR}) file (MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/re2_st) foreach (FILENAME filtered_re2.h re2.h set.h stringpiece.h) From e53899a56176cc01ff7ff911d3dcaa40e390875d Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 10 Aug 2018 06:02:50 +0300 Subject: [PATCH 18/22] Auto version update to [18.10.1] [54405] --- dbms/cmake/version.cmake | 8 ++++---- debian/changelog | 4 ++-- docker/client/Dockerfile | 2 +- docker/server/Dockerfile | 2 +- docker/test/Dockerfile | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/dbms/cmake/version.cmake b/dbms/cmake/version.cmake index 719a722d439..659f73b2f26 100644 --- a/dbms/cmake/version.cmake +++ b/dbms/cmake/version.cmake @@ -2,10 +2,10 @@ set(VERSION_REVISION 54405 CACHE STRING "") set(VERSION_MAJOR 18 CACHE STRING "") set(VERSION_MINOR 10 CACHE STRING "") -set(VERSION_PATCH 0 CACHE STRING "") -set(VERSION_GITHASH cd7b254b7bff5bdfeadcd3eff0df20648870c939 CACHE STRING "") -set(VERSION_DESCRIBE v18.10.0-testing CACHE STRING "") -set(VERSION_STRING 18.10.0 CACHE STRING "") +set(VERSION_PATCH 1 CACHE STRING "") +set(VERSION_GITHASH 419bc587c0079b51a906a65af9a10da3300ddaf2 CACHE STRING "") +set(VERSION_DESCRIBE v18.10.1-testing CACHE STRING "") +set(VERSION_STRING 18.10.1 CACHE STRING "") # end of autochange set(VERSION_EXTRA "" CACHE STRING "") diff --git a/debian/changelog b/debian/changelog index 978b9cf4a8d..b8b2b68d598 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,5 @@ -clickhouse (18.10.0) unstable; urgency=low +clickhouse (18.10.1) unstable; urgency=low * Modified source code - -- Thu, 09 Aug 2018 04:42:46 +0300 + -- Fri, 10 Aug 2018 06:02:50 +0300 diff --git a/docker/client/Dockerfile b/docker/client/Dockerfile index 073ff28ee43..2e17e66cdb4 100644 --- a/docker/client/Dockerfile +++ b/docker/client/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=18.10.0 +ARG version=18.10.1 RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile index a8a2d066258..e2be737e579 100644 --- a/docker/server/Dockerfile +++ b/docker/server/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=18.10.0 +ARG version=18.10.1 RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/docker/test/Dockerfile b/docker/test/Dockerfile index c362def2f01..ccefa47627b 100644 --- a/docker/test/Dockerfile +++ b/docker/test/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=18.10.0 +ARG version=18.10.1 RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ From 36db216abfa51ec63a22aa4992e68c8aa7b3a7bd Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 10 Aug 2018 07:02:56 +0300 Subject: [PATCH 19/22] Preparation for extra warnings [#CLICKHOUSE-2] --- contrib/libcpuid/CMakeLists.txt | 2 +- contrib/librdkafka-cmake/CMakeLists.txt | 2 +- .../src/Client/ConnectionPoolWithFailover.cpp | 2 +- dbms/src/Columns/Collator.cpp | 3 -- dbms/src/Common/COWPtr.h | 10 ---- dbms/src/Common/ConcurrentBoundedQueue.h | 2 +- dbms/src/Common/Exception.cpp | 4 +- dbms/src/Common/HashTable/HashTable.h | 2 +- dbms/src/Common/IFactoryWithAliases.h | 4 +- dbms/src/Common/PODArray.h | 15 ------ 
dbms/src/Common/SpaceSaving.h | 2 +- dbms/src/Common/ZooKeeper/KeeperException.h | 2 +- dbms/src/Common/ZooKeeper/ZooKeeperHolder.h | 2 +- dbms/src/Common/ZooKeeper/ZooKeeperImpl.h | 2 +- dbms/src/Common/formatIPv6.cpp | 5 +- dbms/src/Common/localBackup.cpp | 2 +- dbms/src/Common/tests/CMakeLists.txt | 2 +- dbms/src/Core/Field.h | 2 +- dbms/src/Core/UUID.h | 2 +- .../AggregatingSortedBlockInputStream.cpp | 2 +- .../CollapsingSortedBlockInputStream.cpp | 2 +- .../GraphiteRollupSortedBlockInputStream.cpp | 2 +- .../IProfilingBlockInputStream.cpp | 2 +- .../MergeSortingBlockInputStream.cpp | 4 +- .../MergeSortingBlockInputStream.h | 2 +- ...regatedMemoryEfficientBlockInputStream.cpp | 8 +-- .../MergingSortedBlockInputStream.cpp | 6 +-- .../MergingSortedBlockInputStream.h | 11 +--- .../DataStreams/RemoteBlockInputStream.cpp | 8 +-- .../ReplacingSortedBlockInputStream.cpp | 2 +- .../SummingSortedBlockInputStream.cpp | 2 +- ...sionedCollapsingSortedBlockInputStream.cpp | 2 +- dbms/src/DataTypes/DataTypeEnum.cpp | 34 ++++++------- dbms/src/DataTypes/DataTypeEnum.h | 10 ++-- dbms/src/DataTypes/NestedUtils.h | 2 +- dbms/src/Databases/DatabaseDictionary.cpp | 6 +-- dbms/src/Dictionaries/CacheDictionary.cpp | 20 ++++---- dbms/src/Dictionaries/CatBoostModel.cpp | 38 +++++++------- dbms/src/Dictionaries/CatBoostModel.h | 2 +- ...exKeyCacheDictionary_setAttributeValue.cpp | 10 ++-- .../Dictionaries/DictionaryBlockInputStream.h | 50 +++++++++---------- .../DictionaryBlockInputStreamBase.h | 2 +- .../Dictionaries/DictionarySourceFactory.cpp | 3 -- dbms/src/Dictionaries/DictionaryStructure.cpp | 18 +++---- .../Dictionaries/MongoDBBlockInputStream.cpp | 11 ++-- .../Dictionaries/MongoDBDictionarySource.cpp | 13 ++--- .../Dictionaries/MySQLDictionarySource.cpp | 16 +++--- dbms/src/Dictionaries/ODBCBlockInputStream.h | 9 ++-- .../src/Dictionaries/ODBCDictionarySource.cpp | 6 +-- dbms/src/Dictionaries/ODBCDictionarySource.h | 5 +- .../validateODBCConnectionString.cpp | 14 +++--- dbms/src/Functions/FunctionsMath.h | 20 ++++---- dbms/src/IO/CascadeWriteBuffer.h | 2 +- dbms/src/IO/InterserverWriteBuffer.h | 2 +- dbms/src/IO/LZ4_decompress_faster.cpp | 16 +++--- dbms/src/IO/LZ4_decompress_faster.h | 4 +- .../src/IO/MMapReadBufferFromFileDescriptor.h | 2 +- dbms/src/IO/MemoryReadWriteBuffer.cpp | 2 +- dbms/src/IO/ReadBufferFromFileDescriptor.cpp | 2 +- dbms/src/IO/ReadBufferFromPocoSocket.cpp | 2 +- .../IO/WriteBufferFromHTTPServerResponse.h | 2 +- dbms/src/IO/WriteBufferFromPocoSocket.cpp | 2 +- dbms/src/IO/WriteHelpers.h | 4 +- dbms/src/IO/ZlibDeflatingWriteBuffer.cpp | 4 +- dbms/src/IO/ZlibInflatingReadBuffer.cpp | 4 +- dbms/src/Interpreters/Aggregator.cpp | 2 +- dbms/src/Interpreters/DDLWorker.cpp | 4 +- dbms/src/Interpreters/DNSCacheUpdater.cpp | 4 +- dbms/src/Interpreters/DictionaryFactory.cpp | 2 +- dbms/src/Interpreters/ExpressionAnalyzer.cpp | 2 +- .../InterpreterSelectWithUnionQuery.h | 2 +- dbms/src/Interpreters/tests/CMakeLists.txt | 2 +- dbms/src/Parsers/ASTWithAlias.h | 2 +- dbms/src/Parsers/StringRange.h | 2 +- dbms/src/Storages/Kafka/StorageKafka.cpp | 6 +-- .../Storages/MergeTree/DataPartsExchange.cpp | 2 +- .../MergeTree/MergeTreeDataFormatVersion.h | 2 +- .../Storages/MergeTree/MergeTreeDataPart.cpp | 2 +- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 4 +- .../MergeTreeThreadBlockInputStream.cpp | 2 +- .../ReplicatedMergeTreePartCheckThread.cpp | 2 +- .../ReplicatedMergeTreeRestartingThread.cpp | 2 +- dbms/src/Storages/StorageFile.cpp | 2 +- dbms/src/Storages/StorageJoin.cpp | 4 +- 
dbms/src/Storages/StorageLog.cpp | 2 +- dbms/src/Storages/StorageMergeTree.cpp | 2 +- dbms/src/Storages/StorageODBC.h | 7 +-- .../Storages/StorageReplicatedMergeTree.cpp | 2 +- dbms/src/Storages/StorageSet.cpp | 2 +- dbms/src/Storages/StorageStripeLog.cpp | 4 +- dbms/src/Storages/StorageTinyLog.cpp | 2 +- dbms/src/Storages/StorageURL.cpp | 2 +- .../Storages/System/StorageSystemColumns.cpp | 14 +++--- dbms/src/TableFunctions/TableFunctionODBC.cpp | 10 ++-- libs/libcommon/include/common/DateLUTImpl.h | 2 +- libs/libcommon/include/common/JSON.h | 16 +++--- libs/libcommon/include/common/StringRef.h | 2 +- libs/libcommon/include/ext/bit_cast.h | 4 +- libs/libcommon/include/ext/collection_cast.h | 2 +- libs/libcommon/include/ext/map.h | 4 +- libs/libmysqlxx/include/mysqlxx/Connection.h | 4 +- libs/libmysqlxx/include/mysqlxx/Row.h | 2 +- 102 files changed, 267 insertions(+), 323 deletions(-) diff --git a/contrib/libcpuid/CMakeLists.txt b/contrib/libcpuid/CMakeLists.txt index c04acf99f36..cd3e7fa06fe 100644 --- a/contrib/libcpuid/CMakeLists.txt +++ b/contrib/libcpuid/CMakeLists.txt @@ -17,4 +17,4 @@ include/libcpuid/recog_amd.h include/libcpuid/recog_intel.h ) -target_include_directories (cpuid PUBLIC include) +target_include_directories (cpuid SYSTEM PUBLIC include) diff --git a/contrib/librdkafka-cmake/CMakeLists.txt b/contrib/librdkafka-cmake/CMakeLists.txt index 34f2315c33c..7211c791b2f 100644 --- a/contrib/librdkafka-cmake/CMakeLists.txt +++ b/contrib/librdkafka-cmake/CMakeLists.txt @@ -56,5 +56,5 @@ ${RDKAFKA_SOURCE_DIR}/rdgz.c add_library(rdkafka STATIC ${SRCS}) target_include_directories(rdkafka PRIVATE include) -target_include_directories(rdkafka PUBLIC ${RDKAFKA_SOURCE_DIR}) +target_include_directories(rdkafka SYSTEM PUBLIC ${RDKAFKA_SOURCE_DIR}) target_link_libraries(rdkafka PUBLIC ${ZLIB_LIBRARIES} ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY}) diff --git a/dbms/src/Client/ConnectionPoolWithFailover.cpp b/dbms/src/Client/ConnectionPoolWithFailover.cpp index 73469fcf53f..8c2f1a07bc1 100644 --- a/dbms/src/Client/ConnectionPoolWithFailover.cpp +++ b/dbms/src/Client/ConnectionPoolWithFailover.cpp @@ -226,6 +226,6 @@ ConnectionPoolWithFailover::tryGetEntry( } } return result; -}; +} } diff --git a/dbms/src/Columns/Collator.cpp b/dbms/src/Columns/Collator.cpp index aaf917fb93d..507ae9f54c9 100644 --- a/dbms/src/Columns/Collator.cpp +++ b/dbms/src/Columns/Collator.cpp @@ -3,10 +3,7 @@ #include #if USE_ICU - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wold-style-cast" #include - #pragma GCC diagnostic pop #else #ifdef __clang__ #pragma clang diagnostic push diff --git a/dbms/src/Common/COWPtr.h b/dbms/src/Common/COWPtr.h index 472bdb23390..525f2372c6c 100644 --- a/dbms/src/Common/COWPtr.h +++ b/dbms/src/Common/COWPtr.h @@ -1,17 +1,7 @@ #pragma once -#ifdef __clang__ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" -#endif - #include #include - -#ifdef __clang__ - #pragma clang diagnostic pop -#endif - #include diff --git a/dbms/src/Common/ConcurrentBoundedQueue.h b/dbms/src/Common/ConcurrentBoundedQueue.h index aafeefbba38..5ac887bf4d4 100644 --- a/dbms/src/Common/ConcurrentBoundedQueue.h +++ b/dbms/src/Common/ConcurrentBoundedQueue.h @@ -37,7 +37,7 @@ namespace detail { MoveOrCopyIfThrow()(std::forward(src), dst); } -}; +} /** A very simple thread-safe queue of limited size. * If you try to pop an item from an empty queue, the thread is blocked until the queue becomes nonempty. 
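Note on the two cleanup patterns in the hunks above: patch 19 marks third-party include paths (libcpuid, librdkafka, sparsehash) as SYSTEM, so CMake passes them with -isystem and the compiler stops reporting warnings that originate inside contrib headers, and it drops stray semicolons after function bodies and namespace braces. The following is a minimal compilable sketch, with hypothetical names not taken from the patch, of why those semicolons matter once extra warnings are enabled; Clang reports them under -Wextra-semi:

/// Hypothetical demo, not from the patch. A ';' after a function body or a
/// namespace's closing brace is an empty declaration: legal C++, but noisy
/// under stricter warning sets.

namespace demo
{

int add(int a, int b)
{
    return a + b;
} /// previously "};", and the trailing ';' would trigger -Wextra-semi

} /// previously "};", the same issue at namespace scope

int main()
{
    return demo::add(2, 2) == 4 ? 0 : 1;
}

The SYSTEM half of the cleanup needs no source changes at all: both GCC and Clang suppress warnings emitted from headers found via -isystem, which is why only the CMakeLists lines change.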
diff --git a/dbms/src/Common/Exception.cpp b/dbms/src/Common/Exception.cpp index 95bc2cd0660..ab59780c44d 100644 --- a/dbms/src/Common/Exception.cpp +++ b/dbms/src/Common/Exception.cpp @@ -130,11 +130,11 @@ int getCurrentExceptionCode() { return e.code(); } - catch (const Poco::Exception & e) + catch (const Poco::Exception &) { return ErrorCodes::POCO_EXCEPTION; } - catch (const std::exception & e) + catch (const std::exception &) { return ErrorCodes::STD_EXCEPTION; } diff --git a/dbms/src/Common/HashTable/HashTable.h b/dbms/src/Common/HashTable/HashTable.h index 9fe446ccb8f..111960138bd 100644 --- a/dbms/src/Common/HashTable/HashTable.h +++ b/dbms/src/Common/HashTable/HashTable.h @@ -74,7 +74,7 @@ bool check(const T x) { return x == 0; } template void set(T & x) { x = 0; } -}; +} /** Compile-time interface for cell of the hash table. diff --git a/dbms/src/Common/IFactoryWithAliases.h b/dbms/src/Common/IFactoryWithAliases.h index 9006a3c7cfd..c66782af798 100644 --- a/dbms/src/Common/IFactoryWithAliases.h +++ b/dbms/src/Common/IFactoryWithAliases.h @@ -94,8 +94,8 @@ public: { if (auto it = aliases.find(name); it != aliases.end()) return it->second; - else if (auto it = case_insensitive_aliases.find(Poco::toLower(name)); it != case_insensitive_aliases.end()) - return it->second; + else if (auto jt = case_insensitive_aliases.find(Poco::toLower(name)); jt != case_insensitive_aliases.end()) + return jt->second; throw Exception(getFactoryName() + ": name '" + name + "' is not alias", ErrorCodes::LOGICAL_ERROR); } diff --git a/dbms/src/Common/PODArray.h b/dbms/src/Common/PODArray.h index e8a0ce85be0..cb228f5b554 100644 --- a/dbms/src/Common/PODArray.h +++ b/dbms/src/Common/PODArray.h @@ -5,24 +5,9 @@ #include #include -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wold-style-cast" - -#ifdef __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" -#pragma clang diagnostic ignored "-Wreserved-id-macro" -#endif - #include #include -#ifdef __clang__ -#pragma clang diagnostic pop -#endif - -#pragma GCC diagnostic pop - #include #include diff --git a/dbms/src/Common/SpaceSaving.h b/dbms/src/Common/SpaceSaving.h index b08fa140fef..08d392bf56d 100644 --- a/dbms/src/Common/SpaceSaving.h +++ b/dbms/src/Common/SpaceSaving.h @@ -340,4 +340,4 @@ private: size_t m_capacity; }; -}; +} diff --git a/dbms/src/Common/ZooKeeper/KeeperException.h b/dbms/src/Common/ZooKeeper/KeeperException.h index fe6c7aedeff..f2c88ea82fd 100644 --- a/dbms/src/Common/ZooKeeper/KeeperException.h +++ b/dbms/src/Common/ZooKeeper/KeeperException.h @@ -50,4 +50,4 @@ private: static size_t getFailedOpIndex(int32_t code, const Responses & responses); }; -}; +} diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperHolder.h b/dbms/src/Common/ZooKeeper/ZooKeeperHolder.h index 3c6d2ced2e6..d9e86d86406 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeperHolder.h +++ b/dbms/src/Common/ZooKeeper/ZooKeeperHolder.h @@ -83,4 +83,4 @@ void ZooKeeperHolder::init(Args&&... 
args) using ZooKeeperHolderPtr = std::shared_ptr; -}; +} diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h index 35d97c04025..5c027753371 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h +++ b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h @@ -646,4 +646,4 @@ private: CurrentMetrics::Increment active_session_metric_increment{CurrentMetrics::ZooKeeperSession}; }; -}; +} diff --git a/dbms/src/Common/formatIPv6.cpp b/dbms/src/Common/formatIPv6.cpp index a9eea719e6f..e2a295cb923 100644 --- a/dbms/src/Common/formatIPv6.cpp +++ b/dbms/src/Common/formatIPv6.cpp @@ -77,7 +77,10 @@ void formatIPv6(const unsigned char * src, char *& dst, UInt8 zeroed_tail_bytes_ if (words[i] == 0) { if (cur.base == -1) - cur.base = i, cur.len = 1; + { + cur.base = i; + cur.len = 1; + } else cur.len++; } diff --git a/dbms/src/Common/localBackup.cpp b/dbms/src/Common/localBackup.cpp index bd12d5509cc..aec7da1f65d 100644 --- a/dbms/src/Common/localBackup.cpp +++ b/dbms/src/Common/localBackup.cpp @@ -108,7 +108,7 @@ void localBackup(const Poco::Path & source_path, const Poco::Path & destination_ continue; } - catch (const Poco::FileNotFoundException & e) + catch (const Poco::FileNotFoundException &) { ++try_no; if (try_no == max_tries) diff --git a/dbms/src/Common/tests/CMakeLists.txt b/dbms/src/Common/tests/CMakeLists.txt index f1605a13447..f4d01e85bd2 100644 --- a/dbms/src/Common/tests/CMakeLists.txt +++ b/dbms/src/Common/tests/CMakeLists.txt @@ -60,7 +60,7 @@ add_executable (space_saving space_saving.cpp) target_link_libraries (space_saving clickhouse_common_io) add_executable (integer_hash_tables_and_hashes integer_hash_tables_and_hashes.cpp) -target_include_directories (integer_hash_tables_and_hashes BEFORE PRIVATE ${SPARCEHASH_INCLUDE_DIR}) +target_include_directories (integer_hash_tables_and_hashes SYSTEM BEFORE PRIVATE ${SPARCEHASH_INCLUDE_DIR}) target_link_libraries (integer_hash_tables_and_hashes clickhouse_common_io) add_executable (allocator allocator.cpp) diff --git a/dbms/src/Core/Field.h b/dbms/src/Core/Field.h index 0e1a9ca6c0a..c8811fd0f29 100644 --- a/dbms/src/Core/Field.h +++ b/dbms/src/Core/Field.h @@ -25,7 +25,7 @@ namespace ErrorCodes class Field; using Array = std::vector; using TupleBackend = std::vector; -STRONG_TYPEDEF(TupleBackend, Tuple); /// Array and Tuple are different types with equal representation inside Field. +STRONG_TYPEDEF(TupleBackend, Tuple) /// Array and Tuple are different types with equal representation inside Field. /** 32 is enough. Round number is used for alignment and for better arithmetic inside std::vector. 
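The formatIPv6 hunk above replaces a comma-operator expression under an unbraced if ("cur.base = i, cur.len = 1;") with an explicit block. Behavior is unchanged, but the braced form cannot be misread as two independently guarded statements and avoids warnings such as Clang's -Wcomma. A compilable sketch, with a hypothetical struct standing in for the real zero-run bookkeeping:

#include <cstdio>

/// Hypothetical stand-in for the zero-run tracking in formatIPv6.
struct Run
{
    int base = -1; /// start index of the current run of zero words
    int len = 0;   /// length of that run
};

int main()
{
    Run cur;
    int i = 3;

    if (cur.base == -1)
    {
        cur.base = i; /// previously "cur.base = i, cur.len = 1;" on one guarded line
        cur.len = 1;
    }
    else
        ++cur.len;

    std::printf("base=%d len=%d\n", cur.base, cur.len);
    return 0;
}

The neighboring hunks follow the same warning-driven logic: exception variables that are caught but never read lose their names ("catch (const Poco::Exception &)"), which satisfies Clang's -Wunused-exception-parameter without changing what the handler does.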
diff --git a/dbms/src/Core/UUID.h b/dbms/src/Core/UUID.h index bd6d330255f..4f8fdced814 100644 --- a/dbms/src/Core/UUID.h +++ b/dbms/src/Core/UUID.h @@ -6,6 +6,6 @@ namespace DB { -STRONG_TYPEDEF(UInt128, UUID); +STRONG_TYPEDEF(UInt128, UUID) } diff --git a/dbms/src/DataStreams/AggregatingSortedBlockInputStream.cpp b/dbms/src/DataStreams/AggregatingSortedBlockInputStream.cpp index 7b431e206e9..0697ec8167c 100644 --- a/dbms/src/DataStreams/AggregatingSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/AggregatingSortedBlockInputStream.cpp @@ -63,7 +63,7 @@ Block AggregatingSortedBlockInputStream::readImpl() for (size_t i = 0, size = columns_to_aggregate.size(); i < size; ++i) columns_to_aggregate[i] = typeid_cast(merged_columns[column_numbers_to_aggregate[i]].get()); - merge(merged_columns, queue); + merge(merged_columns, queue_without_collation); return header.cloneWithColumns(std::move(merged_columns)); } diff --git a/dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp b/dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp index 91e68bea75f..72df102a57f 100644 --- a/dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp @@ -102,7 +102,7 @@ Block CollapsingSortedBlockInputStream::readImpl() if (merged_columns.empty()) return {}; - merge(merged_columns, queue); + merge(merged_columns, queue_without_collation); return header.cloneWithColumns(std::move(merged_columns)); } diff --git a/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp b/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp index 0a2273d45a9..f4424b29a11 100644 --- a/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp @@ -102,7 +102,7 @@ Block GraphiteRollupSortedBlockInputStream::readImpl() if (merged_columns.empty()) return Block(); - merge(merged_columns, queue); + merge(merged_columns, queue_without_collation); return header.cloneWithColumns(std::move(merged_columns)); } diff --git a/dbms/src/DataStreams/IProfilingBlockInputStream.cpp b/dbms/src/DataStreams/IProfilingBlockInputStream.cpp index 19842f74f03..5c4cf2aa219 100644 --- a/dbms/src/DataStreams/IProfilingBlockInputStream.cpp +++ b/dbms/src/DataStreams/IProfilingBlockInputStream.cpp @@ -188,7 +188,7 @@ static bool handleOverflowMode(OverflowMode mode, const String & message, int co default: throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR); } -}; +} bool IProfilingBlockInputStream::checkTimeLimit() diff --git a/dbms/src/DataStreams/MergeSortingBlockInputStream.cpp b/dbms/src/DataStreams/MergeSortingBlockInputStream.cpp index 432bb0216c7..b062c679c0a 100644 --- a/dbms/src/DataStreams/MergeSortingBlockInputStream.cpp +++ b/dbms/src/DataStreams/MergeSortingBlockInputStream.cpp @@ -183,7 +183,7 @@ MergeSortingBlocksBlockInputStream::MergeSortingBlocksBlockInputStream( if (!has_collation) { for (size_t i = 0; i < cursors.size(); ++i) - queue.push(SortCursor(&cursors[i])); + queue_without_collation.push(SortCursor(&cursors[i])); } else { @@ -206,7 +206,7 @@ Block MergeSortingBlocksBlockInputStream::readImpl() } return !has_collation - ? mergeImpl(queue) + ? 
mergeImpl(queue_without_collation) : mergeImpl(queue_with_collation); } diff --git a/dbms/src/DataStreams/MergeSortingBlockInputStream.h b/dbms/src/DataStreams/MergeSortingBlockInputStream.h index c52806f1b56..ad6d81984cc 100644 --- a/dbms/src/DataStreams/MergeSortingBlockInputStream.h +++ b/dbms/src/DataStreams/MergeSortingBlockInputStream.h @@ -55,7 +55,7 @@ private: bool has_collation = false; - std::priority_queue queue; + std::priority_queue queue_without_collation; std::priority_queue queue_with_collation; /** Two different cursors are supported - with and without Collation. diff --git a/dbms/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp b/dbms/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp index e6445f03155..334c65d0c40 100644 --- a/dbms/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp +++ b/dbms/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp @@ -320,7 +320,7 @@ void MergingAggregatedMemoryEfficientBlockInputStream::mergeThread(MemoryTracker * - or, if no next blocks, set 'exhausted' flag. */ { - std::lock_guard lock(parallel_merge_data->get_next_blocks_mutex); + std::lock_guard lock_next_blocks(parallel_merge_data->get_next_blocks_mutex); if (parallel_merge_data->exhausted || parallel_merge_data->finish) break; @@ -330,7 +330,7 @@ void MergingAggregatedMemoryEfficientBlockInputStream::mergeThread(MemoryTracker if (!blocks_to_merge || blocks_to_merge->empty()) { { - std::unique_lock lock(parallel_merge_data->merged_blocks_mutex); + std::unique_lock lock_merged_blocks(parallel_merge_data->merged_blocks_mutex); parallel_merge_data->exhausted = true; } @@ -344,9 +344,9 @@ void MergingAggregatedMemoryEfficientBlockInputStream::mergeThread(MemoryTracker : blocks_to_merge->front().info.bucket_num; { - std::unique_lock lock(parallel_merge_data->merged_blocks_mutex); + std::unique_lock lock_merged_blocks(parallel_merge_data->merged_blocks_mutex); - parallel_merge_data->have_space.wait(lock, [this] + parallel_merge_data->have_space.wait(lock_merged_blocks, [this] { return parallel_merge_data->merged_blocks.size() < merging_threads || parallel_merge_data->finish; diff --git a/dbms/src/DataStreams/MergingSortedBlockInputStream.cpp b/dbms/src/DataStreams/MergingSortedBlockInputStream.cpp index 62b32330679..8dd929759ca 100644 --- a/dbms/src/DataStreams/MergingSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/MergingSortedBlockInputStream.cpp @@ -58,7 +58,7 @@ void MergingSortedBlockInputStream::init(MutableColumns & merged_columns) if (has_collation) initQueue(queue_with_collation); else - initQueue(queue); + initQueue(queue_without_collation); } /// Let's check that all source blocks have the same structure. 
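The renames in the files above are behavior-preserving shadowing fixes: the member queue becomes queue_without_collation (pairing it with the existing queue_with_collation), and each guard in the merge thread gets a name tied to the mutex it holds. Before the rename every guard was simply called "lock", which is ambiguous at best and, where the scopes nest, a -Wshadow hit. A compilable sketch with hypothetical mutexes:

#include <mutex>

std::mutex next_blocks_mutex;   /// hypothetical stand-ins for the
std::mutex merged_blocks_mutex; /// parallel_merge_data mutexes

void merge_step()
{
    std::lock_guard<std::mutex> lock_next_blocks(next_blocks_mutex);

    {
        /// Previously also named "lock", shadowing the guard above.
        std::lock_guard<std::mutex> lock_merged_blocks(merged_blocks_mutex);
        /// ... hand merged blocks over while holding both mutexes ...
    }
}

int main()
{
    merge_step();
    return 0;
}

Descriptive guard names also make it obvious at each wait() or notify site which mutex protects the condition being checked, which is the harder property to audit once two locks are in flight.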
@@ -105,7 +105,7 @@ Block MergingSortedBlockInputStream::readImpl() if (has_collation) merge(merged_columns, queue_with_collation); else - merge(merged_columns, queue); + merge(merged_columns, queue_without_collation); return header.cloneWithColumns(std::move(merged_columns)); } @@ -200,7 +200,7 @@ void MergingSortedBlockInputStream::merge(MutableColumns & merged_columns, std:: // std::cerr << "copied columns\n"; - size_t merged_rows = merged_columns.at(0)->size(); + merged_rows = merged_columns.at(0)->size(); if (limit && total_merged_rows + merged_rows > limit) { diff --git a/dbms/src/DataStreams/MergingSortedBlockInputStream.h b/dbms/src/DataStreams/MergingSortedBlockInputStream.h index 1da53d21fa4..2a3fb7f5133 100644 --- a/dbms/src/DataStreams/MergingSortedBlockInputStream.h +++ b/dbms/src/DataStreams/MergingSortedBlockInputStream.h @@ -2,17 +2,8 @@ #include -#ifdef __clang__ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" -#endif - #include -#ifdef __clang__ - #pragma clang diagnostic pop -#endif - #include #include @@ -161,7 +152,7 @@ protected: CursorImpls cursors; using Queue = std::priority_queue; - Queue queue; + Queue queue_without_collation; using QueueWithCollation = std::priority_queue; QueueWithCollation queue_with_collation; diff --git a/dbms/src/DataStreams/RemoteBlockInputStream.cpp b/dbms/src/DataStreams/RemoteBlockInputStream.cpp index 0aa129e264f..349c74732d6 100644 --- a/dbms/src/DataStreams/RemoteBlockInputStream.cpp +++ b/dbms/src/DataStreams/RemoteBlockInputStream.cpp @@ -58,21 +58,21 @@ RemoteBlockInputStream::RemoteBlockInputStream( create_multiplexed_connections = [this, pool, throttler]() { - const Settings & settings = context.getSettingsRef(); + const Settings & current_settings = context.getSettingsRef(); std::vector connections; if (main_table) { - auto try_results = pool->getManyChecked(&settings, pool_mode, *main_table); + auto try_results = pool->getManyChecked(¤t_settings, pool_mode, *main_table); connections.reserve(try_results.size()); for (auto & try_result : try_results) connections.emplace_back(std::move(try_result.entry)); } else - connections = pool->getMany(&settings, pool_mode); + connections = pool->getMany(¤t_settings, pool_mode); return std::make_unique( - std::move(connections), settings, throttler, append_extra_info); + std::move(connections), current_settings, throttler, append_extra_info); }; } diff --git a/dbms/src/DataStreams/ReplacingSortedBlockInputStream.cpp b/dbms/src/DataStreams/ReplacingSortedBlockInputStream.cpp index db8efcd162f..d0298ac77fe 100644 --- a/dbms/src/DataStreams/ReplacingSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/ReplacingSortedBlockInputStream.cpp @@ -44,7 +44,7 @@ Block ReplacingSortedBlockInputStream::readImpl() if (merged_columns.empty()) return Block(); - merge(merged_columns, queue); + merge(merged_columns, queue_without_collation); return header.cloneWithColumns(std::move(merged_columns)); } diff --git a/dbms/src/DataStreams/SummingSortedBlockInputStream.cpp b/dbms/src/DataStreams/SummingSortedBlockInputStream.cpp index 18c122ddc1a..f87b2d63b11 100644 --- a/dbms/src/DataStreams/SummingSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/SummingSortedBlockInputStream.cpp @@ -286,7 +286,7 @@ Block SummingSortedBlockInputStream::readImpl() desc.merged_column = header.safeGetByPosition(desc.column_numbers[0]).column->cloneEmpty(); } - merge(merged_columns, queue); + merge(merged_columns, queue_without_collation); Block res = 
header.cloneWithColumns(std::move(merged_columns)); /// Place aggregation results into block. diff --git a/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp b/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp index 863e021b279..fc24bef60bc 100644 --- a/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp @@ -76,7 +76,7 @@ Block VersionedCollapsingSortedBlockInputStream::readImpl() if (merged_columns.empty()) return {}; - merge(merged_columns, queue); + merge(merged_columns, queue_without_collation); return header.cloneWithColumns(std::move(merged_columns)); } diff --git a/dbms/src/DataTypes/DataTypeEnum.cpp b/dbms/src/DataTypes/DataTypeEnum.cpp index bdc27e3f1be..edd3b797602 100644 --- a/dbms/src/DataTypes/DataTypeEnum.cpp +++ b/dbms/src/DataTypes/DataTypeEnum.cpp @@ -97,7 +97,7 @@ DataTypeEnum::DataTypeEnum(const Values & values_) : values{values_} }); fillMaps(); - name = generateName(values); + type_name = generateName(values); } template @@ -145,9 +145,9 @@ template void DataTypeEnum::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings &) const { /// NOTE It would be nice to do without creating a temporary object - at least extract std::string out. - std::string name; - readEscapedString(name, istr); - static_cast(column).getData().push_back(getValue(StringRef(name))); + std::string field_name; + readEscapedString(field_name, istr); + static_cast(column).getData().push_back(getValue(StringRef(field_name))); } template @@ -159,9 +159,9 @@ void DataTypeEnum::serializeTextQuoted(const IColumn & column, size_t row_ template void DataTypeEnum::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings &) const { - std::string name; - readQuotedStringWithSQLStyle(name, istr); - static_cast(column).getData().push_back(getValue(StringRef(name))); + std::string field_name; + readQuotedStringWithSQLStyle(field_name, istr); + static_cast(column).getData().push_back(getValue(StringRef(field_name))); } template @@ -179,9 +179,9 @@ void DataTypeEnum::serializeTextXML(const IColumn & column, size_t row_num template void DataTypeEnum::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings &) const { - std::string name; - readJSONString(name, istr); - static_cast(column).getData().push_back(getValue(StringRef(name))); + std::string field_name; + readJSONString(field_name, istr); + static_cast(column).getData().push_back(getValue(StringRef(field_name))); } template @@ -193,9 +193,9 @@ void DataTypeEnum::serializeTextCSV(const IColumn & column, size_t row_num template void DataTypeEnum::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { - std::string name; - readCSVString(name, istr, settings.csv); - static_cast(column).getData().push_back(getValue(StringRef(name))); + std::string field_name; + readCSVString(field_name, istr, settings.csv); + static_cast(column).getData().push_back(getValue(StringRef(field_name))); } template @@ -237,7 +237,7 @@ void DataTypeEnum::insertDefaultInto(IColumn & column) const template bool DataTypeEnum::equals(const IDataType & rhs) const { - return typeid(rhs) == typeid(*this) && name == static_cast &>(rhs).name; + return typeid(rhs) == typeid(*this) && type_name == static_cast &>(rhs).type_name; } @@ -346,14 +346,14 @@ static DataTypePtr create(const ASTPtr & arguments) throw Exception("Elements of Enum data type must be of form: 'name' 
= number, where name is string literal and number is an integer", ErrorCodes::UNEXPECTED_AST_STRUCTURE); - const String & name = name_literal->value.get(); + const String & field_name = name_literal->value.get(); const auto value = value_literal->value.get::Type>(); if (value > std::numeric_limits::max() || value < std::numeric_limits::min()) - throw Exception{"Value " + toString(value) + " for element '" + name + "' exceeds range of " + EnumName::value, + throw Exception{"Value " + toString(value) + " for element '" + field_name + "' exceeds range of " + EnumName::value, ErrorCodes::ARGUMENT_OUT_OF_BOUND}; - values.emplace_back(name, value); + values.emplace_back(field_name, value); } return std::make_shared(values); diff --git a/dbms/src/DataTypes/DataTypeEnum.h b/dbms/src/DataTypes/DataTypeEnum.h index c9abddb742c..14be08aae10 100644 --- a/dbms/src/DataTypes/DataTypeEnum.h +++ b/dbms/src/DataTypes/DataTypeEnum.h @@ -53,7 +53,7 @@ private: Values values; NameToValueMap name_to_value_map; ValueToNameMap value_to_name_map; - std::string name; + std::string type_name; static std::string generateName(const Values & values); void fillMaps(); @@ -62,7 +62,7 @@ public: explicit DataTypeEnum(const Values & values_); const Values & getValues() const { return values; } - std::string getName() const override { return name; } + std::string getName() const override { return type_name; } const char * getFamilyName() const override; const StringRef & getNameForValue(const FieldType & value) const @@ -74,11 +74,11 @@ public: return it->second; } - FieldType getValue(StringRef name) const + FieldType getValue(StringRef field_name) const { - const auto it = name_to_value_map.find(name); + const auto it = name_to_value_map.find(field_name); if (it == std::end(name_to_value_map)) - throw Exception{"Unknown element '" + name.toString() + "' for type " + getName(), ErrorCodes::LOGICAL_ERROR}; + throw Exception{"Unknown element '" + field_name.toString() + "' for type " + getName(), ErrorCodes::LOGICAL_ERROR}; return it->second; } diff --git a/dbms/src/DataTypes/NestedUtils.h b/dbms/src/DataTypes/NestedUtils.h index 3bbc8845be2..e3513562b37 100644 --- a/dbms/src/DataTypes/NestedUtils.h +++ b/dbms/src/DataTypes/NestedUtils.h @@ -22,6 +22,6 @@ namespace Nested /// Collect Array columns in a form of `column_name.element_name` to single Array(Tuple(...)) column. 
NamesAndTypesList collect(const NamesAndTypesList & names_and_types); -}; +} } diff --git a/dbms/src/Databases/DatabaseDictionary.cpp b/dbms/src/Databases/DatabaseDictionary.cpp index 4d121ddbe0f..04fbd3b24a6 100644 --- a/dbms/src/Databases/DatabaseDictionary.cpp +++ b/dbms/src/Databases/DatabaseDictionary.cpp @@ -39,15 +39,15 @@ Tables DatabaseDictionary::loadTables() Tables tables; for (const auto & pair : dictionaries) { - const std::string & name = pair.first; - if (deleted_tables.count(name)) + const std::string & dict_name = pair.first; + if (deleted_tables.count(dict_name)) continue; auto dict_ptr = std::static_pointer_cast(pair.second.loadable); if (dict_ptr) { const DictionaryStructure & dictionary_structure = dict_ptr->getStructure(); auto columns = StorageDictionary::getNamesAndTypes(dictionary_structure); - tables[name] = StorageDictionary::create(name, ColumnsDescription{columns}, dictionary_structure, name); + tables[dict_name] = StorageDictionary::create(dict_name, ColumnsDescription{columns}, dictionary_structure, dict_name); } } diff --git a/dbms/src/Dictionaries/CacheDictionary.cpp b/dbms/src/Dictionaries/CacheDictionary.cpp index 32587f6ed41..6b378faf928 100644 --- a/dbms/src/Dictionaries/CacheDictionary.cpp +++ b/dbms/src/Dictionaries/CacheDictionary.cpp @@ -101,12 +101,12 @@ void CacheDictionary::isInImpl( { /// Transform all children to parents until ancestor id or null_value will be reached. - size_t size = out.size(); - memset(out.data(), 0xFF, size); /// 0xFF means "not calculated" + size_t out_size = out.size(); + memset(out.data(), 0xFF, out_size); /// 0xFF means "not calculated" const auto null_value = std::get(hierarchical_attribute->null_values); - PaddedPODArray children(size); + PaddedPODArray children(out_size); PaddedPODArray parents(child_ids.begin(), child_ids.end()); while (true) @@ -115,7 +115,7 @@ void CacheDictionary::isInImpl( size_t parents_idx = 0; size_t new_children_idx = 0; - while (out_idx < size) + while (out_idx < out_size) { /// Already calculated if (out[out_idx] != 0xFF) @@ -203,7 +203,7 @@ void CacheDictionary::isInConstantVector( } /// Assuming short hierarchy, so linear search is Ok. 
- for (size_t i = 0, size = out.size(); i < size; ++i) + for (size_t i = 0, out_size = out.size(); i < out_size; ++i) out[i] = std::find(ancestors.begin(), ancestors.end(), ancestor_ids[i]) != ancestors.end(); } @@ -936,12 +936,12 @@ void CacheDictionary::setAttributeValue(Attribute & attribute, const Key idx, co if (string_ref.data && string_ref.data != null_value_ref.data()) string_arena->free(const_cast(string_ref.data), string_ref.size); - const auto size = string.size(); - if (size != 0) + const auto str_size = string.size(); + if (str_size != 0) { - auto string_ptr = string_arena->alloc(size + 1); - std::copy(string.data(), string.data() + size + 1, string_ptr); - string_ref = StringRef{string_ptr, size}; + auto string_ptr = string_arena->alloc(str_size + 1); + std::copy(string.data(), string.data() + str_size + 1, string_ptr); + string_ref = StringRef{string_ptr, str_size}; } else string_ref = {}; diff --git a/dbms/src/Dictionaries/CatBoostModel.cpp b/dbms/src/Dictionaries/CatBoostModel.cpp index d36ec1ecae0..6714a47e0ed 100644 --- a/dbms/src/Dictionaries/CatBoostModel.cpp +++ b/dbms/src/Dictionaries/CatBoostModel.cpp @@ -307,13 +307,13 @@ private: /// buffer[column_size * cat_features_count] -> char * => cat_features[column_size][cat_features_count] -> char * void fillCatFeaturesBuffer(const char *** cat_features, const char ** buffer, - size_t column_size, size_t cat_features_count) const + size_t column_size, size_t cat_features_count_current) const { for (size_t i = 0; i < column_size; ++i) { *cat_features = buffer; ++cat_features; - buffer += cat_features_count; + buffer += cat_features_count_current; } } @@ -321,7 +321,7 @@ private: /// * CalcModelPredictionFlat if no cat features /// * CalcModelPrediction if all cat features are strings /// * CalcModelPredictionWithHashedCatFeatures if has int cat features. - ColumnPtr evalImpl(const ColumnRawPtrs & columns, size_t float_features_count, size_t cat_features_count, + ColumnPtr evalImpl(const ColumnRawPtrs & columns, size_t float_features_count_current, size_t cat_features_count_current, bool cat_features_are_strings) const { std::string error_msg = "Error occurred while applying CatBoost model: "; @@ -334,12 +334,12 @@ private: PODArray float_features(column_size); auto float_features_buf = float_features.data(); /// Store all float data into single column. float_features is a list of pointers to it. - auto float_features_col = placeNumericColumns(columns, 0, float_features_count, float_features_buf); + auto float_features_col = placeNumericColumns(columns, 0, float_features_count_current, float_features_buf); - if (cat_features_count == 0) + if (cat_features_count_current == 0) { if (!api->CalcModelPredictionFlat(handle->get(), column_size, - float_features_buf, float_features_count, + float_features_buf, float_features_count_current, result_buf, column_size)) { @@ -352,18 +352,18 @@ private: if (cat_features_are_strings) { /// cat_features_holder stores pointers to ColumnString data or fixed_strings_data. 
- PODArray cat_features_holder(cat_features_count * column_size); + PODArray cat_features_holder(cat_features_count_current * column_size); PODArray cat_features(column_size); auto cat_features_buf = cat_features.data(); - fillCatFeaturesBuffer(cat_features_buf, cat_features_holder.data(), column_size, cat_features_count); + fillCatFeaturesBuffer(cat_features_buf, cat_features_holder.data(), column_size, cat_features_count_current); /// Fixed strings are stored without termination zero, so have to copy data into fixed_strings_data. - auto fixed_strings_data = placeStringColumns(columns, float_features_count, - cat_features_count, cat_features_holder.data()); + auto fixed_strings_data = placeStringColumns(columns, float_features_count_current, + cat_features_count_current, cat_features_holder.data()); if (!api->CalcModelPrediction(handle->get(), column_size, - float_features_buf, float_features_count, - cat_features_buf, cat_features_count, + float_features_buf, float_features_count_current, + cat_features_buf, cat_features_count_current, result_buf, column_size)) { throw Exception(error_msg + api->GetErrorString(), ErrorCodes::CANNOT_APPLY_CATBOOST_MODEL); @@ -373,13 +373,13 @@ private: { PODArray cat_features(column_size); auto cat_features_buf = cat_features.data(); - auto cat_features_col = placeNumericColumns(columns, float_features_count, - cat_features_count, cat_features_buf); - calcHashes(columns, float_features_count, cat_features_count, cat_features_buf); + auto cat_features_col = placeNumericColumns(columns, float_features_count_current, + cat_features_count_current, cat_features_buf); + calcHashes(columns, float_features_count_current, cat_features_count_current, cat_features_buf); if (!api->CalcModelPredictionWithHashedCatFeatures( handle->get(), column_size, - float_features_buf, float_features_count, - cat_features_buf, cat_features_count, + float_features_buf, float_features_count_current, + cat_features_buf, cat_features_count_current, result_buf, column_size)) { throw Exception(error_msg + api->GetErrorString(), ErrorCodes::CANNOT_APPLY_CATBOOST_MODEL); @@ -453,7 +453,7 @@ CatBoostModel::CatBoostModel(std::string name_, std::string model_path_, std::st { try { - init(lib_path); + init(); } catch (...) 
{ @@ -463,7 +463,7 @@ CatBoostModel::CatBoostModel(std::string name_, std::string model_path_, std::st creation_time = std::chrono::system_clock::now(); } -void CatBoostModel::init(const std::string & lib_path) +void CatBoostModel::init() { api_provider = getCatBoostWrapperHolder(lib_path); api = &api_provider->getAPI(); diff --git a/dbms/src/Dictionaries/CatBoostModel.h b/dbms/src/Dictionaries/CatBoostModel.h index 735a9f4a7d3..b6a937fe048 100644 --- a/dbms/src/Dictionaries/CatBoostModel.h +++ b/dbms/src/Dictionaries/CatBoostModel.h @@ -80,7 +80,7 @@ private: std::chrono::time_point creation_time; std::exception_ptr creation_exception; - void init(const std::string & lib_path); + void init(); }; } diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp index 16e05333cf0..22cf1ae33c2 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp @@ -28,12 +28,12 @@ void ComplexKeyCacheDictionary::setAttributeValue(Attribute & attribute, const s if (string_ref.data && string_ref.data != null_value_ref.data()) string_arena->free(const_cast(string_ref.data), string_ref.size); - const auto size = string.size(); - if (size != 0) + const auto str_size = string.size(); + if (str_size != 0) { - auto string_ptr = string_arena->alloc(size + 1); - std::copy(string.data(), string.data() + size + 1, string_ptr); - string_ref = StringRef{string_ptr, size}; + auto string_ptr = string_arena->alloc(str_size + 1); + std::copy(string.data(), string.data() + str_size + 1, string_ptr); + string_ref = StringRef{string_ptr, str_size}; } else string_ref = {}; diff --git a/dbms/src/Dictionaries/DictionaryBlockInputStream.h b/dbms/src/Dictionaries/DictionaryBlockInputStream.h index 1cefd9e3eeb..12cfcca58d9 100644 --- a/dbms/src/Dictionaries/DictionaryBlockInputStream.h +++ b/dbms/src/Dictionaries/DictionaryBlockInputStream.h @@ -223,47 +223,47 @@ Block DictionaryBlockInputStream::getBlock(size_t start, si template template void DictionaryBlockInputStream::callGetter( - DictionaryGetter getter, const PaddedPODArray & ids, + DictionaryGetter getter, const PaddedPODArray & ids_to_fill, const Columns & /*keys*/, const DataTypes & /*data_types*/, - Container & container, const DictionaryAttribute & attribute, const DictionaryType & dictionary) const + Container & container, const DictionaryAttribute & attribute, const DictionaryType & dict) const { - (dictionary.*getter)(attribute.name, ids, container); + (dict.*getter)(attribute.name, ids_to_fill, container); } template template void DictionaryBlockInputStream::callGetter( - DictionaryStringGetter getter, const PaddedPODArray & ids, + DictionaryStringGetter getter, const PaddedPODArray & ids_to_fill, const Columns & /*keys*/, const DataTypes & /*data_types*/, - Container & container, const DictionaryAttribute & attribute, const DictionaryType & dictionary) const + Container & container, const DictionaryAttribute & attribute, const DictionaryType & dict) const { - (dictionary.*getter)(attribute.name, ids, container); + (dict.*getter)(attribute.name, ids_to_fill, container); } template template void DictionaryBlockInputStream::callGetter( - GetterByKey getter, const PaddedPODArray & /*ids*/, + GetterByKey getter, const PaddedPODArray & /*ids_to_fill*/, const Columns & keys, const DataTypes & data_types, - Container & container, const DictionaryAttribute & attribute, const 
DictionaryType & dictionary) const + Container & container, const DictionaryAttribute & attribute, const DictionaryType & dict) const { - (dictionary.*getter)(attribute.name, keys, data_types, container); + (dict.*getter)(attribute.name, keys, data_types, container); } template template void DictionaryBlockInputStream::callGetter( - StringGetterByKey getter, const PaddedPODArray & /*ids*/, + StringGetterByKey getter, const PaddedPODArray & /*ids_to_fill*/, const Columns & keys, const DataTypes & data_types, - Container & container, const DictionaryAttribute & attribute, const DictionaryType & dictionary) const + Container & container, const DictionaryAttribute & attribute, const DictionaryType & dict) const { - (dictionary.*getter)(attribute.name, keys, data_types, container); + (dict.*getter)(attribute.name, keys, data_types, container); } template template