Mirror of https://github.com/ClickHouse/ClickHouse.git

Commit 0c006a9c96: Merge branch 'master' into one_more_error_clickhouse_test
@@ -468,7 +468,7 @@ void BaseDaemon::reloadConfiguration()
      * instead of using files specified in config.xml.
      * (It's convenient to log in console when you start server without any command line parameters.)
      */
-    config_path = config().getString("config-file", "config.xml");
+    config_path = config().getString("config-file", getDefaultConfigFileName());
     DB::ConfigProcessor config_processor(config_path, false, true);
     config_processor.setConfigPath(Poco::Path(config_path).makeParent().toString());
     loaded_config = config_processor.loadConfig(/* allow_zk_includes = */ true);
@@ -516,6 +516,11 @@ std::string BaseDaemon::getDefaultCorePath() const
     return "/opt/cores/";
 }

+std::string BaseDaemon::getDefaultConfigFileName() const
+{
+    return "config.xml";
+}
+
 void BaseDaemon::closeFDs()
 {
 #if defined(OS_FREEBSD) || defined(OS_DARWIN)
@@ -149,6 +149,8 @@ protected:

     virtual std::string getDefaultCorePath() const;

+    virtual std::string getDefaultConfigFileName() const;
+
     std::optional<DB::StatusFile> pid_file;

     std::atomic_bool is_cancelled{false};
contrib/re2 (vendored submodule)
@@ -1 +1 @@
-Subproject commit 7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0
+Subproject commit 13ebb377c6ad763ca61d12dd6f88b1126bd0b911
@@ -1,7 +1,7 @@
 file (READ ${SOURCE_FILENAME} CONTENT)
 string (REGEX REPLACE "using re2::RE2;" "" CONTENT "${CONTENT}")
 string (REGEX REPLACE "using re2::LazyRE2;" "" CONTENT "${CONTENT}")
-string (REGEX REPLACE "namespace re2" "namespace re2_st" CONTENT "${CONTENT}")
+string (REGEX REPLACE "namespace re2 {" "namespace re2_st {" CONTENT "${CONTENT}")
 string (REGEX REPLACE "re2::" "re2_st::" CONTENT "${CONTENT}")
 string (REGEX REPLACE "\"re2/" "\"re2_st/" CONTENT "${CONTENT}")
 string (REGEX REPLACE "(.\\*?_H)" "\\1_ST" CONTENT "${CONTENT}")
@@ -73,7 +73,7 @@ function start_server
     --path "$FASTTEST_DATA"
     --user_files_path "$FASTTEST_DATA/user_files"
     --top_level_domains_path "$FASTTEST_DATA/top_level_domains"
-    --keeper_server.log_storage_path "$FASTTEST_DATA/coordination"
+    --keeper_server.storage_path "$FASTTEST_DATA/coordination"
 )
 clickhouse-server "${opts[@]}" &>> "$FASTTEST_OUTPUT/server.log" &
 server_pid=$!
@@ -376,6 +376,7 @@ function run_tests
     # Depends on LLVM JIT
     01852_jit_if
     01865_jit_comparison_constant_result
+    01871_merge_tree_compile_expressions
 )

 (time clickhouse-test --hung-check -j 8 --order=random --use-skip-list --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" -- "$FASTTEST_FOCUS" 2>&1 ||:) | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt"
@@ -0,0 +1,92 @@
version: '2.3'
services:
    zoo1:
        image: ${image:-yandex/clickhouse-integration-test}
        restart: always
        user: ${user:-}
        volumes:
            - type: bind
              source: ${keeper_binary:-}
              target: /usr/bin/clickhouse
            - type: bind
              source: ${keeper_config_dir1:-}
              target: /etc/clickhouse-keeper
            - type: bind
              source: ${keeper_logs_dir1:-}
              target: /var/log/clickhouse-keeper
            - type: ${keeper_fs:-tmpfs}
              source: ${keeper_db_dir1:-}
              target: /var/lib/clickhouse-keeper
        entrypoint: "clickhouse keeper --config=/etc/clickhouse-keeper/keeper_config1.xml --log-file=/var/log/clickhouse-keeper/clickhouse-keeper.log --errorlog-file=/var/log/clickhouse-keeper/clickhouse-keeper.err.log"
        cap_add:
            - SYS_PTRACE
            - NET_ADMIN
            - IPC_LOCK
            - SYS_NICE
        security_opt:
            - label:disable
        dns_opt:
            - attempts:2
            - timeout:1
            - inet6
            - rotate
    zoo2:
        image: ${image:-yandex/clickhouse-integration-test}
        restart: always
        user: ${user:-}
        volumes:
            - type: bind
              source: ${keeper_binary:-}
              target: /usr/bin/clickhouse
            - type: bind
              source: ${keeper_config_dir2:-}
              target: /etc/clickhouse-keeper
            - type: bind
              source: ${keeper_logs_dir2:-}
              target: /var/log/clickhouse-keeper
            - type: ${keeper_fs:-tmpfs}
              source: ${keeper_db_dir2:-}
              target: /var/lib/clickhouse-keeper
        entrypoint: "clickhouse keeper --config=/etc/clickhouse-keeper/keeper_config2.xml --log-file=/var/log/clickhouse-keeper/clickhouse-keeper.log --errorlog-file=/var/log/clickhouse-keeper/clickhouse-keeper.err.log"
        cap_add:
            - SYS_PTRACE
            - NET_ADMIN
            - IPC_LOCK
            - SYS_NICE
        security_opt:
            - label:disable
        dns_opt:
            - attempts:2
            - timeout:1
            - inet6
            - rotate
    zoo3:
        image: ${image:-yandex/clickhouse-integration-test}
        restart: always
        user: ${user:-}
        volumes:
            - type: bind
              source: ${keeper_binary:-}
              target: /usr/bin/clickhouse
            - type: bind
              source: ${keeper_config_dir3:-}
              target: /etc/clickhouse-keeper
            - type: bind
              source: ${keeper_logs_dir3:-}
              target: /var/log/clickhouse-keeper
            - type: ${keeper_fs:-tmpfs}
              source: ${keeper_db_dir3:-}
              target: /var/lib/clickhouse-keeper
        entrypoint: "clickhouse keeper --config=/etc/clickhouse-keeper/keeper_config3.xml --log-file=/var/log/clickhouse-keeper/clickhouse-keeper.log --errorlog-file=/var/log/clickhouse-keeper/clickhouse-keeper.err.log"
        cap_add:
            - SYS_PTRACE
            - NET_ADMIN
            - IPC_LOCK
            - SYS_NICE
        security_opt:
            - label:disable
        dns_opt:
            - attempts:2
            - timeout:1
            - inet6
            - rotate
@@ -183,7 +183,7 @@ CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9
 #### Ограничения {#limitations}
 * hadoop\_security\_kerberos\_ticket\_cache\_path могут быть определены только на глобальном уровне

-## Поддержика Kerberos {#kerberos-support}
+## Поддержка Kerberos {#kerberos-support}

 Если hadoop\_security\_authentication параметр имеет значение 'kerberos', ClickHouse аутентифицируется с помощью Kerberos.
 [Расширенные параметры](#clickhouse-extras) и hadoop\_security\_kerberos\_ticket\_cache\_path помогают сделать это.
@@ -5,9 +5,9 @@ toc_priority: 61
 toc_title: "\u95F4\u9694"
 ---

-# 间隔 {#data-type-interval}
+# Interval类型 {#data-type-interval}

-表示时间和日期间隔的数据类型族。 由此产生的类型 [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) 接线员
+表示时间和日期间隔的数据类型家族。 [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) 运算的结果类型。

 !!! warning "警告"
     `Interval` 数据类型值不能存储在表中。
@@ -15,7 +15,7 @@ toc_title: "\u95F4\u9694"
 结构:

 - 时间间隔作为无符号整数值。
-- 间隔的类型。
+- 时间间隔的类型。

 支持的时间间隔类型:

@@ -28,7 +28,7 @@ toc_title: "\u95F4\u9694"
 - `QUARTER`
 - `YEAR`

-对于每个间隔类型,都有一个单独的数据类型。 例如, `DAY` 间隔对应于 `IntervalDay` 数据类型:
+对于每个时间间隔类型,都有一个单独的数据类型。 例如, `DAY` 间隔对应于 `IntervalDay` 数据类型:

 ``` sql
 SELECT toTypeName(INTERVAL 4 DAY)
@@ -42,7 +42,7 @@ SELECT toTypeName(INTERVAL 4 DAY)

 ## 使用说明 {#data-type-interval-usage-remarks}

-您可以使用 `Interval`-在算术运算类型值 [日期](../../../sql-reference/data-types/date.md) 和 [日期时间](../../../sql-reference/data-types/datetime.md)-类型值。 例如,您可以将4天添加到当前时间:
+您可以在与 [日期](../../../sql-reference/data-types/date.md) 和 [日期时间](../../../sql-reference/data-types/datetime.md) 类型值的算术运算中使用 `Interval` 类型值。 例如,您可以将4天添加到当前时间:

 ``` sql
 SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY
@@ -54,9 +54,9 @@ SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY
 └─────────────────────┴───────────────────────────────┘
 ```

-不同类型的间隔不能合并。 你不能使用间隔,如 `4 DAY 1 HOUR`. 以小于或等于间隔的最小单位的单位指定间隔,例如,间隔 `1 day and an hour` 间隔可以表示为 `25 HOUR` 或 `90000 SECOND`.
+不同类型的间隔不能合并。 你不能使用诸如 `4 DAY 1 HOUR` 的时间间隔. 以小于或等于时间间隔最小单位的单位来指定间隔,例如,时间间隔 `1 day and an hour` 可以表示为 `25 HOUR` 或 `90000 SECOND`.

-你不能执行算术运算 `Interval`-类型值,但你可以添加不同类型的时间间隔,因此值 `Date` 或 `DateTime` 数据类型。 例如:
+你不能对 `Interval` 类型的值执行算术运算,但你可以向 `Date` 或 `DateTime` 数据类型的值添加不同类型的时间间隔,例如:

 ``` sql
 SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR
@@ -81,5 +81,5 @@ Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argu

 ## 另请参阅 {#see-also}

-- [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) 接线员
+- [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) 操作
 - [toInterval](../../../sql-reference/functions/type-conversion-functions.md#function-tointerval) 类型转换函数
@@ -47,6 +47,9 @@ option (ENABLE_CLICKHOUSE_LIBRARY_BRIDGE "HTTP-server working like a proxy to Li
 option (ENABLE_CLICKHOUSE_GIT_IMPORT "A tool to analyze Git repositories"
     ${ENABLE_CLICKHOUSE_ALL})

+
+option (ENABLE_CLICKHOUSE_KEEPER "ClickHouse alternative to ZooKeeper" ${ENABLE_CLICKHOUSE_ALL})
+
 if (CLICKHOUSE_SPLIT_BINARY)
     option(ENABLE_CLICKHOUSE_INSTALL "Install ClickHouse without .deb/.rpm/.tgz packages (having the binary only)" OFF)
 else ()
@@ -134,6 +137,12 @@ else()
     message(STATUS "ClickHouse git-import: OFF")
 endif()

+if (ENABLE_CLICKHOUSE_KEEPER)
+    message(STATUS "ClickHouse keeper mode: ON")
+else()
+    message(STATUS "ClickHouse keeper mode: OFF")
+endif()
+
 if(NOT (MAKE_STATIC_LIBRARIES OR SPLIT_SHARED_LIBRARIES))
     set(CLICKHOUSE_ONE_SHARED ON)
 endif()
@@ -189,6 +198,54 @@ macro(clickhouse_program_add name)
     clickhouse_program_add_executable(${name})
 endmacro()

+# Embed default config files as a resource into the binary.
+# This is needed for two purposes:
+# 1. Allow to run the binary without download of any other files.
+# 2. Allow to implement "sudo clickhouse install" tool.
+#
+# Arguments: target (server, client, keeper, etc.) and list of files
+#
+# Also dependency on TARGET_FILE is required, look at examples in programs/server and programs/keeper
+macro(clickhouse_embed_binaries)
+    # TODO We actually need this on Mac, FreeBSD.
+    if (OS_LINUX)
+
+        set(arguments_list "${ARGN}")
+        list(GET arguments_list 0 target)
+
+        # for some reason cmake iterates loop including <stop>
+        math(EXPR arguments_count "${ARGC}-1")
+
+        foreach(RESOURCE_POS RANGE 1 "${arguments_count}")
+            list(GET arguments_list "${RESOURCE_POS}" RESOURCE_FILE)
+            set(RESOURCE_OBJ ${RESOURCE_FILE}.o)
+            set(RESOURCE_OBJS ${RESOURCE_OBJS} ${RESOURCE_OBJ})
+
+            # https://stackoverflow.com/questions/14776463/compile-and-add-an-object-file-from-a-binary-with-cmake
+            # PPC64LE fails to do this with objcopy, use ld or lld instead
+            if (ARCH_PPC64LE)
+                add_custom_command(OUTPUT ${RESOURCE_OBJ}
+                    COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ${CMAKE_LINKER} -m elf64lppc -r -b binary -o "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}" ${RESOURCE_FILE})
+            else()
+                add_custom_command(OUTPUT ${RESOURCE_OBJ}
+                    COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ${OBJCOPY_PATH} -I binary ${OBJCOPY_ARCH_OPTIONS} ${RESOURCE_FILE} "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}"
+                    COMMAND ${OBJCOPY_PATH} --rename-section .data=.rodata,alloc,load,readonly,data,contents
+                        "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}" "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}")
+            endif()
+            set_source_files_properties(${RESOURCE_OBJ} PROPERTIES EXTERNAL_OBJECT true GENERATED true)
+        endforeach()
+
+        add_library(clickhouse_${target}_configs STATIC ${RESOURCE_OBJS})
+        set_target_properties(clickhouse_${target}_configs PROPERTIES LINKER_LANGUAGE C)
+
+        # whole-archive prevents symbols from being discarded for unknown reason
+        # CMake can shuffle each of target_link_libraries arguments with other
+        # libraries in linker command. To avoid this we hardcode whole-archive
+        # library into single string.
+        add_dependencies(clickhouse-${target}-lib clickhouse_${target}_configs)
+    endif ()
+endmacro()


 add_subdirectory (server)
 add_subdirectory (client)
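As an aside on how these embedded files are consumed: `objcopy -I binary` (or `ld` on PPC64LE) turns each input file into an object whose contents are bracketed by linker-generated `_binary_*_start`/`_binary_*_end` symbols. A minimal sketch of reading such a blob back at runtime follows; the symbol names are an assumption based on objcopy's default mangling for a file named `config.xml`, and ClickHouse's actual accessor is the `getResource()` helper that appears in the ConfigProcessor hunk later in this diff.

```cpp
#include <cstddef>
#include <string_view>

// Symbols emitted by `objcopy -I binary` for an input file config.xml
// (assumed mangling: non-alphanumeric characters become underscores).
// This only links when such an object file is actually linked in.
extern "C" const char _binary_config_xml_start[];
extern "C" const char _binary_config_xml_end[];

/// Return the embedded file as a read-only view; no allocation needed,
/// since the bytes live in the .rodata section created by the macro above.
std::string_view getEmbeddedConfig()
{
    return {_binary_config_xml_start,
            static_cast<std::size_t>(_binary_config_xml_end - _binary_config_xml_start)};
}
```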
@@ -202,6 +259,7 @@ add_subdirectory (obfuscator)
 add_subdirectory (install)
 add_subdirectory (git-import)
 add_subdirectory (bash-completion)
+add_subdirectory (keeper)

 if (ENABLE_CLICKHOUSE_ODBC_BRIDGE)
     add_subdirectory (odbc-bridge)
@@ -212,15 +270,15 @@ if (ENABLE_CLICKHOUSE_LIBRARY_BRIDGE)
 endif ()

 if (CLICKHOUSE_ONE_SHARED)
-    add_library(clickhouse-lib SHARED ${CLICKHOUSE_SERVER_SOURCES} ${CLICKHOUSE_CLIENT_SOURCES} ${CLICKHOUSE_LOCAL_SOURCES} ${CLICKHOUSE_BENCHMARK_SOURCES} ${CLICKHOUSE_COPIER_SOURCES} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_SOURCES} ${CLICKHOUSE_COMPRESSOR_SOURCES} ${CLICKHOUSE_FORMAT_SOURCES} ${CLICKHOUSE_OBFUSCATOR_SOURCES} ${CLICKHOUSE_GIT_IMPORT_SOURCES} ${CLICKHOUSE_ODBC_BRIDGE_SOURCES})
-    target_link_libraries(clickhouse-lib ${CLICKHOUSE_SERVER_LINK} ${CLICKHOUSE_CLIENT_LINK} ${CLICKHOUSE_LOCAL_LINK} ${CLICKHOUSE_BENCHMARK_LINK} ${CLICKHOUSE_COPIER_LINK} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_LINK} ${CLICKHOUSE_COMPRESSOR_LINK} ${CLICKHOUSE_FORMAT_LINK} ${CLICKHOUSE_OBFUSCATOR_LINK} ${CLICKHOUSE_GIT_IMPORT_LINK} ${CLICKHOUSE_ODBC_BRIDGE_LINK})
-    target_include_directories(clickhouse-lib ${CLICKHOUSE_SERVER_INCLUDE} ${CLICKHOUSE_CLIENT_INCLUDE} ${CLICKHOUSE_LOCAL_INCLUDE} ${CLICKHOUSE_BENCHMARK_INCLUDE} ${CLICKHOUSE_COPIER_INCLUDE} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_INCLUDE} ${CLICKHOUSE_COMPRESSOR_INCLUDE} ${CLICKHOUSE_FORMAT_INCLUDE} ${CLICKHOUSE_OBFUSCATOR_INCLUDE} ${CLICKHOUSE_GIT_IMPORT_INCLUDE} ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE})
+    add_library(clickhouse-lib SHARED ${CLICKHOUSE_SERVER_SOURCES} ${CLICKHOUSE_CLIENT_SOURCES} ${CLICKHOUSE_LOCAL_SOURCES} ${CLICKHOUSE_BENCHMARK_SOURCES} ${CLICKHOUSE_COPIER_SOURCES} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_SOURCES} ${CLICKHOUSE_COMPRESSOR_SOURCES} ${CLICKHOUSE_FORMAT_SOURCES} ${CLICKHOUSE_OBFUSCATOR_SOURCES} ${CLICKHOUSE_GIT_IMPORT_SOURCES} ${CLICKHOUSE_ODBC_BRIDGE_SOURCES} ${CLICKHOUSE_KEEPER_SOURCES})
+    target_link_libraries(clickhouse-lib ${CLICKHOUSE_SERVER_LINK} ${CLICKHOUSE_CLIENT_LINK} ${CLICKHOUSE_LOCAL_LINK} ${CLICKHOUSE_BENCHMARK_LINK} ${CLICKHOUSE_COPIER_LINK} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_LINK} ${CLICKHOUSE_COMPRESSOR_LINK} ${CLICKHOUSE_FORMAT_LINK} ${CLICKHOUSE_OBFUSCATOR_LINK} ${CLICKHOUSE_GIT_IMPORT_LINK} ${CLICKHOUSE_ODBC_BRIDGE_LINK} ${CLICKHOUSE_KEEPER_LINK})
+    target_include_directories(clickhouse-lib ${CLICKHOUSE_SERVER_INCLUDE} ${CLICKHOUSE_CLIENT_INCLUDE} ${CLICKHOUSE_LOCAL_INCLUDE} ${CLICKHOUSE_BENCHMARK_INCLUDE} ${CLICKHOUSE_COPIER_INCLUDE} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_INCLUDE} ${CLICKHOUSE_COMPRESSOR_INCLUDE} ${CLICKHOUSE_FORMAT_INCLUDE} ${CLICKHOUSE_OBFUSCATOR_INCLUDE} ${CLICKHOUSE_GIT_IMPORT_INCLUDE} ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} ${CLICKHOUSE_KEEPER_INCLUDE})
     set_target_properties(clickhouse-lib PROPERTIES SOVERSION ${VERSION_MAJOR}.${VERSION_MINOR} VERSION ${VERSION_SO} OUTPUT_NAME clickhouse DEBUG_POSTFIX "")
     install (TARGETS clickhouse-lib LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT clickhouse)
 endif()

 if (CLICKHOUSE_SPLIT_BINARY)
-    set (CLICKHOUSE_ALL_TARGETS clickhouse-server clickhouse-client clickhouse-local clickhouse-benchmark clickhouse-extract-from-config clickhouse-compressor clickhouse-format clickhouse-obfuscator clickhouse-git-import clickhouse-copier)
+    set (CLICKHOUSE_ALL_TARGETS clickhouse-server clickhouse-client clickhouse-local clickhouse-benchmark clickhouse-extract-from-config clickhouse-compressor clickhouse-format clickhouse-obfuscator clickhouse-git-import clickhouse-copier clickhouse-keeper)

     if (ENABLE_CLICKHOUSE_ODBC_BRIDGE)
         list (APPEND CLICKHOUSE_ALL_TARGETS clickhouse-odbc-bridge)
@@ -277,6 +335,9 @@ else ()
     if (ENABLE_CLICKHOUSE_GIT_IMPORT)
         clickhouse_target_link_split_lib(clickhouse git-import)
     endif ()
+    if (ENABLE_CLICKHOUSE_KEEPER)
+        clickhouse_target_link_split_lib(clickhouse keeper)
+    endif()
     if (ENABLE_CLICKHOUSE_INSTALL)
         clickhouse_target_link_split_lib(clickhouse install)
     endif ()
@@ -332,6 +393,11 @@ else ()
         install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-git-import" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
         list(APPEND CLICKHOUSE_BUNDLE clickhouse-git-import)
     endif ()
+    if (ENABLE_CLICKHOUSE_KEEPER)
+        add_custom_target (clickhouse-keeper ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-keeper DEPENDS clickhouse)
+        install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-keeper" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
+        list(APPEND CLICKHOUSE_BUNDLE clickhouse-keeper)
+    endif ()

     install (TARGETS clickhouse RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)

@@ -16,3 +16,4 @@
 #cmakedefine01 ENABLE_CLICKHOUSE_INSTALL
 #cmakedefine01 ENABLE_CLICKHOUSE_ODBC_BRIDGE
 #cmakedefine01 ENABLE_CLICKHOUSE_LIBRARY_BRIDGE
+#cmakedefine01 ENABLE_CLICKHOUSE_KEEPER
programs/keeper/CMakeLists.txt (new file, 24 lines)
@@ -0,0 +1,24 @@
set(CLICKHOUSE_KEEPER_SOURCES
    Keeper.cpp
)

if (OS_LINUX)
    set (LINK_RESOURCE_LIB INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:clickhouse_keeper_configs> -Wl,${NO_WHOLE_ARCHIVE}")
endif ()

set (CLICKHOUSE_KEEPER_LINK
    PRIVATE
        clickhouse_common_config
        clickhouse_common_io
        clickhouse_common_zookeeper
        daemon
        dbms

        ${LINK_RESOURCE_LIB}
)

clickhouse_program_add(keeper)

install (FILES keeper_config.xml DESTINATION "${CLICKHOUSE_ETC_DIR}/clickhouse-keeper" COMPONENT clickhouse-keeper)

clickhouse_embed_binaries(keeper keeper_config.xml keeper_embedded.xml)
programs/keeper/Keeper.cpp (new file, 474 lines)
@@ -0,0 +1,474 @@
#include "Keeper.h"

#include <sys/stat.h>
#include <pwd.h>
#include <Common/ClickHouseRevision.h>
#include <Server/ProtocolServerAdapter.h>
#include <Common/DNSResolver.h>
#include <Interpreters/DNSCacheUpdater.h>
#include <Poco/Net/NetException.h>
#include <Poco/Net/TCPServerParams.h>
#include <Poco/Net/TCPServer.h>
#include <common/defines.h>
#include <common/logger_useful.h>
#include <common/ErrorHandlers.h>
#include <ext/scope_guard.h>
#include <Poco/Util/HelpFormatter.h>
#include <Poco/Version.h>
#include <Poco/Environment.h>
#include <Common/getMultipleKeysFromConfig.h>
#include <filesystem>
#include <IO/UseSSL.h>

#if !defined(ARCADIA_BUILD)
#   include "config_core.h"
#   include "Common/config_version.h"
#endif

#if USE_SSL
#   include <Poco/Net/Context.h>
#   include <Poco/Net/SecureServerSocket.h>
#endif

#if USE_NURAFT
#   include <Server/KeeperTCPHandlerFactory.h>
#endif

#if defined(OS_LINUX)
#   include <unistd.h>
#   include <sys/syscall.h>
#endif


int mainEntryClickHouseKeeper(int argc, char ** argv)
{
    DB::Keeper app;

    try
    {
        return app.run(argc, argv);
    }
    catch (...)
    {
        std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
        auto code = DB::getCurrentExceptionCode();
        return code ? code : 1;
    }
}

namespace DB
{

namespace ErrorCodes
{
    extern const int NO_ELEMENTS_IN_CONFIG;
    extern const int SUPPORT_IS_DISABLED;
    extern const int NETWORK_ERROR;
    extern const int MISMATCHING_USERS_FOR_PROCESS_AND_DATA;
    extern const int FAILED_TO_GETPWUID;
}

namespace
{

int waitServersToFinish(std::vector<DB::ProtocolServerAdapter> & servers, size_t seconds_to_wait)
{
    const int sleep_max_ms = 1000 * seconds_to_wait;
    const int sleep_one_ms = 100;
    int sleep_current_ms = 0;
    int current_connections = 0;
    for (;;)
    {
        current_connections = 0;

        for (auto & server : servers)
        {
            server.stop();
            current_connections += server.currentConnections();
        }

        if (!current_connections)
            break;

        sleep_current_ms += sleep_one_ms;
        if (sleep_current_ms < sleep_max_ms)
            std::this_thread::sleep_for(std::chrono::milliseconds(sleep_one_ms));
        else
            break;
    }
    return current_connections;
}

Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log)
{
    Poco::Net::SocketAddress socket_address;
    try
    {
        socket_address = Poco::Net::SocketAddress(host, port);
    }
    catch (const Poco::Net::DNSException & e)
    {
        const auto code = e.code();
        if (code == EAI_FAMILY
#if defined(EAI_ADDRFAMILY)
            || code == EAI_ADDRFAMILY
#endif
            )
        {
            LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. "
                "If it is an IPv6 address and your host has disabled IPv6, then consider to "
                "specify IPv4 address to listen in <listen_host> element of configuration "
                "file. Example: <listen_host>0.0.0.0</listen_host>",
                host, e.code(), e.message());
        }

        throw;
    }
    return socket_address;
}

[[noreturn]] void forceShutdown()
{
#if defined(THREAD_SANITIZER) && defined(OS_LINUX)
    /// Thread sanitizer tries to do something on exit that we don't need if we want to exit immediately,
    /// while connection handling threads are still run.
    (void)syscall(SYS_exit_group, 0);
    __builtin_unreachable();
#else
    _exit(0);
#endif
}

std::string getUserName(uid_t user_id)
{
    /// Try to convert user id into user name.
    auto buffer_size = sysconf(_SC_GETPW_R_SIZE_MAX);
    if (buffer_size <= 0)
        buffer_size = 1024;
    std::string buffer;
    buffer.reserve(buffer_size);

    struct passwd passwd_entry;
    struct passwd * result = nullptr;
    const auto error = getpwuid_r(user_id, &passwd_entry, buffer.data(), buffer_size, &result);

    if (error)
        throwFromErrno("Failed to find user name for " + toString(user_id), ErrorCodes::FAILED_TO_GETPWUID, error);
    else if (result)
        return result->pw_name;
    return toString(user_id);
}

}

Poco::Net::SocketAddress Keeper::socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure) const
{
    auto address = makeSocketAddress(host, port, &logger());
#if !defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION < 0x01090100
    if (secure)
        /// Bug in old (<1.9.1) poco, listen() after bind() with reusePort param will fail because have no implementation in SecureServerSocketImpl
        /// https://github.com/pocoproject/poco/pull/2257
        socket.bind(address, /* reuseAddress = */ true);
    else
#endif
#if POCO_VERSION < 0x01080000
    socket.bind(address, /* reuseAddress = */ true);
#else
    socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config().getBool("listen_reuse_port", false));
#endif

    socket.listen(/* backlog = */ config().getUInt("listen_backlog", 64));

    return address;
}

void Keeper::createServer(const std::string & listen_host, const char * port_name, bool listen_try, CreateServerFunc && func) const
{
    /// For testing purposes, user may omit tcp_port or http_port or https_port in configuration file.
    if (!config().has(port_name))
        return;

    auto port = config().getInt(port_name);
    try
    {
        func(port);
    }
    catch (const Poco::Exception &)
    {
        std::string message = "Listen [" + listen_host + "]:" + std::to_string(port) + " failed: " + getCurrentExceptionMessage(false);

        if (listen_try)
        {
            LOG_WARNING(&logger(), "{}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, then consider to "
                "specify not disabled IPv4 or IPv6 address to listen in <listen_host> element of configuration "
                "file. Example for disabled IPv6: <listen_host>0.0.0.0</listen_host> ."
                " Example for disabled IPv4: <listen_host>::</listen_host>",
                message);
        }
        else
        {
            throw Exception{message, ErrorCodes::NETWORK_ERROR};
        }
    }
}

void Keeper::uninitialize()
{
    logger().information("shutting down");
    BaseDaemon::uninitialize();
}

int Keeper::run()
{
    if (config().hasOption("help"))
    {
        Poco::Util::HelpFormatter help_formatter(Keeper::options());
        auto header_str = fmt::format("{} [OPTION] [-- [ARG]...]\n"
            "positional arguments can be used to rewrite config.xml properties, for example, --http_port=8010",
            commandName());
        help_formatter.setHeader(header_str);
        help_formatter.format(std::cout);
        return 0;
    }
    if (config().hasOption("version"))
    {
        std::cout << DBMS_NAME << " keeper version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
        return 0;
    }

    return Application::run(); // NOLINT
}

void Keeper::initialize(Poco::Util::Application & self)
{
    BaseDaemon::initialize(self);
    logger().information("starting up");

    LOG_INFO(&logger(), "OS Name = {}, OS Version = {}, OS Architecture = {}",
        Poco::Environment::osName(),
        Poco::Environment::osVersion(),
        Poco::Environment::osArchitecture());
}

std::string Keeper::getDefaultConfigFileName() const
{
    return "keeper_config.xml";
}

void Keeper::defineOptions(Poco::Util::OptionSet & options)
{
    options.addOption(
        Poco::Util::Option("help", "h", "show help and exit")
            .required(false)
            .repeatable(false)
            .binding("help"));
    options.addOption(
        Poco::Util::Option("version", "V", "show version and exit")
            .required(false)
            .repeatable(false)
            .binding("version"));
    BaseDaemon::defineOptions(options);
}

int Keeper::main(const std::vector<std::string> & /*args*/)
{
    Poco::Logger * log = &logger();

    UseSSL use_ssl;

    MainThreadStatus::getInstance();

#if !defined(NDEBUG) || !defined(__OPTIMIZE__)
    LOG_WARNING(log, "Keeper was built in debug mode. It will work slowly.");
#endif

#if defined(SANITIZER)
    LOG_WARNING(log, "Keeper was built with sanitizer. It will work slowly.");
#endif

    auto shared_context = Context::createShared();
    global_context = Context::createGlobal(shared_context.get());

    global_context->makeGlobalContext();
    global_context->setApplicationType(Context::ApplicationType::KEEPER);

    if (!config().has("keeper_server"))
        throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "Keeper configuration (<keeper_server> section) not found in config");


    std::string path;

    if (config().has("keeper_server.storage_path"))
        path = config().getString("keeper_server.storage_path");
    else if (config().has("keeper_server.log_storage_path"))
        path = config().getString("keeper_server.log_storage_path");
    else if (config().has("keeper_server.snapshot_storage_path"))
        path = config().getString("keeper_server.snapshot_storage_path");
    else
        path = std::filesystem::path{KEEPER_DEFAULT_PATH};


    /// Check that the process user id matches the owner of the data.
    const auto effective_user_id = geteuid();
    struct stat statbuf;
    if (stat(path.c_str(), &statbuf) == 0 && effective_user_id != statbuf.st_uid)
    {
        const auto effective_user = getUserName(effective_user_id);
        const auto data_owner = getUserName(statbuf.st_uid);
        std::string message = "Effective user of the process (" + effective_user +
            ") does not match the owner of the data (" + data_owner + ").";
        if (effective_user_id == 0)
        {
            message += " Run under 'sudo -u " + data_owner + "'.";
            throw Exception(message, ErrorCodes::MISMATCHING_USERS_FOR_PROCESS_AND_DATA);
        }
        else
        {
            LOG_WARNING(log, message);
        }
    }

    const Settings & settings = global_context->getSettingsRef();

    GlobalThreadPool::initialize(config().getUInt("max_thread_pool_size", 100));

    static ServerErrorHandler error_handler;
    Poco::ErrorHandler::set(&error_handler);

    /// Initialize DateLUT early, to not interfere with running time of first query.
    LOG_DEBUG(log, "Initializing DateLUT.");
    DateLUT::instance();
    LOG_TRACE(log, "Initialized DateLUT with time zone '{}'.", DateLUT::instance().getTimeZone());

    /// Don't want to use DNS cache
    DNSResolver::instance().setDisableCacheFlag();

    Poco::ThreadPool server_pool(3, config().getUInt("max_connections", 1024));

    std::vector<std::string> listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host");

    bool listen_try = config().getBool("listen_try", false);
    if (listen_hosts.empty())
    {
        listen_hosts.emplace_back("::1");
        listen_hosts.emplace_back("127.0.0.1");
        listen_try = true;
    }

    auto servers = std::make_shared<std::vector<ProtocolServerAdapter>>();

#if USE_NURAFT
    /// Initialize test keeper RAFT. Do nothing if no nu_keeper_server in config.
    global_context->initializeKeeperStorageDispatcher();
    for (const auto & listen_host : listen_hosts)
    {
        /// TCP Keeper
        const char * port_name = "keeper_server.tcp_port";
        createServer(listen_host, port_name, listen_try, [&](UInt16 port)
        {
            Poco::Net::ServerSocket socket;
            auto address = socketBindListen(socket, listen_host, port);
            socket.setReceiveTimeout(settings.receive_timeout);
            socket.setSendTimeout(settings.send_timeout);
            servers->emplace_back(
                port_name,
                std::make_unique<Poco::Net::TCPServer>(
                    new KeeperTCPHandlerFactory(*this, false), server_pool, socket, new Poco::Net::TCPServerParams));

            LOG_INFO(log, "Listening for connections to Keeper (tcp): {}", address.toString());
        });

        const char * secure_port_name = "keeper_server.tcp_port_secure";
        createServer(listen_host, secure_port_name, listen_try, [&](UInt16 port)
        {
#if USE_SSL
            Poco::Net::SecureServerSocket socket;
            auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
            socket.setReceiveTimeout(settings.receive_timeout);
            socket.setSendTimeout(settings.send_timeout);
            servers->emplace_back(
                secure_port_name,
                std::make_unique<Poco::Net::TCPServer>(
                    new KeeperTCPHandlerFactory(*this, true), server_pool, socket, new Poco::Net::TCPServerParams));
            LOG_INFO(log, "Listening for connections to Keeper with secure protocol (tcp_secure): {}", address.toString());
#else
            UNUSED(port);
            throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
                ErrorCodes::SUPPORT_IS_DISABLED};
#endif
        });
    }
#else
    throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "ClickHouse keeper built without NuRaft library. Cannot use coordination.");
#endif

    for (auto & server : *servers)
        server.start();

    SCOPE_EXIT({
        LOG_INFO(log, "Shutting down.");

        global_context->shutdown();

        LOG_DEBUG(log, "Waiting for current connections to Keeper to finish.");
        int current_connections = 0;
        for (auto & server : *servers)
        {
            server.stop();
            current_connections += server.currentConnections();
        }

        if (current_connections)
            LOG_INFO(log, "Closed all listening sockets. Waiting for {} outstanding connections.", current_connections);
        else
            LOG_INFO(log, "Closed all listening sockets.");

        if (current_connections > 0)
            current_connections = waitServersToFinish(*servers, config().getInt("shutdown_wait_unfinished", 5));

        if (current_connections)
            LOG_INFO(log, "Closed connections to Keeper. But {} remain. Probably some users cannot finish their connections after context shutdown.", current_connections);
        else
            LOG_INFO(log, "Closed connections to Keeper.");

        global_context->shutdownKeeperStorageDispatcher();

        /// Wait server pool to avoid use-after-free of destroyed context in the handlers
        server_pool.joinAll();

        /** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
          * At this moment, no one could own shared part of Context.
          */
        global_context.reset();
        shared_context.reset();

        LOG_DEBUG(log, "Destroyed global context.");

        if (current_connections)
        {
            LOG_INFO(log, "Will shutdown forcefully.");
            forceShutdown();
        }
    });


    buildLoggers(config(), logger());

    LOG_INFO(log, "Ready for connections.");

    waitForTerminationRequest();

    return Application::EXIT_OK;
}


void Keeper::logRevision() const
{
    Poco::Logger::root().information("Starting ClickHouse Keeper " + std::string{VERSION_STRING}
        + " with revision " + std::to_string(ClickHouseRevision::getVersionRevision())
        + ", " + build_id_info
        + ", PID " + std::to_string(getpid()));
}


}
programs/keeper/Keeper.h (new file, 69 lines)
@@ -0,0 +1,69 @@
#pragma once

#include <Server/IServer.h>
#include <daemon/BaseDaemon.h>

namespace Poco
{
    namespace Net
    {
        class ServerSocket;
    }
}

namespace DB
{

/// standalone clickhouse-keeper server (replacement for ZooKeeper). Uses the same
/// config as clickhouse-server. Serves requests on TCP ports with or without
/// SSL using ZooKeeper protocol.
class Keeper : public BaseDaemon, public IServer
{
public:
    using ServerApplication::run;

    Poco::Util::LayeredConfiguration & config() const override
    {
        return BaseDaemon::config();
    }

    Poco::Logger & logger() const override
    {
        return BaseDaemon::logger();
    }

    ContextPtr context() const override
    {
        return global_context;
    }

    bool isCancelled() const override
    {
        return BaseDaemon::isCancelled();
    }

    void defineOptions(Poco::Util::OptionSet & _options) override;

protected:
    void logRevision() const override;

    int run() override;

    void initialize(Application & self) override;

    void uninitialize() override;

    int main(const std::vector<std::string> & args) override;

    std::string getDefaultConfigFileName() const override;

private:
    ContextPtr global_context;

    Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure = false) const;

    using CreateServerFunc = std::function<void(UInt16)>;
    void createServer(const std::string & listen_host, const char * port_name, bool listen_try, CreateServerFunc && func) const;
};

}
programs/keeper/clickhouse-keeper.cpp (new file, 6 lines)
@@ -0,0 +1,6 @@
int mainEntryClickHouseKeeper(int argc, char ** argv);

int main(int argc_, char ** argv_)
{
    return mainEntryClickHouseKeeper(argc_, argv_);
}
programs/keeper/keeper_config.xml (new file, 81 lines)
@@ -0,0 +1,81 @@
<yandex>
    <logger>
        <!-- Possible levels [1]:

          - none (turns off logging)
          - fatal
          - critical
          - error
          - warning
          - notice
          - information
          - debug
          - trace

          [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
        -->
        <level>trace</level>
        <log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
        <errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
        <!-- Rotation policy
             See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
          -->
        <size>1000M</size>
        <count>10</count>
        <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
    </logger>

    <max_connections>4096</max_connections>

    <keeper_server>
        <tcp_port>9181</tcp_port>

        <!-- Must be unique among all keeper serves -->
        <server_id>1</server_id>

        <log_storage_path>/var/lib/clickhouse/coordination/logs</log_storage_path>
        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

        <coordination_settings>
            <operation_timeout_ms>10000</operation_timeout_ms>
            <session_timeout_ms>30000</session_timeout_ms>
            <raft_logs_level>information</raft_logs_level>
            <!-- All settings listed in https://github.com/ClickHouse/ClickHouse/blob/master/src/Coordination/CoordinationSettings.h -->
        </coordination_settings>

        <raft_configuration>
            <server>
                <id>1</id>

                <!-- Internal port and hostname -->
                <hostname>localhost</hostname>
                <port>44444</port>
            </server>

            <!-- Add more servers here -->

        </raft_configuration>
    </keeper_server>


    <openSSL>
        <server>
            <!-- Used for secure tcp port -->
            <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
            <certificateFile>/etc/clickhouse-keeper/server.crt</certificateFile>
            <privateKeyFile>/etc/clickhouse-keeper/server.key</privateKeyFile>
            <!-- dhparams are optional. You can delete the <dhParamsFile> element.
                 To generate dhparams, use the following command:
                    openssl dhparam -out /etc/clickhouse-keeper/dhparam.pem 4096
                 Only file format with BEGIN DH PARAMETERS is supported.
              -->
            <dhParamsFile>/etc/clickhouse-keeper/dhparam.pem</dhParamsFile>
            <verificationMode>none</verificationMode>
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
        </server>
    </openSSL>

</yandex>
programs/keeper/keeper_embedded.xml (new file, 21 lines)
@@ -0,0 +1,21 @@
<yandex>
    <logger>
        <level>trace</level>
        <console>true</console>
    </logger>

    <keeper_server>
        <tcp_port>9181</tcp_port>
        <server_id>1</server_id>
        <log_storage_path>./keeper_log</log_storage_path>
        <snapshot_storage_path>./keeper_snapshot</snapshot_storage_path>

        <raft_configuration>
            <server>
                <id>1</id>
                <hostname>localhost</hostname>
                <port>44444</port>
            </server>
        </raft_configuration>
    </keeper_server>
</yandex>
@@ -55,6 +55,9 @@ int mainEntryClickHouseObfuscator(int argc, char ** argv);
 #if ENABLE_CLICKHOUSE_GIT_IMPORT
 int mainEntryClickHouseGitImport(int argc, char ** argv);
 #endif
+#if ENABLE_CLICKHOUSE_KEEPER
+int mainEntryClickHouseKeeper(int argc, char ** argv);
+#endif
 #if ENABLE_CLICKHOUSE_INSTALL
 int mainEntryClickHouseInstall(int argc, char ** argv);
 int mainEntryClickHouseStart(int argc, char ** argv);
@@ -112,6 +115,9 @@ std::pair<const char *, MainFunc> clickhouse_applications[] =
 #if ENABLE_CLICKHOUSE_GIT_IMPORT
     {"git-import", mainEntryClickHouseGitImport},
 #endif
+#if ENABLE_CLICKHOUSE_KEEPER
+    {"keeper", mainEntryClickHouseKeeper},
+#endif
 #if ENABLE_CLICKHOUSE_INSTALL
     {"install", mainEntryClickHouseInstall},
     {"start", mainEntryClickHouseStart},
@@ -31,37 +31,4 @@ clickhouse_program_add(server)

 install(FILES config.xml users.xml DESTINATION "${CLICKHOUSE_ETC_DIR}/clickhouse-server" COMPONENT clickhouse)

-# TODO We actually need this on Mac, FreeBSD.
-if (OS_LINUX)
-    # Embed default config files as a resource into the binary.
-    # This is needed for two purposes:
-    # 1. Allow to run the binary without download of any other files.
-    # 2. Allow to implement "sudo clickhouse install" tool.
-
-    foreach(RESOURCE_FILE config.xml users.xml embedded.xml play.html)
-        set(RESOURCE_OBJ ${RESOURCE_FILE}.o)
-        set(RESOURCE_OBJS ${RESOURCE_OBJS} ${RESOURCE_OBJ})
-
-        # https://stackoverflow.com/questions/14776463/compile-and-add-an-object-file-from-a-binary-with-cmake
-        # PPC64LE fails to do this with objcopy, use ld or lld instead
-        if (ARCH_PPC64LE)
-            add_custom_command(OUTPUT ${RESOURCE_OBJ}
-                COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ${CMAKE_LINKER} -m elf64lppc -r -b binary -o "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}" ${RESOURCE_FILE})
-        else()
-            add_custom_command(OUTPUT ${RESOURCE_OBJ}
-                COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ${OBJCOPY_PATH} -I binary ${OBJCOPY_ARCH_OPTIONS} ${RESOURCE_FILE} "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}"
-                COMMAND ${OBJCOPY_PATH} --rename-section .data=.rodata,alloc,load,readonly,data,contents
-                    "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}" "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}")
-        endif()
-        set_source_files_properties(${RESOURCE_OBJ} PROPERTIES EXTERNAL_OBJECT true GENERATED true)
-    endforeach(RESOURCE_FILE)
-
-    add_library(clickhouse_server_configs STATIC ${RESOURCE_OBJS})
-    set_target_properties(clickhouse_server_configs PROPERTIES LINKER_LANGUAGE C)
-
-    # whole-archive prevents symbols from being discarded for unknown reason
-    # CMake can shuffle each of target_link_libraries arguments with other
-    # libraries in linker command. To avoid this we hardcode whole-archive
-    # library into single string.
-    add_dependencies(clickhouse-server-lib clickhouse_server_configs)
-endif ()
+clickhouse_embed_binaries(server config.xml users.xml embedded.xml play.html)
@@ -462,10 +462,19 @@ XMLDocumentPtr ConfigProcessor::processConfig(
     }
     else
     {
-        /// When we can use config embedded in binary.
-        auto resource = getResource("embedded.xml");
+        /// These embedded files added during build with some cmake magic.
+        /// Look at the end of programs/sever/CMakeLists.txt.
+        std::string embedded_name;
+        if (path == "config.xml")
+            embedded_name = "embedded.xml";
+
+        if (path == "keeper_config.xml")
+            embedded_name = "keeper_embedded.xml";
+
+        /// When we can use config embedded in binary.
+        if (!embedded_name.empty())
+        {
+            auto resource = getResource(embedded_name);
             if (resource.empty())
                 throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Configuration file {} doesn't exist and there is no embedded config", path);
             LOG_DEBUG(log, "There is no file '{}', will use embedded config.", path);
@@ -90,17 +90,16 @@ private:
     }
 };

-template <size_t MaxNumHints, class Self>
+template <size_t MaxNumHints, typename Self>
 class IHints
 {
 public:

     virtual std::vector<String> getAllRegisteredNames() const = 0;

     std::vector<String> getHints(const String & name) const
     {
-        static const auto registered_names = getAllRegisteredNames();
-        return prompter.getHints(name, registered_names);
+        return prompter.getHints(name, getAllRegisteredNames());
     }

     virtual ~IHints() = default;
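The `getHints` change above drops the function-local `static`: previously the first caller's name list was captured once and reused for every later call (and for every other instance sharing that template instantiation), so names registered afterwards could never be suggested. A hypothetical subclass illustrating the contract follows; the `prompter` member and `String` alias come from the surrounding header shown in the diff, and the registry class itself is not from the commit.

```cpp
// Hypothetical registry: typo hints now reflect the live name list,
// because getHints() re-queries getAllRegisteredNames() on every call.
class TableFunctionRegistry : public IHints<2, TableFunctionRegistry>
{
public:
    void add(const String & name) { names.push_back(name); }

    std::vector<String> getAllRegisteredNames() const override { return names; }

private:
    std::vector<String> names;
};
```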
@@ -513,7 +513,7 @@ public:
     insertPrepare(from_begin, from_end);

     if (unlikely(bytes_to_move))
-        memcpy(this->c_end + bytes_to_copy - bytes_to_move, this->c_end - bytes_to_move, bytes_to_move);
+        memmove(this->c_end + bytes_to_copy - bytes_to_move, this->c_end - bytes_to_move, bytes_to_move);

     memcpy(this->c_end - bytes_to_move, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
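The one-word fix above matters because source and destination can overlap here: the tail of the array is shifted right within the same buffer to make room, and `memcpy` has undefined behavior for overlapping ranges, while `memmove` copies as if through a temporary buffer. A self-contained illustration of the hazard:

```cpp
#include <cstdio>
#include <cstring>

int main()
{
    // Shift "abcdef" right by two within the same buffer, much like
    // PODArray's insert does when making room in the middle.
    char buf[9] = "abcdef";

    // Overlapping regions: memmove is required; memcpy would be UB here.
    std::memmove(buf + 2, buf, 6);

    std::memcpy(buf, "xy", 2);   /// non-overlapping: memcpy is fine
    buf[8] = '\0';
    std::puts(buf);              /// prints "xyabcdef"
}
```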
@@ -36,6 +36,10 @@ struct NetworkInterfaces
     ifaddrs * iface;
     for (iface = ifaddr; iface != nullptr; iface = iface->ifa_next)
     {
+        /// Point-to-point (VPN) addresses may have NULL ifa_addr
+        if (!iface->ifa_addr)
+            continue;
+
         auto family = iface->ifa_addr->sa_family;
         std::optional<Poco::Net::IPAddress> interface_address;
         switch (family)
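`ifa_addr` may legitimately be null: getifaddrs(3) documents this for some interfaces, and the commit calls out point-to-point (VPN) addresses, so dereferencing it unconditionally was a crash waiting for such an interface to appear. A self-contained sketch of the same defensive iteration:

```cpp
#include <cstdio>
#include <ifaddrs.h>

int main()
{
    ifaddrs * ifaddr = nullptr;
    if (getifaddrs(&ifaddr) != 0)
        return 1;

    for (const ifaddrs * iface = ifaddr; iface != nullptr; iface = iface->ifa_next)
    {
        if (!iface->ifa_addr)   /// skip address-less (e.g. some P2P) entries
            continue;
        std::printf("%s: family %d\n", iface->ifa_name, iface->ifa_addr->sa_family);
    }

    freeifaddrs(ifaddr);
    return 0;
}
```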
@@ -223,7 +223,7 @@ TEST(Common, SensitiveDataMasker)
 {
     EXPECT_EQ(
         std::string(e.message()),
-        "SensitiveDataMasker: cannot compile re2: ())(, error: missing ): ())(. Look at https://github.com/google/re2/wiki/Syntax for reference.: while adding query masking rule 'test'."
+        "SensitiveDataMasker: cannot compile re2: ())(, error: unexpected ): ())(. Look at https://github.com/google/re2/wiki/Syntax for reference.: while adding query masking rule 'test'."
     );
     EXPECT_EQ(e.code(), DB::ErrorCodes::CANNOT_COMPILE_REGEXP);
 }
@@ -14,6 +14,7 @@
 #include <chrono>
 #include <Common/ZooKeeper/ZooKeeperIO.h>
 #include <string>
+#include <filesystem>
 #include <Poco/Util/Application.h>

 namespace DB
@@ -59,6 +60,21 @@ void setSSLParams(nuraft::asio_service::options & asio_opts)
 }
 #endif

+std::string getSnapshotsPathFromConfig(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper)
+{
+    /// the most specialized path
+    if (config.has("keeper_server.snapshot_storage_path"))
+        return config.getString("keeper_server.snapshot_storage_path");
+
+    if (config.has("keeper_server.storage_path"))
+        return std::filesystem::path{config.getString("keeper_server.storage_path")} / "snapshots";
+
+    if (standalone_keeper)
+        return std::filesystem::path{config.getString("path", KEEPER_DEFAULT_PATH)} / "snapshots";
+    else
+        return std::filesystem::path{config.getString("path", DBMS_DEFAULT_PATH)} / "coordination/snapshots";
+}
+
 }

 KeeperServer::KeeperServer(
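The helper above gives standalone keeper its own defaults: an explicit `snapshot_storage_path` wins, then `storage_path` plus a `snapshots` subdirectory, and only then a role-dependent fallback. A toy restatement of that precedence, with a plain map standing in for Poco's `AbstractConfiguration` (an assumption made to keep the sketch self-contained):

```cpp
#include <filesystem>
#include <map>
#include <string>

/// Toy stand-in for Poco::Util::AbstractConfiguration: just key -> value.
using Config = std::map<std::string, std::string>;

/// Mirrors the precedence in getSnapshotsPathFromConfig above:
/// explicit snapshot path > storage_path + "/snapshots" > role-dependent default.
std::string snapshotsPath(const Config & config, bool standalone_keeper)
{
    if (auto it = config.find("keeper_server.snapshot_storage_path"); it != config.end())
        return it->second;

    if (auto it = config.find("keeper_server.storage_path"); it != config.end())
        return std::filesystem::path{it->second} / "snapshots";

    /// Defaults from the diff: KEEPER_DEFAULT_PATH vs DBMS_DEFAULT_PATH + "coordination".
    return standalone_keeper
        ? "/var/lib/clickhouse-keeper/snapshots"
        : "/var/lib/clickhouse/coordination/snapshots";
}
```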
@@ -66,14 +82,15 @@ KeeperServer::KeeperServer(
     const CoordinationSettingsPtr & coordination_settings_,
     const Poco::Util::AbstractConfiguration & config,
     ResponsesQueue & responses_queue_,
-    SnapshotsQueue & snapshots_queue_)
+    SnapshotsQueue & snapshots_queue_,
+    bool standalone_keeper)
     : server_id(server_id_)
     , coordination_settings(coordination_settings_)
     , state_machine(nuraft::cs_new<KeeperStateMachine>(
         responses_queue_, snapshots_queue_,
-        config.getString("keeper_server.snapshot_storage_path", config.getString("path", DBMS_DEFAULT_PATH) + "coordination/snapshots"),
+        getSnapshotsPathFromConfig(config, standalone_keeper),
         coordination_settings))
-    , state_manager(nuraft::cs_new<KeeperStateManager>(server_id, "keeper_server", config, coordination_settings))
+    , state_manager(nuraft::cs_new<KeeperStateManager>(server_id, "keeper_server", config, coordination_settings, standalone_keeper))
     , log(&Poco::Logger::get("KeeperServer"))
 {
     if (coordination_settings->quorum_reads)
@@ -55,7 +55,8 @@ public:
     const CoordinationSettingsPtr & coordination_settings_,
     const Poco::Util::AbstractConfiguration & config,
     ResponsesQueue & responses_queue_,
-    SnapshotsQueue & snapshots_queue_);
+    SnapshotsQueue & snapshots_queue_,
+    bool standalone_keeper);

     void startup();
@@ -1,5 +1,6 @@
 #include <Coordination/KeeperStateManager.h>
 #include <Common/Exception.h>
+#include <filesystem>

 namespace DB
 {
@@ -9,6 +10,26 @@ namespace ErrorCodes
     extern const int RAFT_ERROR;
 }

+namespace
+{
+
+std::string getLogsPathFromConfig(
+    const std::string & config_prefix, const Poco::Util::AbstractConfiguration & config, bool standalone_keeper)
+{
+    /// the most specialized path
+    if (config.has(config_prefix + ".log_storage_path"))
+        return config.getString(config_prefix + ".log_storage_path");
+
+    if (config.has(config_prefix + ".storage_path"))
+        return std::filesystem::path{config.getString(config_prefix + ".storage_path")} / "logs";
+
+    if (standalone_keeper)
+        return std::filesystem::path{config.getString("path", KEEPER_DEFAULT_PATH)} / "logs";
+    else
+        return std::filesystem::path{config.getString("path", DBMS_DEFAULT_PATH)} / "coordination/logs";
+}
+
+}
+
 KeeperStateManager::KeeperStateManager(int server_id_, const std::string & host, int port, const std::string & logs_path)
     : my_server_id(server_id_)
     , my_port(port)
@@ -24,11 +45,12 @@ KeeperStateManager::KeeperStateManager(
     int my_server_id_,
     const std::string & config_prefix,
     const Poco::Util::AbstractConfiguration & config,
-    const CoordinationSettingsPtr & coordination_settings)
+    const CoordinationSettingsPtr & coordination_settings,
+    bool standalone_keeper)
     : my_server_id(my_server_id_)
     , secure(config.getBool(config_prefix + ".raft_configuration.secure", false))
     , log_store(nuraft::cs_new<KeeperLogStore>(
-        config.getString(config_prefix + ".log_storage_path", config.getString("path", DBMS_DEFAULT_PATH) + "coordination/logs"),
+        getLogsPathFromConfig(config_prefix, config, standalone_keeper),
         coordination_settings->rotate_log_storage_interval, coordination_settings->force_sync))
     , cluster_config(nuraft::cs_new<nuraft::cluster_config>())
 {
@@ -17,7 +17,8 @@ public:
     int server_id_,
     const std::string & config_prefix,
     const Poco::Util::AbstractConfiguration & config,
-    const CoordinationSettingsPtr & coordination_settings);
+    const CoordinationSettingsPtr & coordination_settings,
+    bool standalone_keeper);

     KeeperStateManager(
         int server_id_,
@@ -547,6 +547,17 @@ struct KeeperStorageCloseRequest final : public KeeperStorageRequest
     }
 };

+/// Dummy implementation TODO: implement simple ACL
+struct KeeperStorageAuthRequest final : public KeeperStorageRequest
+{
+    using KeeperStorageRequest::KeeperStorageRequest;
+    std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(KeeperStorage::Container &, KeeperStorage::Ephemerals &, int64_t, int64_t) const override
+    {
+        Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
+        return { response_ptr, {} };
+    }
+};
+
 void KeeperStorage::finalize()
 {
     if (finalized)
@@ -611,7 +622,7 @@ KeeperWrapperFactory::KeeperWrapperFactory()
 {
     registerKeeperRequestWrapper<Coordination::OpNum::Heartbeat, KeeperStorageHeartbeatRequest>(*this);
     registerKeeperRequestWrapper<Coordination::OpNum::Sync, KeeperStorageSyncRequest>(*this);
-    //registerKeeperRequestWrapper<Coordination::OpNum::Auth, KeeperStorageAuthRequest>(*this);
+    registerKeeperRequestWrapper<Coordination::OpNum::Auth, KeeperStorageAuthRequest>(*this);
     registerKeeperRequestWrapper<Coordination::OpNum::Close, KeeperStorageCloseRequest>(*this);
     registerKeeperRequestWrapper<Coordination::OpNum::Create, KeeperStorageCreateRequest>(*this);
     registerKeeperRequestWrapper<Coordination::OpNum::Remove, KeeperStorageRemoveRequest>(*this);
@@ -234,7 +234,7 @@ bool KeeperStorageDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr
     return true;
 }

-void KeeperStorageDispatcher::initialize(const Poco::Util::AbstractConfiguration & config)
+void KeeperStorageDispatcher::initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper)
 {
     LOG_DEBUG(log, "Initializing storage dispatcher");
     int myid = config.getInt("keeper_server.server_id");
@@ -246,7 +246,8 @@ void KeeperStorageDispatcher::initialize(const Poco::Util::AbstractConfiguration
     responses_thread = ThreadFromGlobalPool([this] { responseThread(); });
     snapshot_thread = ThreadFromGlobalPool([this] { snapshotThread(); });

-    server = std::make_unique<KeeperServer>(myid, coordination_settings, config, responses_queue, snapshots_queue);
+    server = std::make_unique<KeeperServer>(
+        myid, coordination_settings, config, responses_queue, snapshots_queue, standalone_keeper);
     try
     {
         LOG_DEBUG(log, "Waiting server to initialize");
@@ -86,7 +86,7 @@ private:
 public:
     KeeperStorageDispatcher();

-    void initialize(const Poco::Util::AbstractConfiguration & config);
+    void initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper);

     void shutdown();
@@ -98,6 +98,8 @@

 #define DBMS_DEFAULT_PATH "/var/lib/clickhouse/"

+#define KEEPER_DEFAULT_PATH "/var/lib/clickhouse-keeper/"
+
 // more aliases: https://mailman.videolan.org/pipermail/x264-devel/2014-May/010660.html

 /// Marks that extra information is sent to a shard. It could be any magic numbers.
@@ -15,7 +15,7 @@ AddingDefaultBlockOutputStream::AddingDefaultBlockOutputStream(
     : output(output_), header(header_)
 {
     auto dag = addMissingDefaults(header_, output->getHeader().getNamesAndTypesList(), columns_, context_, null_as_default_);
-    adding_defaults_actions = std::make_shared<ExpressionActions>(std::move(dag), ExpressionActionsSettings::fromContext(context_));
+    adding_defaults_actions = std::make_shared<ExpressionActions>(std::move(dag), ExpressionActionsSettings::fromContext(context_, CompileExpressions::yes));
 }

 void AddingDefaultBlockOutputStream::write(const Block & block)
@ -174,7 +174,7 @@ Block AddingDefaultsBlockInputStream::readImpl()
|
||||
auto dag = evaluateMissingDefaults(evaluate_block, header.getNamesAndTypesList(), columns, context, false);
|
||||
if (dag)
|
||||
{
|
||||
auto actions = std::make_shared<ExpressionActions>(std::move(dag), ExpressionActionsSettings::fromContext(context));
|
||||
auto actions = std::make_shared<ExpressionActions>(std::move(dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes));
|
||||
actions->execute(evaluate_block);
|
||||
}
|
||||
|
||||
|
@ -1147,17 +1147,24 @@ public:
|
||||
/// NOTE: We consider NaN comparison to be implementation specific (and in our implementation NaNs are sometimes equal sometimes not).
|
||||
if (left_type->equals(*right_type) && !left_type->isNullable() && !isTuple(left_type) && col_left_untyped == col_right_untyped)
|
||||
{
|
||||
ColumnPtr result_column;
|
||||
|
||||
/// Always true: =, <=, >=
|
||||
if constexpr (IsOperation<Op>::equals
|
||||
|| IsOperation<Op>::less_or_equals
|
||||
|| IsOperation<Op>::greater_or_equals)
|
||||
{
|
||||
return DataTypeUInt8().createColumnConst(input_rows_count, 1u);
|
||||
result_column = DataTypeUInt8().createColumnConst(input_rows_count, 1u);
|
||||
}
|
||||
else
|
||||
{
|
||||
return DataTypeUInt8().createColumnConst(input_rows_count, 0u);
|
||||
result_column = DataTypeUInt8().createColumnConst(input_rows_count, 0u);
|
||||
}
|
||||
|
||||
if (!isColumnConst(*col_left_untyped))
|
||||
result_column = result_column->convertToFullColumnIfConst();
|
||||
|
||||
return result_column;
|
||||
}
|
||||
|
||||
WhichDataType which_left{left_type};
|
||||
|
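The hunk above stops returning a folded constant directly: when a non-constant column is compared with itself, the known result must still be materialized once per row. A minimal standalone sketch of that rule follows (plain STL containers, not ClickHouse's actual IColumn API; all names here are illustrative):

#include <cstdint>
#include <vector>

struct FoldedResult
{
    bool is_const;                 /// may stay const only if the input column was const
    std::vector<uint8_t> values;   /// one element if const, else one element per row
};

FoldedResult foldSelfComparison(bool input_is_const, size_t input_rows_count, bool always_true)
{
    const uint8_t folded = always_true ? 1 : 0;   /// =, <=, >= fold to 1; !=, <, > fold to 0
    if (input_is_const)
        return {true, {folded}};
    /// Non-const input: broadcast the constant into a full column,
    /// which is what convertToFullColumnIfConst() does in the diff.
    return {false, std::vector<uint8_t>(input_rows_count, folded)};
}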
@ -61,13 +61,14 @@ ColumnPtr replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes(
{
    /// Single LowCardinality column is supported now.
    if (indexes)
        throw Exception("Expected single dictionary argument for function.", ErrorCodes::LOGICAL_ERROR);
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected single dictionary argument for function.");

    const auto * low_cardinality_type = checkAndGetDataType<DataTypeLowCardinality>(column.type.get());

    if (!low_cardinality_type)
        throw Exception("Incompatible type for low cardinality column: " + column.type->getName(),
            ErrorCodes::LOGICAL_ERROR);
        throw Exception(ErrorCodes::LOGICAL_ERROR,
            "Incompatible type for low cardinality column: {}",
            column.type->getName());

    if (can_be_executed_on_default_arguments)
    {

@ -121,7 +122,10 @@ ColumnPtr IExecutableFunction::defaultImplementationForConstantArguments(
    /// Check that these arguments are really constant.
    for (auto arg_num : arguments_to_remain_constants)
        if (arg_num < args.size() && !isColumnConst(*args[arg_num].column))
            throw Exception("Argument at index " + toString(arg_num) + " for function " + getName() + " must be constant", ErrorCodes::ILLEGAL_COLUMN);
            throw Exception(ErrorCodes::ILLEGAL_COLUMN,
                "Argument at index {} for function {} must be constant",
                toString(arg_num),
                getName());

    if (args.empty() || !useDefaultImplementationForConstants() || !allArgumentsAreConstants(args))
        return nullptr;

@ -150,8 +154,9 @@ ColumnPtr IExecutableFunction::defaultImplementationForConstantArguments(
     * not in "arguments_to_remain_constants" set. Otherwise we get infinite recursion.
     */
    if (!have_converted_columns)
        throw Exception("Number of arguments for function " + getName() + " doesn't match: the function requires more arguments",
            ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
        throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
            "Number of arguments for function {} doesn't match: the function requires more arguments",
            getName());

    ColumnPtr result_column = executeWithoutLowCardinalityColumns(temporary_columns, result_type, 1, dry_run);

@ -266,9 +271,11 @@ void IFunctionOverloadResolver::checkNumberOfArguments(size_t number_of_argument
    size_t expected_number_of_arguments = getNumberOfArguments();

    if (number_of_arguments != expected_number_of_arguments)
        throw Exception("Number of arguments for function " + getName() + " doesn't match: passed "
            + toString(number_of_arguments) + ", should be " + toString(expected_number_of_arguments),
            ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
        throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
            "Number of arguments for function {} doesn't match: passed {}, should be {}",
            getName(),
            toString(number_of_arguments),
            toString(expected_number_of_arguments));
}

DataTypePtr IFunctionOverloadResolver::getReturnType(const ColumnsWithTypeAndName & arguments) const
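The exception hunks above all follow one mechanical migration: the error code moves to the first Exception argument and the message becomes a format string with {} placeholders, so no string concatenation happens on the happy path. A hedged sketch of the same pattern using plain fmt follows (ClickHouse's Exception class is its own; throwFormatted and the error-code value here are illustrative only):

#include <fmt/format.h>
#include <stdexcept>
#include <string_view>

template <typename... Args>
[[noreturn]] void throwFormatted(int code, std::string_view fmt_str, const Args &... args)
{
    /// vformat substitutes the {} placeholders, mirroring the rewritten throw sites.
    throw std::runtime_error(
        fmt::format("Code: {}. {}", code, fmt::vformat(fmt_str, fmt::make_format_args(args...))));
}

/// Usage, mirroring a rewritten call site:
///   throwFormatted(42, "Number of arguments for function {} doesn't match: passed {}, should be {}",
///                  name, passed, expected);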
@ -299,11 +306,7 @@ DataTypePtr IFunctionOverloadResolver::getReturnType(const ColumnsWithTypeAndNam
        ++num_full_ordinary_columns;
    }

    for (auto & arg : args_without_low_cardinality)
    {
        arg.column = recursiveRemoveLowCardinality(arg.column);
        arg.type = recursiveRemoveLowCardinality(arg.type);
    }
    convertLowCardinalityColumnsToFull(args_without_low_cardinality);

    auto type_without_low_cardinality = getReturnTypeWithoutLowCardinality(args_without_low_cardinality);

@ -245,6 +245,8 @@ public:

    void getLambdaArgumentTypes(DataTypes & arguments) const;

    void checkNumberOfArguments(size_t number_of_arguments) const;

    /// Get the main function name.
    virtual String getName() const = 0;

@ -319,8 +321,6 @@ protected:

private:

    void checkNumberOfArguments(size_t number_of_arguments) const;

    DataTypePtr getReturnTypeWithoutLowCardinality(const ColumnsWithTypeAndName & arguments) const;
};

@ -17,7 +17,7 @@ class IHashingBuffer : public BufferWithOwnMemory<Buffer>
public:
    using uint128 = CityHash_v1_0_2::uint128;

    IHashingBuffer<Buffer>(size_t block_size_ = DBMS_DEFAULT_HASHING_BLOCK_SIZE)
    IHashingBuffer(size_t block_size_ = DBMS_DEFAULT_HASHING_BLOCK_SIZE)
        : BufferWithOwnMemory<Buffer>(block_size_), block_pos(0), block_size(block_size_), state(0, 0)
    {
    }
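The IHashingBuffer hunk drops the redundant template-argument list from the constructor name: inside a class template, declaring the constructor as `IHashingBuffer<Buffer>(...)` instead of using the injected-class-name is rejected by newer C++ standards. A minimal sketch with a hypothetical Holder template (nothing ClickHouse-specific is assumed):

template <typename T>
struct Holder
{
    // Holder<T>(T value) {}        /// ill-formed under newer standards
    Holder(T value) : stored(value) {}   /// injected-class-name: the correct form
    T stored;
};

int main()
{
    Holder<int> h(42);   /// instantiation is unaffected by the fix
    return h.stored == 42 ? 0 : 1;
}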
@ -293,6 +293,17 @@ NamesAndTypesList ActionsDAG::getRequiredColumns() const
    return result;
}

Names ActionsDAG::getRequiredColumnsNames() const
{
    Names result;
    result.reserve(inputs.size());

    for (const auto & input : inputs)
        result.emplace_back(input->result_name);

    return result;
}

ColumnsWithTypeAndName ActionsDAG::getResultColumns() const
{
    ColumnsWithTypeAndName result;

@ -1041,11 +1052,19 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions(
    {
        auto & input = inputs[res_elem.name];
        if (input.empty())
        {
            const auto * res_const = typeid_cast<const ColumnConst *>(res_elem.column.get());
            if (ignore_constant_values && res_const)
                src_node = dst_node = &actions_dag->addColumn(res_elem);
            else
                throw Exception("Cannot find column " + backQuote(res_elem.name) + " in source stream",
                    ErrorCodes::THERE_IS_NO_COLUMN);
        }
        else
        {
            src_node = dst_node = actions_dag->inputs[input.front()];
            input.pop_front();
        }
        break;
    }
}

@ -121,6 +121,7 @@ public:
    const NodeRawConstPtrs & getInputs() const { return inputs; }

    NamesAndTypesList getRequiredColumns() const;
    Names getRequiredColumnsNames() const;
    ColumnsWithTypeAndName getResultColumns() const;
    NamesAndTypesList getNamesAndTypesList() const;

@ -1015,7 +1015,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data &
    auto lambda_actions = std::make_shared<ExpressionActions>(
        lambda_dag,
        ExpressionActionsSettings::fromContext(data.getContext()));
        ExpressionActionsSettings::fromContext(data.getContext(), CompileExpressions::yes));

    DataTypePtr result_type = lambda_actions->getSampleBlock().getByName(result_name).type;

@ -8,11 +8,13 @@
#include <Common/checkStackSize.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <Interpreters/RequiredSourceColumnsVisitor.h>

#include <common/logger_useful.h>
#include <Processors/Pipe.h>
#include <Processors/Sources/RemoteSource.h>
#include <Processors/Sources/DelayedSource.h>
#include <Processors/Transforms/ExpressionTransform.h>
#include <Processors/QueryPlan/QueryPlan.h>
#include <Processors/QueryPlan/ExpressionStep.h>
#include <Processors/QueryPlan/BuildQueryPipelineSettings.h>
@ -73,6 +75,95 @@ SelectStreamFactory::SelectStreamFactory(
namespace
{

/// Special support for the case when `_shard_num` column is used in GROUP BY key expression.
/// This column is a constant for shard.
/// Constant expression with this column may be removed from intermediate header.
/// However, this column is not constant for the initiator, which expects the intermediate header to contain it.
///
/// To fix it, the following trick is applied.
/// We check all GROUP BY keys which depend only on `_shard_num`.
/// We calculate such expressions for the current shard if they are used in the header.
/// Those columns will be added to the modified header as already known constants.
///
/// For a local shard, missing constants will be added by converting actions.
/// For a remote shard, RemoteQueryExecutor will automatically add the missing constant.
Block evaluateConstantGroupByKeysWithShardNumber(
    const ContextPtr & context, const ASTPtr & query_ast, const Block & header, UInt32 shard_num)
{
    Block res;

    ColumnWithTypeAndName shard_num_col;
    shard_num_col.type = std::make_shared<DataTypeUInt32>();
    shard_num_col.column = shard_num_col.type->createColumnConst(0, shard_num);
    shard_num_col.name = "_shard_num";

    if (auto group_by = query_ast->as<ASTSelectQuery &>().groupBy())
    {
        for (const auto & elem : group_by->children)
        {
            String key_name = elem->getColumnName();
            if (header.has(key_name))
            {
                auto ast = elem->clone();

                RequiredSourceColumnsVisitor::Data columns_context;
                RequiredSourceColumnsVisitor(columns_context).visit(ast);

                auto required_columns = columns_context.requiredColumns();
                if (required_columns.size() != 1 || required_columns.count("_shard_num") == 0)
                    continue;

                Block block({shard_num_col});
                auto syntax_result = TreeRewriter(context).analyze(ast, {NameAndTypePair{shard_num_col.name, shard_num_col.type}});
                ExpressionAnalyzer(ast, syntax_result, context).getActions(true, false)->execute(block);

                res.insert(block.getByName(key_name));
            }
        }
    }

    /// We always add the _shard_num constant just in case.
    /// For the initial query it is considered a column from the table, and may be required by the intermediate block.
    if (!res.has(shard_num_col.name))
        res.insert(std::move(shard_num_col));

    return res;
}

ActionsDAGPtr getConvertingDAG(const Block & block, const Block & header)
{
    /// Convert header structure to expected.
    /// Also we ignore constants from the result and replace them with constants from the header.
    /// It is needed for functions like `now64()` or `randConstant()` because their values may be different.
    return ActionsDAG::makeConvertingActions(
        block.getColumnsWithTypeAndName(),
        header.getColumnsWithTypeAndName(),
        ActionsDAG::MatchColumnsMode::Name,
        true);
}

void addConvertingActions(QueryPlan & plan, const Block & header)
{
    if (blocksHaveEqualStructure(plan.getCurrentDataStream().header, header))
        return;

    auto convert_actions_dag = getConvertingDAG(plan.getCurrentDataStream().header, header);
    auto converting = std::make_unique<ExpressionStep>(plan.getCurrentDataStream(), convert_actions_dag);
    plan.addStep(std::move(converting));
}

void addConvertingActions(Pipe & pipe, const Block & header)
{
    if (blocksHaveEqualStructure(pipe.getHeader(), header))
        return;

    auto convert_actions = std::make_shared<ExpressionActions>(getConvertingDAG(pipe.getHeader(), header));
    pipe.addSimpleTransform([&](const Block & cur_header, Pipe::StreamType) -> ProcessorPtr
    {
        return std::make_shared<ExpressionTransform>(cur_header, convert_actions);
    });
}

std::unique_ptr<QueryPlan> createLocalPlan(
    const ASTPtr & query_ast,
    const Block & header,

@ -86,18 +177,7 @@ std::unique_ptr<QueryPlan> createLocalPlan(
    InterpreterSelectQuery interpreter(query_ast, context, SelectQueryOptions(processed_stage));
    interpreter.buildQueryPlan(*query_plan);

    /// Convert header structure to expected.
    /// Also we ignore constants from the result and replace them with constants from the header.
    /// It is needed for functions like `now64()` or `randConstant()` because their values may be different.
    auto convert_actions_dag = ActionsDAG::makeConvertingActions(
        query_plan->getCurrentDataStream().header.getColumnsWithTypeAndName(),
        header.getColumnsWithTypeAndName(),
        ActionsDAG::MatchColumnsMode::Name,
        true);

    auto converting = std::make_unique<ExpressionStep>(query_plan->getCurrentDataStream(), convert_actions_dag);
    converting->setStepDescription("Convert block structure for query from local replica");
    query_plan->addStep(std::move(converting));
    addConvertingActions(*query_plan, header);

    return query_plan;
}

@ -134,12 +214,25 @@ void SelectStreamFactory::createForShard(
    }

    auto modified_query_ast = query_ast->clone();
    auto modified_header = header;
    if (has_virtual_shard_num_column)
    {
        VirtualColumnUtils::rewriteEntityInAst(modified_query_ast, "_shard_num", shard_info.shard_num, "toUInt32");
        auto shard_num_constants = evaluateConstantGroupByKeysWithShardNumber(context, query_ast, modified_header, shard_info.shard_num);

        for (auto & col : shard_num_constants)
        {
            if (modified_header.has(col.name))
                modified_header.getByName(col.name).column = std::move(col.column);
            else
                modified_header.insert(std::move(col));
        }
    }

    auto emplace_local_stream = [&]()
    {
        plans.emplace_back(createLocalPlan(modified_query_ast, header, context, processed_stage));
        plans.emplace_back(createLocalPlan(modified_query_ast, modified_header, context, processed_stage));
        addConvertingActions(*plans.back(), header);
    };

    String modified_query = formattedAST(modified_query_ast);

@ -147,7 +240,7 @@ void SelectStreamFactory::createForShard(
    auto emplace_remote_stream = [&]()
    {
        auto remote_query_executor = std::make_shared<RemoteQueryExecutor>(
            shard_info.pool, modified_query, header, context, throttler, scalars, external_tables, processed_stage);
            shard_info.pool, modified_query, modified_header, context, throttler, scalars, external_tables, processed_stage);
        remote_query_executor->setLogger(log);

        remote_query_executor->setPoolMode(PoolMode::GET_MANY);

@ -156,6 +249,7 @@ void SelectStreamFactory::createForShard(
        remote_pipes.emplace_back(createRemoteSourcePipe(remote_query_executor, add_agg_info, add_totals, add_extremes, async_read));
        remote_pipes.back().addInterpreterContext(context);
        addConvertingActions(remote_pipes.back(), header);
    };

    const auto & settings = context->getSettingsRef();

@ -247,7 +341,7 @@ void SelectStreamFactory::createForShard(
        /// Do it lazily to avoid connecting in the main thread.

        auto lazily_create_stream = [
            pool = shard_info.pool, shard_num = shard_info.shard_num, modified_query, header = header, modified_query_ast,
            pool = shard_info.pool, shard_num = shard_info.shard_num, modified_query, header = modified_header, modified_query_ast,
            context, throttler,
            main_table = main_table, table_func_ptr = table_func_ptr, scalars = scalars, external_tables = external_tables,
            stage = processed_stage, local_delay, add_agg_info, add_totals, add_extremes, async_read]()

@ -302,8 +396,9 @@ void SelectStreamFactory::createForShard(
            }
        };

        delayed_pipes.emplace_back(createDelayedPipe(header, lazily_create_stream, add_totals, add_extremes));
        delayed_pipes.emplace_back(createDelayedPipe(modified_header, lazily_create_stream, add_totals, add_extremes));
        delayed_pipes.back().addInterpreterContext(context);
        addConvertingActions(delayed_pipes.back(), header);
    }
    else
        emplace_remote_stream();
@ -314,8 +314,8 @@ struct ContextSharedPart
    ConfigurationPtr zookeeper_config; /// Stores zookeeper configs

#if USE_NURAFT
    mutable std::mutex nu_keeper_storage_dispatcher_mutex;
    mutable std::shared_ptr<KeeperStorageDispatcher> nu_keeper_storage_dispatcher;
    mutable std::mutex keeper_storage_dispatcher_mutex;
    mutable std::shared_ptr<KeeperStorageDispatcher> keeper_storage_dispatcher;
#endif
    mutable std::mutex auxiliary_zookeepers_mutex;
    mutable std::map<String, zkutil::ZooKeeperPtr> auxiliary_zookeepers; /// Map for auxiliary ZooKeeper clients.

@ -1678,16 +1678,16 @@ zkutil::ZooKeeperPtr Context::getZooKeeper() const
void Context::initializeKeeperStorageDispatcher() const
{
#if USE_NURAFT
    std::lock_guard lock(shared->nu_keeper_storage_dispatcher_mutex);
    std::lock_guard lock(shared->keeper_storage_dispatcher_mutex);

    if (shared->nu_keeper_storage_dispatcher)
    if (shared->keeper_storage_dispatcher)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to initialize Keeper multiple times");

    const auto & config = getConfigRef();
    if (config.has("keeper_server"))
    {
        shared->nu_keeper_storage_dispatcher = std::make_shared<KeeperStorageDispatcher>();
        shared->nu_keeper_storage_dispatcher->initialize(config);
        shared->keeper_storage_dispatcher = std::make_shared<KeeperStorageDispatcher>();
        shared->keeper_storage_dispatcher->initialize(config, getApplicationType() == ApplicationType::KEEPER);
    }
#endif
}

@ -1695,22 +1695,22 @@ void Context::initializeKeeperStorageDispatcher() const
#if USE_NURAFT
std::shared_ptr<KeeperStorageDispatcher> & Context::getKeeperStorageDispatcher() const
{
    std::lock_guard lock(shared->nu_keeper_storage_dispatcher_mutex);
    if (!shared->nu_keeper_storage_dispatcher)
    std::lock_guard lock(shared->keeper_storage_dispatcher_mutex);
    if (!shared->keeper_storage_dispatcher)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Keeper must be initialized before requests");

    return shared->nu_keeper_storage_dispatcher;
    return shared->keeper_storage_dispatcher;
}
#endif

void Context::shutdownKeeperStorageDispatcher() const
{
#if USE_NURAFT
    std::lock_guard lock(shared->nu_keeper_storage_dispatcher_mutex);
    if (shared->nu_keeper_storage_dispatcher)
    std::lock_guard lock(shared->keeper_storage_dispatcher_mutex);
    if (shared->keeper_storage_dispatcher)
    {
        shared->nu_keeper_storage_dispatcher->shutdown();
        shared->nu_keeper_storage_dispatcher.reset();
        shared->keeper_storage_dispatcher->shutdown();
        shared->keeper_storage_dispatcher.reset();
    }
#endif
}

@ -734,7 +734,8 @@ public:
    {
        SERVER, /// The program is run as clickhouse-server daemon (default behavior)
        CLIENT, /// clickhouse-client
        LOCAL /// clickhouse-local
        LOCAL, /// clickhouse-local
        KEEPER, /// clickhouse-keeper (also daemon)
    };

    ApplicationType getApplicationType() const;
@ -51,7 +51,7 @@ ExpressionActions::ExpressionActions(ActionsDAGPtr actions_dag_, const Expressio
    actions_dag = actions_dag_->clone();

#if USE_EMBEDDED_COMPILER
    if (settings.compile_expressions)
    if (settings.can_compile_expressions && settings.compile_expressions == CompileExpressions::yes)
        actions_dag->compileExpressions(settings.min_count_to_compile_expression);
#endif

@ -30,7 +30,6 @@ using ArrayJoinActionPtr = std::shared_ptr<ArrayJoinAction>;
class ExpressionActions;
using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>;

/// Sequence of actions on the block.
/// Is used to calculate expressions.
///

@ -6,20 +6,21 @@
namespace DB
{

ExpressionActionsSettings ExpressionActionsSettings::fromSettings(const Settings & from)
ExpressionActionsSettings ExpressionActionsSettings::fromSettings(const Settings & from, CompileExpressions compile_expressions)
{
    ExpressionActionsSettings settings;
    settings.compile_expressions = from.compile_expressions;
    settings.can_compile_expressions = from.compile_expressions;
    settings.min_count_to_compile_expression = from.min_count_to_compile_expression;
    settings.max_temporary_columns = from.max_temporary_columns;
    settings.max_temporary_non_const_columns = from.max_temporary_non_const_columns;
    settings.compile_expressions = compile_expressions;

    return settings;
}

ExpressionActionsSettings ExpressionActionsSettings::fromContext(ContextPtr from)
ExpressionActionsSettings ExpressionActionsSettings::fromContext(ContextPtr from, CompileExpressions compile_expressions)
{
    return fromSettings(from->getSettingsRef());
    return fromSettings(from->getSettingsRef(), compile_expressions);
}

}

@ -9,16 +9,24 @@ namespace DB

struct Settings;

enum class CompileExpressions: uint8_t
{
    no = 0,
    yes = 1,
};

struct ExpressionActionsSettings
{
    bool compile_expressions = false;
    bool can_compile_expressions = false;
    size_t min_count_to_compile_expression = 0;

    size_t max_temporary_columns = 0;
    size_t max_temporary_non_const_columns = 0;

    static ExpressionActionsSettings fromSettings(const Settings & from);
    static ExpressionActionsSettings fromContext(ContextPtr from);
    CompileExpressions compile_expressions = CompileExpressions::no;

    static ExpressionActionsSettings fromSettings(const Settings & from, CompileExpressions compile_expressions = CompileExpressions::no);
    static ExpressionActionsSettings fromContext(ContextPtr from, CompileExpressions compile_expressions = CompileExpressions::no);
};

}
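Taken together, the ExpressionActionsSettings hunks make JIT compilation doubly opt-in: can_compile_expressions carries the user's compile_expressions setting, while the new CompileExpressions argument is a per-call-site request. A condensed sketch of the resulting check follows (names are taken from the diff; the shouldCompile helper itself is illustrative, not part of the change):

#include <cstdint>

enum class CompileExpressions : uint8_t { no = 0, yes = 1 };

struct ExpressionActionsSettings
{
    bool can_compile_expressions = false;                              /// from the user's compile_expressions setting
    CompileExpressions compile_expressions = CompileExpressions::no;   /// per-call-site opt-in
};

bool shouldCompile(const ExpressionActionsSettings & settings)
{
    /// Both conditions must hold, so "cold" paths such as EXPLAIN output
    /// (which construct ExpressionActions without the extra argument)
    /// never trigger the JIT.
    return settings.can_compile_expressions
        && settings.compile_expressions == CompileExpressions::yes;
}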
@ -909,8 +909,8 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere(
    auto tmp_actions_dag = std::make_shared<ActionsDAG>(sourceColumns());
    getRootActions(select_query->prewhere(), only_types, tmp_actions_dag);
    tmp_actions_dag->removeUnusedActions(NameSet{prewhere_column_name});
    auto tmp_actions = std::make_shared<ExpressionActions>(tmp_actions_dag, ExpressionActionsSettings::fromContext(getContext()));
    auto required_columns = tmp_actions->getRequiredColumns();

    auto required_columns = tmp_actions_dag->getRequiredColumnsNames();
    NameSet required_source_columns(required_columns.begin(), required_columns.end());
    required_source_columns.insert(first_action_names.begin(), first_action_names.end());

@ -1028,7 +1028,7 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain
        auto actions_dag = std::make_shared<ActionsDAG>(columns_after_join);
        getRootActions(child, only_types, actions_dag);
        group_by_elements_actions.emplace_back(
            std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings::fromContext(getContext())));
            std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes)));
    }
}

@ -1187,7 +1187,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendOrderBy(ExpressionActionsChai
        auto actions_dag = std::make_shared<ActionsDAG>(columns_after_join);
        getRootActions(child, only_types, actions_dag);
        order_by_elements_actions.emplace_back(
            std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings::fromContext(getContext())));
            std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes)));
    }
}

@ -1345,13 +1345,12 @@ ActionsDAGPtr ExpressionAnalyzer::getActionsDAG(bool add_aliases, bool project_r
    return actions_dag;
}

ExpressionActionsPtr ExpressionAnalyzer::getActions(bool add_aliases, bool project_result)
ExpressionActionsPtr ExpressionAnalyzer::getActions(bool add_aliases, bool project_result, CompileExpressions compile_expressions)
{
    return std::make_shared<ExpressionActions>(
        getActionsDAG(add_aliases, project_result), ExpressionActionsSettings::fromContext(getContext()));
        getActionsDAG(add_aliases, project_result), ExpressionActionsSettings::fromContext(getContext(), compile_expressions));
}

ExpressionActionsPtr ExpressionAnalyzer::getConstActions(const ColumnsWithTypeAndName & constant_inputs)
{
    auto actions = std::make_shared<ActionsDAG>(constant_inputs);

@ -112,7 +112,7 @@ public:
    /// If also project_result, then only aliases remain in the output block.
    /// Otherwise, only temporary columns will be deleted from the block.
    ActionsDAGPtr getActionsDAG(bool add_aliases, bool project_result = true);
    ExpressionActionsPtr getActions(bool add_aliases, bool project_result = true);
    ExpressionActionsPtr getActions(bool add_aliases, bool project_result = true, CompileExpressions compile_expressions = CompileExpressions::no);

    /// Actions that can be performed on an empty block: adding constants and applying functions that depend only on constants.
    /// Does not execute subqueries.
@ -315,28 +315,6 @@ static bool isCompilableConstant(const ActionsDAG::Node & node)
    return node.column && isColumnConst(*node.column) && canBeNativeType(*node.result_type) && node.allow_constant_folding;
}

static bool checkIfFunctionIsComparisonEdgeCase(const ActionsDAG::Node & node, const IFunctionBase & impl)
{
    static std::unordered_set<std::string_view> comparison_functions
    {
        NameEquals::name,
        NameNotEquals::name,
        NameLess::name,
        NameGreater::name,
        NameLessOrEquals::name,
        NameGreaterOrEquals::name
    };

    auto it = comparison_functions.find(impl.getName());
    if (it == comparison_functions.end())
        return false;

    const auto * lhs_node = node.children[0];
    const auto * rhs_node = node.children[1];

    return lhs_node == rhs_node && !isTuple(lhs_node->result_type);
}

static bool isCompilableFunction(const ActionsDAG::Node & node)
{
    if (node.type != ActionsDAG::ActionType::FUNCTION)

@ -353,9 +331,6 @@ static bool isCompilableFunction(const ActionsDAG::Node & node)
        return false;
    }

    if (checkIfFunctionIsComparisonEdgeCase(node, *node.function_base))
        return false;

    return function.isCompilable();
}
@ -325,7 +325,7 @@ BlockIO InterpreterInsertQuery::execute()
        res.pipeline.getHeader().getColumnsWithTypeAndName(),
        header.getColumnsWithTypeAndName(),
        ActionsDAG::MatchColumnsMode::Position);
    auto actions = std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings::fromContext(getContext()));
    auto actions = std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes));

    res.pipeline.addSimpleTransform([&](const Block & in_header) -> ProcessorPtr
    {

@ -637,9 +637,7 @@ Block InterpreterSelectQuery::getSampleBlockImpl()

    if (analysis_result.prewhere_info)
    {
        ExpressionActions(
            analysis_result.prewhere_info->prewhere_actions,
            ExpressionActionsSettings::fromContext(context)).execute(header);
        header = analysis_result.prewhere_info->prewhere_actions->updateHeader(header);
        if (analysis_result.prewhere_info->remove_prewhere_column)
            header.erase(analysis_result.prewhere_info->prewhere_column_name);
    }

@ -1901,11 +1899,12 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc

    // TODO figure out how to make set for projections
    query_info.sets = query_analyzer->getPreparedSets();
    auto actions_settings = ExpressionActionsSettings::fromContext(context);
    auto & prewhere_info = analysis_result.prewhere_info;

    if (prewhere_info)
    {
        auto actions_settings = ExpressionActionsSettings::fromContext(context, CompileExpressions::yes);

        query_info.prewhere_info = std::make_shared<PrewhereInfo>();
        query_info.prewhere_info->prewhere_actions = std::make_shared<ExpressionActions>(prewhere_info->prewhere_actions, actions_settings);

@ -124,8 +124,8 @@ static NamesAndTypesList getNames(const ASTFunction & expr, ContextPtr context,

    ASTPtr temp_ast = expr.clone();
    auto syntax = TreeRewriter(context).analyze(temp_ast, columns);
    auto expression = ExpressionAnalyzer(temp_ast, syntax, context).getActions(false);
    return expression->getRequiredColumnsWithTypes();
    auto required_columns = ExpressionAnalyzer(temp_ast, syntax, context).getActionsDAG(false)->getRequiredColumns();
    return required_columns;
}

static NamesAndTypesList modifyPrimaryKeysToNonNullable(const NamesAndTypesList & primary_keys, NamesAndTypesList & columns)
@ -201,7 +201,13 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
            return src;
        }

        /// TODO Conversion from integers to DateTime64
        if (which_type.isDateTime64()
            && (which_from_type.isNativeInt() || which_from_type.isNativeUInt() || which_from_type.isDateOrDateTime()))
        {
            const auto scale = static_cast<const DataTypeDateTime64 &>(type).getScale();
            const auto decimal_value = DecimalUtils::decimalFromComponents<DateTime64>(src.reinterpret<Int64>(), 0, scale);
            return Field(DecimalField<DateTime64>(decimal_value, scale));
        }
    }
    else if (which_type.isUUID() && src.getType() == Field::Types::UUID)
    {
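The DateTime64 branch above builds the decimal value from a whole-seconds component and a zero fractional component at the target scale. A standalone sketch of that arithmetic follows (plain integers; DecimalUtils' overflow checks and template machinery are elided, so this is an illustration rather than the library routine):

#include <cassert>
#include <cstdint>

int64_t decimalFromComponents(int64_t whole, int64_t fractional, uint32_t scale)
{
    int64_t multiplier = 1;
    for (uint32_t i = 0; i < scale; ++i)
        multiplier *= 10;   /// 10^scale; the real code guards against overflow
    return whole * multiplier + fractional;
}

int main()
{
    /// 1618243200 seconds with scale 3 becomes 1618243200000 internal ticks,
    /// i.e. 1618243200.000 when rendered as DateTime64(3).
    assert(decimalFromComponents(1618243200, 0, 3) == 1618243200000LL);
    return 0;
}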
@ -60,6 +60,7 @@ void ArrayJoinStep::transformPipeline(QueryPipeline & pipeline, const BuildQuery
        pipeline.getHeader().getColumnsWithTypeAndName(),
        res_header.getColumnsWithTypeAndName(),
        ActionsDAG::MatchColumnsMode::Name);

    auto actions = std::make_shared<ExpressionActions>(actions_dag, settings.getActionsSettings());

    pipeline.addSimpleTransform([&](const Block & header)

@ -9,7 +9,7 @@ namespace DB
BuildQueryPipelineSettings BuildQueryPipelineSettings::fromSettings(const Settings & from)
{
    BuildQueryPipelineSettings settings;
    settings.actions_settings = ExpressionActionsSettings::fromSettings(from);
    settings.actions_settings = ExpressionActionsSettings::fromSettings(from, CompileExpressions::yes);
    return settings;
}

@ -55,6 +55,7 @@ void ExpressionStep::updateInputStream(DataStream input_stream, bool keep_header
void ExpressionStep::transformPipeline(QueryPipeline & pipeline, const BuildQueryPipelineSettings & settings)
{
    auto expression = std::make_shared<ExpressionActions>(actions_dag, settings.getActionsSettings());

    pipeline.addSimpleTransform([&](const Block & header)
    {
        return std::make_shared<ExpressionTransform>(header, expression);

@ -80,7 +81,7 @@ void ExpressionStep::describeActions(FormatSettings & settings) const
    String prefix(settings.offset, ' ');
    bool first = true;

    auto expression = std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings{});
    auto expression = std::make_shared<ExpressionActions>(actions_dag);
    for (const auto & action : expression->getActions())
    {
        settings.out << prefix << (first ? "Actions: "

@ -97,7 +98,7 @@ void ExpressionStep::describeActions(FormatSettings & settings) const

void ExpressionStep::describeActions(JSONBuilder::JSONMap & map) const
{
    auto expression = std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings{});
    auto expression = std::make_shared<ExpressionActions>(actions_dag);
    map.add("Expression", expression->toTree());
}

@ -68,6 +68,7 @@ void FilterStep::updateInputStream(DataStream input_stream, bool keep_header)
void FilterStep::transformPipeline(QueryPipeline & pipeline, const BuildQueryPipelineSettings & settings)
{
    auto expression = std::make_shared<ExpressionActions>(actions_dag, settings.getActionsSettings());

    pipeline.addSimpleTransform([&](const Block & header, QueryPipeline::StreamType stream_type)
    {
        bool on_totals = stream_type == QueryPipeline::StreamType::Totals;

@ -99,7 +100,7 @@ void FilterStep::describeActions(FormatSettings & settings) const
    settings.out << '\n';

    bool first = true;
    auto expression = std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings{});
    auto expression = std::make_shared<ExpressionActions>(actions_dag);
    for (const auto & action : expression->getActions())
    {
        settings.out << prefix << (first ? "Actions: "

@ -119,7 +120,7 @@ void FilterStep::describeActions(JSONBuilder::JSONMap & map) const
    map.add("Filter Column", filter_column_name);
    map.add("Removes Filter", remove_filter_column);

    auto expression = std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings{});
    auto expression = std::make_shared<ExpressionActions>(actions_dag);
    map.add("Expression", expression->toTree());
}

@ -51,10 +51,16 @@ TotalsHavingStep::TotalsHavingStep(

void TotalsHavingStep::transformPipeline(QueryPipeline & pipeline, const BuildQueryPipelineSettings & settings)
{
    auto expression_actions = actions_dag ? std::make_shared<ExpressionActions>(actions_dag, settings.getActionsSettings()) : nullptr;

    auto totals_having = std::make_shared<TotalsHavingTransform>(
        pipeline.getHeader(), overflow_row,
        (actions_dag ? std::make_shared<ExpressionActions>(actions_dag, settings.getActionsSettings()) : nullptr),
        filter_column_name, totals_mode, auto_include_threshold, final);
        pipeline.getHeader(),
        overflow_row,
        expression_actions,
        filter_column_name,
        totals_mode,
        auto_include_threshold,
        final);

    pipeline.addTotalsHavingTransform(std::move(totals_having));
}

@ -85,7 +91,7 @@ void TotalsHavingStep::describeActions(FormatSettings & settings) const
    if (actions_dag)
    {
        bool first = true;
        auto expression = std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings{});
        auto expression = std::make_shared<ExpressionActions>(actions_dag);
        for (const auto & action : expression->getActions())
        {
            settings.out << prefix << (first ? "Actions: "

@ -102,7 +108,7 @@ void TotalsHavingStep::describeActions(JSONBuilder::JSONMap & map) const
    if (actions_dag)
    {
        map.add("Filter column", filter_column_name);
        auto expression = std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings{});
        auto expression = std::make_shared<ExpressionActions>(actions_dag);
        map.add("Expression", expression->toTree());
    }
}

@ -52,7 +52,7 @@ ConstraintsExpressions ConstraintsDescription::getExpressions(const DB::ContextP
    auto * constraint_ptr = constraint->as<ASTConstraintDeclaration>();
    ASTPtr expr = constraint_ptr->expr->clone();
    auto syntax_result = TreeRewriter(context).analyze(expr, source_columns_);
    res.push_back(ExpressionAnalyzer(constraint_ptr->expr->clone(), syntax_result, context).getActions(false));
    res.push_back(ExpressionAnalyzer(constraint_ptr->expr->clone(), syntax_result, context).getActions(false, true, CompileExpressions::yes));
}
return res;
}
@ -1268,6 +1268,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
        need_remove_expired_values = true;

    /// All columns from the part are changed, and possibly some more that were missing before in the part.
    /// TODO We can materialize compact part without copying data
    if (!isWidePart(source_part)
        || (mutation_kind == MutationsInterpreter::MutationKind::MUTATE_OTHER && interpreter && interpreter->isAffectingAllColumns()))
    {

@ -1386,6 +1387,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
            metadata_snapshot,
            indices_to_recalc,
            projections_to_recalc,
            // If it's an index/projection materialization, we don't write any data columns, thus an empty header is used
            mutation_kind == MutationsInterpreter::MutationKind::MUTATE_INDEX_PROJECTION ? Block{} : updated_header,
            new_data_part,
            in,

@ -355,6 +355,8 @@ size_t MergeTreeDataPartWriterCompact::ColumnsBuffer::size() const

void MergeTreeDataPartWriterCompact::finish(IMergeTreeDataPart::Checksums & checksums, bool sync)
{
    // If we don't have anything to write, skip finalization.
    if (!columns_list.empty())
        finishDataSerialization(checksums, sync);

    if (settings.rewrite_primary_key)

@ -559,7 +559,10 @@ void MergeTreeDataPartWriterWide::finishDataSerialization(IMergeTreeDataPart::Ch

void MergeTreeDataPartWriterWide::finish(IMergeTreeDataPart::Checksums & checksums, bool sync)
{
    // If we don't have anything to write, skip finalization.
    if (!columns_list.empty())
        finishDataSerialization(checksums, sync);

    if (settings.rewrite_primary_key)
        finishPrimaryIndexSerialization(checksums, sync);

@ -262,7 +262,14 @@ void StorageMemory::mutate(const MutationCommands & commands, ContextPtr context
    auto metadata_snapshot = getInMemoryMetadataPtr();
    auto storage = getStorageID();
    auto storage_ptr = DatabaseCatalog::instance().getTable(storage, context);
    auto interpreter = std::make_unique<MutationsInterpreter>(storage_ptr, metadata_snapshot, commands, context, true);

    /// When max_threads > 1, the order of returned blocks is uncertain,
    /// which will lead to inconsistency after updateBlockData.
    auto new_context = Context::createCopy(context);
    new_context->setSetting("max_streams_to_max_threads_ratio", 1);
    new_context->setSetting("max_threads", 1);

    auto interpreter = std::make_unique<MutationsInterpreter>(storage_ptr, metadata_snapshot, commands, new_context, true);
    auto in = interpreter->execute();

    in->readPrefix();
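The StorageMemory hunk forces a single stream because updateBlockData replaces stored blocks positionally, so the mutation pipeline must yield blocks in their original order. An illustrative sketch of that invariant follows (hypothetical names and plain vectors, not StorageMemory's real members):

#include <cassert>
#include <vector>

using Block = std::vector<int>;

void updateBlockData(std::vector<Block> & table, const std::vector<Block> & mutated)
{
    assert(table.size() == mutated.size());
    for (size_t i = 0; i < table.size(); ++i)
        table[i] = mutated[i];   /// positional replacement: block i must be the mutation of block i
}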
@ -415,7 +415,7 @@ Pipe StorageMerge::createSources(
    auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column));
    auto adding_column_actions = std::make_shared<ExpressionActions>(
        std::move(adding_column_dag),
        ExpressionActionsSettings::fromContext(modified_context));
        ExpressionActionsSettings::fromContext(modified_context, CompileExpressions::yes));

    pipe.addSimpleTransform([&](const Block & stream_header)
    {

@ -559,7 +559,7 @@ void StorageMerge::convertingSourceStream(
        pipe.getHeader().getColumnsWithTypeAndName(),
        header.getColumnsWithTypeAndName(),
        ActionsDAG::MatchColumnsMode::Name);
    auto convert_actions = std::make_shared<ExpressionActions>(convert_actions_dag, ExpressionActionsSettings::fromContext(local_context));
    auto convert_actions = std::make_shared<ExpressionActions>(convert_actions_dag, ExpressionActionsSettings::fromContext(local_context, CompileExpressions::yes));

    pipe.addSimpleTransform([&](const Block & stream_header)
    {

@ -4722,6 +4722,10 @@ void StorageReplicatedMergeTree::alter(
    if (new_indices_str != current_metadata->secondary_indices.toString())
        future_metadata_in_zk.skip_indices = new_indices_str;

    String new_projections_str = future_metadata.projections.toString();
    if (new_projections_str != current_metadata->projections.toString())
        future_metadata_in_zk.projections = new_projections_str;

    String new_constraints_str = future_metadata.constraints.toString();
    if (new_constraints_str != current_metadata->constraints.toString())
        future_metadata_in_zk.constraints = new_constraints_str;

@ -103,7 +103,7 @@ public:
        ActionsDAG::MatchColumnsMode::Name);
    auto convert_actions = std::make_shared<ExpressionActions>(
        convert_actions_dag,
        ExpressionActionsSettings::fromSettings(context->getSettingsRef()));
        ExpressionActionsSettings::fromSettings(context->getSettingsRef(), CompileExpressions::yes));

    pipe.addSimpleTransform([&](const Block & header)
    {

@ -199,7 +199,7 @@ void filterBlockWithQuery(const ASTPtr & query, Block & block, ContextPtr contex
    auto syntax_result = TreeRewriter(context).analyze(expression_ast, block.getNamesAndTypesList());
    ExpressionAnalyzer analyzer(expression_ast, syntax_result, context);
    buildSets(expression_ast, analyzer);
    ExpressionActionsPtr actions = analyzer.getActions(false);
    ExpressionActionsPtr actions = analyzer.getActions(false /* add aliases */, true /* project result */, CompileExpressions::yes);

    Block block_with_filter = block;
    actions->execute(block_with_filter);
|
@ -37,7 +37,6 @@ DEFAULT_ENV_NAME = 'env_file'
|
||||
|
||||
SANITIZER_SIGN = "=================="
|
||||
|
||||
|
||||
def _create_env_file(path, variables, fname=DEFAULT_ENV_NAME):
|
||||
full_path = os.path.join(path, fname)
|
||||
with open(full_path, 'w') as f:
|
||||
@ -199,9 +198,11 @@ class ClickHouseCluster:
|
||||
self.schema_registry_port = 8081
|
||||
|
||||
self.zookeeper_use_tmpfs = True
|
||||
self.use_keeper = True
|
||||
|
||||
self.docker_client = None
|
||||
self.is_up = False
|
||||
self.env = os.environ.copy()
|
||||
print("CLUSTER INIT base_config_dir:{}".format(self.base_config_dir))
|
||||
|
||||
def get_client_cmd(self):
|
||||
@ -218,7 +219,7 @@ class ClickHouseCluster:
|
||||
with_redis=False, with_minio=False, with_cassandra=False,
|
||||
hostname=None, env_variables=None, image="yandex/clickhouse-integration-test", tag=None,
|
||||
stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=None,
|
||||
zookeeper_docker_compose_path=None, zookeeper_use_tmpfs=True, minio_certs_dir=None):
|
||||
zookeeper_docker_compose_path=None, zookeeper_use_tmpfs=True, minio_certs_dir=None, use_keeper=True):
|
||||
"""Add an instance to the cluster.
|
||||
|
||||
name - the name of the instance directory and the value of the 'instance' macro in ClickHouse.
|
||||
@ -239,6 +240,8 @@ class ClickHouseCluster:
|
||||
if not env_variables:
|
||||
env_variables = {}
|
||||
|
||||
self.use_keeper = use_keeper
|
||||
|
||||
# Code coverage files will be placed in database directory
|
||||
# (affect only WITH_COVERAGE=1 build)
|
||||
env_variables['LLVM_PROFILE_FILE'] = '/var/lib/clickhouse/server_%h_%p_%m.profraw'
|
||||
@ -291,6 +294,9 @@ class ClickHouseCluster:
|
||||
cmds = []
|
||||
if with_zookeeper and not self.with_zookeeper:
|
||||
if not zookeeper_docker_compose_path:
|
||||
if self.use_keeper:
|
||||
zookeeper_docker_compose_path = p.join(docker_compose_yml_dir, 'docker_compose_keeper.yml')
|
||||
else:
|
||||
zookeeper_docker_compose_path = p.join(docker_compose_yml_dir, 'docker_compose_zookeeper.yml')
|
||||
|
||||
self.with_zookeeper = True
|
||||
@ -680,10 +686,39 @@ class ClickHouseCluster:
|
||||
common_opts = ['up', '-d']
|
||||
|
||||
if self.with_zookeeper and self.base_zookeeper_cmd:
|
||||
print('Setup ZooKeeper')
|
||||
env = os.environ.copy()
|
||||
if self.use_keeper:
|
||||
print('Setup Keeper')
|
||||
binary_path = self.server_bin_path
|
||||
if binary_path.endswith('-server'):
|
||||
binary_path = binary_path[:-len('-server')]
|
||||
|
||||
self.env['keeper_binary'] = binary_path
|
||||
self.env['image'] = "yandex/clickhouse-integration-test:" + self.docker_base_tag
|
||||
self.env['user'] = str(os.getuid())
|
||||
if not self.zookeeper_use_tmpfs:
|
||||
env['ZK_FS'] = 'bind'
|
||||
self.env['keeper_fs'] = 'bind'
|
||||
|
||||
for i in range (1, 4):
|
||||
instance_dir = p.join(self.instances_dir, f"keeper{i}")
|
||||
logs_dir = p.join(instance_dir, "logs")
|
||||
configs_dir = p.join(instance_dir, "configs")
|
||||
coordination_dir = p.join(instance_dir, "coordination")
|
||||
if not os.path.exists(instance_dir):
|
||||
os.mkdir(instance_dir)
|
||||
os.mkdir(configs_dir)
|
||||
os.mkdir(logs_dir)
|
||||
if not self.zookeeper_use_tmpfs:
|
||||
os.mkdir(coordination_dir)
|
||||
shutil.copy(os.path.join(HELPERS_DIR, f'keeper_config{i}.xml'), configs_dir)
|
||||
|
||||
self.env[f'keeper_logs_dir{i}'] = p.abspath(logs_dir)
|
||||
self.env[f'keeper_config_dir{i}'] = p.abspath(configs_dir)
|
||||
if not self.zookeeper_use_tmpfs:
|
||||
self.env[f'keeper_db_dir{i}'] = p.abspath(coordination_dir)
|
||||
else:
|
||||
print('Setup ZooKeeper')
|
||||
if not self.zookeeper_use_tmpfs:
|
||||
self.env['ZK_FS'] = 'bind'
|
||||
for i in range(1, 4):
|
||||
zk_data_path = self.instances_dir + '/zkdata' + str(i)
|
||||
zk_log_data_path = self.instances_dir + '/zklog' + str(i)
|
||||
@ -691,9 +726,10 @@ class ClickHouseCluster:
|
||||
os.mkdir(zk_data_path)
|
||||
if not os.path.exists(zk_log_data_path):
|
||||
os.mkdir(zk_log_data_path)
|
||||
env['ZK_DATA' + str(i)] = zk_data_path
|
||||
env['ZK_DATA_LOG' + str(i)] = zk_log_data_path
|
||||
run_and_check(self.base_zookeeper_cmd + common_opts, env=env)
|
||||
self.env['ZK_DATA' + str(i)] = zk_data_path
|
||||
self.env['ZK_DATA_LOG' + str(i)] = zk_log_data_path
|
||||
|
||||
run_and_check(self.base_zookeeper_cmd + common_opts, env=self.env)
|
||||
for command in self.pre_zookeeper_commands:
|
||||
self.run_kazoo_commands_with_retries(command, repeats=5)
|
||||
self.wait_zookeeper_to_start(120)
|
||||
@ -730,9 +766,8 @@ class ClickHouseCluster:
|
||||
|
||||
if self.with_kerberized_kafka and self.base_kerberized_kafka_cmd:
|
||||
print('Setup kerberized kafka')
|
||||
env = os.environ.copy()
|
||||
env['KERBERIZED_KAFKA_DIR'] = instance.path + '/'
|
||||
run_and_check(self.base_kerberized_kafka_cmd + common_opts + ['--renew-anon-volumes'], env=env)
|
||||
self.env['KERBERIZED_KAFKA_DIR'] = instance.path + '/'
|
||||
run_and_check(self.base_kerberized_kafka_cmd + common_opts + ['--renew-anon-volumes'], env=self.env)
|
||||
self.kerberized_kafka_docker_id = self.get_instance_docker_id('kerberized_kafka1')
|
||||
if self.with_rabbitmq and self.base_rabbitmq_cmd:
|
||||
subprocess_check_call(self.base_rabbitmq_cmd + common_opts + ['--renew-anon-volumes'])
|
||||
@ -746,9 +781,8 @@ class ClickHouseCluster:
|
||||
|
||||
if self.with_kerberized_hdfs and self.base_kerberized_hdfs_cmd:
|
||||
print('Setup kerberized HDFS')
|
||||
env = os.environ.copy()
|
||||
env['KERBERIZED_HDFS_DIR'] = instance.path + '/'
|
||||
run_and_check(self.base_kerberized_hdfs_cmd + common_opts, env=env)
|
||||
self.env['KERBERIZED_HDFS_DIR'] = instance.path + '/'
|
||||
run_and_check(self.base_kerberized_hdfs_cmd + common_opts, env=self.env)
|
||||
self.make_hdfs_api(kerberized=True)
|
||||
self.wait_hdfs_to_start(timeout=300)
|
||||
|
||||
@ -763,23 +797,22 @@ class ClickHouseCluster:
|
||||
time.sleep(10)
|
||||
|
||||
if self.with_minio and self.base_minio_cmd:
|
||||
env = os.environ.copy()
|
||||
prev_ca_certs = os.environ.get('SSL_CERT_FILE')
|
||||
if self.minio_certs_dir:
|
||||
minio_certs_dir = p.join(self.base_dir, self.minio_certs_dir)
|
||||
env['MINIO_CERTS_DIR'] = minio_certs_dir
|
||||
self.env['MINIO_CERTS_DIR'] = minio_certs_dir
|
||||
# Minio client (urllib3) uses SSL_CERT_FILE for certificate validation.
|
||||
os.environ['SSL_CERT_FILE'] = p.join(minio_certs_dir, 'public.crt')
|
||||
else:
|
||||
# Attach empty certificates directory to ensure non-secure mode.
|
||||
minio_certs_dir = p.join(self.instances_dir, 'empty_minio_certs_dir')
|
||||
os.mkdir(minio_certs_dir)
|
||||
env['MINIO_CERTS_DIR'] = minio_certs_dir
|
||||
self.env['MINIO_CERTS_DIR'] = minio_certs_dir
|
||||
|
||||
minio_start_cmd = self.base_minio_cmd + common_opts
|
||||
|
||||
logging.info("Trying to create Minio instance by command %s", ' '.join(map(str, minio_start_cmd)))
|
||||
run_and_check(minio_start_cmd, env=env)
|
||||
run_and_check(minio_start_cmd, env=self.env)
|
||||
|
||||
try:
|
||||
logging.info("Trying to connect to Minio...")
|
||||
@ -798,7 +831,7 @@ class ClickHouseCluster:
|
||||
|
||||
clickhouse_start_cmd = self.base_cmd + ['up', '-d', '--no-recreate']
|
||||
print(("Trying to create ClickHouse instance by command %s", ' '.join(map(str, clickhouse_start_cmd))))
|
||||
subprocess_check_call(clickhouse_start_cmd)
|
||||
run_and_check(clickhouse_start_cmd, env=self.env)
|
||||
print("ClickHouse instance created")
|
||||
|
||||
start_timeout = 20.0 # seconds
|
||||
@ -824,7 +857,7 @@ class ClickHouseCluster:
|
||||
sanitizer_assert_instance = None
|
||||
with open(self.docker_logs_path, "w+") as f:
|
||||
try:
|
||||
subprocess.check_call(self.base_cmd + ['logs'], stdout=f) # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL
|
||||
subprocess.check_call(self.base_cmd + ['logs'], env=self.env, stdout=f) # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL
|
||||
except Exception as e:
|
||||
print("Unable to get logs from docker.")
|
||||
f.seek(0)
|
||||
@ -835,14 +868,14 @@ class ClickHouseCluster:
|
||||
|
||||
if kill:
|
||||
try:
|
||||
subprocess_check_call(self.base_cmd + ['stop', '--timeout', '20'])
|
||||
run_and_check(self.base_cmd + ['stop', '--timeout', '20'], env=self.env)
|
||||
except Exception as e:
|
||||
print("Kill command failed during shutdown. {}".format(repr(e)))
|
||||
print("Trying to kill forcefully")
|
||||
subprocess_check_call(self.base_cmd + ['kill'])
|
||||
|
||||
try:
|
||||
subprocess_check_call(self.base_cmd + ['down', '--volumes', '--remove-orphans'])
|
||||
run_and_check(self.base_cmd + ['down', '--volumes', '--remove-orphans'], env=self.env)
|
||||
except Exception as e:
|
||||
print("Down + remove orphans failed durung shutdown. {}".format(repr(e)))
|
||||
|
||||
|
41 tests/integration/helpers/keeper_config1.xml Normal file
@ -0,0 +1,41 @@
<yandex>
    <listen_try>true</listen_try>
    <listen_host>::</listen_host>
    <listen_host>0.0.0.0</listen_host>

    <logger>
        <level>trace</level>
        <log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
        <errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
    </logger>

    <keeper_server>
        <tcp_port>2181</tcp_port>
        <server_id>1</server_id>

        <coordination_settings>
            <operation_timeout_ms>10000</operation_timeout_ms>
            <session_timeout_ms>30000</session_timeout_ms>
            <raft_logs_level>trace</raft_logs_level>
            <force_sync>false</force_sync>
        </coordination_settings>

        <raft_configuration>
            <server>
                <id>1</id>
                <hostname>zoo1</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>2</id>
                <hostname>zoo2</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>3</id>
                <hostname>zoo3</hostname>
                <port>9444</port>
            </server>
        </raft_configuration>
    </keeper_server>
</yandex>

41 tests/integration/helpers/keeper_config2.xml Normal file
@ -0,0 +1,41 @@
<yandex>
    <listen_try>true</listen_try>
    <listen_host>::</listen_host>
    <listen_host>0.0.0.0</listen_host>

    <logger>
        <level>trace</level>
        <log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
        <errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
    </logger>

    <keeper_server>
        <tcp_port>2181</tcp_port>
        <server_id>2</server_id>

        <coordination_settings>
            <operation_timeout_ms>10000</operation_timeout_ms>
            <session_timeout_ms>30000</session_timeout_ms>
            <raft_logs_level>trace</raft_logs_level>
            <force_sync>false</force_sync>
        </coordination_settings>

        <raft_configuration>
            <server>
                <id>1</id>
                <hostname>zoo1</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>2</id>
                <hostname>zoo2</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>3</id>
                <hostname>zoo3</hostname>
                <port>9444</port>
            </server>
        </raft_configuration>
    </keeper_server>
</yandex>

41 tests/integration/helpers/keeper_config3.xml Normal file
@ -0,0 +1,41 @@
<yandex>
    <listen_try>true</listen_try>
    <listen_host>::</listen_host>
    <listen_host>0.0.0.0</listen_host>

    <logger>
        <level>trace</level>
        <log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
        <errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
    </logger>

    <keeper_server>
        <tcp_port>2181</tcp_port>
        <server_id>3</server_id>

        <coordination_settings>
            <operation_timeout_ms>10000</operation_timeout_ms>
            <session_timeout_ms>30000</session_timeout_ms>
            <raft_logs_level>trace</raft_logs_level>
            <force_sync>false</force_sync>
        </coordination_settings>

        <raft_configuration>
            <server>
                <id>1</id>
                <hostname>zoo1</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>2</id>
                <hostname>zoo2</hostname>
                <port>9444</port>
            </server>
            <server>
                <id>3</id>
                <hostname>zoo3</hostname>
                <port>9444</port>
            </server>
        </raft_configuration>
    </keeper_server>
</yandex>
@ -3,6 +3,7 @@
|
||||
import random
|
||||
import time
|
||||
from multiprocessing.dummy import Pool
|
||||
import datetime
|
||||
|
||||
import pytest
|
||||
from helpers.client import QueryRuntimeException
|
||||
@ -111,7 +112,7 @@ def insert(node, table_name, chunk=1000, col_names=None, iterations=1, ignore_ex
    try:
        query = ["SET max_partitions_per_insert_block = 10000000"]
        if with_many_parts:
            query.append("SET max_insert_block_size = 64")
            query.append("SET max_insert_block_size = 256")
        if with_time_column:
            query.append(
                "INSERT INTO {table_name} ({col0}, {col1}, time) SELECT number AS {col0}, number + 1 AS {col1}, now() + 10 AS time FROM numbers_mt({chunk})"
@ -305,35 +306,45 @@ def test_rename_with_parallel_merges(started_cluster):
    table_name = "test_rename_with_parallel_merges"
    drop_table(nodes, table_name)
    try:
        print("Creating tables", datetime.datetime.now())
        create_table(nodes, table_name)
        for i in range(20):
        for i in range(5):
            insert(node1, table_name, 100, ["num", "num2"], 1, False, False, True, offset=i * 100)

        def merge_parts(node, table_name, iterations=1):
            for i in range(iterations):
                node.query("OPTIMIZE TABLE %s FINAL" % table_name)
        print("Data inserted", datetime.datetime.now())

        def merge_parts(node, table_name, iterations=1):
            for _ in range(iterations):
                try:
                    node.query("OPTIMIZE TABLE %s FINAL" % table_name)
                except Exception as ex:
                    print("Got an exception while optimizing table", ex)

        print("Creating pool")
        p = Pool(15)
        tasks = []
        for i in range(1):
            tasks.append(p.apply_async(rename_column, (node1, table_name, "num2", "foo2", 5, True)))
            tasks.append(p.apply_async(rename_column, (node2, table_name, "foo2", "foo3", 5, True)))
            tasks.append(p.apply_async(rename_column, (node3, table_name, "foo3", "num2", 5, True)))
            tasks.append(p.apply_async(merge_parts, (node1, table_name, 5)))
            tasks.append(p.apply_async(merge_parts, (node2, table_name, 5)))
            tasks.append(p.apply_async(merge_parts, (node3, table_name, 5)))
            tasks.append(p.apply_async(rename_column, (node1, table_name, "num2", "foo2", 2, True)))
            tasks.append(p.apply_async(rename_column, (node2, table_name, "foo2", "foo3", 2, True)))
            tasks.append(p.apply_async(rename_column, (node3, table_name, "foo3", "num2", 2, True)))
            tasks.append(p.apply_async(merge_parts, (node1, table_name, 2)))
            tasks.append(p.apply_async(merge_parts, (node2, table_name, 2)))
            tasks.append(p.apply_async(merge_parts, (node3, table_name, 2)))

        print("Waiting for tasks", datetime.datetime.now())
        for task in tasks:
            task.get(timeout=240)
        print("Finished waiting", datetime.datetime.now())

        print("Renaming columns", datetime.datetime.now())
        # rename column back to the original name
        rename_column(node1, table_name, "foo3", "num2", 1, True)
        rename_column(node1, table_name, "foo2", "num2", 1, True)
        print("Finished renaming", datetime.datetime.now())

        # check that select still works (5 insert batches of 100 rows each, so 500 rows are expected)
        select(node1, table_name, "num2", "1998\n")
        select(node2, table_name, "num2", "1998\n")
        select(node3, table_name, "num2", "1998\n")
        select(node1, table_name, "num2", "500\n")
        select(node2, table_name, "num2", "500\n")
        select(node3, table_name, "num2", "500\n")
    finally:
        drop_table(nodes, table_name)
@ -100,11 +100,12 @@ def test_identity():
    cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_with_password.xml')
    cluster_2 = ClickHouseCluster(__file__)

    # TODO ACL not implemented in Keeper.
    node1 = cluster_1.add_instance('node1', main_configs=["configs/remote_servers.xml",
                                                          "configs/zookeeper_config_with_password.xml"],
                                   with_zookeeper=True, zookeeper_use_tmpfs=False)
                                   with_zookeeper=True, zookeeper_use_tmpfs=False, use_keeper=False)
    node2 = cluster_2.add_instance('node2', main_configs=["configs/remote_servers.xml"], with_zookeeper=True,
                                   zookeeper_use_tmpfs=False)
                                   zookeeper_use_tmpfs=False, use_keeper=False)

    try:
        cluster_1.start()
@ -126,6 +127,7 @@ def test_identity():
        cluster_2.shutdown()


# NOTE this test has to be ported to Keeper
def test_secure_connection():
    # We need absolute path in zookeeper volumes. Generate it dynamically.
    TEMPLATE = '''
@ -1 +0,0 @@
../../../programs/server/config.xml
@ -1,4 +1,12 @@
<yandex>
    <listen_host>::</listen_host>

    <logger>
        <level>trace</level>
        <log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
        <errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
    </logger>

    <keeper_server>
        <tcp_port>9181</tcp_port>
        <server_id>{id}</server_id>
@ -1,3 +0,0 @@
<yandex>
    <listen_host>::</listen_host>
</yandex>
@ -1 +0,0 @@
../../../programs/server/users.xml
@ -89,10 +89,7 @@

(defn install-configs
  [test node]
  (c/exec :echo (slurp (io/resource "config.xml")) :> (str configs-dir "/config.xml"))
  (c/exec :echo (slurp (io/resource "users.xml")) :> (str configs-dir "/users.xml"))
  (c/exec :echo (slurp (io/resource "listen.xml")) :> (str sub-configs-dir "/listen.xml"))
  (c/exec :echo (cluster-config test node (slurp (io/resource "keeper_config.xml"))) :> (str sub-configs-dir "/keeper_config.xml")))
  (c/exec :echo (cluster-config test node (slurp (io/resource "keeper_config.xml"))) :> (str configs-dir "/keeper_config.xml")))

(defn collect-traces
  [test node]
@ -144,7 +141,7 @@
(info node "Coordination files exist, going to compress")
(c/cd data-dir
      (c/exec :tar :czf "coordination.tar.gz" "coordination")))))
(let [common-logs [stderr-file (str logs-dir "/clickhouse-server.log") (str data-dir "/coordination.tar.gz")]
(let [common-logs [stderr-file (str logs-dir "/clickhouse-keeper.log") (str data-dir "/coordination.tar.gz")]
      gdb-log (str logs-dir "/gdb.log")]
  (if (cu/exists? (str logs-dir "/gdb.log"))
      (conj common-logs gdb-log)
@ -143,7 +143,7 @@
  [node test]
  (info "Checking server alive on" node)
  (try
    (c/exec binary-path :client :--query "SELECT 1")
    (zk-connect (name node) 9181 30000)
    (catch Exception _ false)))

(defn wait-clickhouse-alive!
@ -169,16 +169,13 @@
                  :logfile stderr-file
                  :chdir data-dir}
        binary-path
        :server
        :--config (str configs-dir "/config.xml")
        :keeper
        :--config (str configs-dir "/keeper_config.xml")
        :--
        :--path (str data-dir "/")
        :--user_files_path (str data-dir "/user_files")
        :--top_level_domains_path (str data-dir "/top_level_domains")
        :--logger.log (str logs-dir "/clickhouse-server.log")
        :--logger.errorlog (str logs-dir "/clickhouse-server.err.log")
        :--logger.log (str logs-dir "/clickhouse-keeper.log")
        :--logger.errorlog (str logs-dir "/clickhouse-keeper.err.log")
        :--keeper_server.snapshot_storage_path coordination-snapshots-dir
        :--keeper_server.logs_storage_path coordination-logs-dir)
        :--keeper_server.log_storage_path coordination-logs-dir)
    (wait-clickhouse-alive! node test)))

(defn md5 [^String s]
@ -1,31 +0,0 @@
<test>
    <tags>
        <tag>comparison</tag>
    </tags>

    <preconditions>
        <table_exists>hits_100m_single</table_exists>
    </preconditions>


    <query short="1"><![CDATA[SELECT count() FROM hits_100m_single WHERE URL < URL]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE URL < PageCharset]]></query>
    <query short="1"><![CDATA[SELECT count() FROM hits_100m_single WHERE SearchPhrase < SearchPhrase SETTINGS max_threads = 2]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE SearchPhrase < URL]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE SearchPhrase < PageCharset SETTINGS max_threads = 2]]></query>
    <query short="1"><![CDATA[SELECT count() FROM hits_100m_single WHERE notEmpty(SearchPhrase) AND SearchPhrase < SearchPhrase SETTINGS max_threads = 2]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE notEmpty(SearchPhrase) AND SearchPhrase < URL]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE notEmpty(SearchPhrase) AND SearchPhrase < PageCharset SETTINGS max_threads = 2]]></query>
    <query short="1"><![CDATA[SELECT count() FROM hits_100m_single WHERE MobilePhoneModel < MobilePhoneModel SETTINGS max_threads = 1]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE MobilePhoneModel < URL]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE MobilePhoneModel < PageCharset SETTINGS max_threads = 2]]></query>
    <query short="1"><![CDATA[SELECT count() FROM hits_100m_single WHERE notEmpty(MobilePhoneModel) AND MobilePhoneModel < MobilePhoneModel SETTINGS max_threads = 1]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE notEmpty(MobilePhoneModel) AND MobilePhoneModel < URL]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE notEmpty(MobilePhoneModel) AND MobilePhoneModel < PageCharset SETTINGS max_threads = 2]]></query>
    <query short="1"><![CDATA[SELECT count() FROM hits_100m_single WHERE PageCharset < PageCharset SETTINGS max_threads = 2]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE PageCharset < URL]]></query>
    <query short="1"><![CDATA[SELECT count() FROM hits_100m_single WHERE Title < Title]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE Title < URL]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE Title < PageCharset]]></query>

</test>
@ -1,8 +0,0 @@
0
0
0
0
0
0
0
0
@ -1,10 +1,49 @@
SELECT count() FROM system.numbers WHERE number != number;
SELECT count() FROM system.numbers WHERE number < number;
SELECT count() FROM system.numbers WHERE number > number;
-- TODO: Tautological optimization breaks JIT expression compilation, because it can return a constant result
-- for non-constant columns, and then sample blocks from the same ActionsDAGs can be mismatched.
-- This optimization cannot be performed at the AST rewrite level, because we do not have information about types
-- there, and equals(tuple(NULL), tuple(NULL)) has the same hash code but should not be optimized.
-- Restore this test after the refactoring of InterpreterSelectQuery.
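-- A minimal illustration of the hazard above, assuming NULL-propagating comparison semantics:
-- both arguments of equals() are the same expression, yet the result is NULL rather than the
-- constant 1, so rewriting 'x = x' into a constant would be wrong here.
-- SELECT equals(tuple(NULL), tuple(NULL));
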
SELECT count() FROM system.numbers WHERE NOT (number = number);
SELECT count() FROM system.numbers WHERE NOT (number <= number);
SELECT count() FROM system.numbers WHERE NOT (number >= number);
-- SELECT count() FROM system.numbers WHERE number != number;
-- SELECT count() FROM system.numbers WHERE number < number;
-- SELECT count() FROM system.numbers WHERE number > number;

SELECT count() FROM system.numbers WHERE SHA256(toString(number)) != SHA256(toString(number));
SELECT count() FROM system.numbers WHERE SHA256(toString(number)) != SHA256(toString(number)) AND rand() > 10;
-- SELECT count() FROM system.numbers WHERE NOT (number = number);
-- SELECT count() FROM system.numbers WHERE NOT (number <= number);
-- SELECT count() FROM system.numbers WHERE NOT (number >= number);

-- SELECT count() FROM system.numbers WHERE SHA256(toString(number)) != SHA256(toString(number));
-- SELECT count() FROM system.numbers WHERE SHA256(toString(number)) != SHA256(toString(number)) AND rand() > 10;

-- column_column_comparison.xml
-- <test>
-- <tags>
-- <tag>comparison</tag>
-- </tags>

-- <preconditions>
-- <table_exists>hits_100m_single</table_exists>
-- </preconditions>


-- <query short="1"><![CDATA[SELECT count() FROM hits_100m_single WHERE URL < URL]]></query>
-- <query><![CDATA[SELECT count() FROM hits_100m_single WHERE URL < PageCharset]]></query>
-- <query short="1"><![CDATA[SELECT count() FROM hits_100m_single WHERE SearchPhrase < SearchPhrase SETTINGS max_threads = 2]]></query>
-- <query><![CDATA[SELECT count() FROM hits_100m_single WHERE SearchPhrase < URL]]></query>
-- <query><![CDATA[SELECT count() FROM hits_100m_single WHERE SearchPhrase < PageCharset SETTINGS max_threads = 2]]></query>
-- <query short="1"><![CDATA[SELECT count() FROM hits_100m_single WHERE notEmpty(SearchPhrase) AND SearchPhrase < SearchPhrase SETTINGS max_threads = 2]]></query>
-- <query><![CDATA[SELECT count() FROM hits_100m_single WHERE notEmpty(SearchPhrase) AND SearchPhrase < URL]]></query>
-- <query><![CDATA[SELECT count() FROM hits_100m_single WHERE notEmpty(SearchPhrase) AND SearchPhrase < PageCharset SETTINGS max_threads = 2]]></query>
-- <query short="1"><![CDATA[SELECT count() FROM hits_100m_single WHERE MobilePhoneModel < MobilePhoneModel SETTINGS max_threads = 1]]></query>
-- <query><![CDATA[SELECT count() FROM hits_100m_single WHERE MobilePhoneModel < URL]]></query>
-- <query><![CDATA[SELECT count() FROM hits_100m_single WHERE MobilePhoneModel < PageCharset SETTINGS max_threads = 2]]></query>
-- <query short="1"><![CDATA[SELECT count() FROM hits_100m_single WHERE notEmpty(MobilePhoneModel) AND MobilePhoneModel < MobilePhoneModel SETTINGS max_threads = 1]]></query>
-- <query><![CDATA[SELECT count() FROM hits_100m_single WHERE notEmpty(MobilePhoneModel) AND MobilePhoneModel < URL]]></query>
-- <query><![CDATA[SELECT count() FROM hits_100m_single WHERE notEmpty(MobilePhoneModel) AND MobilePhoneModel < PageCharset SETTINGS max_threads = 2]]></query>
-- <query short="1"><![CDATA[SELECT count() FROM hits_100m_single WHERE PageCharset < PageCharset SETTINGS max_threads = 2]]></query>
-- <query><![CDATA[SELECT count() FROM hits_100m_single WHERE PageCharset < URL]]></query>
-- <query short="1"><![CDATA[SELECT count() FROM hits_100m_single WHERE Title < Title]]></query>
-- <query><![CDATA[SELECT count() FROM hits_100m_single WHERE Title < URL]]></query>
-- <query><![CDATA[SELECT count() FROM hits_100m_single WHERE Title < PageCharset]]></query>

-- </test>
@ -9,3 +9,9 @@
2 2
3 3
4 4
0
CREATE TABLE default.tp_2\n(\n    `x` Int32,\n    `y` Int32,\n    PROJECTION p\n    (\n        SELECT \n            x,\n            y\n        ORDER BY x\n    ),\n    PROJECTION pp\n    (\n        SELECT \n            x,\n            count()\n        GROUP BY x\n    )\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01710_projection_fetch_default\', \'2\')\nORDER BY y\nSETTINGS min_rows_for_compact_part = 2, min_rows_for_wide_part = 4, min_bytes_for_compact_part = 16, min_bytes_for_wide_part = 32, index_granularity = 8192
2
CREATE TABLE default.tp_2\n(\n    `x` Int32,\n    `y` Int32,\n    PROJECTION p\n    (\n        SELECT \n            x,\n            y\n        ORDER BY x\n    ),\n    PROJECTION pp\n    (\n        SELECT \n            x,\n            count()\n        GROUP BY x\n    )\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01710_projection_fetch_default\', \'2\')\nORDER BY y\nSETTINGS min_rows_for_compact_part = 2, min_rows_for_wide_part = 4, min_bytes_for_compact_part = 16, min_bytes_for_wide_part = 32, index_granularity = 8192
CREATE TABLE default.tp_2\n(\n    `x` Int32,\n    `y` Int32,\n    PROJECTION p\n    (\n        SELECT \n            x,\n            y\n        ORDER BY x\n    ),\n    PROJECTION pp\n    (\n        SELECT \n            x,\n            count()\n        GROUP BY x\n    )\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01710_projection_fetch_default\', \'2\')\nORDER BY y\nSETTINGS min_rows_for_compact_part = 2, min_rows_for_wide_part = 4, min_bytes_for_compact_part = 16, min_bytes_for_wide_part = 32, index_granularity = 8192
CREATE TABLE default.tp_2\n(\n    `x` Int32,\n    `y` Int32,\n    PROJECTION p\n    (\n        SELECT \n            x,\n            y\n        ORDER BY x\n    )\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01710_projection_fetch_default\', \'2\')\nORDER BY y\nSETTINGS min_rows_for_compact_part = 2, min_rows_for_wide_part = 4, min_bytes_for_compact_part = 16, min_bytes_for_wide_part = 32, index_granularity = 8192
@ -15,6 +15,27 @@ insert into tp_1 select number, number from numbers(5);
system sync replica tp_2;
select * from tp_2 order by x;

-- test projection creation, materialization, clear and drop
alter table tp_1 add projection pp (select x, count() group by x);
system sync replica tp_2;
select count() from system.projection_parts where table = 'tp_2' and name = 'pp' and active;
show create table tp_2;

-- the other three operations (materialize, clear, drop) are mutations
set mutations_sync = 2;
alter table tp_1 materialize projection pp;
select count() from system.projection_parts where table = 'tp_2' and name = 'pp' and active;
show create table tp_2;

alter table tp_1 clear projection pp;
system sync replica tp_2;
select * from system.projection_parts where table = 'tp_2' and name = 'pp' and active;
show create table tp_2;

alter table tp_1 drop projection pp;
system sync replica tp_2;
select * from system.projection_parts where table = 'tp_2' and name = 'pp' and active;
show create table tp_2;

drop table if exists tp_1;
drop table if exists tp_2;
@ -4,5 +4,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

$CLICKHOUSE_CLIENT --query="SELECT CAST('Helo', 'Enum(\'Hello\' = 1, \'World\' = 2)')" 2>&1 | grep -q "may be you meant: \['Hello'\]" && echo 'OK' || echo 'FAIL'
$CLICKHOUSE_CLIENT --query="SELECT CAST('Helo' AS Enum('Hello' = 1, 'World' = 2))" 2>&1 | grep -q -F "maybe you meant: ['Hello']" && echo 'OK' || echo 'FAIL'
@ -1,3 +1,11 @@
ComparisonOperator column with same column
1
1
1
1
1
1
ComparisonOperator column with alias on same column
1
1
1
@ -1,6 +1,8 @@
SET compile_expressions = 1;
SET min_count_to_compile_expression = 0;

SELECT 'ComparisonOperator column with same column';

DROP TABLE IF EXISTS test_table;
CREATE TABLE test_table (a UInt64) ENGINE = MergeTree() ORDER BY tuple();
INSERT INTO test_table VALUES (1);
@ -13,3 +15,22 @@ SELECT test_table.a FROM test_table ORDER BY (test_table.a <= test_table.a) + 1;

SELECT test_table.a FROM test_table ORDER BY (test_table.a == test_table.a) + 1;
SELECT test_table.a FROM test_table ORDER BY (test_table.a != test_table.a) + 1;

DROP TABLE test_table;

SELECT 'ComparisonOperator column with alias on same column';

DROP TABLE IF EXISTS test_table;
CREATE TABLE test_table (a UInt64, b ALIAS a, c ALIAS b) ENGINE = MergeTree() ORDER BY tuple();
INSERT INTO test_table VALUES (1);

SELECT test_table.a FROM test_table ORDER BY (test_table.a > test_table.b) + 1 AND (test_table.a > test_table.c) + 1;
SELECT test_table.a FROM test_table ORDER BY (test_table.a >= test_table.b) + 1 AND (test_table.a >= test_table.c) + 1;

SELECT test_table.a FROM test_table ORDER BY (test_table.a < test_table.b) + 1 AND (test_table.a < test_table.c) + 1;
SELECT test_table.a FROM test_table ORDER BY (test_table.a <= test_table.b) + 1 AND (test_table.a <= test_table.c) + 1;

SELECT test_table.a FROM test_table ORDER BY (test_table.a == test_table.b) + 1 AND (test_table.a == test_table.c) + 1;
SELECT test_table.a FROM test_table ORDER BY (test_table.a != test_table.b) + 1 AND (test_table.a != test_table.c) + 1;

DROP TABLE test_table;
@ -0,0 +1,18 @@
1 1
2 1
1 1
2 1
2 1
3 1
2 1
3 1
1 1
2 1
1 1
2 1
1
2
1
2
1 1
2 1
@ -0,0 +1,16 @@
-- GROUP BY _shard_num
SELECT _shard_num, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY _shard_num ORDER BY _shard_num;
SELECT _shard_num s, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY _shard_num ORDER BY _shard_num;

SELECT _shard_num + 1, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY _shard_num + 1 ORDER BY _shard_num + 1;
SELECT _shard_num + 1 s, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY _shard_num + 1 ORDER BY _shard_num + 1;

SELECT _shard_num + dummy, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY _shard_num + dummy ORDER BY _shard_num + dummy;
SELECT _shard_num + dummy s, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY _shard_num + dummy ORDER BY _shard_num + dummy;

SELECT _shard_num FROM remote('127.0.0.{1,2}', system.one) ORDER BY _shard_num;
SELECT _shard_num s FROM remote('127.0.0.{1,2}', system.one) ORDER BY _shard_num;

SELECT _shard_num s, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY s order by s;

select materialize(_shard_num), * from remote('127.{1,2}', system.one) limit 1 by dummy format Null;
@ -0,0 +1,12 @@
dt64 <= const dt
dt64 <= dt
dt <= const dt64
dt <= dt64
dt64 = const dt
dt64 = dt
dt = const dt64
dt = dt64
dt64 >= const dt
dt64 >= dt
dt >= const dt64
dt >= dt64
@ -0,0 +1,40 @@
CREATE TABLE dt64test
(
    `dt64_column` DateTime64(3),
    `dt_column` DateTime DEFAULT toDateTime(dt64_column)
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(dt64_column)
ORDER BY dt64_column;

INSERT INTO dt64test (`dt64_column`) VALUES ('2020-01-13 13:37:00');

SELECT 'dt64 < const dt' FROM dt64test WHERE dt64_column < toDateTime('2020-01-13 13:37:00');
SELECT 'dt64 < dt' FROM dt64test WHERE dt64_column < materialize(toDateTime('2020-01-13 13:37:00'));
SELECT 'dt < const dt64' FROM dt64test WHERE dt_column < toDateTime64('2020-01-13 13:37:00', 3);
SELECT 'dt < dt64' FROM dt64test WHERE dt_column < materialize(toDateTime64('2020-01-13 13:37:00', 3));

SELECT 'dt64 <= const dt' FROM dt64test WHERE dt64_column <= toDateTime('2020-01-13 13:37:00');
SELECT 'dt64 <= dt' FROM dt64test WHERE dt64_column <= materialize(toDateTime('2020-01-13 13:37:00'));
SELECT 'dt <= const dt64' FROM dt64test WHERE dt_column <= toDateTime64('2020-01-13 13:37:00', 3);
SELECT 'dt <= dt64' FROM dt64test WHERE dt_column <= materialize(toDateTime64('2020-01-13 13:37:00', 3));

SELECT 'dt64 = const dt' FROM dt64test WHERE dt64_column = toDateTime('2020-01-13 13:37:00');
SELECT 'dt64 = dt' FROM dt64test WHERE dt64_column = materialize(toDateTime('2020-01-13 13:37:00'));
SELECT 'dt = const dt64' FROM dt64test WHERE dt_column = toDateTime64('2020-01-13 13:37:00', 3);
SELECT 'dt = dt64' FROM dt64test WHERE dt_column = materialize(toDateTime64('2020-01-13 13:37:00', 3));

SELECT 'dt64 >= const dt' FROM dt64test WHERE dt64_column >= toDateTime('2020-01-13 13:37:00');
SELECT 'dt64 >= dt' FROM dt64test WHERE dt64_column >= materialize(toDateTime('2020-01-13 13:37:00'));
SELECT 'dt >= const dt64' FROM dt64test WHERE dt_column >= toDateTime64('2020-01-13 13:37:00', 3);
SELECT 'dt >= dt64' FROM dt64test WHERE dt_column >= materialize(toDateTime64('2020-01-13 13:37:00', 3));

SELECT 'dt64 > const dt' FROM dt64test WHERE dt64_column > toDateTime('2020-01-13 13:37:00');
SELECT 'dt64 > dt' FROM dt64test WHERE dt64_column > materialize(toDateTime('2020-01-13 13:37:00'));
SELECT 'dt > const dt64' FROM dt64test WHERE dt_column > toDateTime64('2020-01-13 13:37:00', 3);
SELECT 'dt > dt64' FROM dt64test WHERE dt_column > materialize(toDateTime64('2020-01-13 13:37:00', 3));

SELECT 'dt64 != const dt' FROM dt64test WHERE dt64_column != toDateTime('2020-01-13 13:37:00');
SELECT 'dt64 != dt' FROM dt64test WHERE dt64_column != materialize(toDateTime('2020-01-13 13:37:00'));
SELECT 'dt != const dt64' FROM dt64test WHERE dt_column != toDateTime64('2020-01-13 13:37:00', 3);
SELECT 'dt != dt64' FROM dt64test WHERE dt_column != materialize(toDateTime64('2020-01-13 13:37:00', 3));
@ -0,0 +1,32 @@
DROP TABLE IF EXISTS mem_test;

CREATE TABLE mem_test
(
    `a` Int64,
    `b` Int64
)
ENGINE = Memory;

SET max_block_size = 3;

INSERT INTO mem_test SELECT
    number,
    number
FROM numbers(100);

ALTER TABLE mem_test
    UPDATE a = 0 WHERE b = 99;
ALTER TABLE mem_test
    UPDATE a = 0 WHERE b = 99;
ALTER TABLE mem_test
    UPDATE a = 0 WHERE b = 99;
ALTER TABLE mem_test
    UPDATE a = 0 WHERE b = 99;
ALTER TABLE mem_test
    UPDATE a = 0 WHERE b = 99;

SELECT *
FROM mem_test
FORMAT Null;
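
-- A hedged verification sketch (kept commented out so the test's reference output is unchanged):
-- after the repeated UPDATE mutations above, the row with b = 99 is expected to have a = 0.
-- SELECT a, b FROM mem_test WHERE b = 99;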

DROP TABLE mem_test;
@ -0,0 +1 @@
999
@ -0,0 +1,17 @@
DROP TABLE IF EXISTS data_01875_1;
DROP TABLE IF EXISTS data_01875_2;
DROP TABLE IF EXISTS data_01875_3;

SET compile_expressions=true;

-- CREATE TABLE will use the global profile, where min_count_to_compile_expression defaults to 3,
-- so run the statement 3 times to trigger JIT compilation (see the sketch after this test).
CREATE TABLE data_01875_1 Engine=MergeTree ORDER BY number PARTITION BY bitShiftRight(number,8) AS SELECT * FROM numbers(16384);
CREATE TABLE data_01875_2 Engine=MergeTree ORDER BY number PARTITION BY bitShiftRight(number,8) AS SELECT * FROM numbers(16384);
CREATE TABLE data_01875_3 Engine=MergeTree ORDER BY number PARTITION BY bitShiftRight(number,8) AS SELECT * FROM numbers(16384);

SELECT number FROM data_01875_3 WHERE number = 999;

DROP TABLE data_01875_1;
DROP TABLE data_01875_2;
DROP TABLE data_01875_3;
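The three identical CREATE TABLE statements above are only a warm-up: CREATE TABLE reads min_count_to_compile_expression from the global profile, where it defaults to 3. A minimal sketch of the contrasting session-level route (the same pattern the 01871 test earlier in this diff uses), assuming the query honors session settings, so the very first execution is already compiled:

SET compile_expressions = 1;
SET min_count_to_compile_expression = 0;
SELECT count() FROM numbers(16384) WHERE bitShiftRight(number, 8) = 0; -- 256 matching rows; the filter is expected to be JIT-compiled on first use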
@ -1,3 +1,4 @@
v21.4.7.3-stable 2021-05-19
v21.4.6.55-stable 2021-04-30
v21.4.5.46-stable 2021-04-24
v21.4.4.30-stable 2021-04-16