diff --git a/CMakeLists.txt b/CMakeLists.txt index f84a181a39c..fb4ca18126d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,14 +1,22 @@ +foreach(policy + CMP0023 + CMP0048 # CMake 3.0 + CMP0074 # CMake 3.12 + CMP0077 + CMP0079 + ) + if(POLICY ${policy}) + cmake_policy(SET ${policy} NEW) + endif() +endforeach() + project(ClickHouse) cmake_minimum_required(VERSION 3.3) -foreach(policy - CMP0023 - CMP0074 # CMake 3.12 - ) - if(POLICY ${policy}) - cmake_policy(SET ${policy} NEW) - endif() -endforeach() +# Ignore export() since we don't use it, +# but it gets broken with a global targets via link_libraries() +macro (export) +endmacro () set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/") set(CMAKE_EXPORT_COMPILE_COMMANDS 1) # Write compile_commands.json @@ -128,12 +136,6 @@ if (CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64") endif () endif () -if (GLIBC_COMPATIBILITY) - set (USE_INTERNAL_MEMCPY ON) -else () - message (WARNING "Option GLIBC_COMPATIBILITY must be turned on for production builds.") -endif () - string(REGEX MATCH "-?[0-9]+(.[0-9]+)?$" COMPILER_POSTFIX ${CMAKE_CXX_COMPILER}) find_program (LLD_PATH NAMES "lld${COMPILER_POSTFIX}" "lld") @@ -172,20 +174,15 @@ if (ARCH_NATIVE) set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native") endif () -# Special options for better optimized code with clang -#if (COMPILER_CLANG) -# set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -Wno-unused-command-line-argument -mllvm -inline-threshold=10000") -#endif () - if (CMAKE_VERSION VERSION_LESS "3.8.0") if (NOT MSVC) - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++1z") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") endif () else () set (CMAKE_CXX_STANDARD 17) set (CMAKE_CXX_EXTENSIONS 0) # https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html#prop_tgt:CXX_EXTENSIONS set (CMAKE_CXX_STANDARD_REQUIRED ON) - set (CXX_FLAGS_INTERNAL_COMPILER "-std=c++1z") + set (CXX_FLAGS_INTERNAL_COMPILER 
"-std=c++17") endif () if (COMPILER_GCC OR COMPILER_CLANG) @@ -207,17 +204,13 @@ endif() set (CMAKE_BUILD_COLOR_MAKEFILE ON) set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${PLATFORM_EXTRA_CXX_FLAG} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${CXX_WARNING_FLAGS}") -#set (CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_ADD}") set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_CXX_FLAGS_ADD}") set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_CXX_FLAGS_ADD}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${CMAKE_C_FLAGS_ADD}") -#set (CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${CMAKE_C_FLAGS_ADD}") set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_C_FLAGS_ADD}") set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_C_FLAGS_ADD}") -# Uses MAKE_STATIC_LIBRARIES - option (UNBUNDLED "Try find all libraries in system. We recommend to avoid this mode for production builds, because we cannot guarantee exact versions and variants of libraries your system has installed. This mode exists for enthusiastic developers who search for trouble. Also it is useful for maintainers of OS packages." OFF) if (UNBUNDLED) @@ -225,149 +218,28 @@ if (UNBUNDLED) else () set(NOT_UNBUNDLED 1) endif () + # Using system libs can cause lot of warnings in includes. 
if (UNBUNDLED OR NOT (OS_LINUX OR APPLE) OR ARCH_32) option (NO_WERROR "Disable -Werror compiler option" ON) endif () - -set(THREADS_PREFER_PTHREAD_FLAG ON) -find_package (Threads) - -include (cmake/find_cxx.cmake) - -include (cmake/test_compiler.cmake) - -if (OS_LINUX AND COMPILER_CLANG AND USE_STATIC_LIBRARIES) - option (USE_LIBCXX "Use libc++ and libc++abi instead of libstdc++ (only make sense on Linux)" ${HAVE_LIBCXX}) - - if (USE_LIBCXX) - set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_LIBCPP_DEBUG=0") # More checks in debug build. - endif () -endif () - -if (USE_LIBCXX) - set (STATIC_STDLIB_FLAGS "") -else () - set (STATIC_STDLIB_FLAGS "-static-libgcc -static-libstdc++") -endif () - -if (MAKE_STATIC_LIBRARIES AND NOT APPLE AND NOT (COMPILER_CLANG AND OS_FREEBSD)) - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${STATIC_STDLIB_FLAGS}") - - # Along with executables, we also build example of shared library for "library dictionary source"; and it also should be self-contained. - set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${STATIC_STDLIB_FLAGS}") -endif () - -if (USE_STATIC_LIBRARIES AND HAVE_NO_PIE) - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAG_NO_PIE}") - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${FLAG_NO_PIE}") -endif () - # Make this extra-checks for correct library dependencies. if (NOT SANITIZE) set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-undefined") set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-undefined") endif () -include (cmake/find_unwind.cmake) +include(cmake/dbms_glob_sources.cmake) +include(cmake/default_libs.cmake) -if (USE_INTERNAL_UNWIND_LIBRARY) - option (USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING "Use internal unwind library for exception handling" ${USE_STATIC_LIBRARIES}) -endif () - - -# Set standard, system and compiler libraries explicitly. -# This is intended for more control of what we are linking. 
+###################################### +### Add targets below this comment ### +###################################### string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC) set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX") -set (DEFAULT_LIBS "") -if (OS_LINUX AND NOT UNBUNDLED AND (GLIBC_COMPATIBILITY OR USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING OR USE_LIBCXX)) - # Note: this probably has no effect, but I'm not an expert in CMake. - set (CMAKE_C_IMPLICIT_LINK_LIBRARIES "") - set (CMAKE_CXX_IMPLICIT_LINK_LIBRARIES "") - - # Disable default linked libraries. - set (DEFAULT_LIBS "-nodefaultlibs") - - # We need builtins from Clang's RT even without libcxx - for ubsan+int128. See https://bugs.llvm.org/show_bug.cgi?id=16404 - set (BUILTINS_LIB_PATH "") - if (COMPILER_CLANG) - execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-${CMAKE_SYSTEM_PROCESSOR}.a OUTPUT_VARIABLE BUILTINS_LIB_PATH OUTPUT_STRIP_TRAILING_WHITESPACE) - else () - set (BUILTINS_LIB_PATH "-lgcc") - endif () - - string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC) - set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX") - - # Add C++ libraries. - # - # This consist of: - # - C++ standard library (like implementation of std::string); - # - C++ ABI implementation (functions for exceptions like __cxa_throw, RTTI, etc); - # - functions for internal implementation of exception handling (stack unwinding based on DWARF info; TODO replace with bundled libunwind); - # - compiler builtins (example: functions for implementation of __int128 operations); - # - # There are two variants of C++ library: libc++ (from LLVM compiler infrastructure) and libstdc++ (from GCC). 
- - if (USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING) - if (USE_STATIC_LIBRARIES) - set (EXCEPTION_HANDLING_LIBRARY "${ClickHouse_BINARY_DIR}/contrib/libunwind-cmake/libunwind_static${${CMAKE_POSTFIX_VARIABLE}}.a") - else () - set (EXCEPTION_HANDLING_LIBRARY "${ClickHouse_BINARY_DIR}/contrib/libunwind-cmake/libunwind_shared${${CMAKE_POSTFIX_VARIABLE}}.so") - endif () - else () - set (EXCEPTION_HANDLING_LIBRARY "-lgcc_eh") - endif () - - message (STATUS "Using exception handling library: ${EXCEPTION_HANDLING_LIBRARY}") - - if (USE_LIBCXX) - if (USE_INTERNAL_LIBCXX_LIBRARY) - set (LIBCXX_LIBS "${ClickHouse_BINARY_DIR}/contrib/libcxx-cmake/libcxx_static${${CMAKE_POSTFIX_VARIABLE}}.a ${ClickHouse_BINARY_DIR}/contrib/libcxxabi-cmake/libcxxabi_static${${CMAKE_POSTFIX_VARIABLE}}.a") - else () - set (LIBCXX_LIBS "-lc++ -lc++abi -lc++fs") - endif () - - set (DEFAULT_LIBS "${DEFAULT_LIBS} -Wl,-Bstatic ${LIBCXX_LIBS} ${EXCEPTION_HANDLING_LIBRARY} ${BUILTINS_LIB_PATH} -Wl,-Bdynamic") - else () - set (DEFAULT_LIBS "${DEFAULT_LIBS} -Wl,-Bstatic -lstdc++ -lstdc++fs ${EXCEPTION_HANDLING_LIBRARY} ${COVERAGE_OPTION} ${BUILTINS_LIB_PATH} -Wl,-Bdynamic") - endif () - - # Linking with GLIBC prevents portability of binaries to older systems. - # We overcome this behaviour by statically linking with our own implementation of all new symbols (that don't exist in older Libc or have infamous "symbol versioning"). - # The order of linking is important: 'glibc-compatibility' must be before libc but after all other libraries. - if (GLIBC_COMPATIBILITY) - message (STATUS "Some symbols from glibc will be replaced for compatibility") - - string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC) - set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX") - - # FIXME: glibc-compatibility may be non-static in some builds! 
- set (DEFAULT_LIBS "${DEFAULT_LIBS} ${ClickHouse_BINARY_DIR}/libs/libglibc-compatibility/libglibc-compatibility${${CMAKE_POSTFIX_VARIABLE}}.a") - endif () - - # Add Libc. GLIBC is actually a collection of interdependent libraries. - set (DEFAULT_LIBS "${DEFAULT_LIBS} -lrt -ldl -lpthread -lm -lc") - - # Note: we'd rather use Musl libc library, but it's little bit more difficult to use. - - message(STATUS "Default libraries: ${DEFAULT_LIBS}") -endif () - -if (NOT GLIBC_COMPATIBILITY) - set (M_LIBRARY m) -endif () - -if (DEFAULT_LIBS) - # Add default libs to all targets as the last dependency. - set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS}) - set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS}) -endif () - if (NOT MAKE_STATIC_LIBRARIES) set(CMAKE_POSITION_INDEPENDENT_CODE ON) endif () @@ -420,20 +292,12 @@ if (UNBUNDLED) else () set(NOT_UNBUNDLED 1) endif () + # Using system libs can cause lot of warnings in includes. if (UNBUNDLED OR NOT (OS_LINUX OR APPLE) OR ARCH_32) option (NO_WERROR "Disable -Werror compiler option" ON) endif () -if (USE_LIBCXX) - set (HAVE_LIBCXX 1) - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") -endif() - -if (USE_LIBCXX AND USE_INTERNAL_LIBCXX_LIBRARY) - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nostdinc++ -isystem ${LIBCXX_INCLUDE_DIR} -isystem ${LIBCXXABI_INCLUDE_DIR}") -endif () - message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE} ; USE_STATIC_LIBRARIES=${USE_STATIC_LIBRARIES} MAKE_STATIC_LIBRARIES=${MAKE_STATIC_LIBRARIES} SPLIT_SHARED=${SPLIT_SHARED_LIBRARIES} UNBUNDLED=${UNBUNDLED} CCACHE=${CCACHE_FOUND} ${CCACHE_VERSION}") include(GNUInstallDirs) @@ -499,79 +363,11 @@ include (libs/libmysqlxx/cmake/find_mysqlclient.cmake) include (cmake/print_flags.cmake) +install (EXPORT global DESTINATION cmake) + add_subdirectory (contrib EXCLUDE_FROM_ALL) add_subdirectory (libs) add_subdirectory (utils) add_subdirectory (dbms) include (cmake/print_include_directories.cmake) - -if 
(GLIBC_COMPATIBILITY OR USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING) - # FIXME: actually glibc-compatibility should always be built first, - # because it's unconditionally linked via $DEFAULT_LIBS, - # and these looks like the first places that get linked. - function (add_default_dependencies target_name) - if (TARGET ${target_name}) - if (GLIBC_COMPATIBILITY) - add_dependencies(${target_name} glibc-compatibility) - endif () - - if (USE_LIBCXX AND USE_INTERNAL_LIBCXX_LIBRARY) - add_dependencies(${target_name} cxx_static cxxabi_static) - endif () - - if (USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING) - add_dependencies(${target_name} unwind_static) - endif () - endif () - endfunction () - - add_default_dependencies(ltdl) - add_default_dependencies(zlibstatic) - add_default_dependencies(jemalloc) - add_default_dependencies(memcpy) - add_default_dependencies(Foundation) - add_default_dependencies(common) - add_default_dependencies(gtest) - add_default_dependencies(lz4) - add_default_dependencies(zstd) - add_default_dependencies(snappy) - add_default_dependencies(arrow) - add_default_dependencies(protoc) - add_default_dependencies(thrift_static) - add_default_dependencies(cityhash) - add_default_dependencies(farmhash) - add_default_dependencies(murmurhash) - add_default_dependencies(metrohash) - add_default_dependencies(metrohash128) - add_default_dependencies(consistent-hashing) - add_default_dependencies(double-conversion) - add_default_dependencies(cctz) - add_default_dependencies(kj) - add_default_dependencies(simdjson) - add_default_dependencies(apple_rt) - add_default_dependencies(h3) - add_default_dependencies(re2) - add_default_dependencies(re2_st) - add_default_dependencies(hs_compile_shared) - add_default_dependencies(hs_exec_shared) - add_default_dependencies(hs_shared) - add_default_dependencies(widechar_width) - add_default_dependencies(string_utils) - add_default_dependencies(consistent-hashing-sumbur) - 
add_default_dependencies(boost_program_options_internal) - add_default_dependencies(boost_system_internal) - add_default_dependencies(boost_regex_internal) - add_default_dependencies(roaring) - add_default_dependencies(btrie) - add_default_dependencies(cpuid) - add_default_dependencies(mysqlclient) - add_default_dependencies(zlib) - add_default_dependencies(thrift) - add_default_dependencies(brotli) - add_default_dependencies(libprotobuf) - add_default_dependencies(base64) - add_default_dependencies(readpassphrase) - add_default_dependencies(unwind_static) - add_default_dependencies(fastops) -endif () diff --git a/cmake/default_libs.cmake b/cmake/default_libs.cmake new file mode 100644 index 00000000000..54a01042558 --- /dev/null +++ b/cmake/default_libs.cmake @@ -0,0 +1,48 @@ +# Set standard, system and compiler libraries explicitly. +# This is intended for more control of what we are linking. + +set (DEFAULT_LIBS "-nodefaultlibs") + +if (OS_LINUX) + # We need builtins from Clang's RT even without libcxx - for ubsan+int128. + # See https://bugs.llvm.org/show_bug.cgi?id=16404 + if (COMPILER_CLANG) + execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-${CMAKE_SYSTEM_PROCESSOR}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE) + else () + set (BUILTINS_LIBRARY "-lgcc") + endif () + + set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread -ldl") + + message(STATUS "Default libraries: ${DEFAULT_LIBS}") +endif () + +set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS}) +set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS}) + +# Global libraries + +add_library(global-libs INTERFACE) + +# Unfortunately '-pthread' doesn't work with '-nodefaultlibs'. +# Just make sure we have pthreads at all. 
+set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) + +add_subdirectory(libs/libglibc-compatibility) +include (cmake/find_unwind.cmake) +include (cmake/find_cxx.cmake) + +add_library(global-group INTERFACE) +target_link_libraries(global-group INTERFACE + -Wl,--start-group + $ + -Wl,--end-group +) + +link_libraries(global-group) + +install( + TARGETS global-group global-libs + EXPORT global +) diff --git a/cmake/find_capnp.cmake b/cmake/find_capnp.cmake index ec591afdc38..572fc1b3341 100644 --- a/cmake/find_capnp.cmake +++ b/cmake/find_capnp.cmake @@ -1,50 +1,20 @@ -option (ENABLE_CAPNP "Enable Cap'n Proto" ON) - -if (ENABLE_CAPNP) - # cmake 3.5.1 bug: - # capnproto uses this cmake feature: - # target_compile_features(kj PUBLIC cxx_constexpr) - # old cmake adds -std=gnu++11 to end of all compile commands (even if -std=gnu++17 already present in compile string) - # cmake 3.9.1 (ubuntu artful) have no this bug (c++17 support added to cmake 3.8.2) - if (CMAKE_VERSION VERSION_LESS "3.8.0") - set (USE_INTERNAL_CAPNP_LIBRARY_DEFAULT 0) - set (MISSING_INTERNAL_CAPNP_LIBRARY 1) - else () - set (USE_INTERNAL_CAPNP_LIBRARY_DEFAULT ${NOT_UNBUNDLED}) - endif () - - option (USE_INTERNAL_CAPNP_LIBRARY "Set to FALSE to use system capnproto library instead of bundled" ${USE_INTERNAL_CAPNP_LIBRARY_DEFAULT}) - - if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/capnproto/c++/CMakeLists.txt") - if (USE_INTERNAL_CAPNP_LIBRARY) - message (WARNING "submodule contrib/capnproto is missing. 
to fix try run: \n git submodule update --init --recursive") - endif () - set (USE_INTERNAL_CAPNP_LIBRARY 0) - set (MISSING_INTERNAL_CAPNP_LIBRARY 1) - endif () - - if (NOT USE_INTERNAL_CAPNP_LIBRARY) - set (CAPNP_PATHS "/usr/local/opt/capnp/lib") - set (CAPNP_INCLUDE_PATHS "/usr/local/opt/capnp/include") - find_library (CAPNP capnp PATHS ${CAPNP_PATHS}) - find_library (CAPNPC capnpc PATHS ${CAPNP_PATHS}) - find_library (KJ kj PATHS ${CAPNP_PATHS}) - set (CAPNP_LIBRARY ${CAPNPC} ${CAPNP} ${KJ}) - find_path (CAPNP_INCLUDE_DIR NAMES capnp/schema-parser.h PATHS ${CAPNP_INCLUDE_PATHS}) - endif () - - if (CAPNP_INCLUDE_DIR AND CAPNP_LIBRARY) - set(USE_CAPNP 1) - elseif (NOT MISSING_INTERNAL_CAPNP_LIBRARY) - set (USE_INTERNAL_CAPNP_LIBRARY 1) - set (CAPNP_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/capnproto/c++/src") - set (CAPNP_LIBRARY capnpc) - set (USE_CAPNP 1) - endif () -endif () +option (USE_CAPNP "Enable Cap'n Proto" ON) if (USE_CAPNP) - message (STATUS "Using capnp=${USE_CAPNP}: ${CAPNP_INCLUDE_DIR} : ${CAPNP_LIBRARY}") -else () - message (STATUS "Build without capnp (support for Cap'n Proto format will be disabled)") + option (USE_INTERNAL_CAPNP_LIBRARY "Set to FALSE to use system capnproto library instead of bundled" ${NOT_UNBUNDLED}) + + # FIXME: refactor to use `add_library(… IMPORTED)` if possible. 
+ if (NOT USE_INTERNAL_CAPNP_LIBRARY) + find_library (KJ kj) + find_library (CAPNP capnp) + find_library (CAPNPC capnpc) + + set (CAPNP_LIBRARIES ${CAPNPC} ${CAPNP} ${KJ}) + else () + add_subdirectory(contrib/capnproto-cmake) + + set (CAPNP_LIBRARIES capnpc) + endif () + + message (STATUS "Using capnp: ${CAPNP_LIBRARIES}") endif () diff --git a/cmake/find_cxx.cmake b/cmake/find_cxx.cmake index 2b2952f6efd..35a0b9d0927 100644 --- a/cmake/find_cxx.cmake +++ b/cmake/find_cxx.cmake @@ -1,26 +1,49 @@ -if (NOT APPLE) +if (OS_LINUX AND COMPILER_CLANG) + option (USE_LIBCXX "Use libc++ and libc++abi instead of libstdc++" ${HAVE_LIBCXX}) option (USE_INTERNAL_LIBCXX_LIBRARY "Set to FALSE to use system libcxx and libcxxabi libraries instead of bundled" ${NOT_UNBUNDLED}) +endif() + +if (USE_LIBCXX) + set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_LIBCPP_DEBUG=0") # More checks in debug build. endif () +# FIXME: make better check for submodule presence if (USE_INTERNAL_LIBCXX_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libcxx/include/vector") message (WARNING "submodule contrib/libcxx is missing. to fix try run: \n git submodule update --init --recursive") set (USE_INTERNAL_LIBCXX_LIBRARY 0) endif () +# FIXME: make better check for submodule presence if (USE_INTERNAL_LIBCXX_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libcxxabi/src") message (WARNING "submodule contrib/libcxxabi is missing. 
to fix try run: \n git submodule update --init --recursive") - set (USE_INTERNAL_LIBCXXABI_LIBRARY 0) + set (USE_INTERNAL_LIBCXX_LIBRARY 0) endif () -if (NOT USE_INTERNAL_LIBCXX_LIBRARY) - find_library (LIBCXX_LIBRARY c++) - find_library (LIBCXXABI_LIBRARY c++abi) +if (USE_LIBCXX) + if (NOT USE_INTERNAL_LIBCXX_LIBRARY) + find_library (LIBCXX_LIBRARY c++) + find_library (LIBCXXFS_LIBRARY c++fs) + find_library (LIBCXXABI_LIBRARY c++abi) + + target_link_libraries(global-libs INTERFACE ${EXCEPTION_HANDLING_LIBRARY}) + else () + set (LIBCXX_LIBRARY cxx) + set (LIBCXXABI_LIBRARY cxxabi) + add_subdirectory(contrib/libcxxabi-cmake) + add_subdirectory(contrib/libcxx-cmake) + + # Exception handling library is embedded into libcxxabi. + endif () + + target_link_libraries(global-libs INTERFACE ${LIBCXX_LIBRARY} ${LIBCXXABI_LIBRARY} ${LIBCXXFS_LIBRARY}) + + set (HAVE_LIBCXX 1) + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") + + message (STATUS "Using libcxx: ${LIBCXX_LIBRARY}") + message (STATUS "Using libcxxfs: ${LIBCXXFS_LIBRARY}") + message (STATUS "Using libcxxabi: ${LIBCXXABI_LIBRARY}") else () - set (LIBCXX_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxx/include) - set (LIBCXXABI_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxxabi/include) - set (LIBCXX_LIBRARY cxx_static) - set (LIBCXXABI_LIBRARY cxxabi_static) + target_link_libraries(global-libs INTERFACE -l:libstdc++.a -l:libstdc++fs.a) # Always link these libraries as static + target_link_libraries(global-libs INTERFACE ${EXCEPTION_HANDLING_LIBRARY}) endif () - -message (STATUS "Using libcxx: ${LIBCXX_LIBRARY}") -message (STATUS "Using libcxxabi: ${LIBCXXABI_LIBRARY}") diff --git a/cmake/find_unwind.cmake b/cmake/find_unwind.cmake index 25e088e8deb..ea6e1d4bacb 100644 --- a/cmake/find_unwind.cmake +++ b/cmake/find_unwind.cmake @@ -1,59 +1,17 @@ -include (CMakePushCheckState) -cmake_push_check_state () +option (USE_UNWIND "Enable libunwind (better stacktraces)" ON) -option (ENABLE_UNWIND "Enable 
libunwind (better stacktraces)" ON) +if (NOT CMAKE_SYSTEM MATCHES "Linux" OR ARCH_ARM OR ARCH_32) + set (USE_UNWIND OFF) +endif () -if (ENABLE_UNWIND) +if (USE_UNWIND) + add_subdirectory(contrib/libunwind-cmake) + set (UNWIND_LIBRARIES unwind) + set (EXCEPTION_HANDLING_LIBRARY ${UNWIND_LIBRARIES}) -if (CMAKE_SYSTEM MATCHES "Linux" AND NOT ARCH_ARM AND NOT ARCH_32) - option (USE_INTERNAL_UNWIND_LIBRARY "Set to FALSE to use system unwind library instead of bundled" ${NOT_UNBUNDLED}) + message (STATUS "Using libunwind: ${UNWIND_LIBRARIES}") else () - option (USE_INTERNAL_UNWIND_LIBRARY "Set to FALSE to use system unwind library instead of bundled" OFF) + set (EXCEPTION_HANDLING_LIBRARY gcc_eh) endif () -if (NOT USE_INTERNAL_UNWIND_LIBRARY) - find_library (UNWIND_LIBRARY unwind) - find_path (UNWIND_INCLUDE_DIR NAMES unwind.h PATHS ${UNWIND_INCLUDE_PATHS}) - - include (CheckCXXSourceCompiles) - set(CMAKE_REQUIRED_INCLUDES ${UNWIND_INCLUDE_DIR}) - set(CMAKE_REQUIRED_LIBRARIES ${UNWIND_LIBRARY}) - check_cxx_source_compiles(" - #include - #define UNW_LOCAL_ONLY - #include - int main () { - ucontext_t context; - unw_cursor_t cursor; - unw_init_local(&cursor, &context); - return 0; - } - " HAVE_UNW_INIT_LOCAL) - if (NOT HAVE_UNW_INIT_LOCAL) - set(UNWIND_LIBRARY "") - set(UNWIND_INCLUDE_DIR "") - endif () - -endif () - -if (UNWIND_LIBRARY AND UNWIND_INCLUDE_DIR) - set (USE_UNWIND 1) -elseif (CMAKE_SYSTEM MATCHES "Linux" AND NOT ARCH_ARM AND NOT ARCH_32 AND NOT UNBUNDLED) - set (USE_INTERNAL_UNWIND_LIBRARY 1) - - set (PACKAGE_VERSION "9.0.0svn" CACHE STRING "") - - set (UNWIND_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libunwind/include") - - set (LIBUNWIND_ENABLE_SHARED OFF CACHE BOOL "") - set (LIBUNWIND_ENABLE_STATIC ON CACHE BOOL "") - set (UNWIND_LIBRARY unwind_static) - - set (USE_UNWIND 1) -endif () - -endif () - -message (STATUS "Using unwind=${USE_UNWIND}: ${UNWIND_INCLUDE_DIR} : ${UNWIND_LIBRARY}") - -cmake_pop_check_state () +message (STATUS "Using exception 
handler: ${EXCEPTION_HANDLING_LIBRARY}") diff --git a/cmake/test_compiler.cmake b/cmake/test_compiler.cmake deleted file mode 100644 index 570c058b9f7..00000000000 --- a/cmake/test_compiler.cmake +++ /dev/null @@ -1,47 +0,0 @@ -include (CheckCXXSourceCompiles) -include (CMakePushCheckState) - -set(THREADS_PREFER_PTHREAD_FLAG ON) -find_package(Threads) - -cmake_push_check_state () - -if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") -# clang4 : -no-pie cause error -# clang6 : -no-pie cause warning - - if (MAKE_STATIC_LIBRARIES) - set (TEST_FLAG "-Wl,-Bstatic -stdlib=libc++ -lc++ -lc++abi -Wl,-Bdynamic") - else () - set (TEST_FLAG "-stdlib=libc++ -lc++ -lc++abi") - endif () - - set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG}") - set (CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES} Threads::Threads) - - check_cxx_source_compiles(" - #include - int main() { - std::cerr << std::endl; - return 0; - } - " HAVE_LIBCXX) - -else () - - set (TEST_FLAG "-no-pie") - set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG}") - - check_cxx_source_compiles(" - int main() { - return 0; - } - " HAVE_NO_PIE) - - if (HAVE_NO_PIE) - set (FLAG_NO_PIE ${TEST_FLAG}) - endif () - -endif () - -cmake_pop_check_state () diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index e652c393141..96462de0190 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -23,16 +23,6 @@ if (USE_INTERNAL_ORC_LIBRARY) add_subdirectory(orc) endif() -if (USE_INTERNAL_UNWIND_LIBRARY) - add_subdirectory (libunwind-cmake) -endif () - -if (USE_LIBCXX AND USE_INTERNAL_LIBCXX_LIBRARY) - add_subdirectory(libcxx-cmake) - add_subdirectory(libcxxabi-cmake) -endif() - - if (USE_INTERNAL_BOOST_LIBRARY) add_subdirectory (boost-cmake) endif () @@ -172,15 +162,6 @@ if (ENABLE_ODBC AND USE_INTERNAL_ODBC_LIBRARY) add_library(ODBC::ODBC ALIAS ${ODBC_LIBRARIES}) endif () -if (ENABLE_CAPNP AND USE_INTERNAL_CAPNP_LIBRARY) - set (BUILD_TESTING 0 CACHE INTERNAL "") - set (_save ${CMAKE_CXX_EXTENSIONS}) - set (CMAKE_CXX_EXTENSIONS) - 
add_subdirectory (capnproto/c++) - set (CMAKE_CXX_EXTENSIONS ${_save}) - target_include_directories(${CAPNP_LIBRARY} PUBLIC $) -endif () - if (USE_INTERNAL_PARQUET_LIBRARY) if (USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE) # We dont use arrow's cmakefiles because they uses too many depends and download some libs in compile time diff --git a/contrib/arrow-cmake/CMakeLists.txt b/contrib/arrow-cmake/CMakeLists.txt index 843ff9cd8af..ba1ddc2414a 100644 --- a/contrib/arrow-cmake/CMakeLists.txt +++ b/contrib/arrow-cmake/CMakeLists.txt @@ -44,7 +44,6 @@ set( thriftcpp_threads_SOURCES add_library(${THRIFT_LIBRARY} ${thriftcpp_SOURCES} ${thriftcpp_threads_SOURCES}) set_target_properties(${THRIFT_LIBRARY} PROPERTIES CXX_STANDARD 14) # REMOVE after https://github.com/apache/thrift/pull/1641 target_include_directories(${THRIFT_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp/src PRIVATE ${Boost_INCLUDE_DIRS}) -target_link_libraries(${THRIFT_LIBRARY} PRIVATE Threads::Threads) # === orc @@ -219,7 +218,7 @@ endif() add_library(${ARROW_LIBRARY} ${ARROW_SRCS}) add_dependencies(${ARROW_LIBRARY} protoc) target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/cpp/src ${Boost_INCLUDE_DIRS}) -target_link_libraries(${ARROW_LIBRARY} PRIVATE ${DOUBLE_CONVERSION_LIBRARIES} ${Protobuf_LIBRARY} Threads::Threads) +target_link_libraries(${ARROW_LIBRARY} PRIVATE ${DOUBLE_CONVERSION_LIBRARIES} ${Protobuf_LIBRARY}) if (ARROW_WITH_LZ4) target_link_libraries(${ARROW_LIBRARY} PRIVATE ${LZ4_LIBRARY}) endif() diff --git a/contrib/capnproto-cmake/CMakeLists.txt b/contrib/capnproto-cmake/CMakeLists.txt new file mode 100644 index 00000000000..275007c145f --- /dev/null +++ b/contrib/capnproto-cmake/CMakeLists.txt @@ -0,0 +1,68 @@ +set (CAPNPROTO_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/capnproto/c++/src) + +set (KJ_SRCS + ${CAPNPROTO_SOURCE_DIR}/kj/array.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/common.c++ + 
${CAPNPROTO_SOURCE_DIR}/kj/debug.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/exception.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/io.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/memory.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/mutex.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/string.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/hash.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/table.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/thread.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/main.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/arena.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/test-helpers.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/units.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/encoding.c++ + + ${CAPNPROTO_SOURCE_DIR}/kj/refcount.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/string-tree.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/time.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/filesystem.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/filesystem-disk-unix.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/filesystem-disk-win32.c++ + ${CAPNPROTO_SOURCE_DIR}/kj/parse/char.c++ +) + +add_library(kj ${KJ_SRCS}) +target_include_directories(kj INTERFACE ${CAPNPROTO_SOURCE_DIR}) + +set (CAPNP_SRCS + ${CAPNPROTO_SOURCE_DIR}/capnp/c++.capnp.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/blob.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/arena.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/layout.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/list.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/any.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/message.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/schema.capnp.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/serialize.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/serialize-packed.c++ + + ${CAPNPROTO_SOURCE_DIR}/capnp/schema.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/schema-loader.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/dynamic.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/stringify.c++ +) + +add_library(capnp ${CAPNP_SRCS}) +target_link_libraries(capnp PUBLIC kj) + +set (CAPNPC_SRCS + ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/type-id.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/error-reporter.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/lexer.capnp.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/lexer.c++ + 
${CAPNPROTO_SOURCE_DIR}/capnp/compiler/grammar.capnp.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/parser.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/node-translator.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/compiler.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/schema-parser.c++ + ${CAPNPROTO_SOURCE_DIR}/capnp/serialize-text.c++ +) + +add_library(capnpc ${CAPNPC_SRCS}) +target_link_libraries(capnpc PUBLIC capnp) diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt index 47f057c0559..e44c54d2b37 100644 --- a/contrib/jemalloc-cmake/CMakeLists.txt +++ b/contrib/jemalloc-cmake/CMakeLists.txt @@ -59,7 +59,6 @@ if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") if (USE_UNWIND) target_compile_definitions (jemalloc PRIVATE -DJEMALLOC_PROF_LIBUNWIND=1) - target_include_directories (jemalloc BEFORE PRIVATE ${UNWIND_INCLUDE_DIR}) - target_link_libraries (jemalloc PRIVATE ${UNWIND_LIBRARY}) + target_link_libraries (jemalloc PRIVATE ${UNWIND_LIBRARIES}) endif () endif () diff --git a/contrib/libcxx-cmake/CMakeLists.txt b/contrib/libcxx-cmake/CMakeLists.txt index e9ca5e1e7cd..07fa70b9869 100644 --- a/contrib/libcxx-cmake/CMakeLists.txt +++ b/contrib/libcxx-cmake/CMakeLists.txt @@ -1,5 +1,4 @@ set(LIBCXX_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxx) -#set(LIBCXX_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/libcxx) set(SRCS ${LIBCXX_SOURCE_DIR}/src/optional.cpp @@ -16,10 +15,6 @@ ${LIBCXX_SOURCE_DIR}/src/condition_variable.cpp ${LIBCXX_SOURCE_DIR}/src/hash.cpp ${LIBCXX_SOURCE_DIR}/src/string.cpp ${LIBCXX_SOURCE_DIR}/src/debug.cpp -#${LIBCXX_SOURCE_DIR}/src/support/win32/support.cpp -#${LIBCXX_SOURCE_DIR}/src/support/win32/locale_win32.cpp -#${LIBCXX_SOURCE_DIR}/src/support/win32/thread_win32.cpp -#${LIBCXX_SOURCE_DIR}/src/support/solaris/xlocale.cpp ${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp ${LIBCXX_SOURCE_DIR}/src/utility.cpp ${LIBCXX_SOURCE_DIR}/src/any.cpp @@ -43,9 +38,16 @@ ${LIBCXX_SOURCE_DIR}/src/system_error.cpp ${LIBCXX_SOURCE_DIR}/src/random.cpp ) 
-add_library(cxx_static ${SRCS}) +add_library(cxx ${SRCS}) -target_include_directories(cxx_static PUBLIC ${LIBCXX_SOURCE_DIR}/include) -target_compile_definitions(cxx_static PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI) -target_compile_options(cxx_static PRIVATE -nostdinc++) +target_include_directories(cxx SYSTEM BEFORE PUBLIC $) +target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI) +target_compile_options(cxx PRIVATE -nostdinc++) +target_link_libraries(cxx PUBLIC cxxabi) +install( + TARGETS cxx + EXPORT global + ARCHIVE DESTINATION lib + RUNTIME DESTINATION lib +) diff --git a/contrib/libcxxabi-cmake/CMakeLists.txt b/contrib/libcxxabi-cmake/CMakeLists.txt index 2abece86691..546d39933af 100644 --- a/contrib/libcxxabi-cmake/CMakeLists.txt +++ b/contrib/libcxxabi-cmake/CMakeLists.txt @@ -1,13 +1,10 @@ set(LIBCXXABI_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxxabi) -set(LIBCXX_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxx) -#set(LIBCXXABI_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/libcxxabi) set(SRCS ${LIBCXXABI_SOURCE_DIR}/src/stdlib_stdexcept.cpp ${LIBCXXABI_SOURCE_DIR}/src/cxa_virtual.cpp ${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp ${LIBCXXABI_SOURCE_DIR}/src/fallback_malloc.cpp -#${LIBCXXABI_SOURCE_DIR}/src/cxa_noexception.cpp ${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp ${LIBCXXABI_SOURCE_DIR}/src/cxa_default_handlers.cpp ${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp @@ -25,10 +22,24 @@ ${LIBCXXABI_SOURCE_DIR}/src/cxa_vector.cpp ${LIBCXXABI_SOURCE_DIR}/src/stdlib_new_delete.cpp ) -add_library(cxxabi_static ${SRCS}) +add_library(cxxabi ${SRCS}) -target_include_directories(cxxabi_static PUBLIC ${LIBCXXABI_SOURCE_DIR}/include ${LIBCXX_SOURCE_DIR}/include) -target_compile_definitions(cxxabi_static PRIVATE -D_LIBCPP_BUILDING_LIBRARY) -target_compile_options(cxxabi_static PRIVATE -nostdinc++ -fno-sanitize=undefined) # If we don't disable UBSan, infinite recursion happens in dynamic_cast. 
+target_include_directories(cxxabi SYSTEM BEFORE + PUBLIC $ + PRIVATE $ +) +target_compile_definitions(cxxabi PRIVATE -D_LIBCPP_BUILDING_LIBRARY) +target_compile_options(cxxabi PRIVATE -nostdinc++ -fno-sanitize=undefined) # If we don't disable UBSan, infinite recursion happens in dynamic_cast. +if (USE_UNWIND) + target_link_libraries(cxxabi PRIVATE ${UNWIND_LIBRARIES}) +else () + target_link_libraries(cxxabi PRIVATE gcc_eh) +endif () +install( + TARGETS cxxabi + EXPORT global + ARCHIVE DESTINATION lib + RUNTIME DESTINATION lib +) diff --git a/contrib/librdkafka-cmake/CMakeLists.txt b/contrib/librdkafka-cmake/CMakeLists.txt index 75cd3968204..64dc83fa8b6 100644 --- a/contrib/librdkafka-cmake/CMakeLists.txt +++ b/contrib/librdkafka-cmake/CMakeLists.txt @@ -65,7 +65,7 @@ add_library(rdkafka ${SRCS}) target_include_directories(rdkafka SYSTEM PUBLIC include) target_include_directories(rdkafka SYSTEM PUBLIC ${RDKAFKA_SOURCE_DIR}) # Because weird logic with "include_next" is used. target_include_directories(rdkafka SYSTEM PRIVATE ${ZSTD_INCLUDE_DIR}/common) # Because wrong path to "zstd_errors.h" is used. 
-target_link_libraries(rdkafka PRIVATE ${ZLIB_LIBRARIES} ${ZSTD_LIBRARY} ${LZ4_LIBRARY} ${LIBGSASL_LIBRARY} Threads::Threads) +target_link_libraries(rdkafka PRIVATE ${ZLIB_LIBRARIES} ${ZSTD_LIBRARY} ${LZ4_LIBRARY} ${LIBGSASL_LIBRARY}) if(OPENSSL_SSL_LIBRARY AND OPENSSL_CRYPTO_LIBRARY) target_link_libraries(rdkafka PRIVATE ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY}) endif() diff --git a/contrib/libunwind-cmake/CMakeLists.txt b/contrib/libunwind-cmake/CMakeLists.txt index 4f24fe249f5..f09d0979692 100644 --- a/contrib/libunwind-cmake/CMakeLists.txt +++ b/contrib/libunwind-cmake/CMakeLists.txt @@ -24,9 +24,15 @@ set(LIBUNWIND_SOURCES ${LIBUNWIND_C_SOURCES} ${LIBUNWIND_ASM_SOURCES}) -add_library(unwind_static ${LIBUNWIND_SOURCES}) +add_library(unwind ${LIBUNWIND_SOURCES}) -target_include_directories(unwind_static SYSTEM BEFORE PUBLIC ${LIBUNWIND_SOURCE_DIR}/include) -target_compile_definitions(unwind_static PRIVATE -D_LIBUNWIND_NO_HEAP=1 -D_DEBUG -D_LIBUNWIND_IS_NATIVE_ONLY) -target_compile_options(unwind_static PRIVATE -fno-exceptions -funwind-tables -fno-sanitize=all -nostdinc++ -fno-rtti) -target_link_libraries(unwind_static PRIVATE Threads::Threads ${CMAKE_DL_LIBS}) +target_include_directories(unwind SYSTEM BEFORE PUBLIC $) +target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1 -D_DEBUG -D_LIBUNWIND_IS_NATIVE_ONLY) +target_compile_options(unwind PRIVATE -fno-exceptions -funwind-tables -fno-sanitize=all -nostdinc++ -fno-rtti) + +install( + TARGETS unwind + EXPORT global + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib +) diff --git a/contrib/mariadb-connector-c-cmake/CMakeLists.txt b/contrib/mariadb-connector-c-cmake/CMakeLists.txt index 1f453a7f6d1..2e80b0c325f 100644 --- a/contrib/mariadb-connector-c-cmake/CMakeLists.txt +++ b/contrib/mariadb-connector-c-cmake/CMakeLists.txt @@ -62,11 +62,6 @@ endif() add_library(mysqlclient ${SRCS}) -target_link_libraries(mysqlclient PRIVATE ${CMAKE_DL_LIBS} Threads::Threads) -if(M_LIBRARY) - 
target_link_libraries(mysqlclient PRIVATE ${M_LIBRARY}) -endif() - if(OPENSSL_LIBRARIES) target_link_libraries(mysqlclient PRIVATE ${OPENSSL_LIBRARIES}) target_compile_definitions(mysqlclient PRIVATE -D HAVE_OPENSSL -D HAVE_TLS) diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt index f011cc21103..98eb23809da 100644 --- a/dbms/CMakeLists.txt +++ b/dbms/CMakeLists.txt @@ -97,8 +97,6 @@ add_subdirectory (src) set(dbms_headers) set(dbms_sources) -include(../cmake/dbms_glob_sources.cmake) - add_headers_and_sources(clickhouse_common_io src/Common) add_headers_and_sources(clickhouse_common_io src/Common/HashTable) add_headers_and_sources(clickhouse_common_io src/IO) @@ -163,9 +161,7 @@ if (OS_FREEBSD) endif () if (USE_UNWIND) - if (NOT USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING) - target_link_libraries (clickhouse_common_io PRIVATE ${UNWIND_LIBRARY}) - endif () + target_link_libraries (clickhouse_common_io PRIVATE ${UNWIND_LIBRARIES}) endif () add_subdirectory(src/Common/ZooKeeper) @@ -241,15 +237,10 @@ target_link_libraries(clickhouse_common_io ${EXECINFO_LIBRARIES} PUBLIC ${Boost_SYSTEM_LIBRARY} + ${Boost_PROGRAM_OPTIONS_LIBRARY} PRIVATE apple_rt PUBLIC - Threads::Threads - PRIVATE - ${CMAKE_DL_LIBS} - PRIVATE - rt - PUBLIC roaring ) @@ -297,7 +288,6 @@ target_link_libraries (dbms ${Boost_FILESYSTEM_LIBRARY} PUBLIC ${Boost_SYSTEM_LIBRARY} - Threads::Threads ) target_include_directories(dbms PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include) @@ -364,10 +354,7 @@ if (USE_ICU) endif () if (USE_CAPNP) - target_link_libraries (dbms PRIVATE ${CAPNP_LIBRARY}) - if (NOT USE_INTERNAL_CAPNP_LIBRARY) - target_include_directories (dbms SYSTEM BEFORE PRIVATE ${CAPNP_INCLUDE_DIR}) - endif () + target_link_libraries (dbms PRIVATE ${CAPNP_LIBRARIES}) endif () if (USE_PARQUET) @@ -380,7 +367,6 @@ endif () if(OPENSSL_CRYPTO_LIBRARY) target_link_libraries(dbms PRIVATE ${OPENSSL_CRYPTO_LIBRARY}) endif () -target_link_libraries(dbms PRIVATE Threads::Threads) 
target_include_directories (dbms SYSTEM BEFORE PRIVATE ${DIVIDE_INCLUDE_DIR}) target_include_directories (dbms SYSTEM BEFORE PRIVATE ${SPARCEHASH_INCLUDE_DIR}) diff --git a/dbms/programs/benchmark/Benchmark.cpp b/dbms/programs/benchmark/Benchmark.cpp index fedb7f778a1..e685425eefc 100644 --- a/dbms/programs/benchmark/Benchmark.cpp +++ b/dbms/programs/benchmark/Benchmark.cpp @@ -32,6 +32,7 @@ #include #include #include +#include #include @@ -504,7 +505,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv) { using boost::program_options::value; - boost::program_options::options_description desc("Allowed options"); + boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth()); desc.add_options() ("help", "produce help message") ("concurrency,c", value()->default_value(1), "number of parallel queries") diff --git a/dbms/programs/client/Client.cpp b/dbms/programs/client/Client.cpp index cf72d7a87c3..0bb6cf62f90 100644 --- a/dbms/programs/client/Client.cpp +++ b/dbms/programs/client/Client.cpp @@ -67,6 +67,7 @@ #include #include #include +#include #if USE_READLINE #include "Suggest.h" @@ -130,7 +131,7 @@ private: bool print_time_to_stderr = false; /// Output execution time to stderr in batch mode. bool stdin_is_not_tty = false; /// stdin is not a terminal. - winsize terminal_size {}; /// Terminal size is needed to render progress bar. + uint16_t terminal_width = 0; /// Terminal width is needed to render progress bar. std::unique_ptr connection; /// Connection to DB. String query_id; /// Current query_id. @@ -671,7 +672,7 @@ private: String text; if (config().has("query")) - text = config().getString("query"); + text = config().getRawString("query"); /// Poco configuration should not process substitutions in form of ${...} inside query. else { /// If 'query' parameter is not set, read a query from stdin. 
@@ -1465,7 +1466,7 @@ private: if (show_progress_bar) { - ssize_t width_of_progress_bar = static_cast(terminal_size.ws_col) - written_progress_chars - strlen(" 99%"); + ssize_t width_of_progress_bar = static_cast(terminal_width) - written_progress_chars - strlen(" 99%"); if (width_of_progress_bar > 0) { std::string bar = UnicodeBar::render(UnicodeBar::getWidth(progress.read_rows, 0, total_rows_corrected, width_of_progress_bar)); @@ -1642,22 +1643,13 @@ public: stdin_is_not_tty = !isatty(STDIN_FILENO); + if (!stdin_is_not_tty) + terminal_width = getTerminalWidth(); + namespace po = boost::program_options; - unsigned line_length = po::options_description::m_default_line_length; - unsigned min_description_length = line_length / 2; - if (!stdin_is_not_tty) - { - if (ioctl(STDIN_FILENO, TIOCGWINSZ, &terminal_size)) - throwFromErrno("Cannot obtain terminal window size (ioctl TIOCGWINSZ)", ErrorCodes::SYSTEM_ERROR); - line_length = std::max( - static_cast(strlen("--http_native_compression_disable_checksumming_on_decompress ")), - static_cast(terminal_size.ws_col)); - min_description_length = std::min(min_description_length, line_length - 2); - } - /// Main commandline options related to client functionality and all parameters from Settings. - po::options_description main_description("Main options", line_length, min_description_length); + po::options_description main_description = createOptionsDescription("Main options", terminal_width); main_description.add_options() ("help", "produce help message") ("config-file,C", po::value(), "config-file path") @@ -1672,7 +1664,7 @@ public: * the "\n" is used to distinguish this case because there is hardly a chance an user would use "\n" * as the password. 
*/ - ("password", po::value()->implicit_value("\n"), "password") + ("password", po::value()->implicit_value("\n", ""), "password") ("ask-password", "ask-password") ("query_id", po::value(), "query_id") ("query,q", po::value(), "query") @@ -1703,7 +1695,7 @@ public: context.getSettingsRef().addProgramOptions(main_description); /// Commandline options related to external tables. - po::options_description external_description("External tables options"); + po::options_description external_description = createOptionsDescription("External tables options", terminal_width); external_description.add_options() ("file", po::value(), "data file or - for stdin") ("name", po::value()->default_value("_data"), "name of the table") diff --git a/dbms/programs/compressor/Compressor.cpp b/dbms/programs/compressor/Compressor.cpp index 427d58cbdc6..a073a79b416 100644 --- a/dbms/programs/compressor/Compressor.cpp +++ b/dbms/programs/compressor/Compressor.cpp @@ -12,8 +12,9 @@ #include #include #include - #include +#include + namespace DB { @@ -59,7 +60,7 @@ void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out) int mainEntryClickHouseCompressor(int argc, char ** argv) { - boost::program_options::options_description desc("Allowed options"); + boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth()); desc.add_options() ("help,h", "produce help message") ("decompress,d", "decompress") diff --git a/dbms/programs/format/Format.cpp b/dbms/programs/format/Format.cpp index b7e2629df16..ff415d88e1b 100644 --- a/dbms/programs/format/Format.cpp +++ b/dbms/programs/format/Format.cpp @@ -6,13 +6,13 @@ #include #include #include - +#include int mainEntryClickHouseFormat(int argc, char ** argv) { using namespace DB; - boost::program_options::options_description desc("Allowed options"); + boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth()); desc.add_options() ("help,h", 
"produce help message") ("hilite", "add syntax highlight with ANSI terminal escape sequences") diff --git a/dbms/programs/local/LocalServer.cpp b/dbms/programs/local/LocalServer.cpp index bed55a0fc5f..1844c037784 100644 --- a/dbms/programs/local/LocalServer.cpp +++ b/dbms/programs/local/LocalServer.cpp @@ -35,6 +35,7 @@ #include #include #include +#include namespace DB @@ -267,7 +268,7 @@ void LocalServer::attachSystemTables() void LocalServer::processQueries() { String initial_create_query = getInitialCreateTableQuery(); - String queries_str = initial_create_query + config().getString("query"); + String queries_str = initial_create_query + config().getRawString("query"); std::vector queries; auto parse_res = splitMultipartQuery(queries_str, queries); @@ -409,17 +410,7 @@ void LocalServer::init(int argc, char ** argv) /// Don't parse options with Poco library, we prefer neat boost::program_options stopOptionsProcessing(); - unsigned line_length = po::options_description::m_default_line_length; - unsigned min_description_length = line_length / 2; - if (isatty(STDIN_FILENO)) - { - winsize terminal_size{}; - ioctl(0, TIOCGWINSZ, &terminal_size); - line_length = std::max(3U, static_cast(terminal_size.ws_col)); - min_description_length = std::min(min_description_length, line_length - 2); - } - - po::options_description description("Main options", line_length, min_description_length); + po::options_description description = createOptionsDescription("Main options", getTerminalWidth()); description.add_options() ("help", "produce help message") ("config-file,c", po::value(), "config-file path") diff --git a/dbms/programs/obfuscator/Obfuscator.cpp b/dbms/programs/obfuscator/Obfuscator.cpp index 5149566465c..febe2b28606 100644 --- a/dbms/programs/obfuscator/Obfuscator.cpp +++ b/dbms/programs/obfuscator/Obfuscator.cpp @@ -37,6 +37,7 @@ #include #include #include +#include static const char * documantation = R"( @@ -949,7 +950,7 @@ try using namespace DB; namespace po = 
boost::program_options; - po::options_description description("Options"); + po::options_description description = createOptionsDescription("Options", getTerminalWidth()); description.add_options() ("help", "produce help message") ("structure,S", po::value(), "structure of the initial table (list of column and type names)") diff --git a/dbms/programs/performance-test/PerformanceTestSuite.cpp b/dbms/programs/performance-test/PerformanceTestSuite.cpp index 14ea8882a6b..eaa4e24cde9 100644 --- a/dbms/programs/performance-test/PerformanceTestSuite.cpp +++ b/dbms/programs/performance-test/PerformanceTestSuite.cpp @@ -28,6 +28,7 @@ #include #include #include +#include #include "TestStopConditions.h" #include "TestStats.h" @@ -324,7 +325,7 @@ try using po::value; using Strings = DB::Strings; - po::options_description desc("Allowed options"); + po::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth()); desc.add_options() ("help", "produce help message") ("lite", "use lite version of output") diff --git a/dbms/programs/server/Server.cpp b/dbms/programs/server/Server.cpp index ef61537e38d..5f5e464eb01 100644 --- a/dbms/programs/server/Server.cpp +++ b/dbms/programs/server/Server.cpp @@ -520,7 +520,7 @@ int Server::main(const std::vector & /*args*/) /// Init trace collector only after trace_log system table was created /// Disable it if we collect test coverage information, because it will work extremely slow. -#if USE_INTERNAL_UNWIND_LIBRARY && !WITH_COVERAGE +#if USE_UNWIND && !WITH_COVERAGE /// QueryProfiler cannot work reliably with any other libunwind or without PHDR cache. 
if (hasPHDRCache()) global_context->initializeTraceCollector(); diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h b/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h index 4b52f1e6fd9..38b67efd6dc 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h @@ -90,7 +90,7 @@ public: auto & set = this->data(place).value; size_t size = set.size(); writeVarUInt(size, buf); - for (auto & elem : set) + for (const auto & elem : set) writeIntBinary(elem, buf); } diff --git a/dbms/src/Client/ConnectionPool.h b/dbms/src/Client/ConnectionPool.h index 322bad04794..1ecb432c827 100644 --- a/dbms/src/Client/ConnectionPool.h +++ b/dbms/src/Client/ConnectionPool.h @@ -74,7 +74,7 @@ public: { Entry entry; if (settings) - entry = Base::get(settings->queue_max_wait_ms.totalMilliseconds()); + entry = Base::get(settings->connection_pool_max_wait_ms.totalMilliseconds()); else entry = Base::get(-1); diff --git a/dbms/src/Columns/ColumnLowCardinality.cpp b/dbms/src/Columns/ColumnLowCardinality.cpp index fa713f76e5e..32ba2378100 100644 --- a/dbms/src/Columns/ColumnLowCardinality.cpp +++ b/dbms/src/Columns/ColumnLowCardinality.cpp @@ -34,7 +34,7 @@ namespace auto & data = res_col->getData(); data.resize(hash_map.size()); - for (auto val : hash_map) + for (const auto & val : hash_map) data[val.getSecond()] = val.getFirst(); for (auto & ind : index) diff --git a/dbms/src/Columns/ColumnTuple.cpp b/dbms/src/Columns/ColumnTuple.cpp index 07599b3456f..3e3e311270f 100644 --- a/dbms/src/Columns/ColumnTuple.cpp +++ b/dbms/src/Columns/ColumnTuple.cpp @@ -81,6 +81,16 @@ MutableColumnPtr ColumnTuple::cloneEmpty() const return ColumnTuple::create(std::move(new_columns)); } +MutableColumnPtr ColumnTuple::cloneResized(size_t new_size) const +{ + const size_t tuple_size = columns.size(); + MutableColumns new_columns(tuple_size); + for (size_t i = 0; i < tuple_size; ++i) + new_columns[i] = 
columns[i]->cloneResized(new_size); + + return ColumnTuple::create(std::move(new_columns)); +} + Field ColumnTuple::operator[](size_t n) const { return Tuple{ext::map(columns, [n] (const auto & column) { return (*column)[n]; })}; diff --git a/dbms/src/Columns/ColumnTuple.h b/dbms/src/Columns/ColumnTuple.h index 65dd19fc6da..e5e47ac74db 100644 --- a/dbms/src/Columns/ColumnTuple.h +++ b/dbms/src/Columns/ColumnTuple.h @@ -42,6 +42,7 @@ public: const char * getFamilyName() const override { return "Tuple"; } MutableColumnPtr cloneEmpty() const override; + MutableColumnPtr cloneResized(size_t size) const override; size_t size() const override { diff --git a/dbms/src/Common/Config/CMakeLists.txt b/dbms/src/Common/Config/CMakeLists.txt index e1f78e2c213..c2869763f1b 100644 --- a/dbms/src/Common/Config/CMakeLists.txt +++ b/dbms/src/Common/Config/CMakeLists.txt @@ -4,5 +4,5 @@ add_headers_and_sources(clickhouse_common_config .) add_library(clickhouse_common_config ${clickhouse_common_config_headers} ${clickhouse_common_config_sources}) -target_link_libraries(clickhouse_common_config PUBLIC common PRIVATE clickhouse_common_zookeeper string_utils PUBLIC ${Poco_XML_LIBRARY} ${Poco_Util_LIBRARY} Threads::Threads) +target_link_libraries(clickhouse_common_config PUBLIC common PRIVATE clickhouse_common_zookeeper string_utils PUBLIC ${Poco_XML_LIBRARY} ${Poco_Util_LIBRARY}) target_include_directories(clickhouse_common_config PUBLIC ${DBMS_INCLUDE_DIR}) diff --git a/dbms/src/Common/HashTable/FixedHashMap.h b/dbms/src/Common/HashTable/FixedHashMap.h index ae076ddb877..d50c87a6583 100644 --- a/dbms/src/Common/HashTable/FixedHashMap.h +++ b/dbms/src/Common/HashTable/FixedHashMap.h @@ -11,8 +11,8 @@ struct FixedHashMapCell using State = TState; using value_type = PairNoInit; - bool full; Mapped mapped; + bool full; FixedHashMapCell() {} FixedHashMapCell(const Key &, const State &) : full(true) {} diff --git a/dbms/src/Common/HashTable/HashMap.h b/dbms/src/Common/HashTable/HashMap.h index 
f82563c4449..98669619d3d 100644 --- a/dbms/src/Common/HashTable/HashMap.h +++ b/dbms/src/Common/HashTable/HashMap.h @@ -128,14 +128,12 @@ struct HashMapCellWithSavedHash : public HashMapCell }; -template -< +template < typename Key, typename Cell, typename Hash = DefaultHash, typename Grower = HashTableGrower<>, - typename Allocator = HashTableAllocator -> + typename Allocator = HashTableAllocator> class HashMapTable : public HashTable { public: @@ -173,23 +171,19 @@ public: }; -template -< +template < typename Key, typename Mapped, typename Hash = DefaultHash, typename Grower = HashTableGrower<>, - typename Allocator = HashTableAllocator -> + typename Allocator = HashTableAllocator> using HashMap = HashMapTable, Hash, Grower, Allocator>; -template -< +template < typename Key, typename Mapped, typename Hash = DefaultHash, typename Grower = HashTableGrower<>, - typename Allocator = HashTableAllocator -> + typename Allocator = HashTableAllocator> using HashMapWithSavedHash = HashMapTable, Hash, Grower, Allocator>; diff --git a/dbms/src/Common/HashTable/HashTable.h b/dbms/src/Common/HashTable/HashTable.h index c5a0c812ee2..d29459a90d5 100644 --- a/dbms/src/Common/HashTable/HashTable.h +++ b/dbms/src/Common/HashTable/HashTable.h @@ -95,7 +95,6 @@ struct HashTableCell /// Create a cell with the given key / key and value. HashTableCell(const Key & key_, const State &) : key(key_) {} -/// HashTableCell(const value_type & value_, const State & state) : key(value_) {} /// Get what the value_type of the container will be. 
value_type & getValueMutable() { return key; } diff --git a/dbms/src/Common/QueryProfiler.cpp b/dbms/src/Common/QueryProfiler.cpp index b39cdfb4ef5..a0b75c567a9 100644 --- a/dbms/src/Common/QueryProfiler.cpp +++ b/dbms/src/Common/QueryProfiler.cpp @@ -100,7 +100,7 @@ QueryProfilerBase::QueryProfilerBase(const Int32 thread_id, const : log(&Logger::get("QueryProfiler")) , pause_signal(pause_signal_) { -#if USE_INTERNAL_UNWIND_LIBRARY +#if USE_UNWIND /// Sanity check. if (!hasPHDRCache()) throw Exception("QueryProfiler cannot be used without PHDR cache, that is not available for TSan build", ErrorCodes::NOT_IMPLEMENTED); @@ -173,7 +173,7 @@ QueryProfilerBase::~QueryProfilerBase() template void QueryProfilerBase::tryCleanup() { -#if USE_INTERNAL_UNWIND_LIBRARY +#if USE_UNWIND if (timer_id != nullptr && timer_delete(timer_id)) LOG_ERROR(log, "Failed to delete query profiler timer " + errnoToString(ErrorCodes::CANNOT_DELETE_TIMER)); diff --git a/dbms/src/Common/QueryProfiler.h b/dbms/src/Common/QueryProfiler.h index b6420ccc703..abbff6f6c0e 100644 --- a/dbms/src/Common/QueryProfiler.h +++ b/dbms/src/Common/QueryProfiler.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -43,7 +44,7 @@ private: Poco::Logger * log; -#if USE_INTERNAL_UNWIND_LIBRARY +#if USE_UNWIND /// Timer id from timer_create(2) timer_t timer_id = nullptr; #endif diff --git a/dbms/src/Common/StackTrace.cpp b/dbms/src/Common/StackTrace.cpp index a642ec2b73a..9981d0941aa 100644 --- a/dbms/src/Common/StackTrace.cpp +++ b/dbms/src/Common/StackTrace.cpp @@ -1,15 +1,20 @@ -#include -#include -#include #include -#include + #include #include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if USE_UNWIND +# include +#endif std::string signalToErrorMessage(int sig, const siginfo_t & info, const ucontext_t & context) { @@ -215,12 +220,6 @@ StackTrace::StackTrace(NoCapture) { } - -#if USE_UNWIND -extern "C" int 
unw_backtrace(void **, int); -#endif - - void StackTrace::tryCapture() { size = 0; diff --git a/dbms/src/Common/TerminalSize.cpp b/dbms/src/Common/TerminalSize.cpp new file mode 100644 index 00000000000..714a19b188a --- /dev/null +++ b/dbms/src/Common/TerminalSize.cpp @@ -0,0 +1,37 @@ +#include +#include +#include +#include +#include + + +namespace DB::ErrorCodes +{ + extern const int SYSTEM_ERROR; +} + +uint16_t getTerminalWidth() +{ + if (isatty(STDIN_FILENO)) + { + winsize terminal_size {}; + + if (ioctl(STDIN_FILENO, TIOCGWINSZ, &terminal_size)) + DB::throwFromErrno("Cannot obtain terminal window size (ioctl TIOCGWINSZ)", DB::ErrorCodes::SYSTEM_ERROR); + + return terminal_size.ws_col; + } + return 0; +} + +po::options_description createOptionsDescription(const std::string & caption, uint16_t terminal_width) +{ + unsigned line_length = po::options_description::m_default_line_length; + unsigned min_description_length = line_length / 2; + std::string longest_option_desc = "--http_native_compression_disable_checksumming_on_decompress"; + + line_length = std::max(static_cast(longest_option_desc.size()), terminal_width); + min_description_length = std::min(min_description_length, line_length - 2); + + return po::options_description(caption, line_length, min_description_length); +} diff --git a/dbms/src/Common/TerminalSize.h b/dbms/src/Common/TerminalSize.h new file mode 100644 index 00000000000..d7eee417f46 --- /dev/null +++ b/dbms/src/Common/TerminalSize.h @@ -0,0 +1,16 @@ +#pragma once + +#include +#include + + +namespace po = boost::program_options; + + +uint16_t getTerminalWidth(); + +/** Creates po::options_description with name and an appropriate size for option displaying + * when program is called with option --help + * */ +po::options_description createOptionsDescription(const std::string &caption, unsigned short terminal_width); + diff --git a/dbms/src/Common/ZooKeeper/CMakeLists.txt b/dbms/src/Common/ZooKeeper/CMakeLists.txt index 6a611886e65..aa6efcd3ca1 
100644 --- a/dbms/src/Common/ZooKeeper/CMakeLists.txt +++ b/dbms/src/Common/ZooKeeper/CMakeLists.txt @@ -4,7 +4,7 @@ add_headers_and_sources(clickhouse_common_zookeeper .) add_library(clickhouse_common_zookeeper ${clickhouse_common_zookeeper_headers} ${clickhouse_common_zookeeper_sources}) -target_link_libraries (clickhouse_common_zookeeper PUBLIC clickhouse_common_io common PRIVATE string_utils PUBLIC ${Poco_Util_LIBRARY} Threads::Threads) +target_link_libraries (clickhouse_common_zookeeper PUBLIC clickhouse_common_io common PRIVATE string_utils PUBLIC ${Poco_Util_LIBRARY}) target_include_directories(clickhouse_common_zookeeper PUBLIC ${DBMS_INCLUDE_DIR}) if (ENABLE_TESTS) diff --git a/dbms/src/Core/Settings.h b/dbms/src/Core/Settings.h index 88a81027cb6..0be8279d3e0 100644 --- a/dbms/src/Core/Settings.h +++ b/dbms/src/Core/Settings.h @@ -61,7 +61,10 @@ struct Settings : public SettingsCollection M(SettingSeconds, receive_timeout, DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC, "") \ M(SettingSeconds, send_timeout, DBMS_DEFAULT_SEND_TIMEOUT_SEC, "") \ M(SettingSeconds, tcp_keep_alive_timeout, 0, "") \ - M(SettingMilliseconds, queue_max_wait_ms, 5000, "The wait time in the request queue, if the number of concurrent requests exceeds the maximum.") \ + M(SettingMilliseconds, queue_max_wait_ms, 0, "The wait time in the request queue, if the number of concurrent requests exceeds the maximum.") \ + M(SettingMilliseconds, connection_pool_max_wait_ms, 0, "The wait time when connection pool is full.") \ + M(SettingMilliseconds, replace_running_query_max_wait_ms, 5000, "The wait time for running query with the same query_id to finish when setting 'replace_running_query' is active.") \ + M(SettingMilliseconds, kafka_max_wait_ms, 5000, "The wait time for reading from Kafka before retry.") \ M(SettingUInt64, poll_interval, DBMS_DEFAULT_POLL_INTERVAL, "Block at the query wait loop on the server for the specified number of seconds.") \ M(SettingUInt64, idle_connection_timeout, 3600, "Close 
idle TCP connections after specified number of seconds.") \ M(SettingUInt64, distributed_connections_pool_size, DBMS_DEFAULT_DISTRIBUTED_CONNECTIONS_POOL_SIZE, "Maximum number of connections with one remote server in the pool.") \ @@ -302,7 +305,7 @@ struct Settings : public SettingsCollection M(SettingChar, format_csv_delimiter, ',', "The character to be considered as a delimiter in CSV data. If setting with a string, a string has to have a length of 1.") \ M(SettingBool, format_csv_allow_single_quotes, 1, "If it is set to true, allow strings in single quotes.") \ M(SettingBool, format_csv_allow_double_quotes, 1, "If it is set to true, allow strings in double quotes.") \ - M(SettingBool, input_format_csv_unquoted_null_literal_as_null, false, "Consider unquoted NULL literal as \N") \ + M(SettingBool, input_format_csv_unquoted_null_literal_as_null, false, "Consider unquoted NULL literal as \\N") \ \ M(SettingDateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. 
Possible values: 'basic' and 'best_effort'.") \ M(SettingBool, log_profile_events, true, "Log query performance statistics into the query_log and query_thread_log.") \ diff --git a/dbms/src/Core/SettingsCommon.h b/dbms/src/Core/SettingsCommon.h index b8c56d50caa..97edfbe9934 100644 --- a/dbms/src/Core/SettingsCommon.h +++ b/dbms/src/Core/SettingsCommon.h @@ -695,7 +695,7 @@ public: #define IMPLEMENT_SETTINGS_COLLECTION_ADD_MUTABLE_MEMBER_INFO_HELPER_(TYPE, NAME, DEFAULT, DESCRIPTION) \ add({[](const Derived & d) { return d.NAME.changed; }, \ - StringRef(#NAME, strlen(#NAME)), StringRef(#DESCRIPTION, strlen(#DESCRIPTION)), true, \ + StringRef(#NAME, strlen(#NAME)), StringRef(DESCRIPTION, strlen(DESCRIPTION)), true, \ &Functions::NAME##_getString, &Functions::NAME##_getField, \ &Functions::NAME##_setString, &Functions::NAME##_setField, \ &Functions::NAME##_serialize, &Functions::NAME##_deserialize, \ @@ -703,7 +703,7 @@ public: #define IMPLEMENT_SETTINGS_COLLECTION_ADD_IMMUTABLE_MEMBER_INFO_HELPER_(TYPE, NAME, DEFAULT, DESCRIPTION) \ add({[](const Derived & d) { return d.NAME.changed; }, \ - StringRef(#NAME, strlen(#NAME)), StringRef(#DESCRIPTION, strlen(#DESCRIPTION)), false, \ + StringRef(#NAME, strlen(#NAME)), StringRef(DESCRIPTION, strlen(DESCRIPTION)), false, \ &Functions::NAME##_getString, &Functions::NAME##_getField, \ &Functions::NAME##_setString, &Functions::NAME##_setField, \ &Functions::NAME##_serialize, &Functions::NAME##_deserialize, \ diff --git a/dbms/src/Databases/DatabaseDictionary.cpp b/dbms/src/Databases/DatabaseDictionary.cpp index bfc0f6e89d0..2bb9bd30238 100644 --- a/dbms/src/Databases/DatabaseDictionary.cpp +++ b/dbms/src/Databases/DatabaseDictionary.cpp @@ -115,26 +115,6 @@ void DatabaseDictionary::removeTable( throw Exception("DatabaseDictionary: removeTable() is not supported", ErrorCodes::NOT_IMPLEMENTED); } -void DatabaseDictionary::renameTable( - const Context &, - const String &, - IDatabase &, - const String &) -{ - throw 
Exception("DatabaseDictionary: renameTable() is not supported", ErrorCodes::NOT_IMPLEMENTED); -} - -void DatabaseDictionary::alterTable( - const Context &, - const String &, - const ColumnsDescription &, - const IndicesDescription &, - const ConstraintsDescription &, - const ASTModifier &) -{ - throw Exception("DatabaseDictionary: alterTable() is not supported", ErrorCodes::NOT_IMPLEMENTED); -} - time_t DatabaseDictionary::getTableMetadataModificationTime( const Context &, const String &) diff --git a/dbms/src/Databases/DatabaseDictionary.h b/dbms/src/Databases/DatabaseDictionary.h index 650a6986722..1e1af7ef581 100644 --- a/dbms/src/Databases/DatabaseDictionary.h +++ b/dbms/src/Databases/DatabaseDictionary.h @@ -60,20 +60,6 @@ public: void attachTable(const String & table_name, const StoragePtr & table) override; StoragePtr detachTable(const String & table_name) override; - void renameTable( - const Context & context, - const String & table_name, - IDatabase & to_database, - const String & to_table_name) override; - - void alterTable( - const Context & context, - const String & name, - const ColumnsDescription & columns, - const IndicesDescription & indices, - const ConstraintsDescription & constraints, - const ASTModifier & engine_modifier) override; - time_t getTableMetadataModificationTime( const Context & context, const String & table_name) override; diff --git a/dbms/src/Databases/DatabaseFactory.cpp b/dbms/src/Databases/DatabaseFactory.cpp index 1dcf41dc4d6..af2a00830e1 100644 --- a/dbms/src/Databases/DatabaseFactory.cpp +++ b/dbms/src/Databases/DatabaseFactory.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include "config_core.h" #if USE_MYSQL diff --git a/dbms/src/Databases/DatabaseMemory.cpp b/dbms/src/Databases/DatabaseMemory.cpp index a7f8460366c..1356a28d245 100644 --- a/dbms/src/Databases/DatabaseMemory.cpp +++ b/dbms/src/Databases/DatabaseMemory.cpp @@ -39,26 +39,6 @@ void DatabaseMemory::removeTable( detachTable(table_name); } 
-void DatabaseMemory::renameTable( - const Context &, - const String &, - IDatabase &, - const String &) -{ - throw Exception("DatabaseMemory: renameTable() is not supported", ErrorCodes::NOT_IMPLEMENTED); -} - -void DatabaseMemory::alterTable( - const Context &, - const String &, - const ColumnsDescription &, - const IndicesDescription &, - const ConstraintsDescription &, - const ASTModifier &) -{ - throw Exception("DatabaseMemory: alterTable() is not supported", ErrorCodes::NOT_IMPLEMENTED); -} - time_t DatabaseMemory::getTableMetadataModificationTime( const Context &, const String &) diff --git a/dbms/src/Databases/DatabaseMemory.h b/dbms/src/Databases/DatabaseMemory.h index 32d3045612b..33bb8787168 100644 --- a/dbms/src/Databases/DatabaseMemory.h +++ b/dbms/src/Databases/DatabaseMemory.h @@ -37,20 +37,6 @@ public: const Context & context, const String & table_name) override; - void renameTable( - const Context & context, - const String & table_name, - IDatabase & to_database, - const String & to_table_name) override; - - void alterTable( - const Context & context, - const String & name, - const ColumnsDescription & columns, - const IndicesDescription & indices, - const ConstraintsDescription & constraints, - const ASTModifier & engine_modifier) override; - time_t getTableMetadataModificationTime( const Context & context, const String & table_name) override; diff --git a/dbms/src/Databases/DatabaseMySQL.h b/dbms/src/Databases/DatabaseMySQL.h index 6c277f1e9f8..7044a594b4c 100644 --- a/dbms/src/Databases/DatabaseMySQL.h +++ b/dbms/src/Databases/DatabaseMySQL.h @@ -5,6 +5,8 @@ #include #include +#include + namespace DB { @@ -61,21 +63,11 @@ public: throw Exception("MySQL database engine does not support attach table.", ErrorCodes::NOT_IMPLEMENTED); } - void renameTable(const Context &, const String &, IDatabase &, const String &) override - { - throw Exception("MySQL database engine does not support rename table.", ErrorCodes::NOT_IMPLEMENTED); - } - void 
createTable(const Context &, const String &, const StoragePtr &, const ASTPtr &) override { throw Exception("MySQL database engine does not support create table.", ErrorCodes::NOT_IMPLEMENTED); } - void alterTable(const Context &, const String &, const ColumnsDescription &, const IndicesDescription &, const ConstraintsDescription &, const ASTModifier &) override - { - throw Exception("MySQL database engine does not support alter table.", ErrorCodes::NOT_IMPLEMENTED); - } - private: struct MySQLStorageInfo { diff --git a/dbms/src/Databases/DatabaseOrdinary.cpp b/dbms/src/Databases/DatabaseOrdinary.cpp index 4748bd0d792..b988329127e 100644 --- a/dbms/src/Databases/DatabaseOrdinary.cpp +++ b/dbms/src/Databases/DatabaseOrdinary.cpp @@ -135,7 +135,25 @@ void DatabaseOrdinary::loadTables( if (endsWith(dir_it.name(), ".sql.bak")) continue; - /// There are files .sql.tmp - delete. + // There are files that we tried to delete previously + static const char * tmp_drop_ext = ".sql.tmp_drop"; + if (endsWith(dir_it.name(), tmp_drop_ext)) + { + const std::string table_name = dir_it.name().substr(0, dir_it.name().size() - strlen(tmp_drop_ext)); + if (Poco::File(data_path + '/' + table_name).exists()) + { + Poco::File(dir_it->path()).renameTo(table_name + ".sql"); + LOG_WARNING(log, "Table " << backQuote(table_name) << " was not dropped previously"); + } + else + { + LOG_INFO(log, "Removing file " << dir_it->path()); + Poco::File(dir_it->path()).remove(); + } + continue; + } + + /// There are files .sql.tmp - delete if (endsWith(dir_it.name(), ".sql.tmp")) { LOG_INFO(log, "Removing file " << dir_it->path()); @@ -302,6 +320,15 @@ void DatabaseOrdinary::removeTable( } catch (...) { + try + { + Poco::File(table_metadata_path + ".tmp_drop").remove(); + return; + } + catch (...) 
+ { + LOG_WARNING(log, getCurrentExceptionMessage(__PRETTY_FUNCTION__)); + } attachTable(table_name, res); throw; } @@ -355,7 +382,8 @@ void DatabaseOrdinary::renameTable( const Context & context, const String & table_name, IDatabase & to_database, - const String & to_table_name) + const String & to_table_name, + TableStructureWriteLockHolder & lock) { DatabaseOrdinary * to_database_concrete = typeid_cast(&to_database); @@ -372,7 +400,7 @@ void DatabaseOrdinary::renameTable( { table->rename(context.getPath() + "/data/" + escapeForFileName(to_database_concrete->name) + "/", to_database_concrete->name, - to_table_name); + to_table_name, lock); } catch (const Exception &) { diff --git a/dbms/src/Databases/DatabaseOrdinary.h b/dbms/src/Databases/DatabaseOrdinary.h index a71029f6495..e8895075768 100644 --- a/dbms/src/Databases/DatabaseOrdinary.h +++ b/dbms/src/Databases/DatabaseOrdinary.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace DB @@ -35,7 +36,8 @@ public: const Context & context, const String & table_name, IDatabase & to_database, - const String & to_table_name) override; + const String & to_table_name, + TableStructureWriteLockHolder &) override; void alterTable( const Context & context, diff --git a/dbms/src/Databases/DatabasesCommon.h b/dbms/src/Databases/DatabasesCommon.h index 00190d89b1e..734708e4c95 100644 --- a/dbms/src/Databases/DatabasesCommon.h +++ b/dbms/src/Databases/DatabasesCommon.h @@ -4,6 +4,7 @@ #include #include #include +#include /// General functionality for several different database engines. 
diff --git a/dbms/src/Databases/IDatabase.h b/dbms/src/Databases/IDatabase.h index b9d4c6ce4cb..549d795b66d 100644 --- a/dbms/src/Databases/IDatabase.h +++ b/dbms/src/Databases/IDatabase.h @@ -1,16 +1,9 @@ #pragma once -#include #include -#include #include -#include -#include -#include #include -#include -#include -#include +#include #include #include @@ -21,8 +14,16 @@ namespace DB { class Context; - struct Settings; +struct ConstraintsDescription; +class ColumnsDescription; +struct IndicesDescription; +struct TableStructureWriteLockHolder; + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} /** Allows to iterate over tables. @@ -102,22 +103,29 @@ public: /// Rename the table and possibly move the table to another database. virtual void renameTable( - const Context & context, - const String & name, - IDatabase & to_database, - const String & to_name) = 0; + const Context & /*context*/, + const String & /*name*/, + IDatabase & /*to_database*/, + const String & /*to_name*/, + TableStructureWriteLockHolder &) + { + throw Exception(getEngineName() + ": renameTable() is not supported", ErrorCodes::NOT_IMPLEMENTED); + } using ASTModifier = std::function; /// Change the table structure in metadata. /// You must call under the TableStructureLock of the corresponding table . If engine_modifier is empty, then engine does not change. 
virtual void alterTable( - const Context & context, - const String & name, - const ColumnsDescription & columns, - const IndicesDescription & indices, - const ConstraintsDescription & constraints, - const ASTModifier & engine_modifier) = 0; + const Context & /*context*/, + const String & /*name*/, + const ColumnsDescription & /*columns*/, + const IndicesDescription & /*indices*/, + const ConstraintsDescription & /*constraints*/, + const ASTModifier & /*engine_modifier*/) + { + throw Exception(getEngineName() + ": alterTable() is not supported", ErrorCodes::NOT_IMPLEMENTED); + } /// Returns time of table's metadata change, 0 if there is no corresponding metadata file. virtual time_t getTableMetadataModificationTime( diff --git a/dbms/src/Dictionaries/CMakeLists.txt b/dbms/src/Dictionaries/CMakeLists.txt index af858e6b26d..4d066d1f59b 100644 --- a/dbms/src/Dictionaries/CMakeLists.txt +++ b/dbms/src/Dictionaries/CMakeLists.txt @@ -15,7 +15,7 @@ list(REMOVE_ITEM clickhouse_dictionaries_sources DictionaryFactory.cpp Dictionar list(REMOVE_ITEM clickhouse_dictionaries_headers DictionaryFactory.h DictionarySourceFactory.h DictionaryStructure.h) add_library(clickhouse_dictionaries ${clickhouse_dictionaries_sources}) -target_link_libraries(clickhouse_dictionaries PRIVATE dbms clickhouse_common_io ${BTRIE_LIBRARIES} PUBLIC Threads::Threads) +target_link_libraries(clickhouse_dictionaries PRIVATE dbms clickhouse_common_io ${BTRIE_LIBRARIES}) if(Poco_SQL_FOUND AND NOT USE_INTERNAL_POCO_LIBRARY) target_include_directories(clickhouse_dictionaries SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR}) diff --git a/dbms/src/Interpreters/Aggregator.h b/dbms/src/Interpreters/Aggregator.h index c3d1d5df8fd..b42fdb542fc 100644 --- a/dbms/src/Interpreters/Aggregator.h +++ b/dbms/src/Interpreters/Aggregator.h @@ -196,8 +196,6 @@ struct AggregationMethodString using Data = TData; using Key = typename Data::key_type; using Mapped = typename Data::mapped_type; - using iterator = typename Data::iterator; - 
using const_iterator = typename Data::const_iterator; Data data; @@ -224,8 +222,6 @@ struct AggregationMethodFixedString using Data = TData; using Key = typename Data::key_type; using Mapped = typename Data::mapped_type; - using iterator = typename Data::iterator; - using const_iterator = typename Data::const_iterator; Data data; @@ -254,8 +250,6 @@ struct AggregationMethodSingleLowCardinalityColumn : public SingleColumnMethod using Data = typename Base::Data; using Key = typename Base::Key; using Mapped = typename Base::Mapped; - using iterator = typename Base::iterator; - using const_iterator = typename Base::const_iterator; using Base::data; @@ -365,8 +359,6 @@ struct AggregationMethodSerialized using Data = TData; using Key = typename Data::key_type; using Mapped = typename Data::mapped_type; - using iterator = typename Data::iterator; - using const_iterator = typename Data::const_iterator; Data data; @@ -460,8 +452,8 @@ struct AggregatedDataVariants : private boost::noncopyable std::unique_ptr> nullable_keys256_two_level; /// Support for low cardinality. 
- std::unique_ptr>> low_cardinality_key8; - std::unique_ptr>> low_cardinality_key16; + std::unique_ptr>> low_cardinality_key8; + std::unique_ptr>> low_cardinality_key16; std::unique_ptr>> low_cardinality_key32; std::unique_ptr>> low_cardinality_key64; std::unique_ptr>> low_cardinality_key_string; diff --git a/dbms/src/Interpreters/ClusterProxy/executeQuery.cpp b/dbms/src/Interpreters/ClusterProxy/executeQuery.cpp index 25d5a6eb0d4..dc0d3ef27b1 100644 --- a/dbms/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/dbms/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -18,6 +18,8 @@ Context removeUserRestrictionsFromSettings(const Context & context, const Settin { Settings new_settings = settings; new_settings.queue_max_wait_ms = Cluster::saturate(new_settings.queue_max_wait_ms, settings.max_execution_time); + new_settings.connection_pool_max_wait_ms = Cluster::saturate(new_settings.connection_pool_max_wait_ms, settings.max_execution_time); + new_settings.replace_running_query_max_wait_ms = Cluster::saturate(new_settings.replace_running_query_max_wait_ms, settings.max_execution_time); /// Does not matter on remote servers, because queries are sent under different user. 
new_settings.max_concurrent_queries_for_user = 0; diff --git a/dbms/src/Interpreters/ExpressionActions.cpp b/dbms/src/Interpreters/ExpressionActions.cpp index 5ef05569f91..0083820d6e9 100644 --- a/dbms/src/Interpreters/ExpressionActions.cpp +++ b/dbms/src/Interpreters/ExpressionActions.cpp @@ -291,8 +291,8 @@ void ExpressionAction::prepare(Block & sample_block, const Settings & settings, bool make_nullable = is_null_used_as_default && right_or_full_join; - if (make_nullable && !col.type->isNullable()) - col.type = std::make_shared(col.type); + if (make_nullable && col.type->canBeInsideNullable()) + col.type = makeNullable(col.type); } for (const auto & col : columns_added_by_join) @@ -316,8 +316,8 @@ void ExpressionAction::prepare(Block & sample_block, const Settings & settings, } } - if (make_nullable && !res_type->isNullable()) - res_type = std::make_shared(res_type); + if (make_nullable && res_type->canBeInsideNullable()) + res_type = makeNullable(res_type); sample_block.insert(ColumnWithTypeAndName(nullptr, res_type, col.name)); } diff --git a/dbms/src/Interpreters/InterpreterDropQuery.cpp b/dbms/src/Interpreters/InterpreterDropQuery.cpp index 91213b6100e..226a93aff88 100644 --- a/dbms/src/Interpreters/InterpreterDropQuery.cpp +++ b/dbms/src/Interpreters/InterpreterDropQuery.cpp @@ -80,7 +80,7 @@ BlockIO InterpreterDropQuery::executeToTable(String & database_name_, String & t /// If table was already dropped by anyone, an exception will be thrown auto table_lock = database_and_table.second->lockExclusively(context.getCurrentQueryId()); /// Drop table data, don't touch metadata - database_and_table.second->truncate(query_ptr, context); + database_and_table.second->truncate(query_ptr, context, table_lock); } else if (kind == ASTDropQuery::Kind::Drop) { @@ -90,11 +90,32 @@ BlockIO InterpreterDropQuery::executeToTable(String & database_name_, String & t /// If table was already dropped by anyone, an exception will be thrown auto table_lock = 
database_and_table.second->lockExclusively(context.getCurrentQueryId()); - /// Delete table metadata and table itself from memory + const std::string metadata_file_without_extension = + database_and_table.first->getMetadataPath() + + escapeForFileName(database_and_table.second->getTableName()); + + const auto prev_metadata_name = metadata_file_without_extension + ".sql"; + const auto drop_metadata_name = metadata_file_without_extension + ".sql.tmp_drop"; + + /// Try to rename metadata file and delete the data + try + { + /// There some kind of tables that have no metadata - ignore renaming + if (Poco::File(prev_metadata_name).exists()) + Poco::File(prev_metadata_name).renameTo(drop_metadata_name); + /// Delete table data + database_and_table.second->drop(table_lock); + } + catch (...) + { + if (Poco::File(drop_metadata_name).exists()) + Poco::File(drop_metadata_name).renameTo(prev_metadata_name); + throw; + } + + /// Delete table metadata and table itself from memory database_and_table.first->removeTable(context, database_and_table.second->getTableName()); - /// Delete table data - database_and_table.second->drop(); database_and_table.second->is_dropped = true; String database_data_path = database_and_table.first->getDataPath(); @@ -128,7 +149,7 @@ BlockIO InterpreterDropQuery::executeToTemporaryTable(String & table_name, ASTDr /// If table was already dropped by anyone, an exception will be thrown auto table_lock = table->lockExclusively(context.getCurrentQueryId()); /// Drop table data, don't touch metadata - table->truncate(query_ptr, context); + table->truncate(query_ptr, context, table_lock); } else if (kind == ASTDropQuery::Kind::Drop) { @@ -137,7 +158,7 @@ BlockIO InterpreterDropQuery::executeToTemporaryTable(String & table_name, ASTDr /// If table was already dropped by anyone, an exception will be thrown auto table_lock = table->lockExclusively(context.getCurrentQueryId()); /// Delete table data - table->drop(); + table->drop(table_lock); table->is_dropped 
= true; } } diff --git a/dbms/src/Interpreters/InterpreterRenameQuery.cpp b/dbms/src/Interpreters/InterpreterRenameQuery.cpp index 360adf45194..e763c002209 100644 --- a/dbms/src/Interpreters/InterpreterRenameQuery.cpp +++ b/dbms/src/Interpreters/InterpreterRenameQuery.cpp @@ -26,6 +26,8 @@ struct RenameDescription to_table_name(elem.to.table) {} + TableStructureWriteLockHolder from_table_lock; + String from_database_name; String from_table_name; @@ -75,7 +77,7 @@ BlockIO InterpreterRenameQuery::execute() } }; - std::set unique_tables_from; + std::map tables_from_locks; /// Don't allow to drop tables (that we are renaming); don't allow to create tables in places where tables will be renamed. std::map> table_guards; @@ -87,7 +89,11 @@ BlockIO InterpreterRenameQuery::execute() UniqueTableName from(descriptions.back().from_database_name, descriptions.back().from_table_name); UniqueTableName to(descriptions.back().to_database_name, descriptions.back().to_table_name); - unique_tables_from.emplace(from); + if (!tables_from_locks.count(from)) + if (auto table = context.tryGetTable(from.database_name, from.table_name)) + tables_from_locks.emplace(from, table->lockExclusively(context.getCurrentQueryId())); + + descriptions.back().from_table_lock = tables_from_locks[from]; if (!table_guards.count(from)) table_guards.emplace(from, context.getDDLGuard(from.database_name, from.table_name)); @@ -96,13 +102,6 @@ BlockIO InterpreterRenameQuery::execute() table_guards.emplace(to, context.getDDLGuard(to.database_name, to.table_name)); } - std::vector locks; - locks.reserve(unique_tables_from.size()); - - for (const auto & names : unique_tables_from) - if (auto table = context.tryGetTable(names.database_name, names.table_name)) - locks.emplace_back(table->lockExclusively(context.getCurrentQueryId())); - /** All tables are locked. If there are more than one rename in chain, * we need to hold global lock while doing all renames. Order matters to avoid deadlocks. 
* It provides atomicity of all RENAME chain as a whole, from the point of view of DBMS client, @@ -114,12 +113,12 @@ BlockIO InterpreterRenameQuery::execute() if (descriptions.size() > 1) lock = context.getLock(); - for (const auto & elem : descriptions) + for (auto & elem : descriptions) { context.assertTableDoesntExist(elem.to_database_name, elem.to_table_name); context.getDatabase(elem.from_database_name)->renameTable( - context, elem.from_table_name, *context.getDatabase(elem.to_database_name), elem.to_table_name); + context, elem.from_table_name, *context.getDatabase(elem.to_database_name), elem.to_table_name, elem.from_table_lock); } return {}; diff --git a/dbms/src/Interpreters/Join.cpp b/dbms/src/Interpreters/Join.cpp index 8e91424bc21..63bf88a8437 100644 --- a/dbms/src/Interpreters/Join.cpp +++ b/dbms/src/Interpreters/Join.cpp @@ -50,7 +50,7 @@ static std::unordered_map requiredRightKeys(const Names & k static void convertColumnToNullable(ColumnWithTypeAndName & column) { - if (column.type->isNullable()) + if (column.type->isNullable() || !column.type->canBeInsideNullable()) return; column.type = makeNullable(column.type); @@ -71,7 +71,7 @@ static ColumnWithTypeAndName correctNullability(ColumnWithTypeAndName && column, if (nullable) { convertColumnToNullable(column); - if (negative_null_map.size()) + if (column.type->isNullable() && negative_null_map.size()) { MutableColumnPtr mutable_column = (*std::move(column.column)).mutate(); assert_cast(*mutable_column).applyNegatedNullMap(negative_null_map); diff --git a/dbms/src/Interpreters/ProcessList.cpp b/dbms/src/Interpreters/ProcessList.cpp index 5a13477147c..71376c6d129 100644 --- a/dbms/src/Interpreters/ProcessList.cpp +++ b/dbms/src/Interpreters/ProcessList.cpp @@ -87,10 +87,10 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as { std::unique_lock lock(mutex); - const auto max_wait_ms = settings.queue_max_wait_ms.totalMilliseconds(); + const auto queue_max_wait_ms = 
settings.queue_max_wait_ms.totalMilliseconds(); if (!is_unlimited_query && max_size && processes.size() >= max_size) { - if (!max_wait_ms || !have_space.wait_for(lock, std::chrono::milliseconds(max_wait_ms), [&]{ return processes.size() < max_size; })) + if (!queue_max_wait_ms || !have_space.wait_for(lock, std::chrono::milliseconds(queue_max_wait_ms), [&]{ return processes.size() < max_size; })) throw Exception("Too many simultaneous queries. Maximum: " + toString(max_size), ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES); } @@ -127,7 +127,9 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as /// Ask queries to cancel. They will check this flag. running_query->second->is_killed.store(true, std::memory_order_relaxed); - if (!max_wait_ms || !have_space.wait_for(lock, std::chrono::milliseconds(max_wait_ms), [&] + const auto replace_running_query_max_wait_ms = settings.replace_running_query_max_wait_ms.totalMilliseconds(); + if (!replace_running_query_max_wait_ms || !have_space.wait_for(lock, std::chrono::milliseconds(replace_running_query_max_wait_ms), + [&] { running_query = user_process_list->second.queries.find(client_info.current_query_id); if (running_query == user_process_list->second.queries.end()) @@ -135,8 +137,10 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as running_query->second->is_killed.store(true, std::memory_order_relaxed); return false; })) + { throw Exception("Query with id = " + client_info.current_query_id + " is already running and can't be stopped", ErrorCodes::QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING); + } } } } diff --git a/dbms/src/Interpreters/tests/CMakeLists.txt b/dbms/src/Interpreters/tests/CMakeLists.txt index 3fac5424c00..03c06eb7257 100644 --- a/dbms/src/Interpreters/tests/CMakeLists.txt +++ b/dbms/src/Interpreters/tests/CMakeLists.txt @@ -57,5 +57,5 @@ target_link_libraries (users PRIVATE dbms clickhouse_common_config stdc++fs) if (OS_LINUX) add_executable (internal_iotop 
internal_iotop.cpp) - target_link_libraries (internal_iotop PRIVATE dbms Threads::Threads) + target_link_libraries (internal_iotop PRIVATE dbms) endif () diff --git a/dbms/src/Storages/IStorage.cpp b/dbms/src/Storages/IStorage.cpp index 2f3a48d90b6..9091008a38b 100644 --- a/dbms/src/Storages/IStorage.cpp +++ b/dbms/src/Storages/IStorage.cpp @@ -22,6 +22,7 @@ namespace ErrorCodes extern const int TYPE_MISMATCH; extern const int SETTINGS_ARE_NOT_SUPPORTED; extern const int UNKNOWN_SETTING; + extern const int TABLE_IS_DROPPED; } IStorage::IStorage(ColumnsDescription virtuals_) : virtuals(std::move(virtuals_)) diff --git a/dbms/src/Storages/IStorage.h b/dbms/src/Storages/IStorage.h index 6c23a638ddf..ffe4ed1b775 100644 --- a/dbms/src/Storages/IStorage.h +++ b/dbms/src/Storages/IStorage.h @@ -9,11 +9,13 @@ #include #include #include +#include +#include +#include #include #include #include #include -#include #include #include @@ -24,7 +26,6 @@ namespace DB namespace ErrorCodes { - extern const int TABLE_IS_DROPPED; extern const int NOT_IMPLEMENTED; } @@ -261,12 +262,12 @@ public: * The table is not usable during and after call to this method. * If you do not need any action other than deleting the directory with data, you can leave this method blank. */ - virtual void drop() {} + virtual void drop(TableStructureWriteLockHolder &) {} /** Clear the table data and leave it empty. * Must be called under lockForAlter. */ - virtual void truncate(const ASTPtr & /*query*/, const Context & /* context */) + virtual void truncate(const ASTPtr & /*query*/, const Context & /* context */, TableStructureWriteLockHolder &) { throw Exception("Truncate is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED); } @@ -276,7 +277,8 @@ public: * In this function, you need to rename the directory with the data, if any. * Called when the table structure is locked for write. 
*/ - virtual void rename(const String & /*new_path_to_db*/, const String & /*new_database_name*/, const String & /*new_table_name*/) + virtual void rename(const String & /*new_path_to_db*/, const String & /*new_database_name*/, const String & /*new_table_name*/, + TableStructureWriteLockHolder &) { throw Exception("Method rename is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED); } diff --git a/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp b/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp index 1962e4fbc63..09f97f8c836 100644 --- a/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp +++ b/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp @@ -43,7 +43,7 @@ Block KafkaBlockInputStream::getHeader() const void KafkaBlockInputStream::readPrefixImpl() { - auto timeout = std::chrono::milliseconds(context.getSettingsRef().queue_max_wait_ms.totalMilliseconds()); + auto timeout = std::chrono::milliseconds(context.getSettingsRef().kafka_max_wait_ms.totalMilliseconds()); buffer = storage.popReadBuffer(timeout); claimed = !!buffer; diff --git a/dbms/src/Storages/Kafka/KafkaBlockInputStream.h b/dbms/src/Storages/Kafka/KafkaBlockInputStream.h index fef7f8d0469..011ed5fe046 100644 --- a/dbms/src/Storages/Kafka/KafkaBlockInputStream.h +++ b/dbms/src/Storages/Kafka/KafkaBlockInputStream.h @@ -4,6 +4,8 @@ #include #include +#include + namespace DB { diff --git a/dbms/src/Storages/Kafka/StorageKafka.cpp b/dbms/src/Storages/Kafka/StorageKafka.cpp index 835ce43b1a4..2d55eb42f1e 100644 --- a/dbms/src/Storages/Kafka/StorageKafka.cpp +++ b/dbms/src/Storages/Kafka/StorageKafka.cpp @@ -189,7 +189,7 @@ void StorageKafka::shutdown() } -void StorageKafka::rename(const String & /* new_path_to_db */, const String & new_database_name, const String & new_table_name) +void StorageKafka::rename(const String & /* new_path_to_db */, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) { table_name = new_table_name; database_name = 
new_database_name; diff --git a/dbms/src/Storages/Kafka/StorageKafka.h b/dbms/src/Storages/Kafka/StorageKafka.h index cd55f28820e..51a06a890db 100644 --- a/dbms/src/Storages/Kafka/StorageKafka.h +++ b/dbms/src/Storages/Kafka/StorageKafka.h @@ -1,8 +1,6 @@ #pragma once #include -#include -#include #include #include #include @@ -11,6 +9,8 @@ #include #include +#include + namespace DB { @@ -40,10 +40,9 @@ public: BlockOutputStreamPtr write( const ASTPtr & query, - const Context & context - ) override; + const Context & context) override; - void rename(const String & /* new_path_to_db */, const String & new_database_name, const String & new_table_name) override; + void rename(const String & /* new_path_to_db */, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override; void updateDependencies() override; diff --git a/dbms/src/Storages/LiveView/LiveViewBlockInputStream.h b/dbms/src/Storages/LiveView/LiveViewBlockInputStream.h index 345fceaf095..f73991ddb77 100644 --- a/dbms/src/Storages/LiveView/LiveViewBlockInputStream.h +++ b/dbms/src/Storages/LiveView/LiveViewBlockInputStream.h @@ -31,7 +31,12 @@ public: const bool has_limit_, const UInt64 limit_, const UInt64 heartbeat_interval_sec_, const UInt64 temporary_live_view_timeout_sec_) - : storage(std::move(storage_)), blocks_ptr(std::move(blocks_ptr_)), blocks_metadata_ptr(std::move(blocks_metadata_ptr_)), active_ptr(std::move(active_ptr_)), has_limit(has_limit_), limit(limit_), heartbeat_interval_usec(heartbeat_interval_sec_ * 1000000), temporary_live_view_timeout_sec(temporary_live_view_timeout_sec_) + : storage(std::move(storage_)), blocks_ptr(std::move(blocks_ptr_)), + blocks_metadata_ptr(std::move(blocks_metadata_ptr_)), + active_ptr(std::move(active_ptr_)), + has_limit(has_limit_), limit(limit_), + heartbeat_interval_usec(heartbeat_interval_sec_ * 1000000), + temporary_live_view_timeout_sec(temporary_live_view_timeout_sec_) { /// grab active pointer active = 
active_ptr.lock(); diff --git a/dbms/src/Storages/LiveView/LiveViewEventsBlockInputStream.h b/dbms/src/Storages/LiveView/LiveViewEventsBlockInputStream.h index 120d0098536..3308ff2858b 100644 --- a/dbms/src/Storages/LiveView/LiveViewEventsBlockInputStream.h +++ b/dbms/src/Storages/LiveView/LiveViewEventsBlockInputStream.h @@ -51,7 +51,12 @@ public: const bool has_limit_, const UInt64 limit_, const UInt64 heartbeat_interval_sec_, const UInt64 temporary_live_view_timeout_sec_) - : storage(std::move(storage_)), blocks_ptr(std::move(blocks_ptr_)), blocks_metadata_ptr(std::move(blocks_metadata_ptr_)), active_ptr(std::move(active_ptr_)), has_limit(has_limit_), limit(limit_), heartbeat_interval_usec(heartbeat_interval_sec_ * 1000000), temporary_live_view_timeout_sec(temporary_live_view_timeout_sec_) + : storage(std::move(storage_)), blocks_ptr(std::move(blocks_ptr_)), + blocks_metadata_ptr(std::move(blocks_metadata_ptr_)), + active_ptr(std::move(active_ptr_)), has_limit(has_limit_), + limit(limit_), + heartbeat_interval_usec(heartbeat_interval_sec_ * 1000000), + temporary_live_view_timeout_sec(temporary_live_view_timeout_sec_) { /// grab active pointer active = active_ptr.lock(); diff --git a/dbms/src/Storages/LiveView/StorageLiveView.cpp b/dbms/src/Storages/LiveView/StorageLiveView.cpp index 7704f421517..1726fe0fba1 100644 --- a/dbms/src/Storages/LiveView/StorageLiveView.cpp +++ b/dbms/src/Storages/LiveView/StorageLiveView.cpp @@ -365,25 +365,25 @@ void StorageLiveView::checkTableCanBeDropped() const } } -void StorageLiveView::noUsersThread(const UInt64 & timeout) +void StorageLiveView::noUsersThread(std::shared_ptr storage, const UInt64 & timeout) { - if (shutdown_called) - return; - bool drop_table = false; + if (storage->shutdown_called) + return; + { while (1) { - std::unique_lock lock(no_users_thread_mutex); - if (!no_users_thread_condition.wait_for(lock, std::chrono::seconds(timeout), [&] { return no_users_thread_wakeup; })) + std::unique_lock 
lock(storage->no_users_thread_mutex); + if (!storage->no_users_thread_condition.wait_for(lock, std::chrono::seconds(timeout), [&] { return storage->no_users_thread_wakeup; })) { - no_users_thread_wakeup = false; - if (shutdown_called) + storage->no_users_thread_wakeup = false; + if (storage->shutdown_called) return; - if (hasUsers()) + if (storage->hasUsers()) return; - if (!global_context.getDependencies(database_name, table_name).empty()) + if (!storage->global_context.getDependencies(storage->database_name, storage->table_name).empty()) continue; drop_table = true; } @@ -393,17 +393,17 @@ void StorageLiveView::noUsersThread(const UInt64 & timeout) if (drop_table) { - if (global_context.tryGetTable(database_name, table_name)) + if (storage->global_context.tryGetTable(storage->database_name, storage->table_name)) { try { /// We create and execute `drop` query for this table auto drop_query = std::make_shared(); - drop_query->database = database_name; - drop_query->table = table_name; + drop_query->database = storage->database_name; + drop_query->table = storage->table_name; drop_query->kind = ASTDropQuery::Kind::Drop; ASTPtr ast_drop_query = drop_query; - InterpreterDropQuery drop_interpreter(ast_drop_query, global_context); + InterpreterDropQuery drop_interpreter(ast_drop_query, storage->global_context); drop_interpreter.execute(); } catch (...) 
@@ -419,9 +419,6 @@ void StorageLiveView::startNoUsersThread(const UInt64 & timeout) if (!start_no_users_thread_called.compare_exchange_strong(expected, true)) return; - if (is_dropped) - return; - if (is_temporary) { if (no_users_thread.joinable()) @@ -438,8 +435,10 @@ void StorageLiveView::startNoUsersThread(const UInt64 & timeout) no_users_thread_wakeup = false; } if (!is_dropped) - no_users_thread = std::thread(&StorageLiveView::noUsersThread, this, timeout); + no_users_thread = std::thread(&StorageLiveView::noUsersThread, + std::static_pointer_cast(shared_from_this()), timeout); } + start_no_users_thread_called = false; } @@ -456,22 +455,22 @@ void StorageLiveView::shutdown() if (no_users_thread.joinable()) { - std::lock_guard lock(no_users_thread_mutex); - no_users_thread_wakeup = true; - no_users_thread_condition.notify_one(); - /// Must detach the no users thread - /// as we can't join it as it will result - /// in a deadlock - no_users_thread.detach(); /// TODO Not viable at all. 
+ { + std::lock_guard lock(no_users_thread_mutex); + no_users_thread_wakeup = true; + no_users_thread_condition.notify_one(); + } } } StorageLiveView::~StorageLiveView() { shutdown(); + if (no_users_thread.joinable()) + no_users_thread.detach(); } -void StorageLiveView::drop() +void StorageLiveView::drop(TableStructureWriteLockHolder &) { global_context.removeDependency( DatabaseAndTableName(select_database_name, select_table_name), @@ -534,8 +533,11 @@ BlockInputStreams StorageLiveView::watch( if (query.is_watch_events) { - auto reader = std::make_shared(std::static_pointer_cast(shared_from_this()), blocks_ptr, blocks_metadata_ptr, active_ptr, has_limit, limit, context.getSettingsRef().live_view_heartbeat_interval.totalSeconds(), - context.getSettingsRef().temporary_live_view_timeout.totalSeconds()); + auto reader = std::make_shared( + std::static_pointer_cast(shared_from_this()), + blocks_ptr, blocks_metadata_ptr, active_ptr, has_limit, limit, + context.getSettingsRef().live_view_heartbeat_interval.totalSeconds(), + context.getSettingsRef().temporary_live_view_timeout.totalSeconds()); if (no_users_thread.joinable()) { @@ -559,8 +561,11 @@ BlockInputStreams StorageLiveView::watch( } else { - auto reader = std::make_shared(std::static_pointer_cast(shared_from_this()), blocks_ptr, blocks_metadata_ptr, active_ptr, has_limit, limit, context.getSettingsRef().live_view_heartbeat_interval.totalSeconds(), - context.getSettingsRef().temporary_live_view_timeout.totalSeconds()); + auto reader = std::make_shared( + std::static_pointer_cast(shared_from_this()), + blocks_ptr, blocks_metadata_ptr, active_ptr, has_limit, limit, + context.getSettingsRef().live_view_heartbeat_interval.totalSeconds(), + context.getSettingsRef().temporary_live_view_timeout.totalSeconds()); if (no_users_thread.joinable()) { diff --git a/dbms/src/Storages/LiveView/StorageLiveView.h b/dbms/src/Storages/LiveView/StorageLiveView.h index 9930d8d6154..710f4ec1602 100644 --- 
a/dbms/src/Storages/LiveView/StorageLiveView.h +++ b/dbms/src/Storages/LiveView/StorageLiveView.h @@ -71,11 +71,10 @@ public: { return active_ptr.use_count() > 1; } - /// Background thread for temporary tables - /// which drops this table if there are no users + /// No users thread mutex, predicate and wake up condition void startNoUsersThread(const UInt64 & timeout); std::mutex no_users_thread_mutex; - bool no_users_thread_wakeup{false}; + bool no_users_thread_wakeup = false; std::condition_variable no_users_thread_condition; /// Get blocks hash /// must be called with mutex locked @@ -105,7 +104,7 @@ public: } void checkTableCanBeDropped() const override; - void drop() override; + void drop(TableStructureWriteLockHolder &) override; void startup() override; void shutdown() override; @@ -149,7 +148,7 @@ private: String database_name; ASTPtr inner_query; Context & global_context; - bool is_temporary {false}; + bool is_temporary = false; mutable Block sample_block; /// Mutex for the blocks and ready condition @@ -166,10 +165,12 @@ private: std::shared_ptr blocks_metadata_ptr; BlocksPtrs mergeable_blocks; - void noUsersThread(const UInt64 & timeout); + /// Background thread for temporary tables + /// which drops this table if there are no users + static void noUsersThread(std::shared_ptr storage, const UInt64 & timeout); std::thread no_users_thread; - std::atomic shutdown_called{false}; - std::atomic start_no_users_thread_called{false}; + std::atomic shutdown_called = false; + std::atomic start_no_users_thread_called = false; UInt64 temporary_live_view_timeout; StorageLiveView( diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp index b2d4a4b9d73..70ae2414418 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp @@ -1690,7 +1690,7 @@ void MergeTreeData::removeEmptyColumnsFromPart(MergeTreeData::MutableDataPartPtr empty_columns.clear(); } -void 
MergeTreeData::freezeAll(const String & with_name, const Context & context) +void MergeTreeData::freezeAll(const String & with_name, const Context & context, TableStructureReadLockHolder &) { freezePartitionsByMatcher([] (const DataPartPtr &){ return true; }, with_name, context); } @@ -2550,7 +2550,7 @@ void MergeTreeData::removePartContributionToColumnSizes(const DataPartPtr & part } -void MergeTreeData::freezePartition(const ASTPtr & partition_ast, const String & with_name, const Context & context) +void MergeTreeData::freezePartition(const ASTPtr & partition_ast, const String & with_name, const Context & context, TableStructureReadLockHolder &) { std::optional prefix; String partition_id; diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.h b/dbms/src/Storages/MergeTree/MergeTreeData.h index 0440a3181c8..ea9db0920a3 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.h +++ b/dbms/src/Storages/MergeTree/MergeTreeData.h @@ -549,7 +549,7 @@ public: void removeEmptyColumnsFromPart(MergeTreeData::MutableDataPartPtr & data_part); /// Freezes all parts. - void freezeAll(const String & with_name, const Context & context); + void freezeAll(const String & with_name, const Context & context, TableStructureReadLockHolder & table_lock_holder); /// Should be called if part data is suspected to be corrupted. void reportBrokenPart(const String & name) const @@ -577,7 +577,7 @@ public: * Backup is created in directory clickhouse_dir/shadow/i/, where i - incremental number, * or if 'with_name' is specified - backup is created in directory with specified name. 
*/ - void freezePartition(const ASTPtr & partition, const String & with_name, const Context & context); + void freezePartition(const ASTPtr & partition, const String & with_name, const Context & context, TableStructureReadLockHolder & table_lock_holder); size_t getColumnCompressedSize(const std::string & name) const { diff --git a/dbms/src/Storages/StorageBuffer.h b/dbms/src/Storages/StorageBuffer.h index b81ca42a0eb..1c565a7d8f0 100644 --- a/dbms/src/Storages/StorageBuffer.h +++ b/dbms/src/Storages/StorageBuffer.h @@ -73,7 +73,11 @@ public: void shutdown() override; bool optimize(const ASTPtr & query, const ASTPtr & partition, bool final, bool deduplicate, const Context & context) override; - void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name) override { table_name = new_table_name; database_name = new_database_name; } + void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override + { + table_name = new_table_name; + database_name = new_database_name; + } bool supportsSampling() const override { return true; } bool supportsPrewhere() const override diff --git a/dbms/src/Storages/StorageDictionary.h b/dbms/src/Storages/StorageDictionary.h index 097e81d15c6..9539240e75d 100644 --- a/dbms/src/Storages/StorageDictionary.h +++ b/dbms/src/Storages/StorageDictionary.h @@ -34,7 +34,6 @@ public: size_t max_block_size = DEFAULT_BLOCK_SIZE, unsigned threads = 1) override; - void drop() override {} static NamesAndTypesList getNamesAndTypes(const DictionaryStructure & dictionary_structure); template diff --git a/dbms/src/Storages/StorageDistributed.cpp b/dbms/src/Storages/StorageDistributed.cpp index b2e632f9414..2c289dd714e 100644 --- a/dbms/src/Storages/StorageDistributed.cpp +++ b/dbms/src/Storages/StorageDistributed.cpp @@ -415,7 +415,7 @@ void StorageDistributed::shutdown() } -void StorageDistributed::truncate(const 
ASTPtr &, const Context &) +void StorageDistributed::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) { std::lock_guard lock(cluster_nodes_mutex); diff --git a/dbms/src/Storages/StorageDistributed.h b/dbms/src/Storages/StorageDistributed.h index 3261f5a0beb..153ada5d284 100644 --- a/dbms/src/Storages/StorageDistributed.h +++ b/dbms/src/Storages/StorageDistributed.h @@ -77,12 +77,17 @@ public: BlockOutputStreamPtr write(const ASTPtr & query, const Context & context) override; - void drop() override {} + void drop(TableStructureWriteLockHolder &) override {} /// Removes temporary data in local filesystem. - void truncate(const ASTPtr &, const Context &) override; + void truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) override; + + void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override + { + table_name = new_table_name; + database_name = new_database_name; + } - void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name) override { table_name = new_table_name; database_name = new_database_name; } /// in the sub-tables, you need to manually add and delete columns /// the structure of the sub-table is not checked void alter( diff --git a/dbms/src/Storages/StorageFile.cpp b/dbms/src/Storages/StorageFile.cpp index 2db24bbd610..cfd14c58a2d 100644 --- a/dbms/src/Storages/StorageFile.cpp +++ b/dbms/src/Storages/StorageFile.cpp @@ -264,13 +264,7 @@ BlockOutputStreamPtr StorageFile::write( } -void StorageFile::drop() -{ - /// Extra actions are not required. 
-} - - -void StorageFile::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) +void StorageFile::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) { if (!is_db_table) throw Exception("Can't rename table '" + table_name + "' binded to user-defined file (or FD)", ErrorCodes::DATABASE_ACCESS_DENIED); diff --git a/dbms/src/Storages/StorageFile.h b/dbms/src/Storages/StorageFile.h index 7268c8ddff0..1410cc5f215 100644 --- a/dbms/src/Storages/StorageFile.h +++ b/dbms/src/Storages/StorageFile.h @@ -38,9 +38,7 @@ public: const ASTPtr & query, const Context & context) override; - void drop() override; - - void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override; + void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override; String getDataPath() const override { return path; } diff --git a/dbms/src/Storages/StorageHDFS.cpp b/dbms/src/Storages/StorageHDFS.cpp index aa055f7d907..cb25580248f 100644 --- a/dbms/src/Storages/StorageHDFS.cpp +++ b/dbms/src/Storages/StorageHDFS.cpp @@ -148,7 +148,7 @@ BlockInputStreams StorageHDFS::read( max_block_size)}; } -void StorageHDFS::rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name) +void StorageHDFS::rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) { table_name = new_table_name; database_name = new_database_name; diff --git a/dbms/src/Storages/StorageHDFS.h b/dbms/src/Storages/StorageHDFS.h index 30a99c9de70..8361916e0e2 100644 --- a/dbms/src/Storages/StorageHDFS.h +++ b/dbms/src/Storages/StorageHDFS.h @@ -30,7 +30,7 @@ public: BlockOutputStreamPtr write(const ASTPtr & query, const Context & context) override; - 
void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override; + void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override; protected: StorageHDFS(const String & uri_, diff --git a/dbms/src/Storages/StorageJoin.cpp b/dbms/src/Storages/StorageJoin.cpp index e2cbe542e11..54effdcd4fa 100644 --- a/dbms/src/Storages/StorageJoin.cpp +++ b/dbms/src/Storages/StorageJoin.cpp @@ -55,7 +55,7 @@ StorageJoin::StorageJoin( } -void StorageJoin::truncate(const ASTPtr &, const Context &) +void StorageJoin::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) { Poco::File(path).remove(true); Poco::File(path).createDirectories(); diff --git a/dbms/src/Storages/StorageJoin.h b/dbms/src/Storages/StorageJoin.h index 760e9eb1815..bdc50b9d767 100644 --- a/dbms/src/Storages/StorageJoin.h +++ b/dbms/src/Storages/StorageJoin.h @@ -26,7 +26,7 @@ class StorageJoin : public ext::shared_ptr_helper, public StorageSe public: String getName() const override { return "Join"; } - void truncate(const ASTPtr &, const Context &) override; + void truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) override; /// Access the innards. 
JoinPtr & getJoin() { return join; } diff --git a/dbms/src/Storages/StorageLog.cpp b/dbms/src/Storages/StorageLog.cpp index 69e37ce2305..3811b226357 100644 --- a/dbms/src/Storages/StorageLog.cpp +++ b/dbms/src/Storages/StorageLog.cpp @@ -512,7 +512,7 @@ void StorageLog::loadMarks() } -void StorageLog::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) +void StorageLog::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) { std::unique_lock lock(rwlock); @@ -530,7 +530,7 @@ void StorageLog::rename(const String & new_path_to_db, const String & new_databa marks_file = Poco::File(path + escapeForFileName(table_name) + '/' + DBMS_STORAGE_LOG_MARKS_FILE_NAME); } -void StorageLog::truncate(const ASTPtr &, const Context &) +void StorageLog::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) { std::shared_lock lock(rwlock); diff --git a/dbms/src/Storages/StorageLog.h b/dbms/src/Storages/StorageLog.h index 70de62cb47b..7f792337abc 100644 --- a/dbms/src/Storages/StorageLog.h +++ b/dbms/src/Storages/StorageLog.h @@ -38,11 +38,11 @@ public: BlockOutputStreamPtr write(const ASTPtr & query, const Context & context) override; - void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override; + void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override; CheckResults checkData(const ASTPtr & /* query */, const Context & /* context */) override; - void truncate(const ASTPtr &, const Context &) override; + void truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) override; std::string full_path() const { return path + escapeForFileName(table_name) + '/';} diff --git a/dbms/src/Storages/StorageMaterializedView.cpp b/dbms/src/Storages/StorageMaterializedView.cpp index 
df39f711ab3..87008fce5bf 100644 --- a/dbms/src/Storages/StorageMaterializedView.cpp +++ b/dbms/src/Storages/StorageMaterializedView.cpp @@ -232,7 +232,7 @@ static void executeDropQuery(ASTDropQuery::Kind kind, Context & global_context, } -void StorageMaterializedView::drop() +void StorageMaterializedView::drop(TableStructureWriteLockHolder &) { global_context.removeDependency( DatabaseAndTableName(select_database_name, select_table_name), @@ -242,7 +242,7 @@ void StorageMaterializedView::drop() executeDropQuery(ASTDropQuery::Kind::Drop, global_context, target_database_name, target_table_name); } -void StorageMaterializedView::truncate(const ASTPtr &, const Context &) +void StorageMaterializedView::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) { if (has_inner_table) executeDropQuery(ASTDropQuery::Kind::Truncate, global_context, target_database_name, target_table_name); @@ -299,7 +299,8 @@ static void executeRenameQuery(Context & global_context, const String & database } -void StorageMaterializedView::rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name) +void StorageMaterializedView::rename( + const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) { if (has_inner_table && tryGetTargetTable()) { diff --git a/dbms/src/Storages/StorageMaterializedView.h b/dbms/src/Storages/StorageMaterializedView.h index b635634addf..327e8feed79 100644 --- a/dbms/src/Storages/StorageMaterializedView.h +++ b/dbms/src/Storages/StorageMaterializedView.h @@ -33,9 +33,9 @@ public: BlockOutputStreamPtr write(const ASTPtr & query, const Context & context) override; - void drop() override; + void drop(TableStructureWriteLockHolder &) override; - void truncate(const ASTPtr &, const Context &) override; + void truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) override; bool optimize(const ASTPtr & query, const 
ASTPtr & partition, bool final, bool deduplicate, const Context & context) override; @@ -43,7 +43,7 @@ public: void mutate(const MutationCommands & commands, const Context & context) override; - void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override; + void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override; void shutdown() override; diff --git a/dbms/src/Storages/StorageMemory.cpp b/dbms/src/Storages/StorageMemory.cpp index 560da7dc2d8..1b820e55c5e 100644 --- a/dbms/src/Storages/StorageMemory.cpp +++ b/dbms/src/Storages/StorageMemory.cpp @@ -123,13 +123,13 @@ BlockOutputStreamPtr StorageMemory::write( } -void StorageMemory::drop() +void StorageMemory::drop(TableStructureWriteLockHolder &) { std::lock_guard lock(mutex); data.clear(); } -void StorageMemory::truncate(const ASTPtr &, const Context &) +void StorageMemory::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) { std::lock_guard lock(mutex); data.clear(); diff --git a/dbms/src/Storages/StorageMemory.h b/dbms/src/Storages/StorageMemory.h index 92dcd4be18f..eb2d6ff2e21 100644 --- a/dbms/src/Storages/StorageMemory.h +++ b/dbms/src/Storages/StorageMemory.h @@ -40,11 +40,15 @@ public: BlockOutputStreamPtr write(const ASTPtr & query, const Context & context) override; - void drop() override; + void drop(TableStructureWriteLockHolder &) override; - void truncate(const ASTPtr &, const Context &) override; + void truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) override; - void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name) override { table_name = new_table_name; database_name = new_database_name; } + void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override + { 
+ table_name = new_table_name; + database_name = new_database_name; + } private: String database_name; diff --git a/dbms/src/Storages/StorageMerge.h b/dbms/src/Storages/StorageMerge.h index 6708a92c3b0..dbf5d219957 100644 --- a/dbms/src/Storages/StorageMerge.h +++ b/dbms/src/Storages/StorageMerge.h @@ -42,8 +42,11 @@ public: size_t max_block_size, unsigned num_streams) override; - void drop() override {} - void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name) override { table_name = new_table_name; database_name = new_database_name; } + void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override + { + table_name = new_table_name; + database_name = new_database_name; + } /// you need to add and remove columns in the sub-tables manually /// the structure of sub-tables is not checked diff --git a/dbms/src/Storages/StorageMergeTree.cpp b/dbms/src/Storages/StorageMergeTree.cpp index 4b7b2c446f6..46fcc977a60 100644 --- a/dbms/src/Storages/StorageMergeTree.cpp +++ b/dbms/src/Storages/StorageMergeTree.cpp @@ -157,13 +157,13 @@ void StorageMergeTree::checkPartitionCanBeDropped(const ASTPtr & partition) global_context.checkPartitionCanBeDropped(database_name, table_name, partition_size); } -void StorageMergeTree::drop() +void StorageMergeTree::drop(TableStructureWriteLockHolder &) { shutdown(); dropAllData(); } -void StorageMergeTree::truncate(const ASTPtr &, const Context &) +void StorageMergeTree::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) { { /// Asks to complete merges and does not allow them to start. 
@@ -181,7 +181,7 @@ void StorageMergeTree::truncate(const ASTPtr &, const Context &) clearOldPartsFromFilesystem(); } -void StorageMergeTree::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) +void StorageMergeTree::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) { std::string new_full_path = new_path_to_db + escapeForFileName(new_table_name) + '/'; @@ -995,7 +995,7 @@ void StorageMergeTree::alterPartition(const ASTPtr & query, const PartitionComma case PartitionCommand::FREEZE_PARTITION: { auto lock = lockStructureForShare(false, context.getCurrentQueryId()); - freezePartition(command.partition, command.with_name, context); + freezePartition(command.partition, command.with_name, context, lock); } break; @@ -1020,7 +1020,7 @@ void StorageMergeTree::alterPartition(const ASTPtr & query, const PartitionComma case PartitionCommand::FREEZE_ALL_PARTITIONS: { auto lock = lockStructureForShare(false, context.getCurrentQueryId()); - freezeAll(command.with_name, context); + freezeAll(command.with_name, context, lock); } break; diff --git a/dbms/src/Storages/StorageMergeTree.h b/dbms/src/Storages/StorageMergeTree.h index d135ffd6e1a..06ffa94c8ec 100644 --- a/dbms/src/Storages/StorageMergeTree.h +++ b/dbms/src/Storages/StorageMergeTree.h @@ -55,10 +55,10 @@ public: std::vector getMutationsStatus() const override; CancellationCode killMutation(const String & mutation_id) override; - void drop() override; - void truncate(const ASTPtr &, const Context &) override; + void drop(TableStructureWriteLockHolder &) override; + void truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) override; - void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override; + void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, 
TableStructureWriteLockHolder &) override; void alter(const AlterCommands & params, const Context & context, TableStructureWriteLockHolder & table_lock_holder) override; diff --git a/dbms/src/Storages/StorageNull.h b/dbms/src/Storages/StorageNull.h index 04cd5f25e8f..e1a80f3fbaf 100644 --- a/dbms/src/Storages/StorageNull.h +++ b/dbms/src/Storages/StorageNull.h @@ -38,7 +38,7 @@ public: return std::make_shared(getSampleBlock()); } - void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name) override + void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override { table_name = new_table_name; database_name = new_database_name; diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp index afcc5d968af..e5821c1bcaf 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp +++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp @@ -3132,7 +3132,6 @@ bool StorageReplicatedMergeTree::optimize(const ASTPtr & query, const ASTPtr & p if (query_context.getSettingsRef().replication_alter_partitions_sync != 0) { /// NOTE Table lock must not be held while waiting. Some combination of R-W-R locks from different threads will yield to deadlock. - /// TODO Check all other "wait" places. 
for (auto & merge_entry : merge_entries) waitForAllReplicasToProcessLogEntry(merge_entry); } @@ -3484,7 +3483,7 @@ void StorageReplicatedMergeTree::alterPartition(const ASTPtr & query, const Part case PartitionCommand::FREEZE_PARTITION: { auto lock = lockStructureForShare(false, query_context.getCurrentQueryId()); - freezePartition(command.partition, command.with_name, query_context); + freezePartition(command.partition, command.with_name, query_context, lock); } break; @@ -3509,7 +3508,7 @@ void StorageReplicatedMergeTree::alterPartition(const ASTPtr & query, const Part case PartitionCommand::FREEZE_ALL_PARTITIONS: { auto lock = lockStructureForShare(false, query_context.getCurrentQueryId()); - freezeAll(command.with_name, query_context); + freezeAll(command.with_name, query_context, lock); } break; } @@ -3633,8 +3632,10 @@ void StorageReplicatedMergeTree::dropPartition(const ASTPtr & query, const ASTPt } -void StorageReplicatedMergeTree::truncate(const ASTPtr & query, const Context & query_context) +void StorageReplicatedMergeTree::truncate(const ASTPtr & query, const Context & query_context, TableStructureWriteLockHolder & table_lock) { + table_lock.release(); /// Truncate is done asynchronously. 
+ assertNotReadonly(); zkutil::ZooKeeperPtr zookeeper = getZooKeeper(); @@ -3701,7 +3702,7 @@ void StorageReplicatedMergeTree::checkPartitionCanBeDropped(const ASTPtr & parti } -void StorageReplicatedMergeTree::drop() +void StorageReplicatedMergeTree::drop(TableStructureWriteLockHolder &) { { auto zookeeper = tryGetZooKeeper(); @@ -3731,7 +3732,8 @@ void StorageReplicatedMergeTree::drop() } -void StorageReplicatedMergeTree::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) +void StorageReplicatedMergeTree::rename( + const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) { std::string new_full_path = new_path_to_db + escapeForFileName(new_table_name) + '/'; diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.h b/dbms/src/Storages/StorageReplicatedMergeTree.h index 0fa2e3631e2..c5000944439 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.h +++ b/dbms/src/Storages/StorageReplicatedMergeTree.h @@ -109,11 +109,11 @@ public: /** Removes a replica from ZooKeeper. If there are no other replicas, it deletes the entire table from ZooKeeper. 
*/ - void drop() override; + void drop(TableStructureWriteLockHolder &) override; - void truncate(const ASTPtr &, const Context &) override; + void truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) override; - void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override; + void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override; bool supportsIndexForIn() const override { return true; } diff --git a/dbms/src/Storages/StorageSet.cpp b/dbms/src/Storages/StorageSet.cpp index 26af630ca62..c76857bf610 100644 --- a/dbms/src/Storages/StorageSet.cpp +++ b/dbms/src/Storages/StorageSet.cpp @@ -126,7 +126,7 @@ void StorageSet::insertBlock(const Block & block) { set->insertFromBlock(block); size_t StorageSet::getSize() const { return set->getTotalRowCount(); } -void StorageSet::truncate(const ASTPtr &, const Context &) +void StorageSet::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) { Poco::File(path).remove(true); Poco::File(path).createDirectories(); @@ -193,7 +193,8 @@ void StorageSetOrJoinBase::restoreFromFile(const String & file_path) } -void StorageSetOrJoinBase::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) +void StorageSetOrJoinBase::rename( + const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) { /// Rename directory with data. 
String new_path = new_path_to_db + escapeForFileName(new_table_name); diff --git a/dbms/src/Storages/StorageSet.h b/dbms/src/Storages/StorageSet.h index fe6cd332ed8..671bc78897b 100644 --- a/dbms/src/Storages/StorageSet.h +++ b/dbms/src/Storages/StorageSet.h @@ -22,7 +22,7 @@ public: String getTableName() const override { return table_name; } String getDatabaseName() const override { return database_name; } - void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override; + void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override; BlockOutputStreamPtr write(const ASTPtr & query, const Context & context) override; @@ -69,7 +69,7 @@ public: /// Access the insides. SetPtr & getSet() { return set; } - void truncate(const ASTPtr &, const Context &) override; + void truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) override; private: SetPtr set; diff --git a/dbms/src/Storages/StorageStripeLog.cpp b/dbms/src/Storages/StorageStripeLog.cpp index 447e325095d..00c359f9797 100644 --- a/dbms/src/Storages/StorageStripeLog.cpp +++ b/dbms/src/Storages/StorageStripeLog.cpp @@ -223,7 +223,7 @@ StorageStripeLog::StorageStripeLog( } -void StorageStripeLog::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) +void StorageStripeLog::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) { std::unique_lock lock(rwlock); @@ -294,7 +294,7 @@ CheckResults StorageStripeLog::checkData(const ASTPtr & /* query */, const Conte return file_checker.check(); } -void StorageStripeLog::truncate(const ASTPtr &, const Context &) +void StorageStripeLog::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) { if (table_name.empty()) throw Exception("Logical error: table name is empty", 
ErrorCodes::LOGICAL_ERROR); diff --git a/dbms/src/Storages/StorageStripeLog.h b/dbms/src/Storages/StorageStripeLog.h index d53f000f277..31da4d9d502 100644 --- a/dbms/src/Storages/StorageStripeLog.h +++ b/dbms/src/Storages/StorageStripeLog.h @@ -40,7 +40,7 @@ public: BlockOutputStreamPtr write(const ASTPtr & query, const Context & context) override; - void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override; + void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override; CheckResults checkData(const ASTPtr & /* query */, const Context & /* context */) override; @@ -55,7 +55,7 @@ public: String getDataPath() const override { return full_path(); } - void truncate(const ASTPtr &, const Context &) override; + void truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) override; private: String path; diff --git a/dbms/src/Storages/StorageTinyLog.cpp b/dbms/src/Storages/StorageTinyLog.cpp index 45d9771822e..da7013a62c3 100644 --- a/dbms/src/Storages/StorageTinyLog.cpp +++ b/dbms/src/Storages/StorageTinyLog.cpp @@ -378,7 +378,7 @@ void StorageTinyLog::addFiles(const String & column_name, const IDataType & type } -void StorageTinyLog::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) +void StorageTinyLog::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) { std::unique_lock lock(rwlock); @@ -424,7 +424,7 @@ CheckResults StorageTinyLog::checkData(const ASTPtr & /* query */, const Context return file_checker.check(); } -void StorageTinyLog::truncate(const ASTPtr &, const Context &) +void StorageTinyLog::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) { if (table_name.empty()) throw Exception("Logical error: table name is empty", 
ErrorCodes::LOGICAL_ERROR); diff --git a/dbms/src/Storages/StorageTinyLog.h b/dbms/src/Storages/StorageTinyLog.h index 1c148acf957..505edd7c556 100644 --- a/dbms/src/Storages/StorageTinyLog.h +++ b/dbms/src/Storages/StorageTinyLog.h @@ -39,7 +39,7 @@ public: BlockOutputStreamPtr write(const ASTPtr & query, const Context & context) override; - void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override; + void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override; CheckResults checkData(const ASTPtr & /* query */, const Context & /* context */) override; @@ -54,7 +54,7 @@ public: String getDataPath() const override { return full_path(); } - void truncate(const ASTPtr &, const Context &) override; + void truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) override; private: String path; diff --git a/dbms/src/Storages/StorageURL.cpp b/dbms/src/Storages/StorageURL.cpp index ee385af0fe8..4f3d41604f5 100644 --- a/dbms/src/Storages/StorageURL.cpp +++ b/dbms/src/Storages/StorageURL.cpp @@ -187,7 +187,7 @@ BlockInputStreams IStorageURLBase::read(const Names & column_names, return {std::make_shared(block_input, column_defaults, context)}; } -void IStorageURLBase::rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name) +void IStorageURLBase::rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) { table_name = new_table_name; database_name = new_database_name; diff --git a/dbms/src/Storages/StorageURL.h b/dbms/src/Storages/StorageURL.h index 2facca8ce38..cdd78c7b60f 100644 --- a/dbms/src/Storages/StorageURL.h +++ b/dbms/src/Storages/StorageURL.h @@ -29,7 +29,7 @@ public: BlockOutputStreamPtr write(const ASTPtr & query, const Context & context) override; - void rename(const 
String & new_path_to_db, const String & new_database_name, const String & new_table_name) override; + void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override; protected: IStorageURLBase( diff --git a/dbms/src/Storages/StorageView.h b/dbms/src/Storages/StorageView.h index de56f120fa1..6d2e1d04e6f 100644 --- a/dbms/src/Storages/StorageView.h +++ b/dbms/src/Storages/StorageView.h @@ -30,7 +30,7 @@ public: size_t max_block_size, unsigned num_streams) override; - void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name) override + void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override { table_name = new_table_name; database_name = new_database_name; diff --git a/dbms/src/Storages/System/StorageSystemColumns.cpp b/dbms/src/Storages/System/StorageSystemColumns.cpp index 30b673ddbbb..e4c84de23da 100644 --- a/dbms/src/Storages/System/StorageSystemColumns.cpp +++ b/dbms/src/Storages/System/StorageSystemColumns.cpp @@ -18,6 +18,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int TABLE_IS_DROPPED; } StorageSystemColumns::StorageSystemColumns(const std::string & name_) diff --git a/dbms/src/Storages/System/StorageSystemPartsBase.cpp b/dbms/src/Storages/System/StorageSystemPartsBase.cpp index bced500a072..69d11891198 100644 --- a/dbms/src/Storages/System/StorageSystemPartsBase.cpp +++ b/dbms/src/Storages/System/StorageSystemPartsBase.cpp @@ -17,6 +17,11 @@ namespace DB { +namespace ErrorCodes +{ + extern const int TABLE_IS_DROPPED; +} + bool StorageSystemPartsBase::hasStateColumn(const Names & column_names) const { bool has_state_column = false; diff --git a/dbms/tests/integration/test_atomic_drop_table/__init__.py b/dbms/tests/integration/test_atomic_drop_table/__init__.py new file mode 100644 
index 00000000000..e69de29bb2d diff --git a/dbms/tests/integration/test_atomic_drop_table/configs/config.d/zookeeper_session_timeout.xml b/dbms/tests/integration/test_atomic_drop_table/configs/config.d/zookeeper_session_timeout.xml new file mode 100644 index 00000000000..071725b5391 --- /dev/null +++ b/dbms/tests/integration/test_atomic_drop_table/configs/config.d/zookeeper_session_timeout.xml @@ -0,0 +1,6 @@ + + + + 3000 + + diff --git a/dbms/tests/integration/test_atomic_drop_table/configs/remote_servers.xml b/dbms/tests/integration/test_atomic_drop_table/configs/remote_servers.xml new file mode 100644 index 00000000000..538aa72d386 --- /dev/null +++ b/dbms/tests/integration/test_atomic_drop_table/configs/remote_servers.xml @@ -0,0 +1,14 @@ + + + + + true + + shard_0 + node1 + 9000 + + + + + diff --git a/dbms/tests/integration/test_atomic_drop_table/test.py b/dbms/tests/integration/test_atomic_drop_table/test.py new file mode 100644 index 00000000000..7d845baeec6 --- /dev/null +++ b/dbms/tests/integration/test_atomic_drop_table/test.py @@ -0,0 +1,37 @@ +import time +import pytest + +from helpers.network import PartitionManager +from helpers.cluster import ClickHouseCluster + + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance('node1', config_dir="configs", with_zookeeper=True) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + node1.query("CREATE DATABASE zktest;") + node1.query( + ''' + CREATE TABLE zktest.atomic_drop_table (n UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/zktest/tables/atomic_drop_table', 'node1') + PARTITION BY n ORDER BY n + ''' + ) + yield cluster + finally: + cluster.shutdown() + +def test_atomic_delete_with_stopped_zookeeper(start_cluster): + node1.query("insert into zktest.atomic_drop_table values (8192)") + + with PartitionManager() as pm: + pm.drop_instance_zk_connections(node1) + error = node1.query_and_get_error("DROP TABLE zktest.atomic_drop_table") #Table won't drop + assert 
error != "" + + time.sleep(5) + assert '8192' in node1.query("select * from zktest.atomic_drop_table") diff --git a/dbms/tests/queries/0_stateless/00600_replace_running_query.sh b/dbms/tests/queries/0_stateless/00600_replace_running_query.sh index dbbf41dd772..9fc25291548 100755 --- a/dbms/tests/queries/0_stateless/00600_replace_running_query.sh +++ b/dbms/tests/queries/0_stateless/00600_replace_running_query.sh @@ -35,6 +35,6 @@ wait ${CLICKHOUSE_CLIENT} --query_id=42 --query='SELECT 3, count() FROM system.numbers' 2>&1 | grep -cF 'was cancelled' & wait_for_query_to_start '42' -${CLICKHOUSE_CLIENT} --query_id=42 --replace_running_query=1 --queue_max_wait_ms=500 --query='SELECT 43' 2>&1 | grep -F "can't be stopped" > /dev/null +${CLICKHOUSE_CLIENT} --query_id=42 --replace_running_query=1 --replace_running_query_max_wait_ms=500 --query='SELECT 43' 2>&1 | grep -F "can't be stopped" > /dev/null ${CLICKHOUSE_CLIENT} --query_id=42 --replace_running_query=1 --query='SELECT 44' wait diff --git a/dbms/tests/queries/0_stateless/00816_long_concurrent_alter_column.sh b/dbms/tests/queries/0_stateless/00816_long_concurrent_alter_column.sh index 17779b73add..965408065cf 100755 --- a/dbms/tests/queries/0_stateless/00816_long_concurrent_alter_column.sh +++ b/dbms/tests/queries/0_stateless/00816_long_concurrent_alter_column.sh @@ -8,14 +8,58 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) echo "DROP TABLE IF EXISTS concurrent_alter_column" | ${CLICKHOUSE_CLIENT} echo "CREATE TABLE concurrent_alter_column (ts DATETIME) ENGINE = MergeTree PARTITION BY toStartOfDay(ts) ORDER BY tuple()" | ${CLICKHOUSE_CLIENT} -for i in {1..500}; do echo "ALTER TABLE concurrent_alter_column ADD COLUMN c$i DOUBLE;"; done | ${CLICKHOUSE_CLIENT} -n +function thread1() +{ + while true; do + for i in {1..500}; do echo "ALTER TABLE concurrent_alter_column ADD COLUMN c$i DOUBLE;"; done | ${CLICKHOUSE_CLIENT} -n --query_id=alter1 + done +} -for i in {1..100}; do echo "ALTER TABLE 
concurrent_alter_column ADD COLUMN d DOUBLE" | ${CLICKHOUSE_CLIENT}; sleep `echo 0.0$RANDOM`; echo "ALTER TABLE concurrent_alter_column DROP COLUMN d" | ${CLICKHOUSE_CLIENT} -n; done & -for i in {1..100}; do echo "ALTER TABLE concurrent_alter_column ADD COLUMN e DOUBLE" | ${CLICKHOUSE_CLIENT}; sleep `echo 0.0$RANDOM`; echo "ALTER TABLE concurrent_alter_column DROP COLUMN e" | ${CLICKHOUSE_CLIENT} -n; done & -for i in {1..100}; do echo "ALTER TABLE concurrent_alter_column ADD COLUMN f DOUBLE" | ${CLICKHOUSE_CLIENT}; sleep `echo 0.0$RANDOM`; echo "ALTER TABLE concurrent_alter_column DROP COLUMN f" | ${CLICKHOUSE_CLIENT} -n; done & +function thread2() +{ + while true; do + echo "ALTER TABLE concurrent_alter_column ADD COLUMN d DOUBLE" | ${CLICKHOUSE_CLIENT} --query_id=alter2; + sleep `echo 0.0$RANDOM`; + echo "ALTER TABLE concurrent_alter_column DROP COLUMN d" | ${CLICKHOUSE_CLIENT} --query_id=alter2; + done +} + +function thread3() +{ + while true; do + echo "ALTER TABLE concurrent_alter_column ADD COLUMN e DOUBLE" | ${CLICKHOUSE_CLIENT} --query_id=alter3; + sleep `echo 0.0$RANDOM`; + echo "ALTER TABLE concurrent_alter_column DROP COLUMN e" | ${CLICKHOUSE_CLIENT} --query_id=alter3; + done +} + +function thread4() +{ + while true; do + echo "ALTER TABLE concurrent_alter_column ADD COLUMN f DOUBLE" | ${CLICKHOUSE_CLIENT} --query_id=alter4; + sleep `echo 0.0$RANDOM`; + echo "ALTER TABLE concurrent_alter_column DROP COLUMN f" | ${CLICKHOUSE_CLIENT} --query_id=alter4; + done +} + +# https://stackoverflow.com/questions/9954794/execute-a-shell-function-with-timeout +export -f thread1; +export -f thread2; +export -f thread3; +export -f thread4; + +TIMEOUT=30 + +timeout $TIMEOUT bash -c thread1 2> /dev/null & +timeout $TIMEOUT bash -c thread2 2> /dev/null & +timeout $TIMEOUT bash -c thread3 2> /dev/null & +timeout $TIMEOUT bash -c thread4 2> /dev/null & wait echo "DROP TABLE concurrent_alter_column" | ${CLICKHOUSE_CLIENT} +# Check for deadlocks +echo "SELECT * FROM 
system.processes WHERE query_id LIKE 'alter%'" | ${CLICKHOUSE_CLIENT} + echo 'did not crash' diff --git a/dbms/tests/queries/0_stateless/00881_unknown_identifier_in_in.reference b/dbms/tests/queries/0_stateless/00881_unknown_identifier_in_in.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00881_unknown_identifier_in_in.reference @@ -0,0 +1 @@ +1 diff --git a/dbms/tests/queries/0_stateless/00881_unknown_identifier_in_in.sql b/dbms/tests/queries/0_stateless/00881_unknown_identifier_in_in.sql new file mode 100644 index 00000000000..2ce709c45be --- /dev/null +++ b/dbms/tests/queries/0_stateless/00881_unknown_identifier_in_in.sql @@ -0,0 +1,4 @@ +SELECT toUInt64(1) x FROM (select 1) +GROUP BY 1 +HAVING x +IN ( SELECT countIf(y, z == 1) FROM (SELECT 1 y, 1 z) ); diff --git a/dbms/tests/queries/0_stateless/00900_orc_load.reference b/dbms/tests/queries/0_stateless/00900_orc_load.reference new file mode 100644 index 00000000000..fe79e37ee18 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00900_orc_load.reference @@ -0,0 +1,2 @@ +0 0 0 0 0 2019-01-01 test1 +2147483647 -1 9223372036854775806 123.345345 345345.3453451212 2019-01-01 test2 diff --git a/dbms/tests/queries/0_stateless/00900_orc_load.sh b/dbms/tests/queries/0_stateless/00900_orc_load.sh new file mode 100755 index 00000000000..cd553f6d234 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00900_orc_load.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CUR_DIR/../shell_config.sh + +CB_DIR=$(dirname "$CLICKHOUSE_CLIENT_BINARY") +[ "$CB_DIR" == "." ] && ROOT_DIR=$CUR_DIR/../../../.. +[ "$CB_DIR" != "." ] && BUILD_DIR=$CB_DIR/../.. +[ -z "$ROOT_DIR" ] && ROOT_DIR=$CB_DIR/../../.. 
+ +DATA_FILE=$CUR_DIR/data_orc/test.orc + +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS orc_load" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE orc_load (int Int32, smallint Int8, bigint Int64, float Float32, double Float64, date Date, y String) ENGINE = Memory" +cat $DATA_FILE | ${CLICKHOUSE_CLIENT} -q "insert into orc_load format ORC" +${CLICKHOUSE_CLIENT} --query="select * from orc_load" + diff --git a/dbms/tests/queries/0_stateless/00960_live_view_watch_events_live.py b/dbms/tests/queries/0_stateless/00960_live_view_watch_events_live.py new file mode 100755 index 00000000000..2095683720e --- /dev/null +++ b/dbms/tests/queries/0_stateless/00960_live_view_watch_events_live.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +import os +import sys +import signal + +CURDIR = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, os.path.join(CURDIR, 'helpers')) + +from client import client, prompt, end_of_block + +log = None +# uncomment the line below for debugging +#log=sys.stdout + +with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send('SET allow_experimental_live_view = 1') + client1.expect(prompt) + client2.send('SET allow_experimental_live_view = 1') + client2.expect(prompt) + + client1.send('DROP TABLE IF EXISTS test.lv') + client1.expect(prompt) + client1.send(' DROP TABLE IF EXISTS test.mt') + client1.expect(prompt) + client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.expect(prompt) + client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.expect(prompt) + client1.send('WATCH test.lv EVENTS') + client1.expect('1.*' + end_of_block) + client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') + client1.expect('2.*' + end_of_block) + client2.send('INSERT INTO test.mt VALUES (4),(5),(6)') + client1.expect('3.*' + end_of_block) + # send Ctrl-C + client1.send('\x03', eol='') + match = 
client1.expect('(%s)|([#\$] )' % prompt) + if match.groups()[1]: + client1.send(client1.command) + client1.expect(prompt) + client1.send('DROP TABLE test.lv') + client1.expect(prompt) + client1.send('DROP TABLE test.mt') + client1.expect(prompt) diff --git a/dbms/tests/queries/0_stateless/00960_live_view_watch_events_live.reference b/dbms/tests/queries/0_stateless/00960_live_view_watch_events_live.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00961_temporary_live_view_watch.reference b/dbms/tests/queries/0_stateless/00961_temporary_live_view_watch.reference new file mode 100644 index 00000000000..6fbbedf1b21 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00961_temporary_live_view_watch.reference @@ -0,0 +1,3 @@ +0 1 +6 2 +21 3 diff --git a/dbms/tests/queries/0_stateless/00961_temporary_live_view_watch.sql b/dbms/tests/queries/0_stateless/00961_temporary_live_view_watch.sql new file mode 100644 index 00000000000..7992da92f97 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00961_temporary_live_view_watch.sql @@ -0,0 +1,20 @@ +SET allow_experimental_live_view = 1; + +DROP TABLE IF EXISTS test.lv; +DROP TABLE IF EXISTS test.mt; + +CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple(); +CREATE TEMPORARY LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt; + +WATCH test.lv LIMIT 0; + +INSERT INTO test.mt VALUES (1),(2),(3); + +WATCH test.lv LIMIT 0; + +INSERT INTO test.mt VALUES (4),(5),(6); + +WATCH test.lv LIMIT 0; + +DROP TABLE test.lv; +DROP TABLE test.mt; diff --git a/dbms/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py b/dbms/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py new file mode 100755 index 00000000000..3dbec01b29a --- /dev/null +++ b/dbms/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +import os +import sys +import signal + +CURDIR = os.path.dirname(os.path.realpath(__file__)) 
+sys.path.insert(0, os.path.join(CURDIR, 'helpers')) + +from client import client, prompt, end_of_block + +log = None +# uncomment the line below for debugging +#log=sys.stdout + +with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send('SET allow_experimental_live_view = 1') + client1.expect(prompt) + client2.send('SET allow_experimental_live_view = 1') + client2.expect(prompt) + + client1.send('DROP TABLE IF EXISTS test.lv') + client1.expect(prompt) + client1.send('DROP TABLE IF EXISTS test.mt') + client1.expect(prompt) + client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.expect(prompt) + client1.send('CREATE TEMPORARY LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.expect(prompt) + client1.send('WATCH test.lv') + client1.expect(r'0.*1' + end_of_block) + client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') + client1.expect(r'6.*2' + end_of_block) + client2.send('INSERT INTO test.mt VALUES (4),(5),(6)') + client1.expect(r'21.*3' + end_of_block) + # send Ctrl-C + client1.send('\x03', eol='') + match = client1.expect('(%s)|([#\$] )' % prompt) + if match.groups()[1]: + client1.send(client1.command) + client1.expect(prompt) + client1.send('DROP TABLE test.lv') + client1.expect(prompt) + client1.send('DROP TABLE test.mt') + client1.expect(prompt) diff --git a/dbms/tests/queries/0_stateless/00962_temporary_live_view_watch_live.reference b/dbms/tests/queries/0_stateless/00962_temporary_live_view_watch_live.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled b/dbms/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled new file mode 100755 index 00000000000..b324c1b90cc --- /dev/null +++ b/dbms/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled @@ 
-0,0 +1,54 @@ +#!/usr/bin/env python +import os +import sys +import signal + +CURDIR = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, os.path.join(CURDIR, 'helpers')) + +from client import client, prompt, end_of_block + +log = None +# uncomment the line below for debugging +#log=sys.stdout + +with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send('SET allow_experimental_live_view = 1') + client1.expect(prompt) + client2.send('SET allow_experimental_live_view = 1') + client2.expect(prompt) + + client1.send('DROP TABLE IF EXISTS test.lv') + client1.expect(prompt) + client1.send('DROP TABLE IF EXISTS test.mt') + client1.expect(prompt) + client1.send('SET temporary_live_view_timeout=1') + client1.expect(prompt) + client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.expect(prompt) + client1.send('CREATE TEMPORARY LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.expect(prompt) + client1.send('WATCH test.lv') + client1.expect(r'0.*1' + end_of_block) + client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') + client2.expect(prompt) + client1.expect(r'6.*2' + end_of_block) + client2.send('INSERT INTO test.mt VALUES (4),(5),(6)') + client2.expect(prompt) + client1.expect(r'21.*3' + end_of_block) + # send Ctrl-C + client1.send('\x03', eol='') + match = client1.expect('(%s)|([#\$] )' % prompt) + if match.groups()[1]: + client1.send(client1.command) + client1.expect(prompt) + client1.send('SELECT sleep(1)') + client1.expect(prompt) + client1.send('DROP TABLE test.lv') + client1.expect('Table test.lv doesn\'t exist') + client1.expect(prompt) + client1.send('DROP TABLE test.mt') + client1.expect(prompt) diff --git a/dbms/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.reference b/dbms/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.reference new file mode 100644 
index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py b/dbms/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py new file mode 100755 index 00000000000..528f18839bb --- /dev/null +++ b/dbms/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +import os +import sys +import signal + +CURDIR = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, os.path.join(CURDIR, 'helpers')) + +from client import client, prompt, end_of_block + +log = None +# uncomment the line below for debugging +#log=sys.stdout + +with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send('SET allow_experimental_live_view = 1') + client1.expect(prompt) + client2.send('SET allow_experimental_live_view = 1') + client2.expect(prompt) + + client1.send('DROP TABLE IF EXISTS test.lv') + client1.expect(prompt) + client1.send(' DROP TABLE IF EXISTS test.mt') + client1.expect(prompt) + client1.send('SET live_view_heartbeat_interval=1') + client1.expect(prompt) + client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.expect(prompt) + client1.send('CREATE TEMPORARY LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.expect(prompt) + client1.send('WATCH test.lv EVENTS') + client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') + client1.expect('2.*' + end_of_block) + client1.expect('Progress: 2.00 rows.*\)') + # wait for heartbeat + client1.expect('Progress: 2.00 rows.*\)') + # send Ctrl-C + client1.send('\x03', eol='') + match = client1.expect('(%s)|([#\$] )' % prompt) + if match.groups()[1]: + client1.send(client1.command) + client1.expect(prompt) + client1.send('DROP TABLE test.lv') + client1.expect(prompt) + client1.send('DROP TABLE test.mt') + client1.expect(prompt) diff --git 
a/dbms/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.reference b/dbms/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py b/dbms/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py new file mode 100755 index 00000000000..2723936f876 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +import os +import sys +import signal + +CURDIR = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, os.path.join(CURDIR, 'helpers')) + +from client import client, prompt, end_of_block + +log = None +# uncomment the line below for debugging +#log=sys.stdout + +with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send('SET allow_experimental_live_view = 1') + client1.expect(prompt) + client2.send('SET allow_experimental_live_view = 1') + client2.expect(prompt) + + client1.send('DROP TABLE IF EXISTS test.lv') + client1.expect(prompt) + client1.send(' DROP TABLE IF EXISTS test.mt') + client1.expect(prompt) + client1.send('SET live_view_heartbeat_interval=1') + client1.expect(prompt) + client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.expect(prompt) + client1.send('CREATE TEMPORARY LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.expect(prompt) + client1.send('WATCH test.lv') + client1.expect(r'0.*1' + end_of_block) + client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') + client1.expect(r'6.*2' + end_of_block) + client1.expect('Progress: 2.00 rows.*\)') + # wait for heartbeat + client1.expect('Progress: 2.00 rows.*\)') + # send Ctrl-C + client1.send('\x03', eol='') + match = client1.expect('(%s)|([#\$] )' % prompt) + if match.groups()[1]: + client1.send(client1.command) 
+ client1.expect(prompt) + client1.send('DROP TABLE test.lv') + client1.expect(prompt) + client1.send('DROP TABLE test.mt') + client1.expect(prompt) diff --git a/dbms/tests/queries/0_stateless/00965_live_view_watch_heartbeat.reference b/dbms/tests/queries/0_stateless/00965_live_view_watch_heartbeat.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00966_live_view_watch_events_http.py b/dbms/tests/queries/0_stateless/00966_live_view_watch_events_http.py new file mode 100755 index 00000000000..72ab3ea8818 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00966_live_view_watch_events_http.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python +import os +import sys + +CURDIR = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, os.path.join(CURDIR, 'helpers')) + +from client import client, prompt, end_of_block +from httpclient import client as http_client + +log = None +# uncomment the line below for debugging +#log=sys.stdout + +with client(name='client1>', log=log) as client1: + client1.expect(prompt) + + client1.send('SET allow_experimental_live_view = 1') + client1.expect(prompt) + + client1.send('DROP TABLE IF EXISTS test.lv') + client1.expect(prompt) + client1.send(' DROP TABLE IF EXISTS test.mt') + client1.expect(prompt) + client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.expect(prompt) + client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.expect(prompt) + + + with http_client({'method':'GET', 'url': '/?allow_experimental_live_view=1&query=WATCH%20test.lv%20EVENTS'}, name='client2>', log=log) as client2: + client2.expect('.*1\n') + client1.send('INSERT INTO test.mt VALUES (1),(2),(3)') + client1.expect(prompt) + client2.expect('.*2\n') + + client1.send('DROP TABLE test.lv') + client1.expect(prompt) + client1.send('DROP TABLE test.mt') + client1.expect(prompt) diff --git 
a/dbms/tests/queries/0_stateless/00966_live_view_watch_events_http.reference b/dbms/tests/queries/0_stateless/00966_live_view_watch_events_http.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00967_live_view_watch_http.py b/dbms/tests/queries/0_stateless/00967_live_view_watch_http.py new file mode 100755 index 00000000000..e2f33971c3d --- /dev/null +++ b/dbms/tests/queries/0_stateless/00967_live_view_watch_http.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python +import os +import sys + +CURDIR = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, os.path.join(CURDIR, 'helpers')) + +from client import client, prompt, end_of_block +from httpclient import client as http_client + +log = None +# uncomment the line below for debugging +#log=sys.stdout + +with client(name='client1>', log=log) as client1: + client1.expect(prompt) + + client1.send('SET allow_experimental_live_view = 1') + client1.expect(prompt) + + client1.send('DROP TABLE IF EXISTS test.lv') + client1.expect(prompt) + client1.send(' DROP TABLE IF EXISTS test.mt') + client1.expect(prompt) + client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.expect(prompt) + client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.expect(prompt) + + + with http_client({'method':'GET', 'url':'/?allow_experimental_live_view=1&query=WATCH%20test.lv'}, name='client2>', log=log) as client2: + client2.expect('.*0\t1\n') + client1.send('INSERT INTO test.mt VALUES (1),(2),(3)') + client1.expect(prompt) + client2.expect('.*6\t2\n') + + client1.send('DROP TABLE test.lv') + client1.expect(prompt) + client1.send('DROP TABLE test.mt') + client1.expect(prompt) diff --git a/dbms/tests/queries/0_stateless/00967_live_view_watch_http.reference b/dbms/tests/queries/0_stateless/00967_live_view_watch_http.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/dbms/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.reference b/dbms/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.reference new file mode 100644 index 00000000000..5ae423d90d1 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.reference @@ -0,0 +1,4 @@ +{"row":{"a":1}} +{"row":{"a":2}} +{"row":{"a":3}} +{"progress":{"read_rows":"3","read_bytes":"36","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}} diff --git a/dbms/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.sql b/dbms/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.sql new file mode 100644 index 00000000000..1023cdf6b29 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.sql @@ -0,0 +1,14 @@ +SET allow_experimental_live_view = 1; + +DROP TABLE IF EXISTS test.lv; +DROP TABLE IF EXISTS test.mt; + +CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple(); +CREATE LIVE VIEW test.lv AS SELECT * FROM test.mt; + +INSERT INTO test.mt VALUES (1),(2),(3); + +SELECT * FROM test.lv FORMAT JSONEachRowWithProgress; + +DROP TABLE test.lv; +DROP TABLE test.mt; diff --git a/dbms/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.reference b/dbms/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.reference new file mode 100644 index 00000000000..287a1ced92d --- /dev/null +++ b/dbms/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.reference @@ -0,0 +1,6 @@ +{"row":{"sum(a)":"0","_version":"1"}} +{"progress":{"read_rows":"1","read_bytes":"16","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}} +{"row":{"sum(a)":"6","_version":"2"}} +{"progress":{"read_rows":"1","read_bytes":"16","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}} 
+{"row":{"sum(a)":"21","_version":"3"}} +{"progress":{"read_rows":"1","read_bytes":"16","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}} diff --git a/dbms/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.sql b/dbms/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.sql new file mode 100644 index 00000000000..3e46d55c014 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.sql @@ -0,0 +1,20 @@ +SET allow_experimental_live_view = 1; + +DROP TABLE IF EXISTS test.lv; +DROP TABLE IF EXISTS test.mt; + +CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple(); +CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt; + +WATCH test.lv LIMIT 0 FORMAT JSONEachRowWithProgress; + +INSERT INTO test.mt VALUES (1),(2),(3); + +WATCH test.lv LIMIT 0 FORMAT JSONEachRowWithProgress; + +INSERT INTO test.mt VALUES (4),(5),(6); + +WATCH test.lv LIMIT 0 FORMAT JSONEachRowWithProgress; + +DROP TABLE test.lv; +DROP TABLE test.mt; diff --git a/dbms/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py b/dbms/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py new file mode 100755 index 00000000000..8435cdc147a --- /dev/null +++ b/dbms/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +import os +import sys + +CURDIR = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, os.path.join(CURDIR, 'helpers')) + +from client import client, prompt, end_of_block +from httpclient import client as http_client + +log = None +# uncomment the line below for debugging +#log=sys.stdout + +with client(name='client1>', log=log) as client1: + client1.expect(prompt) + + client1.send('SET allow_experimental_live_view = 1') + client1.expect(prompt) + + client1.send('DROP TABLE IF EXISTS test.lv') + client1.expect(prompt) + client1.send(' DROP TABLE IF EXISTS test.mt') + 
client1.expect(prompt) + client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.expect(prompt) + client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.expect(prompt) + + with http_client({'method':'GET', 'url': '/?allow_experimental_live_view=1&live_view_heartbeat_interval=1&query=WATCH%20test.lv%20EVENTS%20FORMAT%20JSONEachRowWithProgress'}, name='client2>', log=log) as client2: + client2.expect('{"progress":{"read_rows":"1","read_bytes":"8","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}}\n', escape=True) + client2.expect('{"row":{"version":"1"}', escape=True) + client2.expect('{"progress":{"read_rows":"1","read_bytes":"8","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}}', escape=True) + # heartbeat is provided by progress message + client2.expect('{"progress":{"read_rows":"1","read_bytes":"8","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}}', escape=True) + + client1.send('INSERT INTO test.mt VALUES (1),(2),(3)') + client1.expect(prompt) + + client2.expect('{"row":{"version":"2"}}\n', escape=True) + + client1.send('DROP TABLE test.lv') + client1.expect(prompt) + client1.send('DROP TABLE test.mt') + client1.expect(prompt) diff --git a/dbms/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.reference b/dbms/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py b/dbms/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py new file mode 100755 index 00000000000..2317d705efe --- /dev/null +++ b/dbms/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +import os +import sys + +CURDIR = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, os.path.join(CURDIR, 'helpers')) + +from client import client, 
prompt, end_of_block +from httpclient import client as http_client + +log = None +# uncomment the line below for debugging +#log=sys.stdout + +with client(name='client1>', log=log) as client1: + client1.expect(prompt) + + client1.send('SET allow_experimental_live_view = 1') + client1.expect(prompt) + + client1.send('DROP TABLE IF EXISTS test.lv') + client1.expect(prompt) + client1.send(' DROP TABLE IF EXISTS test.mt') + client1.expect(prompt) + client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.expect(prompt) + client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.expect(prompt) + + with http_client({'method':'GET', 'url':'/?allow_experimental_live_view=1&live_view_heartbeat_interval=1&query=WATCH%20test.lv%20FORMAT%20JSONEachRowWithProgress'}, name='client2>', log=log) as client2: + client2.expect('"progress".*',) + client2.expect('{"row":{"sum(a)":"0","_version":"1"}}\n', escape=True) + client2.expect('"progress".*\n') + # heartbeat is provided by progress message + client2.expect('"progress".*\n') + + client1.send('INSERT INTO test.mt VALUES (1),(2),(3)') + client1.expect(prompt) + + client2.expect('"progress".*"read_rows":"2".*\n') + client2.expect('{"row":{"sum(a)":"6","_version":"2"}}\n', escape=True) + + client1.send('DROP TABLE test.lv') + client1.expect(prompt) + client1.send('DROP TABLE test.mt') + client1.expect(prompt) diff --git a/dbms/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.reference b/dbms/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00972_live_view_select_1.reference b/dbms/tests/queries/0_stateless/00972_live_view_select_1.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00972_live_view_select_1.reference @@ -0,0 +1 @@ +1 diff --git 
a/dbms/tests/queries/0_stateless/00972_live_view_select_1.sql b/dbms/tests/queries/0_stateless/00972_live_view_select_1.sql new file mode 100644 index 00000000000..135516b0cd3 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00972_live_view_select_1.sql @@ -0,0 +1,9 @@ +SET allow_experimental_live_view = 1; + +DROP TABLE IF EXISTS test.lv; + +CREATE LIVE VIEW test.lv AS SELECT 1; + +SELECT * FROM test.lv; + +DROP TABLE test.lv; diff --git a/dbms/tests/queries/0_stateless/00973_live_view_select.reference b/dbms/tests/queries/0_stateless/00973_live_view_select.reference new file mode 100644 index 00000000000..75236c0daf7 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00973_live_view_select.reference @@ -0,0 +1,4 @@ +6 1 +6 1 +12 2 +12 2 diff --git a/dbms/tests/queries/0_stateless/00973_live_view_select.sql b/dbms/tests/queries/0_stateless/00973_live_view_select.sql new file mode 100644 index 00000000000..4b5ca0a2dd7 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00973_live_view_select.sql @@ -0,0 +1,20 @@ +SET allow_experimental_live_view = 1; + +DROP TABLE IF EXISTS test.lv; +DROP TABLE IF EXISTS test.mt; + +CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple(); +CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt; + +INSERT INTO test.mt VALUES (1),(2),(3); + +SELECT *,_version FROM test.lv; +SELECT *,_version FROM test.lv; + +INSERT INTO test.mt VALUES (1),(2),(3); + +SELECT *,_version FROM test.lv; +SELECT *,_version FROM test.lv; + +DROP TABLE test.lv; +DROP TABLE test.mt; diff --git a/dbms/tests/queries/0_stateless/00974_live_view_select_with_aggregation.reference b/dbms/tests/queries/0_stateless/00974_live_view_select_with_aggregation.reference new file mode 100644 index 00000000000..6d50f0e9c3a --- /dev/null +++ b/dbms/tests/queries/0_stateless/00974_live_view_select_with_aggregation.reference @@ -0,0 +1,2 @@ +6 +21 diff --git a/dbms/tests/queries/0_stateless/00974_live_view_select_with_aggregation.sql 
b/dbms/tests/queries/0_stateless/00974_live_view_select_with_aggregation.sql new file mode 100644 index 00000000000..3faaec8f623 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00974_live_view_select_with_aggregation.sql @@ -0,0 +1,18 @@ +SET allow_experimental_live_view = 1; + +DROP TABLE IF EXISTS test.lv; +DROP TABLE IF EXISTS test.mt; + +CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple(); +CREATE LIVE VIEW test.lv AS SELECT * FROM test.mt; + +INSERT INTO test.mt VALUES (1),(2),(3); + +SELECT sum(a) FROM test.lv; + +INSERT INTO test.mt VALUES (4),(5),(6); + +SELECT sum(a) FROM test.lv; + +DROP TABLE test.lv; +DROP TABLE test.mt; diff --git a/dbms/tests/queries/0_stateless/00975_live_view_create.reference b/dbms/tests/queries/0_stateless/00975_live_view_create.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00975_live_view_create.sql b/dbms/tests/queries/0_stateless/00975_live_view_create.sql new file mode 100644 index 00000000000..02c1644d193 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00975_live_view_create.sql @@ -0,0 +1,9 @@ +SET allow_experimental_live_view = 1; + +DROP TABLE IF EXISTS test.mt; + +CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple(); +CREATE LIVE VIEW test.lv AS SELECT * FROM test.mt; + +DROP TABLE test.lv; +DROP TABLE test.mt; diff --git a/dbms/tests/queries/0_stateless/00976_live_view_select_version.reference b/dbms/tests/queries/0_stateless/00976_live_view_select_version.reference new file mode 100644 index 00000000000..453bd800469 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00976_live_view_select_version.reference @@ -0,0 +1,3 @@ +1 1 +2 1 +3 1 diff --git a/dbms/tests/queries/0_stateless/00976_live_view_select_version.sql b/dbms/tests/queries/0_stateless/00976_live_view_select_version.sql new file mode 100644 index 00000000000..ae1c59a92d7 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00976_live_view_select_version.sql @@ -0,0 +1,14 
@@ +SET allow_experimental_live_view = 1; + +DROP TABLE IF EXISTS test.lv; +DROP TABLE IF EXISTS test.mt; + +CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple(); +CREATE LIVE VIEW test.lv AS SELECT * FROM test.mt; + +INSERT INTO test.mt VALUES (1),(2),(3); + +SELECT *,_version FROM test.lv; + +DROP TABLE test.lv; +DROP TABLE test.mt; diff --git a/dbms/tests/queries/0_stateless/00977_live_view_watch_events.reference b/dbms/tests/queries/0_stateless/00977_live_view_watch_events.reference new file mode 100644 index 00000000000..01e79c32a8c --- /dev/null +++ b/dbms/tests/queries/0_stateless/00977_live_view_watch_events.reference @@ -0,0 +1,3 @@ +1 +2 +3 diff --git a/dbms/tests/queries/0_stateless/00977_live_view_watch_events.sql b/dbms/tests/queries/0_stateless/00977_live_view_watch_events.sql new file mode 100644 index 00000000000..3e0d066fb8d --- /dev/null +++ b/dbms/tests/queries/0_stateless/00977_live_view_watch_events.sql @@ -0,0 +1,20 @@ +SET allow_experimental_live_view = 1; + +DROP TABLE IF EXISTS test.lv; +DROP TABLE IF EXISTS test.mt; + +CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple(); +CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt; + +WATCH test.lv EVENTS LIMIT 0; + +INSERT INTO test.mt VALUES (1),(2),(3); + +WATCH test.lv EVENTS LIMIT 0; + +INSERT INTO test.mt VALUES (4),(5),(6); + +WATCH test.lv EVENTS LIMIT 0; + +DROP TABLE test.lv; +DROP TABLE test.mt; diff --git a/dbms/tests/queries/0_stateless/00978_live_view_watch.reference b/dbms/tests/queries/0_stateless/00978_live_view_watch.reference new file mode 100644 index 00000000000..6fbbedf1b21 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00978_live_view_watch.reference @@ -0,0 +1,3 @@ +0 1 +6 2 +21 3 diff --git a/dbms/tests/queries/0_stateless/00978_live_view_watch.sql b/dbms/tests/queries/0_stateless/00978_live_view_watch.sql new file mode 100644 index 00000000000..b8d0d93ccab --- /dev/null +++ b/dbms/tests/queries/0_stateless/00978_live_view_watch.sql @@ -0,0 
+1,20 @@ +SET allow_experimental_live_view = 1; + +DROP TABLE IF EXISTS test.lv; +DROP TABLE IF EXISTS test.mt; + +CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple(); +CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt; + +WATCH test.lv LIMIT 0; + +INSERT INTO test.mt VALUES (1),(2),(3); + +WATCH test.lv LIMIT 0; + +INSERT INTO test.mt VALUES (4),(5),(6); + +WATCH test.lv LIMIT 0; + +DROP TABLE test.lv; +DROP TABLE test.mt; diff --git a/dbms/tests/queries/0_stateless/00979_live_view_watch_live.py b/dbms/tests/queries/0_stateless/00979_live_view_watch_live.py new file mode 100755 index 00000000000..8c5bc5b8eb2 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00979_live_view_watch_live.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +import os +import sys +import signal + +CURDIR = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, os.path.join(CURDIR, 'helpers')) + +from client import client, prompt, end_of_block + +log = None +# uncomment the line below for debugging +#log=sys.stdout + +with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: + client1.expect(prompt) + client2.expect(prompt) + + client1.send('SET allow_experimental_live_view = 1') + client1.expect(prompt) + client2.send('SET allow_experimental_live_view = 1') + client2.expect(prompt) + + client1.send('DROP TABLE IF EXISTS test.lv') + client1.expect(prompt) + client1.send(' DROP TABLE IF EXISTS test.mt') + client1.expect(prompt) + client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.expect(prompt) + client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.expect(prompt) + client1.send('WATCH test.lv') + client1.expect(r'0.*1' + end_of_block) + client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') + client1.expect(r'6.*2' + end_of_block) + client2.expect(prompt) + client2.send('INSERT INTO test.mt VALUES (4),(5),(6)') + client1.expect(r'21.*3' + end_of_block) + 
client2.expect(prompt) + for i in range(1,129): + client2.send('INSERT INTO test.mt VALUES (1)') + client1.expect(r'%d.*%d' % (21+i, 3+i) + end_of_block) + client2.expect(prompt) + # send Ctrl-C + client1.send('\x03', eol='') + match = client1.expect('(%s)|([#\$] )' % prompt) + if match.groups()[1]: + client1.send(client1.command) + client1.expect(prompt) + client1.send('DROP TABLE test.lv') + client1.expect(prompt) + client1.send('DROP TABLE test.mt') + client1.expect(prompt) diff --git a/dbms/tests/queries/0_stateless/00979_live_view_watch_live.reference b/dbms/tests/queries/0_stateless/00979_live_view_watch_live.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00980_alter_settings_race.sh b/dbms/tests/queries/0_stateless/00980_alter_settings_race.sh index 4a948841ed7..3a9e854210d 100755 --- a/dbms/tests/queries/0_stateless/00980_alter_settings_race.sh +++ b/dbms/tests/queries/0_stateless/00980_alter_settings_race.sh @@ -8,7 +8,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS table_for_concurrent_alter" $CLICKHOUSE_CLIENT --query="CREATE TABLE table_for_concurrent_alter (id UInt64, Data String) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity=4096;"; n=0 -while [ "$n" -lt 100 ]; +while [ "$n" -lt 50 ]; do n=$(( n + 1 )) $CLICKHOUSE_CLIENT --query="INSERT INTO table_for_concurrent_alter VALUES(1, 'Hello')" > /dev/null 2> /dev/null & @@ -17,7 +17,7 @@ done & q=0 -while [ "$q" -lt 100 ]; +while [ "$q" -lt 50 ]; do q=$(( q + 1 )) counter=$(( 100 + q )) diff --git a/dbms/tests/queries/0_stateless/00980_create_temporary_live_view.reference b/dbms/tests/queries/0_stateless/00980_create_temporary_live_view.reference new file mode 100644 index 00000000000..7f9fcbb2e9c --- /dev/null +++ b/dbms/tests/queries/0_stateless/00980_create_temporary_live_view.reference @@ -0,0 +1,3 @@ +temporary_live_view_timeout 5 +live_view_heartbeat_interval 15 +0 diff --git 
a/dbms/tests/queries/0_stateless/00980_create_temporary_live_view.sql b/dbms/tests/queries/0_stateless/00980_create_temporary_live_view.sql new file mode 100644 index 00000000000..037c2a9e587 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00980_create_temporary_live_view.sql @@ -0,0 +1,17 @@ +SET allow_experimental_live_view = 1; + +DROP TABLE IF EXISTS test.lv; +DROP TABLE IF EXISTS test.mt; + +SELECT name, value from system.settings WHERE name = 'temporary_live_view_timeout'; +SELECT name, value from system.settings WHERE name = 'live_view_heartbeat_interval'; + +SET temporary_live_view_timeout=1; +CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple(); +CREATE TEMPORARY LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt; + +SHOW TABLES LIKE 'lv'; +SELECT sleep(2); +SHOW TABLES LIKE 'lv'; + +DROP TABLE test.mt; diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.python b/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.python new file mode 100644 index 00000000000..782671cdfaf --- /dev/null +++ b/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.python @@ -0,0 +1,81 @@ +#!/usr/bin/env python + +import subprocess +import threading +import Queue as queue +import os +import sys +import signal + + +CLICKHOUSE_CLIENT = os.environ.get('CLICKHOUSE_CLIENT') +CLICKHOUSE_CURL = os.environ.get('CLICKHOUSE_CURL') +CLICKHOUSE_URL = os.environ.get('CLICKHOUSE_URL') + + +def send_query(query): + cmd = list(CLICKHOUSE_CLIENT.split()) + cmd += ['--query', query] + # print(cmd) + return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout + + +def send_query_in_process_group(query): + cmd = list(CLICKHOUSE_CLIENT.split()) + cmd += ['--query', query] + # print(cmd) + return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setsid) + + +def read_lines_and_push_to_queue(pipe, queue): + try: + for line in iter(pipe.readline, ''): + line = line.strip() + 
print(line) + sys.stdout.flush() + queue.put(line) + except KeyboardInterrupt: + pass + + queue.put(None) + + +def test(): + send_query('DROP TABLE IF EXISTS test.lv').read() + send_query('DROP TABLE IF EXISTS test.mt').read() + send_query('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()').read() + send_query('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt').read() + + q = queue.Queue() + p = send_query_in_process_group('WATCH test.lv') + thread = threading.Thread(target=read_lines_and_push_to_queue, args=(p.stdout, q)) + thread.start() + + line = q.get() + print(line) + assert (line == '0\t1') + + send_query('INSERT INTO test.mt VALUES (1),(2),(3)').read() + line = q.get() + print(line) + assert (line == '6\t2') + + send_query('INSERT INTO test.mt VALUES (4),(5),(6)').read() + line = q.get() + print(line) + assert (line == '21\t3') + + # Send Ctrl+C to client. + os.killpg(os.getpgid(p.pid), signal.SIGINT) + # This insert shouldn't affect lv. + send_query('INSERT INTO test.mt VALUES (7),(8),(9)').read() + line = q.get() + print(line) + assert (line is None) + + send_query('DROP TABLE if exists test.lv').read() + send_query('DROP TABLE if exists test.lv').read() + + thread.join() + +test() diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.reference b/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.reference new file mode 100644 index 00000000000..1e94cdade41 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.reference @@ -0,0 +1,7 @@ +0 1 +0 1 +6 2 +6 2 +21 3 +21 3 +None diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled b/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled new file mode 100755 index 00000000000..10e4e98b2e3 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && 
pwd) +. $CURDIR/../shell_config.sh + +python $CURDIR/00991_live_view_watch_event_live.python diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_http.python b/dbms/tests/queries/0_stateless/00991_live_view_watch_http.python new file mode 100755 index 00000000000..938547ca0cb --- /dev/null +++ b/dbms/tests/queries/0_stateless/00991_live_view_watch_http.python @@ -0,0 +1,63 @@ +#!/usr/bin/env python + +import subprocess +import threading +import Queue as queue +import os +import sys + + +CLICKHOUSE_CLIENT = os.environ.get('CLICKHOUSE_CLIENT') +CLICKHOUSE_CURL = os.environ.get('CLICKHOUSE_CURL') +CLICKHOUSE_URL = os.environ.get('CLICKHOUSE_URL') + + +def send_query(query): + cmd = list(CLICKHOUSE_CLIENT.split()) + cmd += ['--query', query] + # print(cmd) + return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout + + +def send_http_query(query): + cmd = list(CLICKHOUSE_CURL.split()) # list(['curl', '-sSN', '--max-time', '10']) + cmd += ['-sSN', CLICKHOUSE_URL, '-d', query] + return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout + + +def read_lines_and_push_to_queue(pipe, queue): + for line in iter(pipe.readline, ''): + line = line.strip() + print(line) + sys.stdout.flush() + queue.put(line) + + queue.put(None) + + +def test(): + send_query('DROP TABLE IF EXISTS test.lv').read() + send_query('DROP TABLE IF EXISTS test.mt').read() + send_query('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()').read() + send_query('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt').read() + + q = queue.Queue() + pipe = send_http_query('WATCH test.lv') + thread = threading.Thread(target=read_lines_and_push_to_queue, args=(pipe, q)) + thread.start() + + line = q.get() + print(line) + assert (line == '0\t1') + + send_query('INSERT INTO test.mt VALUES (1),(2),(3)').read() + line = q.get() + print(line) + assert (line == '6\t2') + + send_query('DROP TABLE if exists test.lv').read() + 
send_query('DROP TABLE if exists test.lv').read() + + thread.join() + +test() diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_http.reference b/dbms/tests/queries/0_stateless/00991_live_view_watch_http.reference new file mode 100644 index 00000000000..489457d751b --- /dev/null +++ b/dbms/tests/queries/0_stateless/00991_live_view_watch_http.reference @@ -0,0 +1,4 @@ +0 1 +0 1 +6 2 +6 2 diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_http.sh.disabled b/dbms/tests/queries/0_stateless/00991_live_view_watch_http.sh.disabled new file mode 100755 index 00000000000..88cce77f595 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00991_live_view_watch_http.sh.disabled @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CURDIR/../shell_config.sh + +python $CURDIR/00991_live_view_watch_http.python diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.python b/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.python new file mode 100644 index 00000000000..70063adc6e3 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.python @@ -0,0 +1,83 @@ +#!/usr/bin/env python + +import subprocess +import threading +import Queue as queue +import os +import sys +import signal + + +CLICKHOUSE_CLIENT = os.environ.get('CLICKHOUSE_CLIENT') +CLICKHOUSE_CURL = os.environ.get('CLICKHOUSE_CURL') +CLICKHOUSE_URL = os.environ.get('CLICKHOUSE_URL') + + +def send_query(query): + cmd = list(CLICKHOUSE_CLIENT.split()) + cmd += ['--query', query] + # print(cmd) + return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout + + +def send_query_in_process_group(query): + cmd = list(CLICKHOUSE_CLIENT.split()) + cmd += ['--query', query, '--live_view_heartbeat_interval=1', '--progress'] + # print(cmd) + return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, 
preexec_fn=os.setsid) + + +def read_lines_and_push_to_queue(pipe, queue): + try: + for line in iter(pipe.readline, ''): + line = line.strip() + # print(line) + sys.stdout.flush() + queue.put(line) + except KeyboardInterrupt: + pass + + queue.put(None) + + +def test(): + send_query('DROP TABLE IF EXISTS test.lv').read() + send_query('DROP TABLE IF EXISTS test.mt').read() + send_query('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()').read() + send_query('CREATE TEMPORARY LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt').read() + + q = queue.Queue() + p = send_query_in_process_group('WATCH test.lv') + thread = threading.Thread(target=read_lines_and_push_to_queue, args=(p.stdout, q)) + thread.start() + + line = q.get() + # print(line) + assert (line.endswith('0\t1')) + assert ('Progress: 0.00 rows' in line) + + send_query('INSERT INTO test.mt VALUES (1),(2),(3)').read() + line = q.get() + assert (line.endswith('6\t2')) + assert ('Progress: 1.00 rows' in line) + + # send_query('INSERT INTO test.mt VALUES (4),(5),(6)').read() + # line = q.get() + # print(line) + # assert (line.endswith('6\t2')) + # assert ('Progress: 1.00 rows' in line) + + # Send Ctrl+C to client. + os.killpg(os.getpgid(p.pid), signal.SIGINT) + # This insert shouldn't affect lv. 
+ send_query('INSERT INTO test.mt VALUES (7),(8),(9)').read() + line = q.get() + # print(line) + # assert (line is None) + + send_query('DROP TABLE if exists test.lv').read() + send_query('DROP TABLE if exists test.lv').read() + + thread.join() + +test() diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.reference b/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled b/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled new file mode 100755 index 00000000000..f7aa13d52b3 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CURDIR/../shell_config.sh + +python $CURDIR/00991_temporary_live_view_watch_events_heartbeat.python diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.python b/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.python new file mode 100644 index 00000000000..d290018a02c --- /dev/null +++ b/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.python @@ -0,0 +1,81 @@ +#!/usr/bin/env python + +import subprocess +import threading +import Queue as queue +import os +import sys +import signal + + +CLICKHOUSE_CLIENT = os.environ.get('CLICKHOUSE_CLIENT') +CLICKHOUSE_CURL = os.environ.get('CLICKHOUSE_CURL') +CLICKHOUSE_URL = os.environ.get('CLICKHOUSE_URL') + + +def send_query(query): + cmd = list(CLICKHOUSE_CLIENT.split()) + cmd += ['--query', query] + # print(cmd) + return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout + + +def send_query_in_process_group(query): + cmd = list(CLICKHOUSE_CLIENT.split()) + cmd += ['--query', query] + # 
print(cmd) + return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setsid) + + +def read_lines_and_push_to_queue(pipe, queue): + try: + for line in iter(pipe.readline, ''): + line = line.strip() + print(line) + sys.stdout.flush() + queue.put(line) + except KeyboardInterrupt: + pass + + queue.put(None) + + +def test(): + send_query('DROP TABLE IF EXISTS test.lv').read() + send_query('DROP TABLE IF EXISTS test.mt').read() + send_query('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()').read() + send_query('CREATE TEMPORARY LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt').read() + + q = queue.Queue() + p = send_query_in_process_group('WATCH test.lv') + thread = threading.Thread(target=read_lines_and_push_to_queue, args=(p.stdout, q)) + thread.start() + + line = q.get() + print(line) + assert (line == '0\t1') + + send_query('INSERT INTO test.mt VALUES (1),(2),(3)').read() + line = q.get() + print(line) + assert (line == '6\t2') + + send_query('INSERT INTO test.mt VALUES (4),(5),(6)').read() + line = q.get() + print(line) + assert (line == '21\t3') + + # Send Ctrl+C to client. + os.killpg(os.getpgid(p.pid), signal.SIGINT) + # This insert shouldn't affect lv. 
+ send_query('INSERT INTO test.mt VALUES (7),(8),(9)').read() + line = q.get() + print(line) + assert (line is None) + + send_query('DROP TABLE if exists test.lv').read() + send_query('DROP TABLE if exists test.lv').read() + + thread.join() + +test() diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.reference b/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.reference new file mode 100644 index 00000000000..1e94cdade41 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.reference @@ -0,0 +1,7 @@ +0 1 +0 1 +6 2 +6 2 +21 3 +21 3 +None diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled b/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled new file mode 100755 index 00000000000..4d01d1c3a8e --- /dev/null +++ b/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
$CURDIR/../shell_config.sh + +python $CURDIR/00991_temporary_live_view_watch_live.python diff --git a/dbms/tests/queries/0_stateless/00999_join_not_nullable_types.reference b/dbms/tests/queries/0_stateless/00999_join_not_nullable_types.reference new file mode 100644 index 00000000000..7b6947fa9a2 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00999_join_not_nullable_types.reference @@ -0,0 +1,8 @@ +0 ['left'] 0 ['left'] \N +1 ['left'] 1 ['left'] 1 +2 [] \N [] 2 +['left'] 0 ['left'] \N +['left'] 1 ['left'] 1 +[] \N [] 2 +['left'] 42 \N +['right'] \N 42 diff --git a/dbms/tests/queries/0_stateless/00999_join_not_nullable_types.sql b/dbms/tests/queries/0_stateless/00999_join_not_nullable_types.sql new file mode 100644 index 00000000000..2a24c6dd296 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00999_join_not_nullable_types.sql @@ -0,0 +1,34 @@ +SET join_use_nulls = 1; + +SELECT * FROM +( + SELECT number, ['left'] as ar, number AS left_number FROM system.numbers LIMIT 2 +) +FULL JOIN +( + SELECT number, ['right'] as ar, number AS right_number FROM system.numbers LIMIT 1, 2 +) +USING (number) +ORDER BY number; + +SELECT * FROM +( + SELECT ['left'] as ar, number AS left_number FROM system.numbers LIMIT 2 +) +FULL JOIN +( + SELECT ['right'] as ar, number AS right_number FROM system.numbers LIMIT 1, 2 +) +ON left_number = right_number +ORDER BY left_number; + +SELECT * FROM +( + SELECT ['left'] as ar, 42 AS left_number +) +FULL JOIN +( + SELECT ['right'] as ar, 42 AS right_number +) +USING(ar) +ORDER BY left_number; diff --git a/dbms/tests/queries/0_stateless/00999_nullable_nested_types_4877.reference b/dbms/tests/queries/0_stateless/00999_nullable_nested_types_4877.reference new file mode 100644 index 00000000000..1e2036c94c7 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00999_nullable_nested_types_4877.reference @@ -0,0 +1,26 @@ +a ('b','c') ('b','c') +d ('e','f') ('','') +a +x +a ('b','c') ('b','c') +x ('','') ('y','z') +a +d +a +x +a ('b','c') ('b','c') +d 
('e','f') ('','') +a ('b','c') ('b','c') +x ('','') ('y','z') +a b ['b','c'] +d e [] +a b ['b','c'] +x ['y','z'] +a +d +a +x +a b ['b','c'] +d e [] +a b ['b','c'] +x \N ['y','z'] diff --git a/dbms/tests/queries/0_stateless/00999_nullable_nested_types_4877.sql b/dbms/tests/queries/0_stateless/00999_nullable_nested_types_4877.sql new file mode 100644 index 00000000000..ca523d77235 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00999_nullable_nested_types_4877.sql @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS l; +DROP TABLE IF EXISTS r; + +CREATE TABLE l (a String, b Tuple(String, String)) ENGINE = Memory(); +CREATE TABLE r (a String, c Tuple(String, String)) ENGINE = Memory(); + +INSERT INTO l (a, b) VALUES ('a', ('b', 'c')), ('d', ('e', 'f')); +INSERT INTO r (a, c) VALUES ('a', ('b', 'c')), ('x', ('y', 'z')); + +SET join_use_nulls = 0; +SELECT * from l LEFT JOIN r USING a ORDER BY a; +SELECT a from l RIGHT JOIN r USING a ORDER BY a; +SELECT * from l RIGHT JOIN r USING a ORDER BY a; + +SET join_use_nulls = 1; +SELECT a from l LEFT JOIN r USING a ORDER BY a; +SELECT a from l RIGHT JOIN r USING a ORDER BY a; +SELECT * from l LEFT JOIN r USING a ORDER BY a; +SELECT * from l RIGHT JOIN r USING a ORDER BY a; + +DROP TABLE l; +DROP TABLE r; + +CREATE TABLE l (a String, b String) ENGINE = Memory(); +CREATE TABLE r (a String, c Array(String)) ENGINE = Memory(); + +INSERT INTO l (a, b) VALUES ('a', 'b'), ('d', 'e'); +INSERT INTO r (a, c) VALUES ('a', ['b', 'c']), ('x', ['y', 'z']); + +SET join_use_nulls = 0; +SELECT * from l LEFT JOIN r USING a ORDER BY a; +SELECT * from l RIGHT JOIN r USING a ORDER BY a; + +SET join_use_nulls = 1; +SELECT a from l LEFT JOIN r USING a ORDER BY a; +SELECT a from l RIGHT JOIN r USING a ORDER BY a; +SELECT * from l LEFT JOIN r USING a ORDER BY a; +SELECT * from l RIGHT JOIN r USING a ORDER BY a; + +DROP TABLE l; +DROP TABLE r; diff --git a/dbms/tests/queries/0_stateless/00999_settings_no_extra_quotes.reference 
b/dbms/tests/queries/0_stateless/00999_settings_no_extra_quotes.reference new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00999_settings_no_extra_quotes.reference @@ -0,0 +1 @@ +0 diff --git a/dbms/tests/queries/0_stateless/00999_settings_no_extra_quotes.sql b/dbms/tests/queries/0_stateless/00999_settings_no_extra_quotes.sql new file mode 100644 index 00000000000..55d9ff2780d --- /dev/null +++ b/dbms/tests/queries/0_stateless/00999_settings_no_extra_quotes.sql @@ -0,0 +1 @@ +SELECT DISTINCT description LIKE '"%"' FROM system.settings; diff --git a/dbms/tests/queries/0_stateless/01000_unneeded_substitutions_client.reference b/dbms/tests/queries/0_stateless/01000_unneeded_substitutions_client.reference new file mode 100644 index 00000000000..13a393df666 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01000_unneeded_substitutions_client.reference @@ -0,0 +1 @@ +${} diff --git a/dbms/tests/queries/0_stateless/01000_unneeded_substitutions_client.sh b/dbms/tests/queries/0_stateless/01000_unneeded_substitutions_client.sh new file mode 100755 index 00000000000..f6517fc2a42 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01000_unneeded_substitutions_client.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
$CURDIR/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "SELECT '\${}'" diff --git a/dbms/tests/queries/0_stateless/data_orc/test.orc b/dbms/tests/queries/0_stateless/data_orc/test.orc new file mode 100644 index 00000000000..1b2c9aa4922 Binary files /dev/null and b/dbms/tests/queries/0_stateless/data_orc/test.orc differ diff --git a/dbms/tests/queries/0_stateless/helpers/client.py b/dbms/tests/queries/0_stateless/helpers/client.py new file mode 100644 index 00000000000..f3938d3bf63 --- /dev/null +++ b/dbms/tests/queries/0_stateless/helpers/client.py @@ -0,0 +1,36 @@ +import os +import sys +import time + +CURDIR = os.path.dirname(os.path.realpath(__file__)) + +sys.path.insert(0, os.path.join(CURDIR)) + +import uexpect + +prompt = ':\) ' +end_of_block = r'.*\r\n.*\r\n' + +class client(object): + def __init__(self, command=None, name='', log=None): + self.client = uexpect.spawn(['/bin/bash','--noediting']) + if command is None: + command = os.environ.get('CLICKHOUSE_BINARY', 'clickhouse') + '-client' + self.client.command = command + self.client.eol('\r') + self.client.logger(log, prefix=name) + self.client.timeout(20) + self.client.expect('[#\$] ', timeout=2) + self.client.send(command) + + def __enter__(self): + return self.client.__enter__() + + def __exit__(self, type, value, traceback): + self.client.reader['kill_event'].set() + # send Ctrl-C + self.client.send('\x03', eol='') + time.sleep(0.3) + self.client.send('quit', eol='\r') + self.client.send('\x03', eol='') + return self.client.__exit__(type, value, traceback) diff --git a/dbms/tests/queries/0_stateless/helpers/httpclient.py b/dbms/tests/queries/0_stateless/helpers/httpclient.py new file mode 100644 index 00000000000..a42fad2cbc3 --- /dev/null +++ b/dbms/tests/queries/0_stateless/helpers/httpclient.py @@ -0,0 +1,14 @@ +import os +import sys + +CURDIR = os.path.dirname(os.path.realpath(__file__)) + +sys.path.insert(0, os.path.join(CURDIR)) + +import httpexpect + +def client(request, name='', log=None): + 
client = httpexpect.spawn({'host':'localhost','port':8123}, request) + client.logger(log, prefix=name) + client.timeout(20) + return client diff --git a/dbms/tests/queries/0_stateless/helpers/httpexpect.py b/dbms/tests/queries/0_stateless/helpers/httpexpect.py new file mode 100644 index 00000000000..e440dafce4e --- /dev/null +++ b/dbms/tests/queries/0_stateless/helpers/httpexpect.py @@ -0,0 +1,73 @@ +# Copyright (c) 2019 Vitaliy Zakaznikov +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import sys +import httplib + +CURDIR = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, CURDIR) + +import uexpect + +from threading import Thread, Event +from Queue import Queue, Empty + +class IO(uexpect.IO): + def __init__(self, connection, response, queue, reader): + self.connection = connection + self.response = response + super(IO, self).__init__(None, None, queue, reader) + + def write(self, data): + raise NotImplementedError + + def close(self, force=True): + self.reader['kill_event'].set() + self.connection.close() + if self._logger: + self._logger.write('\n') + self._logger.flush() + + +def reader(response, queue, kill_event): + while True: + try: + if kill_event.is_set(): + break + data = response.read(1) + queue.put(data) + except Exception, e: + if kill_event.is_set(): + break + raise + +def spawn(connection, request): + connection = httplib.HTTPConnection(**connection) + connection.request(**request) + response = connection.getresponse() + + queue 
= Queue() + reader_kill_event = Event() + thread = Thread(target=reader, args=(response, queue, reader_kill_event)) + thread.daemon = True + thread.start() + + return IO(connection, response, queue, reader={'thread':thread, 'kill_event':reader_kill_event}) + +if __name__ == '__main__': + with http({'host':'localhost','port':8123},{'method':'GET', 'url':'?query=SELECT%201'}) as client: + client.logger(sys.stdout) + client.timeout(2) + print client.response.status, client.response.reason + client.expect('1\n') diff --git a/dbms/tests/queries/0_stateless/helpers/uexpect.py b/dbms/tests/queries/0_stateless/helpers/uexpect.py new file mode 100644 index 00000000000..f71b32a53e1 --- /dev/null +++ b/dbms/tests/queries/0_stateless/helpers/uexpect.py @@ -0,0 +1,206 @@ +# Copyright (c) 2019 Vitaliy Zakaznikov +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os +import pty +import time +import sys +import re + +from threading import Thread, Event +from subprocess import Popen +from Queue import Queue, Empty + +class TimeoutError(Exception): + def __init__(self, timeout): + self.timeout = timeout + + def __str__(self): + return 'Timeout %.3fs' % float(self.timeout) + +class ExpectTimeoutError(Exception): + def __init__(self, pattern, timeout, buffer): + self.pattern = pattern + self.timeout = timeout + self.buffer = buffer + + def __str__(self): + s = 'Timeout %.3fs ' % float(self.timeout) + if self.pattern: + s += 'for %s ' % repr(self.pattern.pattern) + if self.buffer: + s += 'buffer %s ' % repr(self.buffer[:]) + s += 'or \'%s\'' % ','.join(['%x' % ord(c) for c in self.buffer[:]]) + return s + +class IO(object): + class EOF(object): + pass + + class Timeout(object): + pass + + EOF = EOF + TIMEOUT = Timeout + + class Logger(object): + def __init__(self, logger, prefix=''): + self._logger = logger + self._prefix = prefix + + def write(self, data): + self._logger.write(('\n' + data).replace('\n','\n' + self._prefix)) + + def flush(self): + self._logger.flush() + + def __init__(self, process, master, queue, reader): + self.process = process + self.master = master + self.queue = queue + self.buffer = None + self.before = None + self.after = None + self.match = None + self.pattern = None + self.reader = reader + self._timeout = None + self._logger = None + self._eol = '' + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def logger(self, logger=None, prefix=''): + if logger: + self._logger = self.Logger(logger, prefix=prefix) + return self._logger + + def timeout(self, timeout=None): + if timeout: + self._timeout = timeout + return self._timeout + + def eol(self, eol=None): + if eol: + self._eol = eol + return self._eol + + def close(self, force=True): + self.reader['kill_event'].set() + os.system('pkill -TERM -P %d' % self.process.pid) + if force: + 
self.process.kill() + else: + self.process.terminate() + os.close(self.master) + if self._logger: + self._logger.write('\n') + self._logger.flush() + + def send(self, data, eol=None): + if eol is None: + eol = self._eol + return self.write(data + eol) + + def write(self, data): + return os.write(self.master, data) + + def expect(self, pattern, timeout=None, escape=False): + self.match = None + self.before = None + self.after = None + if escape: + pattern = re.escape(pattern) + pattern = re.compile(pattern) + if timeout is None: + timeout = self._timeout + timeleft = timeout + while True: + start_time = time.time() + if self.buffer is not None: + self.match = pattern.search(self.buffer, 0) + if self.match is not None: + self.after = self.buffer[self.match.start():self.match.end()] + self.before = self.buffer[:self.match.start()] + self.buffer = self.buffer[self.match.end():] + break + if timeleft < 0: + break + try: + data = self.read(timeout=timeleft, raise_exception=True) + except TimeoutError: + if self._logger: + self._logger.write((self.buffer or '') + '\n') + self._logger.flush() + exception = ExpectTimeoutError(pattern, timeout, self.buffer) + self.buffer = None + raise exception + timeleft -= (time.time() - start_time) + if data: + self.buffer = (self.buffer + data) if self.buffer else data + if self._logger: + self._logger.write((self.before or '') + (self.after or '')) + self._logger.flush() + if self.match is None: + exception = ExpectTimeoutError(pattern, timeout, self.buffer) + self.buffer = None + raise exception + return self.match + + def read(self, timeout=0, raise_exception=False): + data = '' + timeleft = timeout + try: + while timeleft >= 0 : + start_time = time.time() + data += self.queue.get(timeout=timeleft) + if data: + break + timeleft -= (time.time() - start_time) + except Empty: + if data: + return data + if raise_exception: + raise TimeoutError(timeout) + pass + if not data and raise_exception: + raise TimeoutError(timeout) + + return 
data + +def spawn(command): + master, slave = pty.openpty() + process = Popen(command, preexec_fn=os.setsid, stdout=slave, stdin=slave, stderr=slave, bufsize=1) + os.close(slave) + + queue = Queue() + reader_kill_event = Event() + thread = Thread(target=reader, args=(process, master, queue, reader_kill_event)) + thread.daemon = True + thread.start() + + return IO(process, master, queue, reader={'thread':thread, 'kill_event':reader_kill_event}) + +def reader(process, out, queue, kill_event): + while True: + try: + data = os.read(out, 65536) + queue.put(data) + except: + if kill_event.is_set(): + break + raise diff --git a/dbms/tests/tsan_suppressions.txt b/dbms/tests/tsan_suppressions.txt index 476e135de14..3dc306ee133 100644 --- a/dbms/tests/tsan_suppressions.txt +++ b/dbms/tests/tsan_suppressions.txt @@ -1,2 +1,5 @@ # libc++ race:locale + +# Too many mutexes: https://github.com/google/sanitizers/issues/950 +deadlock:DB::MergeTreeReadPool::fillPerPartInfo diff --git a/docker/packager/packager b/docker/packager/packager index 0e8bf6ea98d..c132f514569 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -143,10 +143,10 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, cache, di result.append("ALIEN_PKGS='" + ' '.join(['--' + pkg for pkg in alien_pkgs]) + "'") if unbundled: - cmake_flags.append('-DUNBUNDLED=1 -DENABLE_MYSQL=0 -DENABLE_POCO_ODBC=0 -DENABLE_ODBC=0') + cmake_flags.append('-DUNBUNDLED=1 -DENABLE_MYSQL=0 -DENABLE_POCO_ODBC=0 -DENABLE_ODBC=0 -DUSE_CAPNP=0') if split_binary: - cmake_flags.append('-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 -DCLICKHOUSE_SPLIT_BINARY=1 -DGLIBC_COMPATIBILITY=ON') + cmake_flags.append('-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 -DCLICKHOUSE_SPLIT_BINARY=1') if with_coverage: cmake_flags.append('-DWITH_COVERAGE=1') diff --git a/libs/CMakeLists.txt b/libs/CMakeLists.txt index d526a662dc0..b5bcbd804be 100644 --- a/libs/CMakeLists.txt +++ b/libs/CMakeLists.txt @@ -15,10 
+15,6 @@ if (USE_INTERNAL_MEMCPY) add_subdirectory (libmemcpy) endif() -if (GLIBC_COMPATIBILITY) - add_subdirectory (libglibc-compatibility) -endif () - if (USE_MYSQL) add_subdirectory (libmysqlxx) endif () diff --git a/libs/libcommon/CMakeLists.txt b/libs/libcommon/CMakeLists.txt index 2744714a9c4..c78473890dc 100644 --- a/libs/libcommon/CMakeLists.txt +++ b/libs/libcommon/CMakeLists.txt @@ -123,9 +123,7 @@ target_link_libraries (common PUBLIC ${Boost_SYSTEM_LIBRARY} PRIVATE - ${CMAKE_DL_LIBS} ${MALLOC_LIBRARIES} - Threads::Threads ${MEMCPY_LIBRARIES}) if (RT_LIBRARY) diff --git a/libs/libcommon/include/common/DateLUTImpl.h b/libs/libcommon/include/common/DateLUTImpl.h index 2258620eb26..ef50d6ede3f 100644 --- a/libs/libcommon/include/common/DateLUTImpl.h +++ b/libs/libcommon/include/common/DateLUTImpl.h @@ -28,7 +28,7 @@ enum class WeekModeFlag : UInt8 FIRST_WEEKDAY = 4, NEWYEAR_DAY = 8 }; -typedef std::pair YearWeek; +using YearWeek = std::pair; /** Lookup table to conversion of time to date, and to month / year / day of week / day of month and so on. * First time was implemented for OLAPServer, that needed to do billions of such transformations. 
diff --git a/libs/libcommon/include/common/config_common.h.in b/libs/libcommon/include/common/config_common.h.in index 1301049b24b..810cf0b87f9 100644 --- a/libs/libcommon/include/common/config_common.h.in +++ b/libs/libcommon/include/common/config_common.h.in @@ -8,5 +8,4 @@ #cmakedefine01 USE_LIBEDIT #cmakedefine01 HAVE_READLINE_HISTORY #cmakedefine01 UNBUNDLED -#cmakedefine01 USE_INTERNAL_UNWIND_LIBRARY #cmakedefine01 WITH_COVERAGE diff --git a/libs/libcommon/src/tests/CMakeLists.txt b/libs/libcommon/src/tests/CMakeLists.txt index 2bb8afe6fa1..15d872ac49d 100644 --- a/libs/libcommon/src/tests/CMakeLists.txt +++ b/libs/libcommon/src/tests/CMakeLists.txt @@ -16,7 +16,7 @@ target_link_libraries (date_lut3 common ${PLATFORM_LIBS}) target_link_libraries (date_lut4 common ${PLATFORM_LIBS}) target_link_libraries (date_lut_default_timezone common ${PLATFORM_LIBS}) target_link_libraries (local_date_time_comparison common) -target_link_libraries (realloc-perf common Threads::Threads) +target_link_libraries (realloc-perf common) add_check(local_date_time_comparison) if(USE_GTEST) diff --git a/libs/libglibc-compatibility/CMakeLists.txt b/libs/libglibc-compatibility/CMakeLists.txt index fe98ae9bf0d..a62f5e75e17 100644 --- a/libs/libglibc-compatibility/CMakeLists.txt +++ b/libs/libglibc-compatibility/CMakeLists.txt @@ -1,25 +1,45 @@ -enable_language(ASM) -include(CheckIncludeFile) +if (GLIBC_COMPATIBILITY) + set (USE_INTERNAL_MEMCPY ON) -check_include_file("sys/random.h" HAVE_SYS_RANDOM_H) + enable_language(ASM) + include(CheckIncludeFile) -if(COMPILER_CLANG) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-builtin-requires-header") -endif() + check_include_file("sys/random.h" HAVE_SYS_RANDOM_H) -add_headers_and_sources(glibc_compatibility .) 
-add_headers_and_sources(glibc_compatibility musl) -list(APPEND glibc_compatibility_sources musl/syscall.s musl/longjmp.s) + if(COMPILER_CLANG) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-builtin-requires-header") + endif() -list(REMOVE_ITEM glibc_compatibility_sources musl/getentropy.c) -if(HAVE_SYS_RANDOM_H) - list(APPEND glibc_compatibility_sources musl/getentropy.c) -endif() + add_headers_and_sources(glibc_compatibility .) + add_headers_and_sources(glibc_compatibility musl) + list(APPEND glibc_compatibility_sources musl/syscall.s musl/longjmp.s) -if(MAKE_STATIC_LIBRARIES) - list(APPEND glibc_compatibility_sources libcxxabi/cxa_thread_atexit.cpp) -endif() + list(REMOVE_ITEM glibc_compatibility_sources musl/getentropy.c) + if(HAVE_SYS_RANDOM_H) + list(APPEND glibc_compatibility_sources musl/getentropy.c) + endif() -add_library(glibc-compatibility STATIC ${glibc_compatibility_sources}) + if(MAKE_STATIC_LIBRARIES) + list(APPEND glibc_compatibility_sources libcxxabi/cxa_thread_atexit.cpp) + endif() -target_include_directories(glibc-compatibility PRIVATE libcxxabi) + add_library(glibc-compatibility STATIC ${glibc_compatibility_sources}) + + target_include_directories(glibc-compatibility PRIVATE libcxxabi) + + if (USE_STATIC_LIBRARIES=0 AND MAKE_STATIC_LIBRARIES=OFF) + target_compile_options(PRIVATE -fPIC) + endif () + + target_link_libraries(global-libs INTERFACE glibc-compatibility) + + install( + TARGETS glibc-compatibility + EXPORT global + ARCHIVE DESTINATION lib + ) + + message (STATUS "Some symbols from glibc will be replaced for compatibility") +elseif (YANDEX_OFFICIAL_BUILD) + message (WARNING "Option GLIBC_COMPATIBILITY must be turned on for production builds.") +endif () diff --git a/utils/compressor/CMakeLists.txt b/utils/compressor/CMakeLists.txt index 3fdf8aa5eaf..c032054187b 100644 --- a/utils/compressor/CMakeLists.txt +++ b/utils/compressor/CMakeLists.txt @@ -4,7 +4,7 @@ add_executable (zstd_test zstd_test.cpp) if(ZSTD_LIBRARY) 
target_link_libraries(zstd_test PRIVATE ${ZSTD_LIBRARY}) endif() -target_link_libraries (zstd_test PRIVATE common Threads::Threads) +target_link_libraries (zstd_test PRIVATE common) add_executable (mutator mutator.cpp) target_link_libraries(mutator PRIVATE clickhouse_common_io)