Merge branch 'master' into fix_doc_tupleHammingDistance

mergify[bot] 2022-01-19 14:15:32 +00:00 committed by GitHub
commit e6d10d8bfb
76 changed files with 818 additions and 147 deletions


@ -7,10 +7,10 @@ env:
on: # yamllint disable-line rule:truthy
push:
branches:
# 22.1 and 22.10
# 22.1 and 22.10
- '2[1-9].[1-9][0-9]'
- '2[1-9].[1-9]'
jobs:
DockerHubPushAarch64:
runs-on: [self-hosted, func-tester-aarch64]

.gitignore vendored (1 change)

@ -13,6 +13,7 @@
/build_*
/build-*
/tests/venv
/obj-x86_64-linux-gnu/
# logs
*.log


@ -415,13 +415,14 @@ else ()
endif ()
if (WERROR)
add_warning(error)
# Don't pollute CMAKE_CXX_FLAGS with -Werror, as it will break some CMake checks.
# Instead, adopt the modern CMake usage-requirements approach.
target_compile_options(global-libs INTERFACE "-Werror")
endif ()
# These are extra checks for correct library dependencies.
if (OS_LINUX AND NOT SANITIZE)
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-undefined")
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-undefined")
target_link_options(global-libs INTERFACE "-Wl,--no-undefined")
endif ()
# Increase the stack size on musl. We need a big stack for our recursive-descent parser.
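The hunk above swaps global flag edits for usage requirements: options attached to an INTERFACE target propagate to every target that links it, while CMAKE_CXX_FLAGS and the linker-flag variables stay clean, so CMake's own configure-time checks keep working. A minimal standalone sketch of the pattern (the project name, main.cpp, and the Linux guard are illustrative assumptions, not the ClickHouse build):

``` cmake
cmake_minimum_required(VERSION 3.15)
project(usage_requirements_demo CXX)

# Interface target that carries only build settings, no sources.
add_library(global-libs INTERFACE)

# Same idea as the diff: -Werror and --no-undefined become usage requirements
# instead of edits to CMAKE_CXX_FLAGS / CMAKE_*_LINKER_FLAGS, so compiler
# checks run by CMake itself are unaffected.
target_compile_options(global-libs INTERFACE "-Werror")
if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
    target_link_options(global-libs INTERFACE "-Wl,--no-undefined")
endif ()

# Every target that links global-libs inherits both options.
add_executable(app main.cpp)
target_link_libraries(app PRIVATE global-libs)
```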


@ -25,8 +25,11 @@ endif ()
if (USE_DEBUG_HELPERS)
get_target_property(MAGIC_ENUM_INCLUDE_DIR magic_enum INTERFACE_INCLUDE_DIRECTORIES)
set (INCLUDE_DEBUG_HELPERS "-I\"${MAGIC_ENUM_INCLUDE_DIR}\" -include \"${ClickHouse_SOURCE_DIR}/base/base/iostream_debug_helpers.h\"")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}")
# CMake generator expressions do insane quoting when they encounter special characters like quotes, spaces, etc.
# Prefixing the option group with "SHELL:" forces CMake to use the original text.
set (INCLUDE_DEBUG_HELPERS "SHELL:-I\"${MAGIC_ENUM_INCLUDE_DIR}\" -include \"${ClickHouse_SOURCE_DIR}/base/base/iostream_debug_helpers.h\"")
# Use a generator expression because we don't want to pollute CMAKE_CXX_FLAGS, which would interfere with the CMake check system.
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:${INCLUDE_DEBUG_HELPERS}>)
endif ()
add_library (common ${SRCS})
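For context, here is a standalone sketch of the "SHELL:" trick used above (the include directory and forced-include header are placeholders, not the real magic_enum / iostream_debug_helpers paths): without the prefix, CMake would re-quote and de-duplicate the space-separated pieces of the option group, and the $<COMPILE_LANGUAGE:CXX> generator expression keeps the flags out of CMAKE_CXX_FLAGS entirely.

``` cmake
# Hypothetical paths standing in for the real include dir and helper header.
set(DEBUG_HELPERS_INCLUDE_DIR "${CMAKE_SOURCE_DIR}/contrib/helpers/include")
set(DEBUG_HELPERS_HEADER "${CMAKE_SOURCE_DIR}/base/debug_helpers.h")

# "SHELL:" keeps the whole option group intact as one shell-like string;
# the generator expression applies it only when compiling C++ sources.
set(INCLUDE_DEBUG_HELPERS
    "SHELL:-I\"${DEBUG_HELPERS_INCLUDE_DIR}\" -include \"${DEBUG_HELPERS_HEADER}\"")
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:${INCLUDE_DEBUG_HELPERS}>)
```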


@ -1,7 +1,7 @@
#pragma once
#include "strong_typedef.h"
#include "extended_types.h"
#include <base/strong_typedef.h>
#include <base/extended_types.h>
namespace DB
{


@ -5,9 +5,11 @@
#include <Poco/Util/AbstractConfiguration.h>
#include "OwnFormattingChannel.h"
#include "OwnPatternFormatter.h"
#include "OwnSplitChannel.h"
#include <Poco/ConsoleChannel.h>
#include <Poco/Logger.h>
#include <Poco/Net/RemoteSyslogChannel.h>
#include <Interpreters/TextLog.h>
#include <filesystem>
namespace fs = std::filesystem;


@ -5,9 +5,12 @@
#include <Poco/AutoPtr.h>
#include <Poco/FileChannel.h>
#include <Poco/Util/Application.h>
#include <Interpreters/TextLog.h>
#include "OwnSplitChannel.h"
namespace DB
{
class TextLog;
}
namespace Poco::Util
{


@ -1,11 +1,16 @@
#pragma once
#include <atomic>
#include <vector>
#include <map>
#include <mutex>
#include <Poco/AutoPtr.h>
#include <Poco/Channel.h>
#include "ExtendedLogChannel.h"
#include <Interpreters/TextLog.h>
namespace DB
{
class TextLog;
}
namespace DB
{


@ -14,9 +14,9 @@ set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-aarch
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/aarch64-linux-gnu/libc")
set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)


@ -14,9 +14,9 @@ set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-power
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/powerpc64le-linux-gnu/libc")
set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)


@ -14,9 +14,9 @@ set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-riscv
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}")
set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=bfd")
set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=bfd")


@ -14,9 +14,9 @@ set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_6
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}")
set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)


@ -345,7 +345,7 @@ if (USE_INTERNAL_PROTOBUF_LIBRARY)
add_dependencies(${ARROW_LIBRARY} protoc)
endif ()
target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src")
target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC ${ARROW_SRC_DIR})
target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/cpp/src")
target_link_libraries(${ARROW_LIBRARY} PRIVATE ${DOUBLE_CONVERSION_LIBRARIES} ${Protobuf_LIBRARY})
target_link_libraries(${ARROW_LIBRARY} PRIVATE lz4)
@ -360,16 +360,15 @@ if (ARROW_WITH_ZSTD)
target_include_directories(${ARROW_LIBRARY} SYSTEM BEFORE PRIVATE ${ZLIB_INCLUDE_DIR})
endif ()
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${ORC_INCLUDE_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${ORC_SOURCE_SRC_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${ORC_SOURCE_WRAP_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${GOOGLE_PROTOBUF_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${ORC_BUILD_SRC_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${ORC_BUILD_INCLUDE_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${ORC_ADDITION_SOURCE_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${ARROW_SRC_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${FLATBUFFERS_INCLUDE_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${HDFS_INCLUDE_DIR})
target_include_directories(${ARROW_LIBRARY} SYSTEM PRIVATE ${ORC_INCLUDE_DIR})
target_include_directories(${ARROW_LIBRARY} SYSTEM PRIVATE ${ORC_SOURCE_SRC_DIR})
target_include_directories(${ARROW_LIBRARY} SYSTEM PRIVATE ${ORC_SOURCE_WRAP_DIR})
target_include_directories(${ARROW_LIBRARY} SYSTEM PRIVATE ${GOOGLE_PROTOBUF_DIR})
target_include_directories(${ARROW_LIBRARY} SYSTEM PRIVATE ${ORC_BUILD_SRC_DIR})
target_include_directories(${ARROW_LIBRARY} SYSTEM PRIVATE ${ORC_BUILD_INCLUDE_DIR})
target_include_directories(${ARROW_LIBRARY} SYSTEM PRIVATE ${ORC_ADDITION_SOURCE_DIR})
target_include_directories(${ARROW_LIBRARY} SYSTEM PRIVATE ${FLATBUFFERS_INCLUDE_DIR})
target_include_directories(${ARROW_LIBRARY} SYSTEM PRIVATE ${HDFS_INCLUDE_DIR})
# === parquet


@ -81,7 +81,7 @@ set(S3_INCLUDES
)
add_library(aws_s3_checksums ${AWS_CHECKSUMS_SOURCES})
target_include_directories(aws_s3_checksums PUBLIC "${AWS_CHECKSUMS_LIBRARY_DIR}/include/")
target_include_directories(aws_s3_checksums SYSTEM PUBLIC "${AWS_CHECKSUMS_LIBRARY_DIR}/include/")
if(CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
target_compile_definitions(aws_s3_checksums PRIVATE "-DDEBUG_BUILD")
endif()
@ -93,7 +93,7 @@ add_library(aws_s3 ${S3_UNIFIED_SRC})
target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_MAJOR=1")
target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_MINOR=7")
target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_PATCH=231")
target_include_directories(aws_s3 PUBLIC ${S3_INCLUDES})
target_include_directories(aws_s3 SYSTEM PUBLIC ${S3_INCLUDES})
if (OPENSSL_FOUND)
target_compile_definitions(aws_s3 PUBLIC -DENABLE_OPENSSL_ENCRYPTION)


@ -131,8 +131,6 @@ if(BUILD_SHARED_LIBS)
set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
endif()
include_directories("${BORINGSSL_SOURCE_DIR}/include")
set(
CRYPTO_ios_aarch64_SOURCES


@ -57,7 +57,7 @@ if (NOT EXTERNAL_CCTZ_LIBRARY_FOUND OR NOT EXTERNAL_CCTZ_LIBRARY_WORKS)
)
add_library (cctz ${SRCS})
target_include_directories (cctz PUBLIC "${LIBRARY_DIR}/include")
target_include_directories (cctz SYSTEM PUBLIC "${LIBRARY_DIR}/include")
if (OS_FREEBSD)
# yes, we need the Linux branch, because the BSD check is nested inside the Linux check in time_zone_libc.cc:24


@ -4,5 +4,5 @@ add_library(cityhash
include/city.h
src/config.h)
target_include_directories(cityhash BEFORE PUBLIC include)
target_include_directories(cityhash PRIVATE src)
target_include_directories(cityhash SYSTEM BEFORE PUBLIC include)
target_include_directories(cityhash SYSTEM PRIVATE src)


@ -1,2 +1,2 @@
add_library(consistent-hashing consistent_hashing.cpp popcount.cpp)
target_include_directories(consistent-hashing PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories(consistent-hashing SYSTEM PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})


@ -153,7 +153,7 @@ target_compile_definitions (curl PRIVATE
libcurl_EXPORTS
OS="${CMAKE_SYSTEM_NAME}"
)
target_include_directories (curl PUBLIC
target_include_directories (curl SYSTEM PUBLIC
"${LIBRARY_DIR}/include"
"${LIBRARY_DIR}/lib"
. # curl_config.h


@ -6,4 +6,4 @@ set(SRCS
)
add_library(lemmagen STATIC ${SRCS})
target_include_directories(lemmagen PUBLIC "${LEMMAGEN_INCLUDE_DIR}")
target_include_directories(lemmagen SYSTEM PUBLIC "${LEMMAGEN_INCLUDE_DIR}")


@ -55,8 +55,8 @@ set(SRCS
add_library(libpq ${SRCS})
target_include_directories (libpq PUBLIC ${LIBPQ_SOURCE_DIR})
target_include_directories (libpq PUBLIC "${LIBPQ_SOURCE_DIR}/include")
target_include_directories (libpq PRIVATE "${LIBPQ_SOURCE_DIR}/configs")
target_include_directories (libpq SYSTEM PUBLIC ${LIBPQ_SOURCE_DIR})
target_include_directories (libpq SYSTEM PUBLIC "${LIBPQ_SOURCE_DIR}/include")
target_include_directories (libpq SYSTEM PRIVATE "${LIBPQ_SOURCE_DIR}/configs")
target_link_libraries (libpq PRIVATE ssl)

contrib/libpqxx vendored (2 changes)

@ -1 +1 @@
Subproject commit 63e20f9485b8cbeabf99008123248fc9f033e766
Subproject commit a4e834839270a8c1f7ff1db351ba85afced3f0e2


@ -68,12 +68,3 @@ add_library(libpqxx ${SRCS} ${HDRS})
target_link_libraries(libpqxx PUBLIC ${LIBPQ_LIBRARY})
target_include_directories (libpqxx SYSTEM PRIVATE "${LIBRARY_DIR}/include")
# crutch
set(CM_CONFIG_H_IN "${LIBRARY_DIR}/include/pqxx/config.h.in")
set(CM_CONFIG_PUB "${LIBRARY_DIR}/include/pqxx/config-public-compiler.h")
set(CM_CONFIG_INT "${LIBRARY_DIR}/include/pqxx/config-internal-compiler.h")
set(CM_CONFIG_PQ "${LIBRARY_DIR}/include/pqxx/config-internal-libpq.h")
configure_file("${CM_CONFIG_H_IN}" "${CM_CONFIG_INT}" @ONLY)
configure_file("${CM_CONFIG_H_IN}" "${CM_CONFIG_PUB}" @ONLY)
configure_file("${CM_CONFIG_H_IN}" "${CM_CONFIG_PQ}" @ONLY)


@ -28,4 +28,4 @@ endforeach ()
# All the sources are parsed. Now just add the lib.
add_library ( stemmer STATIC ${_SOURCES} ${_HEADERS} )
target_include_directories (stemmer PUBLIC "${STEMMER_INCLUDE_DIR}")
target_include_directories (stemmer SYSTEM PUBLIC "${STEMMER_INCLUDE_DIR}")


@ -1,3 +1,3 @@
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/magic_enum")
add_library (magic_enum INTERFACE)
target_include_directories(magic_enum INTERFACE ${LIBRARY_DIR}/include)
target_include_directories(magic_enum SYSTEM INTERFACE ${LIBRARY_DIR}/include)


@ -40,5 +40,5 @@ target_sources(snappy
"${SOURCE_DIR}/snappy-stubs-internal.cc"
"${SOURCE_DIR}/snappy.cc")
target_include_directories(snappy PUBLIC ${SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
target_include_directories(snappy SYSTEM PUBLIC ${SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
target_compile_definitions(snappy PRIVATE -DHAVE_CONFIG_H)


@ -23,6 +23,7 @@ set (SRCS_LTDL
add_library (ltdl ${SRCS_LTDL})
target_include_directories(ltdl
SYSTEM
PRIVATE
linux_x86_64/libltdl
PUBLIC
@ -276,6 +277,7 @@ target_link_libraries (unixodbc PRIVATE ltdl)
# SYSTEM_FILE_PATH was changed to /etc
target_include_directories (unixodbc
SYSTEM
PRIVATE
linux_x86_64/private
PUBLIC


@ -10,4 +10,4 @@ add_library(wnb ${SRCS})
target_link_libraries(wnb PRIVATE boost::headers_only boost::graph)
target_include_directories(wnb PUBLIC "${LIBRARY_DIR}")
target_include_directories(wnb SYSTEM PUBLIC "${LIBRARY_DIR}")


@ -241,7 +241,7 @@ add_library(liblzma
${SRC_DIR}/src/liblzma/simple/x86.c
)
target_include_directories(liblzma PRIVATE
target_include_directories(liblzma SYSTEM PUBLIC
${SRC_DIR}/src/liblzma/api
${SRC_DIR}/src/liblzma/common
${SRC_DIR}/src/liblzma/check


@ -158,4 +158,4 @@ if (ARCH_AMD64 OR ARCH_AARCH64)
target_compile_definitions (zlib PUBLIC X86_64 UNALIGNED_OK)
endif ()
target_include_directories(zlib PUBLIC ${SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
target_include_directories(zlib SYSTEM PUBLIC ${SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})

debian/.gitignore vendored (7 changes)

@ -2,9 +2,16 @@ control
copyright
tmp/
clickhouse-benchmark/
clickhouse-client.docs
clickhouse-client/
clickhouse-common-static-dbg/
clickhouse-common-static.docs
clickhouse-common-static/
clickhouse-server-base/
clickhouse-server-common/
clickhouse-server/
clickhouse-test/
debhelper-build-stamp
files
*.debhelper.log
*.debhelper

debian/.pbuilderrc vendored (6 changes)

@ -104,8 +104,7 @@ ALLOWUNTRUSTED=${SET_ALLOWUNTRUSTED:=${ALLOWUNTRUSTED}}
if $(echo ${DEBIAN_SUITES[@]} | grep -q $DIST); then
# Debian configuration
OSNAME=debian
#MIRRORSITE=${SET_MIRRORSITE="http://deb.debian.org/$OSNAME/"}
MIRRORSITE=${SET_MIRRORSITE="http://mirror.yandex.ru/$OSNAME/"}
MIRRORSITE=${SET_MIRRORSITE="http://deb.debian.org/$OSNAME/"}
COMPONENTS="main contrib non-free"
if $(echo "$STABLE_CODENAME stable" | grep -q $DIST); then
OTHERMIRROR="$OTHERMIRROR | deb $MIRRORSITE $STABLE_BACKPORTS_SUITE $COMPONENTS"
@ -125,8 +124,7 @@ elif $(echo ${UBUNTU_SUITES[@]} | grep -q $DIST); then
OSNAME=ubuntu
if [[ "$ARCH" == "amd64" || "$ARCH" == "i386" ]]; then
#MIRRORSITE=${SET_MIRRORSITE="http://archive.ubuntu.com/$OSNAME/"}
MIRRORSITE=${SET_MIRRORSITE="http://mirror.yandex.ru/$OSNAME/"}
MIRRORSITE=${SET_MIRRORSITE="http://archive.ubuntu.com/$OSNAME/"}
else
MIRRORSITE=${SET_MIRRORSITE="http://ports.ubuntu.com/ubuntu-ports/"}
fi


@ -5,7 +5,7 @@
# Default-Stop: 0 1 6
# Should-Start: $time $network
# Should-Stop: $network
# Short-Description: Yandex clickhouse-server daemon
# Short-Description: clickhouse-server daemon
### END INIT INFO
#
# NOTES:

debian/control vendored (6 changes)

@ -18,7 +18,7 @@ Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-common-static (= ${binar
Replaces: clickhouse-compressor
Conflicts: clickhouse-compressor
Description: Client binary for ClickHouse
Yandex ClickHouse is a column-oriented database management system
ClickHouse is a column-oriented database management system
that allows generating analytical data reports in real time.
.
This package provides clickhouse-client , clickhouse-local and clickhouse-benchmark
@ -30,7 +30,7 @@ Suggests: clickhouse-common-static-dbg
Replaces: clickhouse-common, clickhouse-server-base
Provides: clickhouse-common, clickhouse-server-base
Description: Common files for ClickHouse
Yandex ClickHouse is a column-oriented database management system
ClickHouse is a column-oriented database management system
that allows generating analytical data reports in real time.
.
This package provides common files for both clickhouse server and client
@ -42,7 +42,7 @@ Recommends: libcap2-bin
Replaces: clickhouse-server-common, clickhouse-server-base
Provides: clickhouse-server-common
Description: Server binary for ClickHouse
Yandex ClickHouse is a column-oriented database management system
ClickHouse is a column-oriented database management system
that allows generating analytical data reports in real time.
.
This package provides clickhouse common configuration files

debian/rules vendored (3 changes)

@ -96,6 +96,9 @@ ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
cd $(BUILDDIR) && ctest -j$(THREADS_COUNT) -V
endif
# Disable config.guess and config.sub update
override_dh_update_autotools_config:
override_dh_clean:
rm -rf debian/copyright debian/clickhouse-client.docs debian/clickhouse-common-static.docs
dh_clean # -X contrib


@ -30,10 +30,10 @@ def pull_image(image_name):
def build_image(image_name, filepath):
context = os.path.dirname(filepath)
build_cmd = "docker build --network=host -t {} -f {} {}".format(image_name, filepath, context)
logging.info("Will build image with cmd: '{}'".format(build_cmd))
subprocess.check_call(
"docker build --network=host -t {} -f {} {}".format(
image_name, filepath, context
),
build_cmd,
shell=True,
)
@ -61,7 +61,6 @@ def run_docker_image_with_env(image_name, output, env_variables, ch_root, ccache
subprocess.check_call(cmd, shell=True)
def parse_env_variables(build_type, compiler, sanitizer, package_type, image_type, cache, distcc_hosts, split_binary, clang_tidy, version, author, official, alien_pkgs, with_coverage, with_binaries):
CLANG_PREFIX = "clang"
DARWIN_SUFFIX = "-darwin"
DARWIN_ARM_SUFFIX = "-darwin-aarch64"
ARM_SUFFIX = "-aarch64"
@ -71,13 +70,11 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
result = []
cmake_flags = ['$CMAKE_FLAGS']
is_clang = compiler.startswith(CLANG_PREFIX)
is_cross_darwin = compiler.endswith(DARWIN_SUFFIX)
is_cross_darwin_arm = compiler.endswith(DARWIN_ARM_SUFFIX)
is_cross_arm = compiler.endswith(ARM_SUFFIX)
is_cross_ppc = compiler.endswith(PPC_SUFFIX)
is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
is_cross_compile = is_cross_darwin or is_cross_darwin_arm or is_cross_arm or is_cross_freebsd or is_cross_ppc
if is_cross_darwin:
cc = compiler[:-len(DARWIN_SUFFIX)]


@ -812,4 +812,41 @@ Result:
└─────────────────────┘
```
## h3ToCenterChild {#h3tocenterchild}
Returns the center child (finer) [H3](#h3index) index contained by the given [H3](#h3index) index at the given resolution.
**Syntax**
``` sql
h3ToCenterChild(index, resolution)
```
**Parameters**
- `index` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
**Returned values**
- [H3](#h3index) index of the center child contained by the given [H3](#h3index) index at the given resolution.
Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT h3ToCenterChild(577023702256844799,1) AS centerToChild;
```
Result:
``` text
┌──────centerToChild─┐
│ 581496515558637567 │
└────────────────────┘
```
[Original article](https://clickhouse.com/docs/en/sql-reference/functions/geo/h3) <!--hide-->


@ -29,8 +29,11 @@ configure_file (Core/config_core.h.in "${CMAKE_CURRENT_BINARY_DIR}/Core/include/
if (USE_DEBUG_HELPERS)
get_target_property(MAGIC_ENUM_INCLUDE_DIR magic_enum INTERFACE_INCLUDE_DIRECTORIES)
set (INCLUDE_DEBUG_HELPERS "-I\"${ClickHouse_SOURCE_DIR}/base\" -I\"${MAGIC_ENUM_INCLUDE_DIR}\" -include \"${ClickHouse_SOURCE_DIR}/src/Core/iostream_debug_helpers.h\"")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}")
# CMake generator expressions do insane quoting when they encounter special characters like quotes, spaces, etc.
# Prefixing the option group with "SHELL:" forces CMake to use the original text.
set (INCLUDE_DEBUG_HELPERS "SHELL:-I\"${ClickHouse_SOURCE_DIR}/base\" -I\"${MAGIC_ENUM_INCLUDE_DIR}\" -include \"${ClickHouse_SOURCE_DIR}/src/Core/iostream_debug_helpers.h\"")
# Use a generator expression because we don't want to pollute CMAKE_CXX_FLAGS, which would interfere with the CMake check system.
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:${INCLUDE_DEBUG_HELPERS}>)
endif ()
if (COMPILER_GCC)
@ -385,9 +388,9 @@ dbms_target_link_libraries (
target_include_directories(clickhouse_common_io PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/Core/include") # uses some includes from core
dbms_target_include_directories(PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/Core/include")
target_include_directories(clickhouse_common_io BEFORE PUBLIC ${PDQSORT_INCLUDE_DIR})
target_include_directories(clickhouse_common_io SYSTEM BEFORE PUBLIC ${PDQSORT_INCLUDE_DIR})
dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${PDQSORT_INCLUDE_DIR})
target_include_directories(clickhouse_common_io BEFORE PUBLIC ${MINISELECT_INCLUDE_DIR})
target_include_directories(clickhouse_common_io SYSTEM BEFORE PUBLIC ${MINISELECT_INCLUDE_DIR})
dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${MINISELECT_INCLUDE_DIR})
if (ZSTD_LIBRARY)


@ -1,5 +1,7 @@
#pragma once
#include <string>
namespace Poco
{
namespace Util


@ -64,7 +64,7 @@ public:
/// Conversion of infinite values to integer is undefined.
throw Exception("Cannot convert infinite value to integer type", ErrorCodes::CANNOT_CONVERT_TYPE);
}
else if (x > std::numeric_limits<T>::max() || x < std::numeric_limits<T>::lowest())
else if (x > Float64(std::numeric_limits<T>::max()) || x < Float64(std::numeric_limits<T>::lowest()))
{
throw Exception("Cannot convert out of range floating point value to integer type", ErrorCodes::CANNOT_CONVERT_TYPE);
}


@ -999,6 +999,7 @@ void DiskS3::restoreFileOperations(const RestoreInformation & restore_informatio
if (metadata_disk->exists(to_path))
metadata_disk->removeRecursive(to_path);
createDirectories(directoryPath(to_path));
metadata_disk->moveDirectory(from_path, to_path);
}
}


@ -19,6 +19,7 @@ namespace DB
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ILLEGAL_COLUMN;
}
namespace
@ -52,6 +53,14 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * column = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
if (!column)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64.",
arguments[0].type->getName(),
1,
getName());
const auto & data = column->getData();
auto result_column_data = ColumnUInt8::create();


@ -17,6 +17,7 @@ namespace DB
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ILLEGAL_COLUMN;
}
namespace
@ -50,13 +51,21 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * column = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
if (!column)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[0].type->getName(),
1,
getName());
const auto & data = column->getData();
auto dst = ColumnVector<UInt8>::create();
auto & dst_data = dst->getData();
dst_data.resize(input_rows_count);
for (size_t row = 0 ; row < input_rows_count ; ++row)
for (size_t row = 0; row < input_rows_count; ++row)
{
UInt8 res = isPentagon(data[row]);
dst_data[row] = res;


@ -17,6 +17,7 @@ namespace DB
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ILLEGAL_COLUMN;
}
namespace
@ -50,13 +51,21 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * column = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
if (!column)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arguments[0].type->getName(),
1,
getName());
const auto & data = column->getData();
auto dst = ColumnVector<UInt8>::create();
auto & dst_data = dst->getData();
dst_data.resize(input_rows_count);
for (size_t row = 0 ; row < input_rows_count ; ++row)
for (size_t row = 0; row < input_rows_count; ++row)
{
UInt8 res = isResClassIII(data[row]);
dst_data[row] = res;


@ -0,0 +1,115 @@
#include "config_functions.h"
#if USE_H3
#include <Columns/ColumnArray.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <Common/typeid_cast.h>
#include <IO/WriteHelpers.h>
#include <base/range.h>
#include <constants.h>
#include <h3api.h>
namespace DB
{
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int ILLEGAL_COLUMN;
}
namespace
{
class FunctionH3ToCenterChild : public IFunction
{
public:
static constexpr auto name = "h3ToCenterChild";
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionH3ToCenterChild>(); }
std::string getName() const override { return name; }
size_t getNumberOfArguments() const override { return 2; }
bool useDefaultImplementationForConstants() const override { return true; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
{
const auto * arg = arguments[0].get();
if (!WhichDataType(arg).isUInt64())
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of argument {} of function {}. Must be UInt64",
arg->getName(), 1, getName());
arg = arguments[1].get();
if (!WhichDataType(arg).isUInt8())
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of argument {} of function {}. Must be UInt8",
arg->getName(), 2, getName());
return std::make_shared<DataTypeUInt64>();
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto * col_hindex = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
if (!col_hindex)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt64.",
arguments[0].type->getName(),
1,
getName());
const auto & data_hindex = col_hindex->getData();
const auto * col_resolution = checkAndGetColumn<ColumnUInt8>(arguments[1].column.get());
if (!col_resolution)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Illegal type {} of argument {} of function {}. Must be UInt8.",
arguments[1].type->getName(),
2,
getName());
const auto & data_resolution = col_resolution->getData();
auto dst = ColumnVector<UInt64>::create();
auto & dst_data = dst->getData();
dst_data.resize(input_rows_count);
for (size_t row = 0; row < input_rows_count; ++row)
{
if (data_resolution[row] > MAX_H3_RES)
throw Exception(
ErrorCodes::ARGUMENT_OUT_OF_BOUND,
"The argument 'resolution' ({}) of function {} is out of bounds because the maximum resolution in H3 library is {}",
toString(data_resolution[row]),
getName(),
toString(MAX_H3_RES));
UInt64 res = cellToCenterChild(data_hindex[row], data_resolution[row]);
dst_data[row] = res;
}
return dst;
}
};
}
void registerFunctionH3ToCenterChild(FunctionFactory & factory)
{
factory.registerFunction<FunctionH3ToCenterChild>();
}
}
#endif


@ -36,6 +36,7 @@ void registerFunctionH3KRing(FunctionFactory &);
void registerFunctionH3GetBaseCell(FunctionFactory &);
void registerFunctionH3ToParent(FunctionFactory &);
void registerFunctionH3ToChildren(FunctionFactory &);
void registerFunctionH3ToCenterChild(FunctionFactory &);
void registerFunctionH3IndexesAreNeighbors(FunctionFactory &);
void registerFunctionStringToH3(FunctionFactory &);
void registerFunctionH3ToString(FunctionFactory &);
@ -96,6 +97,7 @@ void registerFunctionsGeo(FunctionFactory & factory)
registerFunctionH3GetBaseCell(factory);
registerFunctionH3ToParent(factory);
registerFunctionH3ToChildren(factory);
registerFunctionH3ToCenterChild(factory);
registerFunctionH3IndexesAreNeighbors(factory);
registerFunctionStringToH3(factory);
registerFunctionH3ToString(factory);


@ -226,7 +226,8 @@ bool isStorageTouchedByMutations(
/// Interpreter must be alive when we use the result of the execute() method.
/// For some reason it may copy the context and give it to ExpressionTransform;
/// after that we will use the context from a destroyed stack frame in our stream.
InterpreterSelectQuery interpreter(select_query, context_copy, storage, metadata_snapshot, SelectQueryOptions().ignoreLimits());
InterpreterSelectQuery interpreter(
select_query, context_copy, storage, metadata_snapshot, SelectQueryOptions().ignoreLimits().ignoreProjections());
auto io = interpreter.execute();
PullingPipelineExecutor executor(io.pipeline);
@ -291,7 +292,7 @@ MutationsInterpreter::MutationsInterpreter(
, commands(std::move(commands_))
, context(Context::createCopy(context_))
, can_execute(can_execute_)
, select_limits(SelectQueryOptions().analyze(!can_execute).ignoreLimits())
, select_limits(SelectQueryOptions().analyze(!can_execute).ignoreLimits().ignoreProjections())
{
mutation_ast = prepare(!can_execute);
}
@ -732,7 +733,7 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run)
const ASTPtr select_query = prepareInterpreterSelectQuery(stages_copy, /* dry_run = */ true);
InterpreterSelectQuery interpreter{
select_query, context, storage, metadata_snapshot,
SelectQueryOptions().analyze(/* dry_run = */ false).ignoreLimits()};
SelectQueryOptions().analyze(/* dry_run = */ false).ignoreLimits().ignoreProjections()};
auto first_stage_header = interpreter.getSampleBlock();
QueryPlan plan;


@ -3,6 +3,7 @@
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Common/checkStackSize.h>
#include <IO/Operators.h>
namespace DB


@ -3,7 +3,6 @@
#include <set>
#include <unordered_map>
#include <vector>
#include <IO/Operators.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/IAST_fwd.h>


@ -7,8 +7,11 @@ add_library(clickhouse_parsers ${clickhouse_parsers_headers} ${clickhouse_parser
target_link_libraries(clickhouse_parsers PUBLIC clickhouse_common_io clickhouse_common_access)
if (USE_DEBUG_HELPERS)
set (INCLUDE_DEBUG_HELPERS "-I\"${ClickHouse_SOURCE_DIR}/base\" -include \"${ClickHouse_SOURCE_DIR}/src/Parsers/iostream_debug_helpers.h\"")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}")
# CMake generator expressions do insane quoting when they encounter special characters like quotes, spaces, etc.
# Prefixing the option group with "SHELL:" forces CMake to use the original text.
set (INCLUDE_DEBUG_HELPERS "SHELL:-I\"${ClickHouse_SOURCE_DIR}/base\" -include \"${ClickHouse_SOURCE_DIR}/src/Parsers/iostream_debug_helpers.h\"")
# Use a generator expression because we don't want to pollute CMAKE_CXX_FLAGS, which would interfere with the CMake check system.
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:${INCLUDE_DEBUG_HELPERS}>)
endif ()
if(ENABLE_EXAMPLES)


@ -40,7 +40,7 @@ add_executable(codegen_select_fuzzer ${FUZZER_SRCS})
set_source_files_properties("${PROTO_SRCS}" "out.cpp" PROPERTIES COMPILE_FLAGS "-Wno-reserved-identifier")
target_include_directories(codegen_select_fuzzer BEFORE PRIVATE "${Protobuf_INCLUDE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}")
target_include_directories(codegen_select_fuzzer BEFORE PRIVATE "${LibProtobufMutator_SOURCE_DIR}")
target_include_directories(codegen_select_fuzzer BEFORE PRIVATE "${LibProtobufMutator_SOURCE_DIR}/src")
target_include_directories(codegen_select_fuzzer SYSTEM BEFORE PRIVATE "${Protobuf_INCLUDE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}")
target_include_directories(codegen_select_fuzzer SYSTEM BEFORE PRIVATE "${LibProtobufMutator_SOURCE_DIR}")
target_include_directories(codegen_select_fuzzer SYSTEM BEFORE PRIVATE "${LibProtobufMutator_SOURCE_DIR}/src")
target_link_libraries(codegen_select_fuzzer PRIVATE protobuf-mutator ${Protobuf_LIBRARY} ${Protobuf_PROTOC_LIBRARY} dbms ${LIB_FUZZING_ENGINE})


@ -22,6 +22,7 @@
#include <Storages/MergeTree/MergeTreeDataSelectExecutor.h>
#include <Storages/MergeTree/MergeTreeReadPool.h>
#include <Storages/VirtualColumnUtils.h>
#include <IO/Operators.h>
#include <Interpreters/ExpressionAnalyzer.h>
#include <Interpreters/TreeRewriter.h>
#include <base/logger_useful.h>


@ -332,7 +332,7 @@ namespace
uint64_t doubleToUInt64(double d)
{
if (d >= std::numeric_limits<uint64_t>::max())
if (d >= double(std::numeric_limits<uint64_t>::max()))
return std::numeric_limits<uint64_t>::max();
return static_cast<uint64_t>(d);
}


@ -52,7 +52,6 @@ public:
}
private:
Block res_block;
Pipes pipes;
QueryProcessingStage::Enum to_stage;
};


@ -1310,7 +1310,7 @@ void IMergeTreeDataPart::remove() const
void IMergeTreeDataPart::projectionRemove(const String & parent_to, bool keep_shared_data) const
{
String to = parent_to + "/" + relative_path;
String to = fs::path(parent_to) / relative_path;
auto disk = volume->getDisk();
if (checksums.empty())
{
@ -1320,7 +1320,7 @@ void IMergeTreeDataPart::projectionRemove(const String & parent_to, bool keep_sh
"Cannot quickly remove directory {} by removing files; fallback to recursive removal. Reason: checksums.txt is missing",
fullPath(disk, to));
/// If the part is not completely written, we cannot use fast path by listing files.
disk->removeSharedRecursive(to + "/", keep_shared_data);
disk->removeSharedRecursive(fs::path(to) / "", keep_shared_data);
}
else
{
@ -1333,15 +1333,15 @@ void IMergeTreeDataPart::projectionRemove(const String & parent_to, bool keep_sh
# pragma GCC diagnostic ignored "-Wunused-variable"
#endif
for (const auto & [file, _] : checksums.files)
disk->removeSharedFile(to + "/" + file, keep_shared_data);
disk->removeSharedFile(fs::path(to) / file, keep_shared_data);
#if !defined(__clang__)
# pragma GCC diagnostic pop
#endif
for (const auto & file : {"checksums.txt", "columns.txt"})
disk->removeSharedFile(to + "/" + file, keep_shared_data);
disk->removeSharedFileIfExists(to + "/" + DEFAULT_COMPRESSION_CODEC_FILE_NAME, keep_shared_data);
disk->removeSharedFileIfExists(to + "/" + DELETE_ON_DESTROY_MARKER_FILE_NAME, keep_shared_data);
disk->removeSharedFile(fs::path(to) / file, keep_shared_data);
disk->removeSharedFileIfExists(fs::path(to) / DEFAULT_COMPRESSION_CODEC_FILE_NAME, keep_shared_data);
disk->removeSharedFileIfExists(fs::path(to) / DELETE_ON_DESTROY_MARKER_FILE_NAME, keep_shared_data);
disk->removeSharedRecursive(to, keep_shared_data);
}
@ -1351,7 +1351,7 @@ void IMergeTreeDataPart::projectionRemove(const String & parent_to, bool keep_sh
LOG_ERROR(storage.log, "Cannot quickly remove directory {} by removing files; fallback to recursive removal. Reason: {}", fullPath(disk, to), getCurrentExceptionMessage(false));
disk->removeSharedRecursive(to + "/", keep_shared_data);
disk->removeSharedRecursive(fs::path(to) / "", keep_shared_data);
}
}
}


@ -120,7 +120,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
ctx->disk = global_ctx->space_reservation->getDisk();
String local_part_path = global_ctx->data->relative_data_path;
String local_tmp_part_basename = local_tmp_prefix + global_ctx->future_part->name + (global_ctx->parent_part ? ".proj" : "");
String local_tmp_part_basename = local_tmp_prefix + global_ctx->future_part->name + local_tmp_suffix;
String local_new_part_tmp_path = local_part_path + local_tmp_part_basename + "/";
if (ctx->disk->exists(local_new_part_tmp_path))


@ -11,6 +11,7 @@
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <Processors/Executors/PullingPipelineExecutor.h>
#include <Compression/CompressedReadBufferFromFile.h>
#include <Common/filesystemHelpers.h>
#include <memory>
#include <list>


@ -21,6 +21,7 @@
#include <IO/Operators.h>
#include <IO/ReadBufferFromMemory.h>
#include <IO/WriteBufferFromString.h>
#include <Interpreters/Aggregator.h>
#include <Interpreters/ExpressionAnalyzer.h>
#include <Interpreters/PartLog.h>
#include <Interpreters/TreeRewriter.h>
@ -340,6 +341,16 @@ StoragePolicyPtr MergeTreeData::getStoragePolicy() const
return getContext()->getStoragePolicy(getSettings()->storage_policy);
}
bool MergeTreeData::supportsFinal() const
{
return merging_params.mode == MergingParams::Collapsing
|| merging_params.mode == MergingParams::Summing
|| merging_params.mode == MergingParams::Aggregating
|| merging_params.mode == MergingParams::Replacing
|| merging_params.mode == MergingParams::Graphite
|| merging_params.mode == MergingParams::VersionedCollapsing;
}
static void checkKeyExpression(const ExpressionActions & expr, const Block & sample_block, const String & key_name, bool allow_nullable_key)
{
if (expr.hasArrayJoin())
@ -1115,13 +1126,18 @@ void MergeTreeData::loadDataPartsFromDisk(
if (suspicious_broken_parts > settings->max_suspicious_broken_parts && !skip_sanity_checks)
throw Exception(ErrorCodes::TOO_MANY_UNEXPECTED_DATA_PARTS,
"Suspiciously many ({}) broken parts to remove.",
suspicious_broken_parts);
"Suspiciously many ({} parts, {} in total) broken parts to remove while maximum allowed broken parts count is {}. You can change the maximum value "
"with merge tree setting 'max_suspicious_broken_parts' in <merge_tree> configuration section or in table settings in .sql file "
"(don't forget to return setting back to default value)",
suspicious_broken_parts, formatReadableSizeWithBinarySuffix(suspicious_broken_parts_bytes), settings->max_suspicious_broken_parts);
if (suspicious_broken_parts_bytes > settings->max_suspicious_broken_parts_bytes && !skip_sanity_checks)
throw Exception(ErrorCodes::TOO_MANY_UNEXPECTED_DATA_PARTS,
"Suspiciously big size ({}) of all broken parts to remove.",
formatReadableSizeWithBinarySuffix(suspicious_broken_parts_bytes));
"Suspiciously big size ({} parts, {} in total) of all broken parts to remove while maximum allowed broken parts size is {}. "
"You can change the maximum value with merge tree setting 'max_suspicious_broken_parts_bytes' in <merge_tree> configuration "
"section or in table settings in .sql file (don't forget to return setting back to default value)",
suspicious_broken_parts, formatReadableSizeWithBinarySuffix(suspicious_broken_parts_bytes),
formatReadableSizeWithBinarySuffix(settings->max_suspicious_broken_parts_bytes));
}


@ -24,7 +24,6 @@
#include <Storages/MergeTree/PinnedPartUUIDs.h>
#include <Interpreters/PartLog.h>
#include <Disks/StoragePolicy.h>
#include <Interpreters/Aggregator.h>
#include <Storages/extractKeyExpressionList.h>
#include <Storages/PartitionCommands.h>
#include <Storages/MergeTree/ZeroCopyLock.h>
@ -411,15 +410,7 @@ public:
bool supportsPrewhere() const override { return true; }
bool supportsFinal() const override
{
return merging_params.mode == MergingParams::Collapsing
|| merging_params.mode == MergingParams::Summing
|| merging_params.mode == MergingParams::Aggregating
|| merging_params.mode == MergingParams::Replacing
|| merging_params.mode == MergingParams::Graphite
|| merging_params.mode == MergingParams::VersionedCollapsing;
}
bool supportsFinal() const override;
bool supportsSubcolumns() const override { return true; }
@ -450,7 +441,7 @@ public:
static void validateDetachedPartName(const String & name);
void dropDetached(const ASTPtr & partition, bool part, ContextPtr context);
void dropDetached(const ASTPtr & partition, bool part, ContextPtr local_context);
MutableDataPartsVector tryLoadPartsToAttach(const ASTPtr & partition, bool attach_part,
ContextPtr context, PartsTemporaryRename & renamed_parts);


@ -1,5 +1,6 @@
#include <Storages/MergeTree/MergeTreeIndexFullText.h>
#include <Columns/ColumnArray.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeArray.h>
#include <IO/WriteHelpers.h>


@ -1,5 +1,6 @@
#include <Storages/MergeTree/IMergeTreeReader.h>
#include <Columns/FilterDescription.h>
#include <Columns/ColumnConst.h>
#include <Columns/ColumnsCommon.h>
#include <base/range.h>
#include <Interpreters/castColumn.h>


@ -650,7 +650,6 @@ public:
".tmp_proj");
next_level_parts.push_back(executeHere(tmp_part_merge_task));
next_level_parts.back()->is_temp = true;
}
@ -1081,9 +1080,7 @@ private:
ctx->disk->createDirectories(destination);
for (auto p_it = ctx->disk->iterateDirectory(it->path()); p_it->isValid(); p_it->next())
{
String p_destination = destination + "/";
String p_file_name = p_it->name();
p_destination += p_it->name();
String p_destination = fs::path(destination) / p_it->name();
ctx->disk->createHardLink(p_it->path(), p_destination);
}
}


@ -141,7 +141,6 @@ private:
std::atomic<bool> shutdown_called {false};
private:
void loadMutations();
/// Load and initialize deduplication logs. Even if deduplication setting


@ -3,10 +3,12 @@
#include <Storages/MergeTree/MergeTreeData.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnNullable.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypesDecimal.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <DataTypes/DataTypeNullable.h>
#include <Storages/VirtualColumnUtils.h>
#include <Parsers/queryToString.h>
#include <Access/ContextAccess.h>


@ -63,11 +63,11 @@ def generate_values(date_str, count, sign=1):
return ",".join(["('{}',{},'{}',{})".format(x, y, z, 0) for x, y, z in data])
def create_table(node, table_name, attach=False, replicated=False):
node.query("CREATE DATABASE IF NOT EXISTS s3 ENGINE = Ordinary")
def create_table(node, table_name, attach=False, replicated=False, db_atomic=False, uuid=""):
node.query("CREATE DATABASE IF NOT EXISTS s3 ENGINE = {engine}".format(engine="Atomic" if db_atomic else "Ordinary"))
create_table_statement = """
{create} TABLE s3.{table_name} {on_cluster} (
{create} TABLE s3.{table_name} {uuid} {on_cluster} (
dt Date,
id Int64,
data String,
@ -81,9 +81,10 @@ def create_table(node, table_name, attach=False, replicated=False):
old_parts_lifetime=600,
index_granularity=512
""".format(create="ATTACH" if attach else "CREATE",
table_name=table_name,
on_cluster="ON CLUSTER '{}'".format(node.name) if replicated else "",
engine="ReplicatedMergeTree('/clickhouse/tables/{cluster}/test', '{replica}')" if replicated else "MergeTree()")
table_name=table_name,
uuid="UUID '{uuid}'".format(uuid=uuid) if db_atomic and uuid else "",
on_cluster="ON CLUSTER '{}'".format(node.name) if replicated else "",
engine="ReplicatedMergeTree('/clickhouse/tables/{cluster}/test', '{replica}')" if replicated else "MergeTree()")
node.query(create_table_statement)
@ -124,6 +125,13 @@ def get_revision_counter(node, backup_number):
['bash', '-c', 'cat /var/lib/clickhouse/disks/s3/shadow/{}/revision.txt'.format(backup_number)], user='root'))
def get_table_uuid(node, db_atomic, table):
uuid = ""
if db_atomic:
uuid = node.query("SELECT uuid FROM system.tables WHERE database='s3' AND table='{}' FORMAT TabSeparated".format(table)).strip()
return uuid
@pytest.fixture(autouse=True)
def drop_table(cluster):
yield
@ -146,10 +154,13 @@ def drop_table(cluster):
@pytest.mark.parametrize(
"replicated", [False, True]
)
def test_full_restore(cluster, replicated):
@pytest.mark.parametrize(
"db_atomic", [False, True]
)
def test_full_restore(cluster, replicated, db_atomic):
node = cluster.instances["node"]
create_table(node, "test", attach=False, replicated=replicated)
create_table(node, "test", attach=False, replicated=replicated, db_atomic=db_atomic)
node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-04', 4096, -1)))
@ -166,10 +177,14 @@ def test_full_restore(cluster, replicated):
assert node.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0)
def test_restore_another_bucket_path(cluster):
@pytest.mark.parametrize(
"db_atomic", [False, True]
)
def test_restore_another_bucket_path(cluster, db_atomic):
node = cluster.instances["node"]
create_table(node, "test")
create_table(node, "test", db_atomic=db_atomic)
uuid = get_table_uuid(node, db_atomic, "test")
node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-04', 4096, -1)))
@ -186,7 +201,7 @@ def test_restore_another_bucket_path(cluster):
create_restore_file(node_another_bucket, bucket="root")
node_another_bucket.query("SYSTEM RESTART DISK s3")
create_table(node_another_bucket, "test", attach=True)
create_table(node_another_bucket, "test", attach=True, db_atomic=db_atomic, uuid=uuid)
assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 4)
assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0)
@ -195,16 +210,20 @@ def test_restore_another_bucket_path(cluster):
create_restore_file(node_another_bucket_path, bucket="root2", path="data")
node_another_bucket_path.query("SYSTEM RESTART DISK s3")
create_table(node_another_bucket_path, "test", attach=True)
create_table(node_another_bucket_path, "test", attach=True, db_atomic=db_atomic, uuid=uuid)
assert node_another_bucket_path.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 4)
assert node_another_bucket_path.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0)
def test_restore_different_revisions(cluster):
@pytest.mark.parametrize(
"db_atomic", [False, True]
)
def test_restore_different_revisions(cluster, db_atomic):
node = cluster.instances["node"]
create_table(node, "test")
create_table(node, "test", db_atomic=db_atomic)
uuid = get_table_uuid(node, db_atomic, "test")
node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-04', 4096, -1)))
@ -233,7 +252,7 @@ def test_restore_different_revisions(cluster):
# Restore to revision 1 (2 parts).
create_restore_file(node_another_bucket, revision=revision1, bucket="root")
node_another_bucket.query("SYSTEM RESTART DISK s3")
create_table(node_another_bucket, "test", attach=True)
create_table(node_another_bucket, "test", attach=True, db_atomic=db_atomic, uuid=uuid)
assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 2)
assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0)
@ -260,10 +279,14 @@ def test_restore_different_revisions(cluster):
assert node_another_bucket.query("SELECT count(*) from system.parts where table = 'test'") == '5\n'
def test_restore_mutations(cluster):
@pytest.mark.parametrize(
"db_atomic", [False, True]
)
def test_restore_mutations(cluster, db_atomic):
node = cluster.instances["node"]
create_table(node, "test")
create_table(node, "test", db_atomic=db_atomic)
uuid = get_table_uuid(node, db_atomic, "test")
node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 4096, -1)))
@ -281,7 +304,7 @@ def test_restore_mutations(cluster):
# Restore to revision before mutation.
create_restore_file(node_another_bucket, revision=revision_before_mutation, bucket="root")
node_another_bucket.query("SYSTEM RESTART DISK s3")
create_table(node_another_bucket, "test", attach=True)
create_table(node_another_bucket, "test", attach=True, db_atomic=db_atomic, uuid=uuid)
assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 2)
assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0)
@ -315,10 +338,14 @@ def test_restore_mutations(cluster):
assert node_another_bucket.query("SELECT sum(counter) FROM s3.test WHERE id > 0 FORMAT Values") == "({})".format(4096)
def test_migrate_to_restorable_schema(cluster):
@pytest.mark.parametrize(
"db_atomic", [False, True]
)
def test_migrate_to_restorable_schema(cluster, db_atomic):
node = cluster.instances["node_not_restorable"]
create_table(node, "test")
create_table(node, "test", db_atomic=db_atomic)
uuid = get_table_uuid(node, db_atomic, "test")
node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-04', 4096, -1)))
@ -341,7 +368,7 @@ def test_migrate_to_restorable_schema(cluster):
# Restore to revision before mutation.
create_restore_file(node_another_bucket, revision=revision, bucket="root", path="another_data")
node_another_bucket.query("SYSTEM RESTART DISK s3")
create_table(node_another_bucket, "test", attach=True)
create_table(node_another_bucket, "test", attach=True, db_atomic=db_atomic, uuid=uuid)
assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 6)
assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0)
@ -350,10 +377,14 @@ def test_migrate_to_restorable_schema(cluster):
@pytest.mark.parametrize(
"replicated", [False, True]
)
def test_restore_to_detached(cluster, replicated):
@pytest.mark.parametrize(
"db_atomic", [False, True]
)
def test_restore_to_detached(cluster, replicated, db_atomic):
node = cluster.instances["node"]
create_table(node, "test", attach=False, replicated=replicated)
create_table(node, "test", attach=False, replicated=replicated, db_atomic=db_atomic)
uuid = get_table_uuid(node, db_atomic, "test")
node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-04', 4096, -1)))
@ -374,7 +405,7 @@ def test_restore_to_detached(cluster, replicated):
create_restore_file(node_another_bucket, revision=revision, bucket="root", path="data", detached=True)
node_another_bucket.query("SYSTEM RESTART DISK s3")
create_table(node_another_bucket, "test", replicated=replicated)
create_table(node_another_bucket, "test", replicated=replicated, db_atomic=db_atomic, uuid=uuid)
assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(0)
@ -393,3 +424,35 @@ def test_restore_to_detached(cluster, replicated):
assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 5)
assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0)
assert node_another_bucket.query("SELECT sum(counter) FROM s3.test FORMAT Values") == "({})".format(4096 * 5)
@pytest.mark.parametrize(
"replicated", [False, True]
)
@pytest.mark.parametrize(
"db_atomic", [False, True]
)
def test_restore_without_detached(cluster, replicated, db_atomic):
node = cluster.instances["node"]
create_table(node, "test", attach=False, replicated=replicated, db_atomic=db_atomic)
uuid = get_table_uuid(node, db_atomic, "test")
node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 1)))
assert node.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(1)
node.query("ALTER TABLE s3.test FREEZE")
revision = get_revision_counter(node, 1)
node_another_bucket = cluster.instances["node_another_bucket"]
create_restore_file(node_another_bucket, revision=revision, bucket="root", path="data", detached=True)
node_another_bucket.query("SYSTEM RESTART DISK s3")
create_table(node_another_bucket, "test", replicated=replicated, db_atomic=db_atomic, uuid=uuid)
assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(0)
node_another_bucket.query("ALTER TABLE s3.test ATTACH PARTITION '2020-01-03'")
assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(1)


@ -0,0 +1,12 @@
<?xml version="1.0"?>
<clickhouse>
<logger>
<log>/var/log/clickhouse-server/log.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<stderr>/var/log/clickhouse-server/stderr.log</stderr>
<stdout>/var/log/clickhouse-server/stdout.log</stdout>
</logger>
<path>/var/lib/clickhouse/</path>
<users_config>users.xml</users_config>
</clickhouse>


@ -0,0 +1,11 @@
<?xml version="1.0"?>
<clickhouse>
<profiles>
<default>
<!-- Lower block size to test multi-level merge of projection parts -->
<max_block_size>1000</max_block_size>
<min_insert_block_size_bytes>1000</min_insert_block_size_bytes>
<min_insert_block_size_rows>1000</min_insert_block_size_rows>
</default>
</profiles>
</clickhouse>


@ -0,0 +1,88 @@
import time
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
instance_test_mutations = cluster.add_instance(
"test_mutations_with_projection",
main_configs=["configs/config.xml"],
user_configs=["configs/users.xml"],
)
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
instance_test_mutations.query(
"""
CREATE TABLE video_log
(
`datetime` DateTime, -- 20,000 records per second
`user_id` UInt64, -- Cardinality == 100,000,000
`device_id` UInt64, -- Cardinality == 200,000,000
`video_id` UInt64, -- Cardinality == 100,00000
`domain` LowCardinality(String), -- Cardinality == 100
`bytes` UInt64, -- Ranging from 128 to 1152
`duration` UInt64, -- Ranging from 100 to 400
PROJECTION p_norm (SELECT datetime, device_id, bytes, duration ORDER BY device_id),
PROJECTION p_agg (SELECT toStartOfHour(datetime) AS hour, domain, sum(bytes), avg(duration) GROUP BY hour, domain)
)
ENGINE = MergeTree
PARTITION BY toDate(datetime) -- Daily partitioning
ORDER BY (user_id, device_id, video_id) -- Can only favor one column here
SETTINGS index_granularity = 1000;
"""
)
instance_test_mutations.query(
"""CREATE TABLE rng (`user_id_raw` UInt64, `device_id_raw` UInt64, `video_id_raw` UInt64, `domain_raw` UInt64, `bytes_raw` UInt64, `duration_raw` UInt64) ENGINE = GenerateRandom(1024);"""
)
instance_test_mutations.query(
"""INSERT INTO video_log SELECT toUnixTimestamp(toDateTime(today())) + (rowNumberInAllBlocks() / 20000), user_id_raw % 100000000 AS user_id, device_id_raw % 200000000 AS device_id, video_id_raw % 100000000 AS video_id, domain_raw % 100, (bytes_raw % 1024) + 128, (duration_raw % 300) + 100 FROM rng LIMIT 500000;"""
)
instance_test_mutations.query("""OPTIMIZE TABLE video_log FINAL;""")
yield cluster
finally:
cluster.shutdown()
def test_mutations_with_multi_level_merge_of_projections(started_cluster):
try:
instance_test_mutations.query(
"""ALTER TABLE video_log UPDATE bytes = bytes + 10086 WHERE 1;"""
)
def count_and_changed():
return instance_test_mutations.query(
"SELECT count(), countIf(bytes > 10000) FROM video_log SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV"
).splitlines()
all_done = False
for wait_times_for_mutation in range(
100
): # wait for replication 80 seconds max
time.sleep(0.8)
if count_and_changed() == ["500000,500000"]:
all_done = True
break
print(
instance_test_mutations.query(
"SELECT mutation_id, command, parts_to_do, is_done, latest_failed_part, latest_fail_reason, parts_to_do_names FROM system.mutations WHERE table = 'video_log' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames"
)
)
assert (count_and_changed(), all_done) == (["500000,500000"], True)
assert instance_test_mutations.query(
f"SELECT DISTINCT arraySort(projections) FROM system.parts WHERE table = 'video_log' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVRaw"
).splitlines() == ["['p_agg','p_norm']"]
finally:
instance_test_mutations.query(f"""DROP TABLE video_log""")


@ -0,0 +1,7 @@
DROP TABLE IF EXISTS t;
CREATE TABLE t (`key` UInt32, `created_at` Date, `value` UInt32, PROJECTION xxx (SELECT key, created_at, sum(value) GROUP BY key, created_at)) ENGINE = MergeTree PARTITION BY toYYYYMM(created_at) ORDER BY key;
INSERT INTO t SELECT 1 AS key, today() + (number % 30), number FROM numbers(1000);
ALTER TABLE t UPDATE value = 0 WHERE (value > 0) AND (created_at >= '2021-12-21') SETTINGS allow_experimental_projection_optimization = 1;

View File

@ -0,0 +1,120 @@
581496515558637567
585996266895310847
590499385486344191
595002924984172543
599506517095350271
604010115783196671
608513715293126655
613017314905817087
617520914531352575
622024514158493695
626528113785835519
631031713413202431
635535313040572479
640038912667942919
644542512295313408
586018257127866367
590521375718899711
595024915216728063
599528507327905791
604032106015752191
608535705525682175
613039305138372607
617542904763908095
622046504391049215
626550104018391039
631053703645757951
635557303273127999
640060902900498439
644564502527868928
590524674253783039
595028213751611391
599531805862789119
604035404550635519
608539004060565503
613042603673255935
617546203298791423
622049802925932543
626553402553274367
631057002180641279
635560601808011327
640064201435381767
644567801062752256
595028557348995071
599532149460172799
604035748148019199
608539347657949183
613042947270639615
617546546896175103
622050146523316223
626553746150658047
631057345778024959
635560945405395007
640064545032765447
644568144660135936
599532200999780351
604035799687626751
608539399197556735
613042998810247167
617546598435782655
622050198062923775
626553797690265599
631057397317632511
635560996945002559
640064596572372999
644568196199743488
604035805056335871
608539404566265855
613043004178956287
617546603804491775
622050203431632895
626553803058974719
631057402686341631
635561002313711679
640064601941082119
644568201568452608
608539405371572223
613043004984262655
617546604609798143
622050204236939263
626553803864281087
631057403491647999
635561003119018047
640064602746388487
644568202373758976
612640339485786111
617143939111321599
621647538738462719
626151138365804543
630654737993171455
635158337620541503
639661937247911943
644165536875282432
617143939115515903
621647538742657023
626151138369998847
630654737997365759
635158337624735807
639661937252106247
644165536879476736
621647538742657023
626151138369998847
630654737997365759
635158337624735807
639661937252106247
644165536879476736
626151138369998847
630654737997365759
635158337624735807
639661937252106247
644165536879476736
630654737997365759
635158337624735807
639661937252106247
644165536879476736
635158337624735807
639661937252106247
644165536879476736
639661937252106247
644165536879476736
644165536879476736

View File

@ -0,0 +1,135 @@
-- Tags: no-fasttest
DROP TABLE IF EXISTS h3_indexes;
-- Note: the id column exists only to keep the test results sorted.
-- Ordering by h3_index or res alone is not deterministic, as the same h3_index is tested at several resolutions.
CREATE TABLE h3_indexes (id UInt8, h3_index UInt64, res UInt8) ENGINE = Memory;
-- Test cases taken from fixture: https://github.com/uber/h3/blob/master/src/apps/testapps/testCellToCenterChild.c
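-- h3ToCenterChild(index, resolution) is expected to return the index of the center child cell of the given cell at the given (finer) resolution.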
INSERT INTO h3_indexes VALUES (1,577023702256844799,1);
INSERT INTO h3_indexes VALUES (2,577023702256844799,2);
INSERT INTO h3_indexes VALUES (3,577023702256844799,3);
INSERT INTO h3_indexes VALUES (4,577023702256844799,4);
INSERT INTO h3_indexes VALUES (5,577023702256844799,5);
INSERT INTO h3_indexes VALUES (6,577023702256844799,6);
INSERT INTO h3_indexes VALUES (7,577023702256844799,7);
INSERT INTO h3_indexes VALUES (8,577023702256844799,8);
INSERT INTO h3_indexes VALUES (9,577023702256844799,9);
INSERT INTO h3_indexes VALUES (10,577023702256844799,10);
INSERT INTO h3_indexes VALUES (11,577023702256844799,11);
INSERT INTO h3_indexes VALUES (12,577023702256844799,12);
INSERT INTO h3_indexes VALUES (13,577023702256844799,13);
INSERT INTO h3_indexes VALUES (14,577023702256844799,14);
INSERT INTO h3_indexes VALUES (15,577023702256844799,15);
INSERT INTO h3_indexes VALUES (16,581518505791193087,2);
INSERT INTO h3_indexes VALUES (17,581518505791193087,3);
INSERT INTO h3_indexes VALUES (18,581518505791193087,4);
INSERT INTO h3_indexes VALUES (19,581518505791193087,5);
INSERT INTO h3_indexes VALUES (20,581518505791193087,6);
INSERT INTO h3_indexes VALUES (21,581518505791193087,7);
INSERT INTO h3_indexes VALUES (22,581518505791193087,8);
INSERT INTO h3_indexes VALUES (23,581518505791193087,9);
INSERT INTO h3_indexes VALUES (24,581518505791193087,10);
INSERT INTO h3_indexes VALUES (25,581518505791193087,11);
INSERT INTO h3_indexes VALUES (26,581518505791193087,12);
INSERT INTO h3_indexes VALUES (27,581518505791193087,13);
INSERT INTO h3_indexes VALUES (28,581518505791193087,14);
INSERT INTO h3_indexes VALUES (29,581518505791193087,15);
INSERT INTO h3_indexes VALUES (30,586021555662749695,3);
INSERT INTO h3_indexes VALUES (31,586021555662749695,4);
INSERT INTO h3_indexes VALUES (32,586021555662749695,5);
INSERT INTO h3_indexes VALUES (33,586021555662749695,6);
INSERT INTO h3_indexes VALUES (34,586021555662749695,7);
INSERT INTO h3_indexes VALUES (35,586021555662749695,8);
INSERT INTO h3_indexes VALUES (36,586021555662749695,9);
INSERT INTO h3_indexes VALUES (37,586021555662749695,10);
INSERT INTO h3_indexes VALUES (38,586021555662749695,11);
INSERT INTO h3_indexes VALUES (39,586021555662749695,12);
INSERT INTO h3_indexes VALUES (40,586021555662749695,13);
INSERT INTO h3_indexes VALUES (41,586021555662749695,14);
INSERT INTO h3_indexes VALUES (42,586021555662749695,15);
INSERT INTO h3_indexes VALUES (43,590525017851166719,4);
INSERT INTO h3_indexes VALUES (44,590525017851166719,5);
INSERT INTO h3_indexes VALUES (45,590525017851166719,6);
INSERT INTO h3_indexes VALUES (46,590525017851166719,7);
INSERT INTO h3_indexes VALUES (47,590525017851166719,8);
INSERT INTO h3_indexes VALUES (48,590525017851166719,9);
INSERT INTO h3_indexes VALUES (49,590525017851166719,10);
INSERT INTO h3_indexes VALUES (50,590525017851166719,11);
INSERT INTO h3_indexes VALUES (51,590525017851166719,12);
INSERT INTO h3_indexes VALUES (52,590525017851166719,13);
INSERT INTO h3_indexes VALUES (53,590525017851166719,14);
INSERT INTO h3_indexes VALUES (54,590525017851166719,15);
INSERT INTO h3_indexes VALUES (55,595028608888602623,5);
INSERT INTO h3_indexes VALUES (56,595028608888602623,6);
INSERT INTO h3_indexes VALUES (57,595028608888602623,7);
INSERT INTO h3_indexes VALUES (58,595028608888602623,8);
INSERT INTO h3_indexes VALUES (59,595028608888602623,9);
INSERT INTO h3_indexes VALUES (60,595028608888602623,10);
INSERT INTO h3_indexes VALUES (61,595028608888602623,11);
INSERT INTO h3_indexes VALUES (62,595028608888602623,12);
INSERT INTO h3_indexes VALUES (63,595028608888602623,13);
INSERT INTO h3_indexes VALUES (64,595028608888602623,14);
INSERT INTO h3_indexes VALUES (65,595028608888602623,15);
INSERT INTO h3_indexes VALUES (66,599532206368489471,6);
INSERT INTO h3_indexes VALUES (67,599532206368489471,7);
INSERT INTO h3_indexes VALUES (68,599532206368489471,8);
INSERT INTO h3_indexes VALUES (69,599532206368489471,9);
INSERT INTO h3_indexes VALUES (70,599532206368489471,10);
INSERT INTO h3_indexes VALUES (71,599532206368489471,11);
INSERT INTO h3_indexes VALUES (72,599532206368489471,12);
INSERT INTO h3_indexes VALUES (73,599532206368489471,13);
INSERT INTO h3_indexes VALUES (74,599532206368489471,14);
INSERT INTO h3_indexes VALUES (75,599532206368489471,15);
INSERT INTO h3_indexes VALUES (76,604035805861642239,7);
INSERT INTO h3_indexes VALUES (77,604035805861642239,8);
INSERT INTO h3_indexes VALUES (78,604035805861642239,9);
INSERT INTO h3_indexes VALUES (79,604035805861642239,10);
INSERT INTO h3_indexes VALUES (80,604035805861642239,11);
INSERT INTO h3_indexes VALUES (81,604035805861642239,12);
INSERT INTO h3_indexes VALUES (82,604035805861642239,13);
INSERT INTO h3_indexes VALUES (83,604035805861642239,14);
INSERT INTO h3_indexes VALUES (84,604035805861642239,15);
INSERT INTO h3_indexes VALUES (85,608136739873095679,8);
INSERT INTO h3_indexes VALUES (86,608136739873095679,9);
INSERT INTO h3_indexes VALUES (87,608136739873095679,10);
INSERT INTO h3_indexes VALUES (88,608136739873095679,11);
INSERT INTO h3_indexes VALUES (89,608136739873095679,12);
INSERT INTO h3_indexes VALUES (90,608136739873095679,13);
INSERT INTO h3_indexes VALUES (91,608136739873095679,14);
INSERT INTO h3_indexes VALUES (92,608136739873095679,15);
INSERT INTO h3_indexes VALUES (93,612640339489980415,9);
INSERT INTO h3_indexes VALUES (94,612640339489980415,10);
INSERT INTO h3_indexes VALUES (95,612640339489980415,11);
INSERT INTO h3_indexes VALUES (96,612640339489980415,12);
INSERT INTO h3_indexes VALUES (97,612640339489980415,13);
INSERT INTO h3_indexes VALUES (98,612640339489980415,14);
INSERT INTO h3_indexes VALUES (99,612640339489980415,15);
INSERT INTO h3_indexes VALUES (100,617143939115515903,10);
INSERT INTO h3_indexes VALUES (101,617143939115515903,11);
INSERT INTO h3_indexes VALUES (102,617143939115515903,12);
INSERT INTO h3_indexes VALUES (103,617143939115515903,13);
INSERT INTO h3_indexes VALUES (104,617143939115515903,14);
INSERT INTO h3_indexes VALUES (105,617143939115515903,15);
INSERT INTO h3_indexes VALUES (106,621647538742657023,11);
INSERT INTO h3_indexes VALUES (107,621647538742657023,12);
INSERT INTO h3_indexes VALUES (108,621647538742657023,13);
INSERT INTO h3_indexes VALUES (109,621647538742657023,14);
INSERT INTO h3_indexes VALUES (110,621647538742657023,15);
INSERT INTO h3_indexes VALUES (111,626151138369998847,12);
INSERT INTO h3_indexes VALUES (112,626151138369998847,13);
INSERT INTO h3_indexes VALUES (113,626151138369998847,14);
INSERT INTO h3_indexes VALUES (114,626151138369998847,15);
INSERT INTO h3_indexes VALUES (115,630654737997365759,13);
INSERT INTO h3_indexes VALUES (116,630654737997365759,14);
INSERT INTO h3_indexes VALUES (117,630654737997365759,15);
INSERT INTO h3_indexes VALUES (118,635158337624735807,14);
INSERT INTO h3_indexes VALUES (119,635158337624735807,15);
INSERT INTO h3_indexes VALUES (120,639661937252106247,15);
SELECT h3ToCenterChild(h3_index,res) FROM h3_indexes ORDER BY id;
DROP TABLE h3_indexes;

Binary file not shown. (new image, 27 KiB)

Binary file not shown. (new image, 13 KiB)

View File

@ -99,6 +99,19 @@
        {{ _('Software Engineer') }}
    </p>
</div>
<div class="col-xl-3 col-lg-4 col-md-6 mb-3 mb-md-8 text-center">
    <a href="https://www.linkedin.com/in/ryadh/" target="_blank" class="photo-frame mx-auto" rel="external nofollow">
        <img src="/images/photos/ryadh-dahimene.jpg">
    </a>
    <h3 class="display-4">
        {{ _('Ryadh Dahimene') }}
    </h3>
    <p class="font-lg text-muted mb-0 mx-auto w-75">
        {{ _('Consulting Architect') }}
    </p>
</div>
<div class="col-xl-3 col-lg-4 col-md-6 mb-3 mb-md-8 text-center">
@ -203,6 +216,19 @@
        {{ _('Account Executive, AMER') }}
    </p>
</div>
<div class="col-xl-3 col-lg-4 col-md-6 mb-3 mb-md-8 text-center">
    <a href="https://www.linkedin.com/in/yossi-kahlon-a7a5693/" target="_blank" class="photo-frame mx-auto" rel="external nofollow">
        <img src="/images/photos/yossi-kahlon.jpg">
    </a>
    <h3 class="display-4">
        {{ _('Yossi Kahlon') }}
    </h3>
    <p class="font-lg text-muted mb-0 mx-auto w-75">
        {{ _('Director, Engineering - Control Plane') }}
    </p>
</div>
<div class="col-xl-3 col-lg-4 col-md-6 mb-3 mb-md-8 text-center">