Merge branch 'master' into add-test-41727

Alexey Milovidov 2023-07-17 01:35:00 +03:00 committed by GitHub
commit b6090600e2
152 changed files with 2499 additions and 743 deletions


@ -3903,6 +3903,216 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAnalyzerAsan0:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, analyzer)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAnalyzerAsan1:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, analyzer)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAnalyzerAsan2:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, analyzer)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAnalyzerAsan3:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, analyzer)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAnalyzerAsan4:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, analyzer)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=4
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAnalyzerAsan5:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, analyzer)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=5
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan0:
needs: [BuilderDebTsan]
runs-on: [self-hosted, stress-tester]


@ -23,7 +23,7 @@ curl https://clickhouse.com/ | sh
## Upcoming Events
* [**v23.6 Release Webinar**](https://clickhouse.com/company/events/v23-6-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-06) - Jun 29 - 23.6 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**v23.7 Release Webinar**](https://clickhouse.com/company/events/v23-7-community-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-07) - Jul 27 - 23.7 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18
* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19
* [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20
@ -34,13 +34,13 @@ Also, keep an eye out for upcoming meetups around the world. Somewhere else you
## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
* **Recording available**: [**v23.4 Release Webinar**](https://www.youtube.com/watch?v=4rrf6bk_mOg) Faster Parquet Reading, Asynchonous Connections to Reoplicas, Trailing Comma before FROM, extractKeyValuePairs, integrations updates, and so much more! Watch it now!
* **Recording available**: [**v23.6 Release Webinar**](https://www.youtube.com/watch?v=cuf_hYn7dqU) All the features of 23.6, one convenient video! Watch it now!
* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)
## Interested in joining ClickHouse and making it your full time job?
## Interested in joining ClickHouse and making it your full-time job?
We are a globally diverse and distributed team, united behind a common goal of creating industry-leading, real-time analytics. Here, you will have an opportunity to solve some of the most cutting edge technical challenges and have direct ownership of your work and vision. If you are a contributor by nature, a thinker as well as a doer - we'll definitely click!
We are a globally diverse and distributed team, united behind a common goal of creating industry-leading, real-time analytics. Here, you will have an opportunity to solve some of the most cutting-edge technical challenges and have direct ownership of your work and vision. If you are a contributor by nature, a thinker and a doer - we'll definitely click!
Check out our **current openings** here: https://clickhouse.com/company/careers


@ -1,5 +1,5 @@
if (SANITIZE OR NOT (
((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_PPC64LE OR ARCH_RISCV64)) OR
((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_PPC64LE OR ARCH_RISCV64 OR ARCH_S390X)) OR
(OS_DARWIN AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
))
if (ENABLE_JEMALLOC)
@ -17,17 +17,17 @@ if (NOT ENABLE_JEMALLOC)
endif ()
if (NOT OS_LINUX)
message (WARNING "jemalloc support on non-linux is EXPERIMENTAL")
message (WARNING "jemalloc support on non-Linux is EXPERIMENTAL")
endif()
if (OS_LINUX)
# ThreadPool select job randomly, and there can be some threads that had been
# ThreadPool select job randomly, and there can be some threads that have been
# performed some memory heavy task before and will be inactive for some time,
# performed some memory-heavy tasks before and will be inactive for some time,
# but until it will became active again, the memory will not be freed since by
# but until it becomes active again, the memory will not be freed since, by
# default each thread has it's own arena, but there should be not more then
# default, each thread has its arena, but there should be no more than
# 4*CPU arenas (see opt.nareans description).
#
# By enabling percpu_arena number of arenas limited to number of CPUs and hence
# By enabling percpu_arena number of arenas is limited to the number of CPUs, and hence
# this problem should go away.
#
# muzzy_decay_ms -- use MADV_FREE when available on newer Linuxes, to
@ -38,7 +38,7 @@ if (OS_LINUX)
else()
set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000")
endif()
# CACHE variable is empty, to allow changing defaults without necessity
# CACHE variable is empty to allow changing defaults without the necessity
# to purge cache
set (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE "" CACHE STRING "Change default configuration string of JEMalloc" )
if (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE)
@ -148,6 +148,8 @@ elseif (ARCH_PPC64LE)
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_ppc64le")
elseif (ARCH_RISCV64)
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_riscv64")
elseif (ARCH_S390X)
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_s390x")
else ()
message (FATAL_ERROR "internal jemalloc: This arch is not supported")
endif ()
@ -172,7 +174,7 @@ target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)
# jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
# The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracing.
# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracking.
#
# ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)


@ -0,0 +1,435 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
/* #undef JEMALLOC_PREFIX */
/* #undef JEMALLOC_CPREFIX */
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
#define JEMALLOC_OVERRIDE___LIBC_FREE
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
#define JEMALLOC_OVERRIDE___LIBC_PVALLOC
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#define JEMALLOC_PRIVATE_NAMESPACE je_
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#define CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#define HAVE_CPU_SPINWAIT 0
/*
* Number of significant bits in virtual addresses. This may be less than the
* total number of bits in a pointer, e.g. on x64, for which the uppermost 16
* bits are the same as bit 47.
*/
#define LG_VADDR 64
/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS
/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS
/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
#define JEMALLOC_HAVE_BUILTIN_CLZ
/*
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
*/
/* #undef JEMALLOC_OS_UNFAIR_LOCK */
/* Defined if syscall(2) is usable. */
#define JEMALLOC_USE_SYSCALL
/*
* Defined if secure_getenv(3) is available.
*/
#define JEMALLOC_HAVE_SECURE_GETENV
/*
* Defined if issetugid(2) is available.
*/
/* #undef JEMALLOC_HAVE_ISSETUGID */
/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK
/* Defined if pthread_setname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP
/* Defined if pthread_getname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP
/* Defined if pthread_get_name_np(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP */
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC
/*
* Defined if mach_absolute_time() is available.
*/
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
/*
* Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_REALTIME
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
#define JEMALLOC_THREADED_INIT
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
/* #undef JEMALLOC_MUTEX_INIT_CB */
/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
/* #undef JEMALLOC_DEBUG */
/* JEMALLOC_STATS enables statistics calculation. */
#define JEMALLOC_STATS
/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */
/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */
/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */
/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */
/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */
/* JEMALLOC_PAGEID enabled page id */
/* #undef JEMALLOC_PAGEID */
/* JEMALLOC_HAVE_PRCTL checks prctl */
#define JEMALLOC_HAVE_PRCTL
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
* segment (DSS).
*/
#define JEMALLOC_DSS
/* Support memory filling (junk/zero). */
#define JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */
/* Support utrace(2)-based tracing (label based signature). */
/* #undef JEMALLOC_UTRACE_LABEL */
/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */
/* Support lazy locking (avoid locking unless a second thread is launched). */
/* #undef JEMALLOC_LAZY_LOCK */
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
/* #undef LG_QUANTUM */
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 12
/* Maximum number of regions in a slab. */
/* #undef CONFIG_LG_SLAB_MAXREGS */
/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
* explicit huge page support are separately configured.
*/
#define LG_HUGEPAGE 20
/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
* This is the normal order of things for mmap()/munmap(), but on Windows
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
* mappings do *not* coalesce/fragment.
*/
#define JEMALLOC_MAPS_COALESCE
/*
* If defined, retain memory for later reuse by default rather than using e.g.
* munmap() to unmap freed extents. This is enabled on 64-bit Linux because
* common sequences of mmap()/munmap() calls will cause virtual memory map
* holes.
*/
#define JEMALLOC_RETAIN
/* TLS is used to map arenas and magazine caches to threads. */
#define JEMALLOC_TLS
/*
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
* Don't use this directly; instead use unreachable() from util.h
*/
#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable
/*
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
* use ffs_*() from util.h.
*/
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs
/*
* popcount*() functions to use for bitmapping.
*/
#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount
/*
* If defined, explicitly attempt to more uniformly distribute large allocation
* pointer alignments across all cache indices.
*/
#define JEMALLOC_CACHE_OBLIVIOUS
/*
* If defined, enable logging facilities. We make this a configure option to
* avoid taking extra branches everywhere.
*/
/* #undef JEMALLOC_LOG */
/*
* If defined, use readlinkat() (instead of readlink()) to follow
* /etc/malloc_conf.
*/
/* #undef JEMALLOC_READLINKAT */
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
/* #undef JEMALLOC_ZONE */
/*
* Methods for determining whether the OS overcommits.
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
* /proc/sys/vm.overcommit_memory file.
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
*/
/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/* Defined if madvise(2) is available. */
#define JEMALLOC_HAVE_MADVISE
/*
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
* arguments to madvise(2).
*/
#define JEMALLOC_HAVE_MADVISE_HUGE
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
* will be discarded rather than swapped out.
* madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
* defined, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched;
* otherwise this behaves similarly to
* MADV_FREE, though typically with higher
* system overhead.
*/
#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
/* #undef JEMALLOC_DEFINE_MADVISE_FREE */
/*
* Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
*/
#define JEMALLOC_MADVISE_DONTDUMP
/*
* Defined if MADV_[NO]CORE is supported as an argument to madvise.
*/
/* #undef JEMALLOC_MADVISE_NOCORE */
/* Defined if mprotect(2) is available. */
#define JEMALLOC_HAVE_MPROTECT
/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
/* #undef JEMALLOC_THP */
/* Defined if posix_madvise is available. */
/* #undef JEMALLOC_HAVE_POSIX_MADVISE */
/*
* Method for purging unused pages using posix_madvise.
*
* posix_madvise(..., POSIX_MADV_DONTNEED)
*/
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */
/*
* Defined if memcntl page admin call is supported
*/
/* #undef JEMALLOC_HAVE_MEMCNTL */
/*
* Defined if malloc_size is supported
*/
/* #undef JEMALLOC_HAVE_MALLOC_SIZE */
/* Define if operating system has alloca.h header. */
#define JEMALLOC_HAS_ALLOCA_H
/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT
/* For use by hash code. */
#define JEMALLOC_BIG_ENDIAN
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG 3
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#define LG_SIZEOF_LONG_LONG 3
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */
/* glibc memalign hook. */
/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */
/* pthread support */
#define JEMALLOC_HAVE_PTHREAD
/* dlsym() support */
#define JEMALLOC_HAVE_DLSYM
/* Adaptive mutex support in pthreads. */
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
/* GNU specific sched_getcpu support */
#define JEMALLOC_HAVE_SCHED_GETCPU
/* GNU specific sched_setaffinity support */
#define JEMALLOC_HAVE_SCHED_SETAFFINITY
/*
* If defined, all the features necessary for background threads are present.
*/
#define JEMALLOC_BACKGROUND_THREAD
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
*/
/* #undef JEMALLOC_EXPORT */
/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF ""
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC
/*
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
*/
#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */
/* Is C++ support being built? */
#define JEMALLOC_ENABLE_CXX
/* Performs additional size checks when defined. */
/* #undef JEMALLOC_OPT_SIZE_CHECKS */
/* Allows sampled junk and stash for checking use-after-free when defined. */
/* #undef JEMALLOC_UAF_DETECTION */
/* Darwin VM_MAKE_TAG support */
/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */
/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
#define JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */


@ -97,8 +97,8 @@ docker run -d \
You may also want to mount:
* `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustmenets
* `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustments
* `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustmenets
* `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustments
* `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below).
### Linux capabilities


@ -98,6 +98,7 @@ RUN python3 -m pip install --no-cache-dir \
redis \
requests-kerberos \
tzlocal==2.1 \
retry \
urllib3
# Hudi supports only spark 3.3.*, not 3.4


@ -0,0 +1,23 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v23.3.8.21-lts (1675f2264f3) FIXME as compared to v23.3.7.5-lts (bc683c11c92)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix backward compatibility for IP types hashing in aggregate functions [#50551](https://github.com/ClickHouse/ClickHouse/pull/50551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
* Check refcount in `RemoveManyObjectStorageOperation::finalize` instead of `execute` [#51954](https://github.com/ClickHouse/ClickHouse/pull/51954) ([vdimir](https://github.com/vdimir)).
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix MergeTreeMarksLoader segfaulting if marks file is longer than expected [#51636](https://github.com/ClickHouse/ClickHouse/pull/51636) ([Michael Kolupaev](https://github.com/al13n321)).


@ -0,0 +1,26 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v23.4.6.25-stable (a06848b1770) FIXME as compared to v23.4.5.22-stable (0ced5d6a8da)
#### Improvement
* Backported in [#51234](https://github.com/ClickHouse/ClickHouse/issues/51234): Improve the progress bar for file/s3/hdfs/url table functions by using chunk size from source data and using incremental total size counting in each thread. Fix the progress bar for *Cluster functions. This closes [#47250](https://github.com/ClickHouse/ClickHouse/issues/47250). [#51088](https://github.com/ClickHouse/ClickHouse/pull/51088) ([Kruglov Pavel](https://github.com/Avogar)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix backward compatibility for IP types hashing in aggregate functions [#50551](https://github.com/ClickHouse/ClickHouse/pull/50551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix MergeTreeMarksLoader segfaulting if marks file is longer than expected [#51636](https://github.com/ClickHouse/ClickHouse/pull/51636) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix source image for sqllogic [#51728](https://github.com/ClickHouse/ClickHouse/pull/51728) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@ -11,7 +11,8 @@ Supported platforms:
- x86_64
- AArch64
- Power9 (experimental)
- PowerPC 64 LE (experimental)
- RISC-V 64 (experimental)
## Building on Ubuntu
@ -42,7 +43,7 @@ sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
For other Linux distribution - check the availability of LLVM's [prebuild packages](https://releases.llvm.org/download.html).
As of April 2023, any version of Clang >= 15 will work.
As of April 2023, clang-16 or higher will work.
GCC as a compiler is not supported.
To build with a specific Clang version:
@ -86,8 +87,8 @@ The build requires the following components:
- Git (used to checkout the sources, not needed for the build)
- CMake 3.20 or newer
- Compiler: Clang 15 or newer
- Compiler: clang-16 or newer
- Linker: lld 15 or newer
- Linker: lld-16 or newer
- Ninja
- Yasm
- Gawk


@ -1201,13 +1201,58 @@ Keys:
- `console` Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`.
- `stream_compress` Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.
Both log and error log file names (only file names, not directories) support date and time format specifiers.
**Format specifiers**
Using the following format specifiers, you can define a pattern for the resulting file name. “Example” column shows possible results for `2023-07-06 18:32:07`.
| Specifier | Description | Example |
|-------------|---------------------------------------------------------------------------------------------------------------------|--------------------------|
| %% | Literal % | % |
| %n | New-line character | |
| %t | Horizontal tab character | |
| %Y | Year as a decimal number, e.g. 2017 | 2023 |
| %y | Last 2 digits of year as a decimal number (range [00,99]) | 23 |
| %C | First 2 digits of year as a decimal number (range [00,99]) | 20 |
| %G | Four-digit [ISO 8601 week-based year](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), i.e. the year that contains the specified week. Normally useful only with %V | 2023 |
| %g | Last 2 digits of [ISO 8601 week-based year](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), i.e. the year that contains the specified week. | 23 |
| %b | Abbreviated month name, e.g. Oct (locale dependent) | Jul |
| %h | Synonym of %b | Jul |
| %B | Full month name, e.g. October (locale dependent) | July |
| %m | Month as a decimal number (range [01,12]) | 07 |
| %U | Week of the year as a decimal number (Sunday is the first day of the week) (range [00,53]) | 27 |
| %W | Week of the year as a decimal number (Monday is the first day of the week) (range [00,53]) | 27 |
| %V | ISO 8601 week number (range [01,53]) | 27 |
| %j | Day of the year as a decimal number (range [001,366]) | 187 |
| %d | Day of the month as a zero-padded decimal number (range [01,31]). Single digit is preceded by zero. | 06 |
| %e | Day of the month as a space-padded decimal number (range [1,31]). Single digit is preceded by a space. | &nbsp; 6 |
| %a | Abbreviated weekday name, e.g. Fri (locale dependent) | Thu |
| %A | Full weekday name, e.g. Friday (locale dependent) | Thursday |
| %w | Weekday as a integer number with Sunday as 0 (range [0-6]) | 4 |
| %u | Weekday as a decimal number, where Monday is 1 (ISO 8601 format) (range [1-7]) | 4 |
| %H | Hour as a decimal number, 24 hour clock (range [00-23]) | 18 |
| %I | Hour as a decimal number, 12 hour clock (range [01,12]) | 06 |
| %M | Minute as a decimal number (range [00,59]) | 32 |
| %S | Second as a decimal number (range [00,60]) | 07 |
| %c | Standard date and time string, e.g. Sun Oct 17 04:41:13 2010 (locale dependent) | Thu Jul 6 18:32:07 2023 |
| %x | Localized date representation (locale dependent) | 07/06/23 |
| %X | Localized time representation, e.g. 18:40:20 or 6:40:20 PM (locale dependent) | 18:32:07 |
| %D | Short MM/DD/YY date, equivalent to %m/%d/%y | 07/06/23 |
| %F | Short YYYY-MM-DD date, equivalent to %Y-%m-%d | 2023-07-06 |
| %r | Localized 12-hour clock time (locale dependent) | 06:32:07 PM |
| %R | Equivalent to "%H:%M" | 18:32 |
| %T | Equivalent to "%H:%M:%S" (the ISO 8601 time format) | 18:32:07 |
| %p | Localized a.m. or p.m. designation (locale dependent) | PM |
| %z | Offset from UTC in the ISO 8601 format (e.g. -0430), or no characters if the time zone information is not available | +0800 |
| %Z | Locale-dependent time zone name or abbreviation, or no characters if the time zone information is not available | Z AWST |
**Example**
``` xml
<logger>
<level>trace</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<log>/var/log/clickhouse-server/clickhouse-server-%F-%T.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<errorlog>/var/log/clickhouse-server/clickhouse-server-%F-%T.err.log</errorlog>
<size>1000M</size>
<count>10</count>
<stream_compress>true</stream_compress>


@ -242,6 +242,26 @@ See also:
- [DateTime data type.](../../sql-reference/data-types/datetime.md)
- [Functions for working with dates and times.](../../sql-reference/functions/date-time-functions.md)
## interval_output_format {#interval_output_format}
Allows choosing different output formats of the text representation of interval types.
Possible values:
- `kusto` - KQL-style output format.
ClickHouse outputs intervals in [KQL format](https://learn.microsoft.com/en-us/dotnet/standard/base-types/standard-timespan-format-strings#the-constant-c-format-specifier). For example, `toIntervalDay(2)` would be formatted as `2.00:00:00`. Please note that for interval types of varying length (ie. `IntervalMonth` and `IntervalYear`) the average number of seconds per interval is taken into account.
- `numeric` - Numeric output format.
ClickHouse outputs intervals as their underlying numeric representation. For example, `toIntervalDay(2)` would be formatted as `2`.
Default value: `numeric`.
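A minimal illustration of the two modes, assuming the standard `SETTINGS` clause (the expected outputs mirror the description above rather than a verified run):

``` sql
SELECT toIntervalDay(2) SETTINGS interval_output_format = 'kusto';   -- expected: 2.00:00:00
SELECT toIntervalDay(2) SETTINGS interval_output_format = 'numeric'; -- expected: 2
```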
See also:
- [Interval](../../sql-reference/data-types/special-data-types/interval.md)
## input_format_ipv4_default_on_conversion_error {#input_format_ipv4_default_on_conversion_error}
Deserialization of IPv4 will use default values instead of throwing exception on conversion error.


@ -300,7 +300,7 @@ SELECT groupArrayResample(30, 75, 30)(name, age) FROM people
Consider the results.
`Jonh` is out of the sample because he's too young. Other people are distributed according to the specified age intervals.
`John` is out of the sample because he's too young. Other people are distributed according to the specified age intervals.
Now let's count the total number of people and their average wage in the specified age intervals.


@ -44,3 +44,5 @@ Result:
```
The groupArray function will remove ᴺᵁᴸᴸ value based on the above results.
- Alias: `array_agg`.
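A small usage sketch of the alias (assuming the standard `numbers` table function; both columns should contain the same array):

``` sql
SELECT groupArray(number) AS via_groupArray, array_agg(number) AS via_alias
FROM numbers(3);
-- [0,1,2]	[0,1,2]
```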


@ -143,5 +143,6 @@ Time shifts for multiple days. Some pacific islands changed their timezone offse
- [The `date_time_input_format` setting](../../operations/settings/settings.md#settings-date_time_input_format)
- [The `date_time_output_format` setting](../../operations/settings/settings.md#settings-date_time_output_format)
- [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
- [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
- [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime)
- [The `Date` data type](../../sql-reference/data-types/date.md)


@ -119,6 +119,7 @@ FROM dt;
- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#date_time_input_format)
- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#date_time_output_format)
- [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
- [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
- [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-for-working-with-dates-and-times)
- [`Date` data type](../../sql-reference/data-types/date.md)
- [`DateTime` data type](../../sql-reference/data-types/datetime.md)


@ -694,10 +694,14 @@ SELECT toDate('2016-12-27') AS date, toWeek(date) AS week0, toWeek(date,1) AS we
Returns year and week for a date. The year in the result may be different from the year in the date argument for the first and the last week of the year.
The mode argument works exactly like the mode argument to `toWeek()`. For the single-argument syntax, a mode value of 0 is used.
The mode argument works like the mode argument to `toWeek()`. For the single-argument syntax, a mode value of 0 is used.
`toISOYear()` is a compatibility function that is equivalent to `intDiv(toYearWeek(date,3),100)`.
:::warning
The week number returned by `toYearWeek()` can be different from what the `toWeek()` returns. `toWeek()` always returns week number in the context of the given year, and in case `toWeek()` returns `0`, `toYearWeek()` returns the value corresponding to the last week of previous year. See `prev_yearWeek` in example below.
:::
**Syntax**
``` sql
@ -707,18 +711,18 @@ toYearWeek(t[, mode[, timezone]])
**Example**
``` sql
SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9;
SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9, toYearWeek(toDate('2022-01-01')) AS prev_yearWeek;
```
``` text
┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┐
┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┬─prev_yearWeek─┐
│ 2016-12-27 │ 201652 │ 201652 │ 201701 │
│ 2016-12-27 │ 201652 │ 201652 │ 201701 │ 202152 │
└────────────┴───────────┴───────────┴───────────┘
└────────────┴───────────┴───────────┴───────────┴───────────────┘
```
## age
Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 microsecond.
Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 second.
E.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for `day` unit, 0 months for `month` unit, 0 years for `year` unit.
For an alternative to `age`, see function `date\_diff`.
@ -734,8 +738,6 @@ age('unit', startdate, enddate, [timezone])
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
Possible values:
- `microsecond` (possible abbreviations: `us`, `u`)
- `millisecond` (possible abbreviations: `ms`)
- `second` (possible abbreviations: `ss`, `s`)
- `minute` (possible abbreviations: `mi`, `n`)
- `hour` (possible abbreviations: `hh`, `h`)
@ -811,8 +813,6 @@ Aliases: `dateDiff`, `DATE_DIFF`, `timestampDiff`, `timestamp_diff`, `TIMESTAMP_
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
Possible values:
- `microsecond` (possible abbreviations: `us`, `u`)
- `millisecond` (possible abbreviations: `ms`)
- `second` (possible abbreviations: `ss`, `s`)
- `minute` (possible abbreviations: `mi`, `n`)
- `hour` (possible abbreviations: `hh`, `h`)


@ -399,7 +399,11 @@ toDateTime(expr[, time_zone ])
- `expr` — The value. [String](/docs/en/sql-reference/data-types/string.md), [Int](/docs/en/sql-reference/data-types/int-uint.md), [Date](/docs/en/sql-reference/data-types/date.md) or [DateTime](/docs/en/sql-reference/data-types/datetime.md).
- `time_zone` — Time zone. [String](/docs/en/sql-reference/data-types/string.md).
If `expr` is a number, it is interpreted as the number of seconds since the beginning of the Unix Epoch (as Unix timestamp).
:::note
If `expr` is a number, it is interpreted as the number of seconds since the beginning of the Unix Epoch (as Unix timestamp).
If `expr` is a [String](/docs/en/sql-reference/data-types/string.md), it may be interpreted as a Unix timestamp or as a string representation of date / date with time.
Thus, parsing of short numbers' string representations (up to 4 digits) is explicitly disabled due to ambiguity, e.g. a string `'1999'` may be both a year (an incomplete string representation of Date / DateTime) or a unix timestamp. Longer numeric strings are allowed.
:::
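An illustrative sketch of the rule above (the shown results assume a UTC server timezone; the last call is expected to fail and is left commented out):

``` sql
SELECT toDateTime(1546300800);            -- number: Unix timestamp -> 2019-01-01 00:00:00
SELECT toDateTime('2023-07-06 18:32:07'); -- long string: parsed as a date with time
-- SELECT toDateTime('1999');             -- 4-digit string: rejected as ambiguous (year vs. Unix timestamp)
```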
**Returned value**


@ -575,14 +575,60 @@ ClickHouse поддерживает динамическое изменение
- `errorlog` - Файл лога ошибок.
- `size` - Размер файла. Действует для `log` и `errorlog`. Как только файл достиг размера `size`, ClickHouse архивирует и переименовывает его, а на его месте создает новый файл лога.
- `count` - Количество заархивированных файлов логов, которые сохраняет ClickHouse.
- `stream_compress` Сжимать `log` и `errorlog` с помощью алгоритма `lz4`. Чтобы активировать, узтановите значение `1` или `true`.
Имена файлов `log` и `errorlog` (только имя файла, а не директорий) поддерживают спецификаторы шаблонов даты и времени.
**Спецификаторы форматирования**
С помощью следующих спецификаторов, можно определить шаблон для формирования имени файла. Столбец “Пример” показывает возможные значения на момент времени `2023-07-06 18:32:07`.
| Спецификатор | Описание | Пример |
|--------------|---------------------------------------------------------------------------------------------------------------------|--------------------------|
| %% | Литерал % | % |
| %n | Символ новой строки | |
| %t | Символ горизонтальной табуляции | |
| %Y | Год как десятичное число, например, 2017 | 2023 |
| %y | Последние 2 цифры года в виде десятичного числа (диапазон [00,99]) | 23 |
| %C | Первые 2 цифры года в виде десятичного числа (диапазон [00,99]) | 20 |
| %G | Год по неделям согласно [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), то есть год, который содержит указанную неделю. Обычно используется вместе с %V. | 2023 |
| %g | Последние 2 цифры [года по неделям ISO 8601](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), т.е. года, содержащего указанную неделю (диапазон [00,99]). | 23 |
| %b | Сокращённое название месяца, например Oct (зависит от локали) | Jul |
| %h | Синоним %b | Jul |
| %B | Полное название месяца, например, October (зависит от локали) | July |
| %m | Месяц в виде десятичного числа (диапазон [01,12]) | 07 |
| %U | Неделя года в виде десятичного числа (воскресенье - первый день недели) (диапазон [00,53]) | 27 |
| %W | Неделя года в виде десятичного числа (понедельник - первый день недели) (диапазон [00,53]) | 27 |
| %V | Неделя года ISO 8601 (диапазон [01,53]) | 27 |
| %j | День года в виде десятичного числа (диапазон [001,366]) | 187 |
| %d | День месяца в виде десятичного числа (диапазон [01,31]) Перед одиночной цифрой ставится ноль. | 06 |
| %e | День месяца в виде десятичного числа (диапазон [1,31]). Перед одиночной цифрой ставится пробел. | &nbsp; 6 |
| %a | Сокращённое название дня недели, например, Fri (зависит от локали) | Thu |
| %A | Полный день недели, например, Friday (зависит от локали) | Thursday |
| %w | День недели в виде десятичного числа, где воскресенье равно 0 (диапазон [0-6]) | 4 |
| %u | День недели в виде десятичного числа, где понедельник равен 1 (формат ISO 8601) (диапазон [1-7]) | 4 |
| %H | Час в виде десятичного числа, 24-часовой формат (диапазон [00-23]) | 18 |
| %I | Час в виде десятичного числа, 12-часовой формат (диапазон [01,12]) | 06 |
| %M | Минуты в виде десятичного числа (диапазон [00,59]) | 32 |
| %S | Секунды как десятичное число (диапазон [00,60]) | 07 |
| %c | Стандартная строка даты и времени, например, Sun Oct 17 04:41:13 2010 (зависит от локали) | Thu Jul 6 18:32:07 2023 |
| %x | Локализованное представление даты (зависит от локали) | 07/06/23 |
| %X | Локализованное представление времени, например, 18:40:20 или 6:40:20 PM (зависит от локали) | 18:32:07 |
| %D | Эквивалентно "%m/%d/%y" | 07/06/23 |
| %F | Эквивалентно "%Y-%m-%d" (формат даты ISO 8601) | 2023-07-06 |
| %r | Локализованное 12-часовое время (зависит от локали) | 06:32:07 PM |
| %R | Эквивалентно "%H:%M" | 18:32 |
| %T | Эквивалентно "%H:%M:%S" (формат времени ISO 8601) | 18:32:07 |
| %p | Локализованное обозначение a.m. или p.m. (зависит от локали) | PM |
| %z | Смещение от UTC в формате ISO 8601 (например, -0430), или без символов, если информация о часовом поясе недоступна | +0800 |
| %Z | Зависящее от локали название или аббревиатура часового пояса, если информация о часовом поясе доступна | Z AWST |
**Пример**
``` xml
<logger>
<level>trace</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<log>/var/log/clickhouse-server/clickhouse-server-%F-%T.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<errorlog>/var/log/clickhouse-server/clickhouse-server-%F-%T.err.log</errorlog>
<size>1000M</size>
<count>10</count>
</logger>


@ -122,6 +122,7 @@ FROM dt
- [Настройка `date_time_input_format`](../../operations/settings/index.md#settings-date_time_input_format)
- [Настройка `date_time_output_format`](../../operations/settings/index.md)
- [Конфигурационный параметр сервера `timezone`](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
- [Параметр `session_timezone`](../../operations/settings/settings.md#session_timezone)
- [Операторы для работы с датой и временем](../../sql-reference/operators/index.md#operators-datetime)
- [Тип данных `Date`](date.md)
- [Тип данных `DateTime64`](datetime64.md)


@ -102,6 +102,7 @@ FROM dt;
- [Настройка `date_time_input_format`](../../operations/settings/settings.md#settings-date_time_input_format)
- [Настройка `date_time_output_format`](../../operations/settings/settings.md)
- [Конфигурационный параметр сервера `timezone`](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
- [Параметр `session_timezone`](../../operations/settings/settings.md#session_timezone)
- [Операторы для работы с датой и временем](../../sql-reference/operators/index.md#operators-datetime)
- [Тип данных `Date`](date.md)
- [Тип данных `DateTime`](datetime.md)


@ -599,29 +599,33 @@ SELECT toDate('2016-12-27') AS date, toWeek(date) AS week0, toWeek(date,1) AS we
## toYearWeek(date[,mode]) {#toyearweek}
Возвращает год и неделю для даты. Год в результате может отличаться от года в аргументе даты для первой и последней недели года.
Аргумент mode работает точно так же, как аргумент mode [toWeek()](#toweek). Если mode не задан, используется режим 0.
Аргумент mode работает так же, как аргумент mode [toWeek()](#toweek), значение mode по умолчанию -- `0`.
`toISOYear() ` эквивалентно `intDiv(toYearWeek(date,3),100)`.
:::warning
Однако, есть отличие в работе функций `toWeek()` и `toYearWeek()`. `toWeek()` возвращает номер недели в контексте заданного года, и в случае, когда `toWeek()` вернёт `0`, `toYearWeek()` вернёт значение, соответствующее последней неделе предыдущего года (см. `prev_yearWeek` в примере).
:::
**Пример** **Пример**
Запрос: Запрос:
```sql ```sql
SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9; SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9, toYearWeek(toDate('2022-01-01')) AS prev_yearWeek;
``` ```
Result: Result:
```text ```text
┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┐ ┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┬─prev_yearWeek─
│ 2016-12-27 │ 201652 │ 201652 │ 201701 │ │ 2016-12-27 │ 201652 │ 201652 │ 201701 │ 202152 │
└────────────┴───────────┴───────────┴───────────┘ └────────────┴───────────┴───────────┴───────────┴───────────────
``` ```
## age ## age
Computes the `unit` component of the difference between `startdate` and `enddate`. The difference is computed with a precision of 1 microsecond. Computes the `unit` component of the difference between `startdate` and `enddate`. The difference is computed with a precision of 1 second.
For example, the difference between `2021-12-29` and `2022-01-01` is 3 days for the `day` unit, 0 months for the `month` unit, and 0 years for the `year` unit. For example, the difference between `2021-12-29` and `2022-01-01` is 3 days for the `day` unit, 0 months for the `month` unit, and 0 years for the `year` unit.
**Syntax** **Syntax**
@ -635,8 +639,6 @@ age('unit', startdate, enddate, [timezone])
- `unit` — the unit of time in which the returned value is expressed. [String](../../sql-reference/data-types/string.md). - `unit` — the unit of time in which the returned value is expressed. [String](../../sql-reference/data-types/string.md).
Possible values: Possible values:
- `microsecond` (possible abbreviations: `us`, `u`)
- `millisecond` (possible abbreviations: `ms`)
- `second` (possible abbreviations: `ss`, `s`) - `second` (possible abbreviations: `ss`, `s`)
- `minute` (possible abbreviations: `mi`, `n`) - `minute` (possible abbreviations: `mi`, `n`)
- `hour` (possible abbreviations: `hh`, `h`) - `hour` (possible abbreviations: `hh`, `h`)
@ -710,8 +712,6 @@ date_diff('unit', startdate, enddate, [timezone])
- `unit` — the unit of time in which the returned value is expressed. [String](../../sql-reference/data-types/string.md). - `unit` — the unit of time in which the returned value is expressed. [String](../../sql-reference/data-types/string.md).
Possible values: Possible values:
- `microsecond` (possible abbreviations: `us`, `u`)
- `millisecond` (possible abbreviations: `ms`)
- `second` (possible abbreviations: `ss`, `s`) - `second` (possible abbreviations: `ss`, `s`)
- `minute` (possible abbreviations: `mi`, `n`) - `minute` (possible abbreviations: `mi`, `n`)
- `hour` (possible abbreviations: `hh`, `h`) - `hour` (possible abbreviations: `hh`, `h`)

View File

@ -284,7 +284,13 @@ toDateTime(expr[, time_zone ])
- `expr` — The value to convert. [String](/docs/ru/sql-reference/data-types/string.md), [Int](/docs/ru/sql-reference/data-types/int-uint.md), [Date](/docs/ru/sql-reference/data-types/date.md), or [DateTime](/docs/ru/sql-reference/data-types/datetime.md). - `expr` — The value to convert. [String](/docs/ru/sql-reference/data-types/string.md), [Int](/docs/ru/sql-reference/data-types/int-uint.md), [Date](/docs/ru/sql-reference/data-types/date.md), or [DateTime](/docs/ru/sql-reference/data-types/datetime.md).
- `time_zone` — Time zone. [String](/docs/ru/sql-reference/data-types/string.md). - `time_zone` — Time zone. [String](/docs/ru/sql-reference/data-types/string.md).
If `expr` is a number, it is interpreted as the number of seconds since the beginning of the Unix epoch. :::note
If `expr` is a number, it is interpreted as the number of seconds since the beginning of the Unix epoch (a Unix Timestamp).
If `expr` is a [string (String)](/docs/ru/sql-reference/data-types/string.md), it may be interpreted either as a Unix Timestamp or as a string representation of a date / date with time.
Because of this ambiguity, parsing strings of length 4 or shorter is forbidden. For example, the string `'1999'` could represent either a year (an incomplete string representation of a date or date with time) or a Unix Timestamp.
Strings of 5 characters or longer are not ambiguous, so parsing them is allowed.
:::
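As an editorial aside, the length rule in the note above can be sketched in a few lines of stand-alone C++. The function name, error text and the `main` driver are invented for illustration; this is not the actual ClickHouse parser, only the idea it documents.

```cpp
#include <cctype>
#include <iostream>
#include <stdexcept>
#include <string>

/// Returns true if the string should be treated as a Unix Timestamp, false if it should
/// be parsed as a date/datetime string; short strings are rejected as ambiguous.
bool looksLikeUnixTimestamp(const std::string & s)
{
    if (s.size() <= 4)
        throw std::invalid_argument("Ambiguous value '" + s + "': could be a year or a Unix Timestamp");
    for (char c : s)
        if (!std::isdigit(static_cast<unsigned char>(c)))
            return false;   /// contains non-digits, so parse it as a date/datetime string
    return true;            /// 5 or more digits: safe to treat as a Unix Timestamp
}

int main()
{
    std::cout << looksLikeUnixTimestamp("1686352800") << '\n';   /// 1: treated as a timestamp
    std::cout << looksLikeUnixTimestamp("2023-06-10") << '\n';   /// 0: treated as a date string
    /// looksLikeUnixTimestamp("1999") would throw: too short to disambiguate
}
```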
**Returned value** **Returned value**

View File

@ -643,8 +643,6 @@ date_diff('unit', startdate, enddate, [timezone])
- `unit` — the time unit corresponding to `value`. Type: [String](../../sql-reference/data-types/string.md). - `unit` — the time unit corresponding to `value`. Type: [String](../../sql-reference/data-types/string.md).
Possible values: Possible values:
- `microsecond`
- `millisecond`
- `second` - `second`
- `minute` - `minute`
- `hour` - `hour`

View File

@ -1404,10 +1404,9 @@ void Client::readArguments(
else if (arg == "--password" && ((arg_num + 1) >= argc || std::string_view(argv[arg_num + 1]).starts_with('-'))) else if (arg == "--password" && ((arg_num + 1) >= argc || std::string_view(argv[arg_num + 1]).starts_with('-')))
{ {
common_arguments.emplace_back(arg); common_arguments.emplace_back(arg);
/// No password was provided by user. Add '\n' as implicit password, /// if the value of --password is omitted, the password will be asked before
/// which encodes that client should ask user for the password. /// connection start
/// '\n' is used because there is hardly a chance that a user would use '\n' as a password. common_arguments.emplace_back(ConnectionParameters::ASK_PASSWORD);
common_arguments.emplace_back("\n");
} }
else else
common_arguments.emplace_back(arg); common_arguments.emplace_back(arg);
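The sentinel trick described in the new comment can be illustrated with a minimal stand-alone program. Only the `'\n'` sentinel idea comes from the change; the argument handling and printing below are simplified for the sketch.

```cpp
#include <iostream>
#include <string>
#include <string_view>
#include <vector>

/// '\n' is used as the sentinel because it is very unlikely to be a real password.
static constexpr std::string_view ASK_PASSWORD = "\n";

int main(int argc, char ** argv)
{
    std::vector<std::string> args;
    for (int i = 1; i < argc; ++i)
    {
        std::string arg = argv[i];
        bool value_missing = (i + 1 >= argc) || (argv[i + 1][0] == '-');
        if (arg == "--password" && value_missing)
        {
            args.push_back(arg);
            args.emplace_back(ASK_PASSWORD);   /// remember to prompt for the password later
        }
        else
            args.push_back(arg);
    }

    for (const auto & a : args)
        std::cout << (a == ASK_PASSWORD ? "<ask user for password>" : a) << '\n';
}
```

The ConnectionParameters.h change further down gives the same sentinel a name, `ConnectionParameters::ASK_PASSWORD`, chosen because a real password is very unlikely to be a lone newline.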

View File

@ -30,7 +30,7 @@ bool parseKeeperPath(IParser::Pos & pos, Expected & expected, String & path)
return parseIdentifierOrStringLiteral(pos, expected, path); return parseIdentifierOrStringLiteral(pos, expected, path);
String result; String result;
while (pos->type == TokenType::BareWord || pos->type == TokenType::Slash || pos->type == TokenType::Dot) while (pos->type != TokenType::Whitespace && pos->type != TokenType::EndOfStream)
{ {
result.append(pos->begin, pos->end); result.append(pos->begin, pos->end);
++pos; ++pos;

View File

@ -125,6 +125,7 @@ void registerAggregateFunctionGroupArray(AggregateFunctionFactory & factory)
AggregateFunctionProperties properties = { .returns_default_when_only_null = false, .is_order_dependent = true }; AggregateFunctionProperties properties = { .returns_default_when_only_null = false, .is_order_dependent = true };
factory.registerFunction("groupArray", { createAggregateFunctionGroupArray<false>, properties }); factory.registerFunction("groupArray", { createAggregateFunctionGroupArray<false>, properties });
factory.registerAlias("array_agg", "groupArray", AggregateFunctionFactory::CaseInsensitive);
factory.registerFunction("groupArraySample", { createAggregateFunctionGroupArraySample, properties }); factory.registerFunction("groupArraySample", { createAggregateFunctionGroupArraySample, properties });
factory.registerFunction("groupArrayLast", { createAggregateFunctionGroupArray<true>, properties }); factory.registerFunction("groupArrayLast", { createAggregateFunctionGroupArray<true>, properties });
} }
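For readers unfamiliar with factory aliases, here is a toy sketch of case-insensitive alias resolution. `FactorySketch` and its methods are invented; only the `array_agg` to `groupArray` pair comes from the change above.

```cpp
#include <algorithm>
#include <cctype>
#include <iostream>
#include <map>
#include <string>

struct FactorySketch
{
    std::map<std::string, std::string> aliases;   /// lowercased alias -> canonical name

    static std::string lower(std::string s)
    {
        std::transform(s.begin(), s.end(), s.begin(),
                       [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
        return s;
    }

    void registerAlias(const std::string & alias, const std::string & canonical)
    {
        aliases[lower(alias)] = canonical;
    }

    std::string resolve(const std::string & name) const
    {
        auto it = aliases.find(lower(name));
        return it == aliases.end() ? name : it->second;
    }
};

int main()
{
    FactorySketch f;
    f.registerAlias("array_agg", "groupArray");
    std::cout << f.resolve("ARRAY_AGG") << '\n';   /// prints groupArray
}
```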

View File

@ -35,6 +35,7 @@ public:
std::shared_ptr<IBackupCoordination> backup_coordination; std::shared_ptr<IBackupCoordination> backup_coordination;
std::optional<UUID> backup_uuid; std::optional<UUID> backup_uuid;
bool deduplicate_files = true; bool deduplicate_files = true;
bool allow_s3_native_copy = true;
}; };
static BackupFactory & instance(); static BackupFactory & instance();

View File

@ -101,14 +101,16 @@ namespace
BackupReaderS3::BackupReaderS3( BackupReaderS3::BackupReaderS3(
const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_) const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ContextPtr & context_)
: BackupReaderDefault(&Poco::Logger::get("BackupReaderS3"), context_) : BackupReaderDefault(&Poco::Logger::get("BackupReaderS3"), context_)
, s3_uri(s3_uri_) , s3_uri(s3_uri_)
, client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_)) , client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_))
, request_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString()).request_settings) , request_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString()).request_settings)
, data_source_description{DataSourceType::S3, s3_uri.endpoint, false, false} , data_source_description{DataSourceType::S3, s3_uri.endpoint, false, false}
{ {
request_settings.updateFromSettings(context_->getSettingsRef());
request_settings.max_single_read_retries = context_->getSettingsRef().s3_max_single_read_retries; // FIXME: Avoid taking value for endpoint request_settings.max_single_read_retries = context_->getSettingsRef().s3_max_single_read_retries; // FIXME: Avoid taking value for endpoint
request_settings.allow_native_copy = allow_s3_native_copy;
} }
BackupReaderS3::~BackupReaderS3() = default; BackupReaderS3::~BackupReaderS3() = default;
@ -141,8 +143,7 @@ void BackupReaderS3::copyFileToDisk(const String & path_in_backup, size_t file_s
if (destination_data_source_description.sameKind(data_source_description) if (destination_data_source_description.sameKind(data_source_description)
&& (destination_data_source_description.is_encrypted == encrypted_in_backup)) && (destination_data_source_description.is_encrypted == encrypted_in_backup))
{ {
/// Use native copy, the more optimal way. LOG_TRACE(log, "Copying {} from S3 to disk {}", path_in_backup, destination_disk->getName());
LOG_TRACE(log, "Copying {} from S3 to disk {} using native copy", path_in_backup, destination_disk->getName());
auto write_blob_function = [&](const Strings & blob_path, WriteMode mode, const std::optional<ObjectAttributes> & object_attributes) -> size_t auto write_blob_function = [&](const Strings & blob_path, WriteMode mode, const std::optional<ObjectAttributes> & object_attributes) -> size_t
{ {
/// Object storage always uses mode `Rewrite` because it simulates append using metadata and different files. /// Object storage always uses mode `Rewrite` because it simulates append using metadata and different files.
@ -177,7 +178,7 @@ void BackupReaderS3::copyFileToDisk(const String & path_in_backup, size_t file_s
BackupWriterS3::BackupWriterS3( BackupWriterS3::BackupWriterS3(
const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_) const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ContextPtr & context_)
: BackupWriterDefault(&Poco::Logger::get("BackupWriterS3"), context_) : BackupWriterDefault(&Poco::Logger::get("BackupWriterS3"), context_)
, s3_uri(s3_uri_) , s3_uri(s3_uri_)
, client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_)) , client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_))
@ -186,6 +187,7 @@ BackupWriterS3::BackupWriterS3(
{ {
request_settings.updateFromSettings(context_->getSettingsRef()); request_settings.updateFromSettings(context_->getSettingsRef());
request_settings.max_single_read_retries = context_->getSettingsRef().s3_max_single_read_retries; // FIXME: Avoid taking value for endpoint request_settings.max_single_read_retries = context_->getSettingsRef().s3_max_single_read_retries; // FIXME: Avoid taking value for endpoint
request_settings.allow_native_copy = allow_s3_native_copy;
} }
void BackupWriterS3::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path, void BackupWriterS3::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
@ -200,8 +202,7 @@ void BackupWriterS3::copyFileFromDisk(const String & path_in_backup, DiskPtr src
/// In this case we can't use the native copy. /// In this case we can't use the native copy.
if (auto blob_path = src_disk->getBlobPath(src_path); blob_path.size() == 2) if (auto blob_path = src_disk->getBlobPath(src_path); blob_path.size() == 2)
{ {
/// Use native copy, the more optimal way. LOG_TRACE(log, "Copying file {} from disk {} to S3", src_path, src_disk->getName());
LOG_TRACE(log, "Copying file {} from disk {} to S3 using native copy", src_path, src_disk->getName());
copyS3File( copyS3File(
client, client,
/* src_bucket */ blob_path[1], /* src_bucket */ blob_path[1],

View File

@ -17,7 +17,7 @@ namespace DB
class BackupReaderS3 : public BackupReaderDefault class BackupReaderS3 : public BackupReaderDefault
{ {
public: public:
BackupReaderS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_); BackupReaderS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ContextPtr & context_);
~BackupReaderS3() override; ~BackupReaderS3() override;
bool fileExists(const String & file_name) override; bool fileExists(const String & file_name) override;
@ -38,7 +38,7 @@ private:
class BackupWriterS3 : public BackupWriterDefault class BackupWriterS3 : public BackupWriterDefault
{ {
public: public:
BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_); BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ContextPtr & context_);
~BackupWriterS3() override; ~BackupWriterS3() override;
bool fileExists(const String & file_name) override; bool fileExists(const String & file_name) override;

View File

@ -25,6 +25,7 @@ namespace ErrorCodes
M(Bool, async) \ M(Bool, async) \
M(Bool, decrypt_files_from_encrypted_disks) \ M(Bool, decrypt_files_from_encrypted_disks) \
M(Bool, deduplicate_files) \ M(Bool, deduplicate_files) \
M(Bool, allow_s3_native_copy) \
M(UInt64, shard_num) \ M(UInt64, shard_num) \
M(UInt64, replica_num) \ M(UInt64, replica_num) \
M(Bool, internal) \ M(Bool, internal) \

View File

@ -38,6 +38,9 @@ struct BackupSettings
/// Whether the BACKUP will omit similar files (within one backup only). /// Whether the BACKUP will omit similar files (within one backup only).
bool deduplicate_files = true; bool deduplicate_files = true;
/// Whether native copy is allowed (optimization for cloud storages, that sometimes could have bugs)
bool allow_s3_native_copy = true;
/// 1-based shard index to store in the backup. 0 means all shards. /// 1-based shard index to store in the backup. 0 means all shards.
/// Can only be used with BACKUP ON CLUSTER. /// Can only be used with BACKUP ON CLUSTER.
size_t shard_num = 0; size_t shard_num = 0;
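The `M(Bool, allow_s3_native_copy)` entry added above and the struct field added here are two halves of the usual X-macro pattern. A minimal sketch of how such a list stays in sync with a plain struct follows; all names other than the two field names are invented.

```cpp
#include <iostream>

struct BackupSettingsSketch
{
    bool deduplicate_files = true;
    bool allow_s3_native_copy = true;
};

#define LIST_OF_SKETCH_SETTINGS(M) \
    M(bool, deduplicate_files) \
    M(bool, allow_s3_native_copy)

int main()
{
    BackupSettingsSketch settings;
    settings.allow_s3_native_copy = false;   /// e.g. set from a BACKUP ... SETTINGS clause

#define PRINT_SETTING(TYPE, NAME) std::cout << #NAME << " = " << settings.NAME << '\n';
    LIST_OF_SKETCH_SETTINGS(PRINT_SETTING)
#undef PRINT_SETTING
}
```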

View File

@ -348,6 +348,7 @@ void BackupsWorker::doBackup(
backup_create_params.backup_coordination = backup_coordination; backup_create_params.backup_coordination = backup_coordination;
backup_create_params.backup_uuid = backup_settings.backup_uuid; backup_create_params.backup_uuid = backup_settings.backup_uuid;
backup_create_params.deduplicate_files = backup_settings.deduplicate_files; backup_create_params.deduplicate_files = backup_settings.deduplicate_files;
backup_create_params.allow_s3_native_copy = backup_settings.allow_s3_native_copy;
BackupMutablePtr backup = BackupFactory::instance().createBackup(backup_create_params); BackupMutablePtr backup = BackupFactory::instance().createBackup(backup_create_params);
/// Write the backup. /// Write the backup.
@ -647,6 +648,7 @@ void BackupsWorker::doRestore(
backup_open_params.backup_info = backup_info; backup_open_params.backup_info = backup_info;
backup_open_params.base_backup_info = restore_settings.base_backup_info; backup_open_params.base_backup_info = restore_settings.base_backup_info;
backup_open_params.password = restore_settings.password; backup_open_params.password = restore_settings.password;
backup_open_params.allow_s3_native_copy = restore_settings.allow_s3_native_copy;
BackupPtr backup = BackupFactory::instance().createBackup(backup_open_params); BackupPtr backup = BackupFactory::instance().createBackup(backup_open_params);
String current_database = context->getCurrentDatabase(); String current_database = context->getCurrentDatabase();

View File

@ -161,6 +161,7 @@ namespace
M(RestoreAccessCreationMode, create_access) \ M(RestoreAccessCreationMode, create_access) \
M(Bool, allow_unresolved_access_dependencies) \ M(Bool, allow_unresolved_access_dependencies) \
M(RestoreUDFCreationMode, create_function) \ M(RestoreUDFCreationMode, create_function) \
M(Bool, allow_s3_native_copy) \
M(Bool, internal) \ M(Bool, internal) \
M(String, host_id) \ M(String, host_id) \
M(OptionalUUID, restore_uuid) M(OptionalUUID, restore_uuid)

View File

@ -107,6 +107,9 @@ struct RestoreSettings
/// How the RESTORE command will handle if a user-defined function which it's going to restore already exists. /// How the RESTORE command will handle if a user-defined function which it's going to restore already exists.
RestoreUDFCreationMode create_function = RestoreUDFCreationMode::kCreateIfNotExists; RestoreUDFCreationMode create_function = RestoreUDFCreationMode::kCreateIfNotExists;
/// Whether native copy is allowed (optimization for cloud storages, that sometimes could have bugs)
bool allow_s3_native_copy = true;
/// Internal, should not be specified by user. /// Internal, should not be specified by user.
bool internal = false; bool internal = false;

View File

@ -107,12 +107,12 @@ void registerBackupEngineS3(BackupFactory & factory)
if (params.open_mode == IBackup::OpenMode::READ) if (params.open_mode == IBackup::OpenMode::READ)
{ {
auto reader = std::make_shared<BackupReaderS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.context); auto reader = std::make_shared<BackupReaderS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.allow_s3_native_copy, params.context);
return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, reader, params.context); return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, reader, params.context);
} }
else else
{ {
auto writer = std::make_shared<BackupWriterS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.context); auto writer = std::make_shared<BackupWriterS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.allow_s3_native_copy, params.context);
return std::make_unique<BackupImpl>( return std::make_unique<BackupImpl>(
backup_name_for_logging, backup_name_for_logging,
archive_params, archive_params,

View File

@ -46,8 +46,7 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
else else
{ {
password = config.getString("password", ""); password = config.getString("password", "");
/// if the value of --password is omitted, the password will be set implicitly to "\n" if (password == ASK_PASSWORD)
if (password == "\n")
password_prompt = true; password_prompt = true;
} }
if (password_prompt) if (password_prompt)

View File

@ -28,6 +28,10 @@ struct ConnectionParameters
ConnectionParameters(const Poco::Util::AbstractConfiguration & config, std::string host, std::optional<UInt16> port); ConnectionParameters(const Poco::Util::AbstractConfiguration & config, std::string host, std::optional<UInt16> port);
static UInt16 getPortFromConfig(const Poco::Util::AbstractConfiguration & config); static UInt16 getPortFromConfig(const Poco::Util::AbstractConfiguration & config);
/// Ask to enter the user's password if password option contains this value.
/// "\n" is used because there is hardly a chance that a user would use '\n' as password.
static constexpr std::string_view ASK_PASSWORD = "\n";
}; };
} }

View File

@ -1,6 +1,7 @@
#include "ConnectionString.h" #include "ConnectionString.h"
#include <Common/Exception.h> #include <Common/Exception.h>
#include <Client/ConnectionParameters.h>
#include <Poco/Exception.h> #include <Poco/Exception.h>
#include <Poco/URI.h> #include <Poco/URI.h>
@ -201,8 +202,8 @@ bool tryParseConnectionString(
else else
{ {
// in case of user_info == 'user:', ':' is specified, but password is empty // in case of user_info == 'user:', ':' is specified, but password is empty
// then add password argument "\n" which means: Ask user for a password. // then ask user for a password.
common_arguments.push_back("\n"); common_arguments.emplace_back(ConnectionParameters::ASK_PASSWORD);
} }
} }
else else

View File

@ -57,28 +57,25 @@ inline DB::UInt64 intHash64(DB::UInt64 x)
inline uint32_t s390x_crc32_u8(uint32_t crc, uint8_t v) inline uint32_t s390x_crc32_u8(uint32_t crc, uint8_t v)
{ {
return crc32_be(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v)); return crc32c_le_vx(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v));
} }
inline uint32_t s390x_crc32_u16(uint32_t crc, uint16_t v) inline uint32_t s390x_crc32_u16(uint32_t crc, uint16_t v)
{ {
return crc32_be(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v)); v = std::byteswap(v);
return crc32c_le_vx(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v));
} }
inline uint32_t s390x_crc32_u32(uint32_t crc, uint32_t v) inline uint32_t s390x_crc32_u32(uint32_t crc, uint32_t v)
{ {
return crc32_be(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v)); v = std::byteswap(v);
return crc32c_le_vx(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v));
} }
inline uint64_t s390x_crc32(uint64_t crc, uint64_t v) inline uint64_t s390x_crc32(uint64_t crc, uint64_t v)
{ {
uint64_t _crc = crc; v = std::byteswap(v);
uint32_t value_h, value_l; return crc32c_le_vx(static_cast<uint32_t>(crc), reinterpret_cast<unsigned char *>(&v), sizeof(uint64_t));
value_h = (v >> 32) & 0xffffffff;
value_l = v & 0xffffffff;
_crc = crc32_be(static_cast<uint32_t>(_crc), reinterpret_cast<unsigned char *>(&value_h), sizeof(uint32_t));
_crc = crc32_be(static_cast<uint32_t>(_crc), reinterpret_cast<unsigned char *>(&value_l), sizeof(uint32_t));
return _crc;
} }
#endif #endif
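The switch from `crc32_be` to `crc32c_le_vx` plus `std::byteswap` can be illustrated with a stand-alone sketch. `crc32_update` below is a toy bit-by-bit CRC-32C stand-in rather than the hardware-accelerated routine, and the program only demonstrates the byte-swapping step; it requires C++23 for `std::byteswap`, and the printed value is illustrative only.

```cpp
#include <bit>
#include <cstddef>
#include <cstdint>
#include <cstdio>

/// Toy reflected CRC-32C update (polynomial 0x82F63B78), one byte at a time.
static uint32_t crc32_update(uint32_t crc, const unsigned char * data, size_t len)
{
    for (size_t i = 0; i < len; ++i)
    {
        crc ^= data[i];
        for (int b = 0; b < 8; ++b)
            crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1u)));
    }
    return crc;
}

int main()
{
    uint64_t v = 0x0123456789ABCDEFULL;
    uint64_t swapped = std::byteswap(v);   /// a big-endian host swaps before hashing
    uint32_t crc = crc32_update(0, reinterpret_cast<unsigned char *>(&swapped), sizeof(swapped));
    std::printf("crc32c over byteswapped value: %08x\n", static_cast<unsigned>(crc));
}
```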

View File

@ -10,6 +10,27 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS; extern const int BAD_ARGUMENTS;
} }
Int64 IntervalKind::toAvgNanoseconds() const
{
static constexpr Int64 NANOSECONDS_PER_MICROSECOND = 1000;
static constexpr auto NANOSECONDS_PER_MILLISECOND = NANOSECONDS_PER_MICROSECOND * 1000;
static constexpr auto NANOSECONDS_PER_SECOND = NANOSECONDS_PER_MILLISECOND * 1000;
switch (kind)
{
case IntervalKind::Millisecond:
return NANOSECONDS_PER_MILLISECOND;
case IntervalKind::Microsecond:
return NANOSECONDS_PER_MICROSECOND;
case IntervalKind::Nanosecond:
return 1;
default:
return toAvgSeconds() * NANOSECONDS_PER_SECOND;
}
UNREACHABLE();
}
Int32 IntervalKind::toAvgSeconds() const Int32 IntervalKind::toAvgSeconds() const
{ {
switch (kind) switch (kind)

View File

@ -29,6 +29,10 @@ struct IntervalKind
constexpr std::string_view toString() const { return magic_enum::enum_name(kind); } constexpr std::string_view toString() const { return magic_enum::enum_name(kind); }
/// Returns number of nanoseconds in one interval.
/// For `Month`, `Quarter` and `Year` the function returns an average number of nanoseconds.
Int64 toAvgNanoseconds() const;
/// Returns number of seconds in one interval. /// Returns number of seconds in one interval.
/// For `Month`, `Quarter` and `Year` the function returns an average number of seconds. /// For `Month`, `Quarter` and `Year` the function returns an average number of seconds.
Int32 toAvgSeconds() const; Int32 toAvgSeconds() const;

View File

@ -6,17 +6,13 @@
namespace DB namespace DB
{ {
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
struct MemoryTrackerSwitcher struct MemoryTrackerSwitcher
{ {
explicit MemoryTrackerSwitcher(MemoryTracker * new_tracker) explicit MemoryTrackerSwitcher(MemoryTracker * new_tracker)
{ {
/// current_thread is not initialized for the main thread, so simply do not switch anything
if (!current_thread) if (!current_thread)
throw Exception(ErrorCodes::LOGICAL_ERROR, "current_thread is not initialized"); return;
auto * thread_tracker = CurrentThread::getMemoryTracker(); auto * thread_tracker = CurrentThread::getMemoryTracker();
prev_untracked_memory = current_thread->untracked_memory; prev_untracked_memory = current_thread->untracked_memory;
@ -28,6 +24,10 @@ struct MemoryTrackerSwitcher
~MemoryTrackerSwitcher() ~MemoryTrackerSwitcher()
{ {
/// current_thread is not initialized for the main thread, so simply do not switch anything
if (!current_thread)
return;
CurrentThread::flushUntrackedMemory(); CurrentThread::flushUntrackedMemory();
auto * thread_tracker = CurrentThread::getMemoryTracker(); auto * thread_tracker = CurrentThread::getMemoryTracker();
@ -35,6 +35,7 @@ struct MemoryTrackerSwitcher
thread_tracker->setParent(prev_memory_tracker_parent); thread_tracker->setParent(prev_memory_tracker_parent);
} }
private:
MemoryTracker * prev_memory_tracker_parent = nullptr; MemoryTracker * prev_memory_tracker_parent = nullptr;
Int64 prev_untracked_memory = 0; Int64 prev_untracked_memory = 0;
}; };
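A minimal RAII sketch of the switcher behaviour after this change, with the MemoryTracker machinery replaced by a single thread-local pointer: everything here is simplified and invented except the early-return-on-null idea.

```cpp
#include <iostream>

thread_local int * current_thread_state = nullptr;   /// assume null on the main thread

struct SwitcherSketch
{
    explicit SwitcherSketch(int * new_state)
    {
        if (!current_thread_state)
            return;                      /// main thread: silently do nothing
        prev = current_thread_state;
        current_thread_state = new_state;
    }

    ~SwitcherSketch()
    {
        if (!current_thread_state)
            return;                      /// must mirror the constructor's early return
        current_thread_state = prev;
    }

private:
    int * prev = nullptr;
};

int main()
{
    int scratch = 42;
    SwitcherSketch guard(&scratch);      /// no-op here because current_thread_state is null
    std::cout << "switched: " << (current_thread_state == &scratch) << '\n';
}
```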

View File

@ -188,7 +188,7 @@ public:
/// Get the result in some form. This can only be done once! /// Get the result in some form. This can only be done once!
void get128(char * out) ALWAYS_INLINE void get128(char * out)
{ {
finalize(); finalize();
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__

View File

@ -137,25 +137,9 @@ void SystemLogBase<LogElement>::add(const LogElement & element)
template <typename LogElement> template <typename LogElement>
void SystemLogBase<LogElement>::flush(bool force) void SystemLogBase<LogElement>::flush(bool force)
{ {
uint64_t this_thread_requested_offset; uint64_t this_thread_requested_offset = notifyFlushImpl(force);
if (this_thread_requested_offset == uint64_t(-1))
{ return;
std::lock_guard lock(mutex);
if (is_shutdown)
return;
this_thread_requested_offset = queue_front_index + queue.size();
// Publish our flush request, taking care not to overwrite the requests
// made by other threads.
is_force_prepare_tables |= force;
requested_flush_up_to = std::max(requested_flush_up_to, this_thread_requested_offset);
flush_event.notify_all();
}
LOG_DEBUG(log, "Requested flush up to offset {}", this_thread_requested_offset);
// Use an arbitrary timeout to avoid endless waiting. 60s proved to be // Use an arbitrary timeout to avoid endless waiting. 60s proved to be
// too fast for our parallel functional tests, probably because they // too fast for our parallel functional tests, probably because they
@ -174,6 +158,33 @@ void SystemLogBase<LogElement>::flush(bool force)
} }
} }
template <typename LogElement>
void SystemLogBase<LogElement>::notifyFlush(bool force) { notifyFlushImpl(force); }
template <typename LogElement>
uint64_t SystemLogBase<LogElement>::notifyFlushImpl(bool force)
{
uint64_t this_thread_requested_offset;
{
std::lock_guard lock(mutex);
if (is_shutdown)
return uint64_t(-1);
this_thread_requested_offset = queue_front_index + queue.size();
// Publish our flush request, taking care not to overwrite the requests
// made by other threads.
is_force_prepare_tables |= force;
requested_flush_up_to = std::max(requested_flush_up_to, this_thread_requested_offset);
flush_event.notify_all();
}
LOG_DEBUG(log, "Requested flush up to offset {}", this_thread_requested_offset);
return this_thread_requested_offset;
}
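The refactoring above splits publishing a flush request from waiting for it. Below is a hedged stand-alone sketch of that split, with the system-log internals reduced to a counter and a condition variable; the names and types are simplified and are not the real SystemLogBase.

```cpp
#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <thread>

struct LogSketch
{
    std::mutex mutex;
    std::condition_variable flush_event;
    uint64_t queue_end = 10;            /// pretend 10 entries are queued
    uint64_t requested_flush_up_to = 0;
    uint64_t flushed_up_to = 0;

    uint64_t notifyFlush()              /// non-blocking: just publish the request
    {
        std::lock_guard lock(mutex);
        requested_flush_up_to = std::max(requested_flush_up_to, queue_end);
        flush_event.notify_all();
        return queue_end;
    }

    void flush()                        /// blocking: publish, then wait for the writer
    {
        uint64_t offset = notifyFlush();
        std::unique_lock lock(mutex);
        flush_event.wait(lock, [&] { return flushed_up_to >= offset; });
    }
};

int main()
{
    LogSketch log;
    std::thread writer([&]
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        std::lock_guard lock(log.mutex);
        log.flushed_up_to = log.queue_end;   /// pretend everything was written to disk
        log.flush_event.notify_all();
    });
    log.flush();
    std::cout << "flushed up to " << log.flushed_up_to << '\n';
    writer.join();
}
```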
#define INSTANTIATE_SYSTEM_LOG_BASE(ELEMENT) template class SystemLogBase<ELEMENT>; #define INSTANTIATE_SYSTEM_LOG_BASE(ELEMENT) template class SystemLogBase<ELEMENT>;
SYSTEM_LOG_ELEMENTS(INSTANTIATE_SYSTEM_LOG_BASE) SYSTEM_LOG_ELEMENTS(INSTANTIATE_SYSTEM_LOG_BASE)

View File

@ -87,9 +87,12 @@ public:
*/ */
void add(const LogElement & element); void add(const LogElement & element);
/// Flush data in the buffer to disk /// Flush data in the buffer to disk. Block the thread until the data is stored on disk.
void flush(bool force) override; void flush(bool force) override;
/// Non-blocking flush data in the buffer to disk.
void notifyFlush(bool force);
String getName() const override { return LogElement::name(); } String getName() const override { return LogElement::name(); }
static const char * getDefaultOrderBy() { return "event_date, event_time"; } static const char * getDefaultOrderBy() { return "event_date, event_time"; }
@ -112,6 +115,10 @@ protected:
uint64_t flushed_up_to = 0; uint64_t flushed_up_to = 0;
// Logged overflow message at this queue front index // Logged overflow message at this queue front index
uint64_t logged_queue_full_at_index = -1; uint64_t logged_queue_full_at_index = -1;
private:
uint64_t notifyFlushImpl(bool force);
}; };
} }

View File

@ -18,6 +18,20 @@
#include <unistd.h> #include <unistd.h>
#include <bit> #include <bit>
namespace
{
String formatZxid(int64_t zxid)
{
/// ZooKeeper prints zxid in hex and
String hex = getHexUIntLowercase(zxid);
/// without leading zeros
trimLeft(hex, '0');
return "0x" + hex;
}
}
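Here is a self-contained equivalent of `formatZxid` for readers who want to try it. `getHexUIntLowercase` and `trimLeft` are ClickHouse helpers, so the sketch uses plain `snprintf` instead; the output differs from the original only for zxid 0.

```cpp
#include <cstdint>
#include <cstdio>
#include <string>

/// ZooKeeper reports zxids as lowercase hex without leading zeros, e.g. 4660 -> "0x1234".
std::string formatZxidSketch(int64_t zxid)
{
    char buf[32];
    std::snprintf(buf, sizeof(buf), "%llx", static_cast<unsigned long long>(zxid));
    return std::string("0x") + buf;
}

int main()
{
    std::printf("%s\n", formatZxidSketch(4660).c_str());   /// prints 0x1234
}
```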
namespace DB namespace DB
{ {
@ -348,7 +362,7 @@ String ServerStatCommand::run()
write("Sent", toString(stats.getPacketsSent())); write("Sent", toString(stats.getPacketsSent()));
write("Connections", toString(keeper_info.alive_connections_count)); write("Connections", toString(keeper_info.alive_connections_count));
write("Outstanding", toString(keeper_info.outstanding_requests_count)); write("Outstanding", toString(keeper_info.outstanding_requests_count));
write("Zxid", toString(keeper_info.last_zxid)); write("Zxid", formatZxid(keeper_info.last_zxid));
write("Mode", keeper_info.getRole()); write("Mode", keeper_info.getRole());
write("Node count", toString(keeper_info.total_nodes_count)); write("Node count", toString(keeper_info.total_nodes_count));
@ -381,7 +395,7 @@ String StatCommand::run()
write("Sent", toString(stats.getPacketsSent())); write("Sent", toString(stats.getPacketsSent()));
write("Connections", toString(keeper_info.alive_connections_count)); write("Connections", toString(keeper_info.alive_connections_count));
write("Outstanding", toString(keeper_info.outstanding_requests_count)); write("Outstanding", toString(keeper_info.outstanding_requests_count));
write("Zxid", toString(keeper_info.last_zxid)); write("Zxid", formatZxid(keeper_info.last_zxid));
write("Mode", keeper_info.getRole()); write("Mode", keeper_info.getRole());
write("Node count", toString(keeper_info.total_nodes_count)); write("Node count", toString(keeper_info.total_nodes_count));

View File

@ -48,11 +48,7 @@ inline auto scaleMultiplier(UInt32 scale)
/** Components of DecimalX value: /** Components of DecimalX value:
* whole - represents whole part of decimal, can be negative or positive. * whole - represents whole part of decimal, can be negative or positive.
* fractional - for fractional part of decimal. * fractional - for fractional part of decimal, always positive.
*
* 0.123 represents 0 / 0.123
* -0.123 represents 0 / -0.123
* -1.123 represents -1 / 0.123
*/ */
template <typename DecimalType> template <typename DecimalType>
struct DecimalComponents struct DecimalComponents

View File

@ -629,7 +629,7 @@ class IColumn;
M(Bool, database_replicated_allow_only_replicated_engine, false, "Allow to create only Replicated tables in database with engine Replicated", 0) \ M(Bool, database_replicated_allow_only_replicated_engine, false, "Allow to create only Replicated tables in database with engine Replicated", 0) \
M(Bool, database_replicated_allow_replicated_engine_arguments, true, "Allow to create only Replicated tables in database with engine Replicated with explicit arguments", 0) \ M(Bool, database_replicated_allow_replicated_engine_arguments, true, "Allow to create only Replicated tables in database with engine Replicated with explicit arguments", 0) \
M(DistributedDDLOutputMode, distributed_ddl_output_mode, DistributedDDLOutputMode::THROW, "Format of distributed DDL query result", 0) \ M(DistributedDDLOutputMode, distributed_ddl_output_mode, DistributedDDLOutputMode::THROW, "Format of distributed DDL query result", 0) \
M(UInt64, distributed_ddl_entry_format_version, 3, "Compatibility version of distributed DDL (ON CLUSTER) queries", 0) \ M(UInt64, distributed_ddl_entry_format_version, 5, "Compatibility version of distributed DDL (ON CLUSTER) queries", 0) \
\ \
M(UInt64, external_storage_max_read_rows, 0, "Limit maximum number of rows when table with external engine should flush history data. Now supported only for MySQL table engine, database engine, dictionary and MaterializedMySQL. If equal to 0, this setting is disabled", 0) \ M(UInt64, external_storage_max_read_rows, 0, "Limit maximum number of rows when table with external engine should flush history data. Now supported only for MySQL table engine, database engine, dictionary and MaterializedMySQL. If equal to 0, this setting is disabled", 0) \
M(UInt64, external_storage_max_read_bytes, 0, "Limit maximum number of bytes when table with external engine should flush history data. Now supported only for MySQL table engine, database engine, dictionary and MaterializedMySQL. If equal to 0, this setting is disabled", 0) \ M(UInt64, external_storage_max_read_bytes, 0, "Limit maximum number of bytes when table with external engine should flush history data. Now supported only for MySQL table engine, database engine, dictionary and MaterializedMySQL. If equal to 0, this setting is disabled", 0) \
@ -906,6 +906,7 @@ class IColumn;
\ \
M(DateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic', 'best_effort' and 'best_effort_us'.", 0) \ M(DateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic', 'best_effort' and 'best_effort_us'.", 0) \
M(DateTimeOutputFormat, date_time_output_format, FormatSettings::DateTimeOutputFormat::Simple, "Method to write DateTime to text output. Possible values: 'simple', 'iso', 'unix_timestamp'.", 0) \ M(DateTimeOutputFormat, date_time_output_format, FormatSettings::DateTimeOutputFormat::Simple, "Method to write DateTime to text output. Possible values: 'simple', 'iso', 'unix_timestamp'.", 0) \
M(IntervalOutputFormat, interval_output_format, FormatSettings::IntervalOutputFormat::Numeric, "Textual representation of Interval. Possible values: 'kusto', 'numeric'.", 0) \
\ \
M(Bool, input_format_ipv4_default_on_conversion_error, false, "Deserialization of IPv4 will use default values instead of throwing exception on conversion error.", 0) \ M(Bool, input_format_ipv4_default_on_conversion_error, false, "Deserialization of IPv4 will use default values instead of throwing exception on conversion error.", 0) \
M(Bool, input_format_ipv6_default_on_conversion_error, false, "Deserialization of IPV6 will use default values instead of throwing exception on conversion error.", 0) \ M(Bool, input_format_ipv6_default_on_conversion_error, false, "Deserialization of IPV6 will use default values instead of throwing exception on conversion error.", 0) \

View File

@ -79,6 +79,10 @@ IMPLEMENT_SETTING_ENUM(DateTimeOutputFormat, ErrorCodes::BAD_ARGUMENTS,
{"iso", FormatSettings::DateTimeOutputFormat::ISO}, {"iso", FormatSettings::DateTimeOutputFormat::ISO},
{"unix_timestamp", FormatSettings::DateTimeOutputFormat::UnixTimestamp}}) {"unix_timestamp", FormatSettings::DateTimeOutputFormat::UnixTimestamp}})
IMPLEMENT_SETTING_ENUM(IntervalOutputFormat, ErrorCodes::BAD_ARGUMENTS,
{{"kusto", FormatSettings::IntervalOutputFormat::Kusto},
{"numeric", FormatSettings::IntervalOutputFormat::Numeric}})
IMPLEMENT_SETTING_AUTO_ENUM(LogsLevel, ErrorCodes::BAD_ARGUMENTS) IMPLEMENT_SETTING_AUTO_ENUM(LogsLevel, ErrorCodes::BAD_ARGUMENTS)
IMPLEMENT_SETTING_AUTO_ENUM(LogQueriesType, ErrorCodes::BAD_ARGUMENTS) IMPLEMENT_SETTING_AUTO_ENUM(LogQueriesType, ErrorCodes::BAD_ARGUMENTS)

View File

@ -72,6 +72,8 @@ DECLARE_SETTING_ENUM_WITH_RENAME(DateTimeInputFormat, FormatSettings::DateTimeIn
DECLARE_SETTING_ENUM_WITH_RENAME(DateTimeOutputFormat, FormatSettings::DateTimeOutputFormat) DECLARE_SETTING_ENUM_WITH_RENAME(DateTimeOutputFormat, FormatSettings::DateTimeOutputFormat)
DECLARE_SETTING_ENUM_WITH_RENAME(IntervalOutputFormat, FormatSettings::IntervalOutputFormat)
DECLARE_SETTING_ENUM_WITH_RENAME(ParquetVersion, FormatSettings::ParquetVersion) DECLARE_SETTING_ENUM_WITH_RENAME(ParquetVersion, FormatSettings::ParquetVersion)
enum class LogsLevel enum class LogsLevel

View File

@ -173,6 +173,9 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
/// This coarse method of synchronization is perfectly ok for fatal signals. /// This coarse method of synchronization is perfectly ok for fatal signals.
sleepForSeconds(1); sleepForSeconds(1);
} }
/// Wait for all logs flush operations
sleepForSeconds(3);
call_default_signal_handler(sig); call_default_signal_handler(sig);
} }

View File

@ -1,16 +1,18 @@
#include <DataTypes/DataTypeInterval.h> #include <DataTypes/DataTypeInterval.h>
#include <DataTypes/DataTypeFactory.h> #include <DataTypes/DataTypeFactory.h>
#include <DataTypes/Serializations/SerializationInterval.h>
namespace DB namespace DB
{ {
SerializationPtr DataTypeInterval::doGetDefaultSerialization() const { return std::make_shared<SerializationInterval>(kind); }
bool DataTypeInterval::equals(const IDataType & rhs) const bool DataTypeInterval::equals(const IDataType & rhs) const
{ {
return typeid(rhs) == typeid(*this) && kind == static_cast<const DataTypeInterval &>(rhs).kind; return typeid(rhs) == typeid(*this) && kind == static_cast<const DataTypeInterval &>(rhs).kind;
} }
void registerDataTypeInterval(DataTypeFactory & factory) void registerDataTypeInterval(DataTypeFactory & factory)
{ {
factory.registerSimpleDataType("IntervalNanosecond", [] { return DataTypePtr(std::make_shared<DataTypeInterval>(IntervalKind::Nanosecond)); }); factory.registerSimpleDataType("IntervalNanosecond", [] { return DataTypePtr(std::make_shared<DataTypeInterval>(IntervalKind::Nanosecond)); });

View File

@ -24,6 +24,7 @@ public:
explicit DataTypeInterval(IntervalKind kind_) : kind(kind_) {} explicit DataTypeInterval(IntervalKind kind_) : kind(kind_) {}
SerializationPtr doGetDefaultSerialization() const override;
std::string doGetName() const override { return fmt::format("Interval{}", kind.toString()); } std::string doGetName() const override { return fmt::format("Interval{}", kind.toString()); }
const char * getFamilyName() const override { return "Interval"; } const char * getFamilyName() const override { return "Interval"; }
String getSQLCompatibleName() const override { return "TEXT"; } String getSQLCompatibleName() const override { return "TEXT"; }

View File

@ -0,0 +1,209 @@
#include "SerializationInterval.h"
#include <Columns/ColumnsNumber.h>
#include <IO/WriteBuffer.h>
#include <Parsers/Kusto/Formatters.h>
namespace DB
{
using ColumnInterval = DataTypeInterval::ColumnType;
namespace ErrorCodes
{
extern const int ILLEGAL_COLUMN;
extern const int NOT_IMPLEMENTED;
}
void SerializationKustoInterval::serializeText(
const IColumn & column, const size_t row, WriteBuffer & ostr, const FormatSettings &) const
{
const auto * interval_column = checkAndGetColumn<ColumnInterval>(column);
if (!interval_column)
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Expected column of underlying type of Interval");
const auto & value = interval_column->getData()[row];
const auto ticks = kind.toAvgNanoseconds() * value / 100;
const auto interval_as_string = formatKQLTimespan(ticks);
ostr.write(interval_as_string.c_str(), interval_as_string.length());
}
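The `ticks` computation above relies on Kusto timespans being counted in 100 ns ticks. A quick arithmetic check of that conversion follows; the values are chosen for illustration only.

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    const int64_t nanoseconds_per_second = 1'000'000'000;
    const int64_t value = 90;                                     /// an interval of 90 seconds
    const int64_t ticks = value * nanoseconds_per_second / 100;   /// one tick is 100 ns
    std::printf("%lld ticks\n", static_cast<long long>(ticks));   /// prints 900000000
}
```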
void SerializationKustoInterval::deserializeText(
[[maybe_unused]] IColumn & column,
[[maybe_unused]] ReadBuffer & istr,
[[maybe_unused]] const FormatSettings & settings,
[[maybe_unused]] const bool whole) const
{
throw Exception(
ErrorCodes::NOT_IMPLEMENTED, "Deserialization is not implemented for {}", kind.toNameOfFunctionToIntervalDataType());
}
SerializationInterval::SerializationInterval(IntervalKind interval_kind_) : interval_kind(std::move(interval_kind_))
{
}
void SerializationInterval::deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings & settings) const
{
dispatch(
static_cast<void (ISerialization::*)(Field &, ReadBuffer &, const FormatSettings &) const>(&ISerialization::deserializeBinary),
settings.interval.output_format,
field,
istr,
settings);
}
void SerializationInterval::deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
dispatch(
static_cast<void (ISerialization::*)(IColumn &, ReadBuffer &, const FormatSettings &) const>(&ISerialization::deserializeBinary),
settings.interval.output_format,
column,
istr,
settings);
}
void SerializationInterval::deserializeBinaryBulk(IColumn & column, ReadBuffer & istr, size_t limit, double avg_value_size_hint) const
{
dispatch(
&ISerialization::deserializeBinaryBulk, FormatSettings::IntervalOutputFormat::Numeric, column, istr, limit, avg_value_size_hint);
}
void SerializationInterval::deserializeBinaryBulkStatePrefix(
DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state) const
{
dispatch(&ISerialization::deserializeBinaryBulkStatePrefix, FormatSettings::IntervalOutputFormat::Numeric, settings, state);
}
void SerializationInterval::deserializeBinaryBulkWithMultipleStreams(
ColumnPtr & column,
size_t limit,
DeserializeBinaryBulkSettings & settings,
DeserializeBinaryBulkStatePtr & state,
SubstreamsCache * cache) const
{
dispatch(
&ISerialization::deserializeBinaryBulkWithMultipleStreams,
FormatSettings::IntervalOutputFormat::Numeric,
column,
limit,
settings,
state,
cache);
}
void SerializationInterval::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
dispatch(&ISerialization::deserializeTextCSV, settings.interval.output_format, column, istr, settings);
}
void SerializationInterval::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
dispatch(&ISerialization::deserializeTextEscaped, settings.interval.output_format, column, istr, settings);
}
void SerializationInterval::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
dispatch(&ISerialization::deserializeTextJSON, settings.interval.output_format, column, istr, settings);
}
void SerializationInterval::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
dispatch(&ISerialization::deserializeTextQuoted, settings.interval.output_format, column, istr, settings);
}
void SerializationInterval::deserializeTextRaw(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
dispatch(&ISerialization::deserializeTextRaw, settings.interval.output_format, column, istr, settings);
}
void SerializationInterval::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
dispatch(&ISerialization::deserializeWholeText, settings.interval.output_format, column, istr, settings);
}
void SerializationInterval::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const
{
dispatch(
static_cast<void (ISerialization::*)(const Field &, WriteBuffer &, const FormatSettings &) const>(&ISerialization::serializeBinary),
settings.interval.output_format,
field,
ostr,
settings);
}
void SerializationInterval::serializeBinary(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const
{
dispatch(
static_cast<void (ISerialization::*)(const IColumn &, size_t, WriteBuffer &, const FormatSettings &) const>(
&ISerialization::serializeBinary),
settings.interval.output_format,
column,
row,
ostr,
settings);
}
void SerializationInterval::serializeBinaryBulk(const IColumn & column, WriteBuffer & ostr, size_t offset, size_t limit) const
{
dispatch(&ISerialization::serializeBinaryBulk, FormatSettings::IntervalOutputFormat::Numeric, column, ostr, offset, limit);
}
void SerializationInterval::serializeBinaryBulkStatePrefix(
const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const
{
dispatch(&ISerialization::serializeBinaryBulkStatePrefix, FormatSettings::IntervalOutputFormat::Numeric, column, settings, state);
}
void SerializationInterval::serializeBinaryBulkStateSuffix(
SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const
{
dispatch(&ISerialization::serializeBinaryBulkStateSuffix, FormatSettings::IntervalOutputFormat::Numeric, settings, state);
}
void SerializationInterval::serializeBinaryBulkWithMultipleStreams(
const IColumn & column, size_t offset, size_t limit, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const
{
dispatch(
&ISerialization::serializeBinaryBulkWithMultipleStreams,
FormatSettings::IntervalOutputFormat::Numeric,
column,
offset,
limit,
settings,
state);
}
void SerializationInterval::serializeText(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const
{
dispatch(&ISerialization::serializeText, settings.interval.output_format, column, row, ostr, settings);
}
void SerializationInterval::serializeTextCSV(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const
{
dispatch(&ISerialization::serializeTextCSV, settings.interval.output_format, column, row, ostr, settings);
}
void SerializationInterval::serializeTextEscaped(
const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const
{
dispatch(&ISerialization::serializeTextEscaped, settings.interval.output_format, column, row, ostr, settings);
}
void SerializationInterval::serializeTextJSON(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const
{
dispatch(&ISerialization::serializeTextJSON, settings.interval.output_format, column, row, ostr, settings);
}
void SerializationInterval::serializeTextQuoted(
const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const
{
dispatch(&ISerialization::serializeTextQuoted, settings.interval.output_format, column, row, ostr, settings);
}
void SerializationInterval::serializeTextRaw(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const
{
dispatch(&ISerialization::serializeTextRaw, settings.interval.output_format, column, row, ostr, settings);
}
}

View File

@ -0,0 +1,90 @@
#pragma once
#include "ISerialization.h"
#include "SerializationCustomSimpleText.h"
#include <DataTypes/DataTypeInterval.h>
#include <Formats/FormatSettings.h>
#include <Common/IntervalKind.h>
namespace DB
{
namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
}
class SerializationKustoInterval : public SerializationCustomSimpleText
{
public:
explicit SerializationKustoInterval(IntervalKind kind_) : SerializationCustomSimpleText(nullptr), kind(kind_) { }
void serializeText(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
void deserializeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings, bool whole) const override;
private:
IntervalKind kind;
};
class SerializationInterval : public ISerialization
{
public:
explicit SerializationInterval(IntervalKind kind_);
void deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings & settings) const override;
void deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
void deserializeBinaryBulk(IColumn & column, ReadBuffer & istr, size_t limit, double avg_value_size_hint) const override;
void deserializeBinaryBulkStatePrefix(DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state) const override;
void deserializeBinaryBulkWithMultipleStreams(
ColumnPtr & column,
size_t limit,
DeserializeBinaryBulkSettings & settings,
DeserializeBinaryBulkStatePtr & state,
SubstreamsCache * cache) const override;
void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
void deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
void deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
void deserializeTextRaw(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
void deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;
void serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const override;
void serializeBinary(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
void serializeBinaryBulk(const IColumn & column, WriteBuffer & ostr, size_t offset, size_t limit) const override;
void serializeBinaryBulkStatePrefix(
const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override;
void serializeBinaryBulkStateSuffix(SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override;
void serializeBinaryBulkWithMultipleStreams(
const IColumn & column,
size_t offset,
size_t limit,
SerializeBinaryBulkSettings & settings,
SerializeBinaryBulkStatePtr & state) const override;
void serializeText(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
void serializeTextCSV(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
void serializeTextEscaped(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
void serializeTextJSON(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
void serializeTextQuoted(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
void serializeTextRaw(const IColumn & column, size_t row, WriteBuffer & ostr, const FormatSettings & settings) const override;
private:
template <typename... Args, std::invocable<const ISerialization *, Args...> Method>
void dispatch(const Method method, const FormatSettings::IntervalOutputFormat format, Args &&... args) const
{
const ISerialization * serialization = nullptr;
if (format == FormatSettings::IntervalOutputFormat::Kusto)
serialization = &serialization_kusto;
else if (format == FormatSettings::IntervalOutputFormat::Numeric)
serialization = &serialization_numeric;
if (!serialization)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Option {} is not implemented", magic_enum::enum_name(format));
(serialization->*method)(std::forward<Args>(args)...);
}
IntervalKind interval_kind;
SerializationKustoInterval serialization_kusto{interval_kind};
SerializationNumber<typename DataTypeInterval::FieldType> serialization_numeric;
};
}
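The `dispatch` helper above forwards a pointer-to-member-function to one of two delegate serializations depending on the output format. Below is a stand-alone sketch of that pattern with invented types; it is not the real ISerialization API.

```cpp
#include <iostream>
#include <utility>

struct Writer
{
    const char * label;
    void writeText(int value) const { std::cout << label << ": " << value << '\n'; }
};

enum class Format { Kusto, Numeric };

struct Dispatcher
{
    Writer kusto{"kusto"};
    Writer numeric{"numeric"};

    /// Forward the member-function call to the delegate selected by the format.
    template <typename... Args>
    void dispatch(void (Writer::*method)(Args...) const, Format format, Args &&... args) const
    {
        const Writer * target = (format == Format::Kusto) ? &kusto : &numeric;
        (target->*method)(std::forward<Args>(args)...);
    }
};

int main()
{
    Dispatcher d;
    d.dispatch(&Writer::writeText, Format::Kusto, 42);    /// prints "kusto: 42"
    d.dispatch(&Writer::writeText, Format::Numeric, 7);   /// prints "numeric: 7"
}
```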

View File

@ -122,6 +122,14 @@ void registerDiskS3(DiskFactory & factory, bool global_skip_access_check)
auto client = getClient(config, config_prefix, context, *settings); auto client = getClient(config, config_prefix, context, *settings);
if (type == "s3_plain") if (type == "s3_plain")
{ {
/// send_metadata changes the filenames (includes revision), while
/// s3_plain do not care about this, and expect that the file name
/// will not be changed.
///
/// And besides, send_metadata does not make sense for s3_plain.
if (config.getBool(config_prefix + ".send_metadata", false))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "s3_plain does not support send_metadata");
s3_storage = std::make_shared<S3PlainObjectStorage>(std::move(client), std::move(settings), uri.version_id, s3_capabilities, uri.bucket, uri.endpoint); s3_storage = std::make_shared<S3PlainObjectStorage>(std::move(client), std::move(settings), uri.version_id, s3_capabilities, uri.bucket, uri.endpoint);
metadata_storage = std::make_shared<MetadataStorageFromPlainObjectStorage>(s3_storage, uri.key); metadata_storage = std::make_shared<MetadataStorageFromPlainObjectStorage>(s3_storage, uri.key);
} }

View File

@ -87,6 +87,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
format_settings.custom.skip_trailing_empty_lines = settings.input_format_custom_skip_trailing_empty_lines; format_settings.custom.skip_trailing_empty_lines = settings.input_format_custom_skip_trailing_empty_lines;
format_settings.date_time_input_format = settings.date_time_input_format; format_settings.date_time_input_format = settings.date_time_input_format;
format_settings.date_time_output_format = settings.date_time_output_format; format_settings.date_time_output_format = settings.date_time_output_format;
format_settings.interval.output_format = settings.interval_output_format;
format_settings.input_format_ipv4_default_on_conversion_error = settings.input_format_ipv4_default_on_conversion_error; format_settings.input_format_ipv4_default_on_conversion_error = settings.input_format_ipv4_default_on_conversion_error;
format_settings.input_format_ipv6_default_on_conversion_error = settings.input_format_ipv6_default_on_conversion_error; format_settings.input_format_ipv6_default_on_conversion_error = settings.input_format_ipv6_default_on_conversion_error;
format_settings.bool_true_representation = settings.bool_true_representation; format_settings.bool_true_representation = settings.bool_true_representation;

View File

@ -77,6 +77,17 @@ struct FormatSettings
DateTimeOutputFormat date_time_output_format = DateTimeOutputFormat::Simple; DateTimeOutputFormat date_time_output_format = DateTimeOutputFormat::Simple;
enum class IntervalOutputFormat
{
Kusto,
Numeric
};
struct
{
IntervalOutputFormat output_format = IntervalOutputFormat::Numeric;
} interval;
bool input_format_ipv4_default_on_conversion_error = false; bool input_format_ipv4_default_on_conversion_error = false;
bool input_format_ipv6_default_on_conversion_error = false; bool input_format_ipv6_default_on_conversion_error = false;

View File

@ -149,6 +149,9 @@ Block NativeReader::read()
rows = index_block_it->num_rows; rows = index_block_it->num_rows;
} }
if (columns == 0 && !header && rows != 0)
throw Exception(ErrorCodes::INCORRECT_DATA, "Zero columns but {} rows in Native format.", rows);
for (size_t i = 0; i < columns; ++i) for (size_t i = 0; i < columns; ++i)
{ {
if (use_index) if (use_index)
@ -290,6 +293,9 @@ Block NativeReader::read()
res.swap(tmp_res); res.swap(tmp_res);
} }
if (res.rows() != rows)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Row count mismatch after deserialization, got: {}, expected: {}", res.rows(), rows);
return res; return res;
} }
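The two checks added above guard against malformed Native input. A simplified stand-alone sketch of the same checks follows, with `BlockSketch` standing in for `DB::Block` and the actual deserialization replaced by a vector fill.

```cpp
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

struct BlockSketch
{
    std::vector<std::vector<int>> columns;
    size_t rows() const { return columns.empty() ? 0 : columns.front().size(); }
};

BlockSketch readBlock(size_t columns, size_t rows)
{
    if (columns == 0 && rows != 0)
        throw std::runtime_error("Zero columns but " + std::to_string(rows) + " rows in Native format.");

    BlockSketch res;
    res.columns.assign(columns, std::vector<int>(rows));   /// pretend we deserialized the data

    if (res.rows() != rows)
        throw std::logic_error("Row count mismatch after deserialization");
    return res;
}

int main()
{
    readBlock(3, 5);                     /// fine
    try
    {
        readBlock(0, 5);                 /// rejected by the first check
    }
    catch (const std::exception & e)
    {
        std::cout << e.what() << '\n';   /// Zero columns but 5 rows in Native format.
    }
}
```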

View File

@ -19,9 +19,6 @@
namespace DB namespace DB
{ {
static constexpr auto microsecond_multiplier = 1000000;
static constexpr auto millisecond_multiplier = 1000;
namespace ErrorCodes namespace ErrorCodes
{ {
extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int ILLEGAL_TYPE_OF_ARGUMENT;
@ -1380,36 +1377,6 @@ struct ToRelativeSecondNumImpl
using FactorTransform = ZeroTransform; using FactorTransform = ZeroTransform;
}; };
template <Int64 scale_multiplier>
struct ToRelativeSubsecondNumImpl
{
static constexpr auto name = "toRelativeSubsecondNumImpl";
static inline Int64 execute(const DateTime64 & t, DateTime64::NativeType scale, const DateLUTImpl &)
{
static_assert(scale_multiplier == 1000 || scale_multiplier == 1000000);
if (scale == scale_multiplier)
return t.value;
if (scale > scale_multiplier)
return t.value / (scale / scale_multiplier);
return t.value * (scale_multiplier / scale);
}
static inline Int64 execute(UInt32 t, const DateLUTImpl &)
{
return t * scale_multiplier;
}
static inline Int64 execute(Int32 d, const DateLUTImpl & time_zone)
{
return static_cast<Int64>(time_zone.fromDayNum(ExtendedDayNum(d))) * scale_multiplier;
}
static inline Int64 execute(UInt16 d, const DateLUTImpl & time_zone)
{
return static_cast<Int64>(time_zone.fromDayNum(DayNum(d)) * scale_multiplier);
}
using FactorTransform = ZeroTransform;
};
struct ToYYYYMMImpl struct ToYYYYMMImpl
{ {
static constexpr auto name = "toYYYYMM"; static constexpr auto name = "toYYYYMM";
@ -1509,47 +1476,25 @@ struct ToYYYYMMDDhhmmssImpl
using FactorTransform = ZeroTransform; using FactorTransform = ZeroTransform;
}; };
struct DateTimeComponentsWithFractionalPart : public DateLUTImpl::DateTimeComponents
{
UInt16 millisecond;
UInt16 microsecond;
};
struct ToDateTimeComponentsImpl struct ToDateTimeComponentsImpl
{ {
static constexpr auto name = "toDateTimeComponents"; static constexpr auto name = "toDateTimeComponents";
static inline DateTimeComponentsWithFractionalPart execute(const DateTime64 & t, DateTime64::NativeType scale_multiplier, const DateLUTImpl & time_zone) static inline DateLUTImpl::DateTimeComponents execute(Int64 t, const DateLUTImpl & time_zone)
{ {
auto components = DecimalUtils::splitWithScaleMultiplier(t, scale_multiplier); return time_zone.toDateTimeComponents(t);
if (t.value < 0 && components.fractional)
{
components.fractional = scale_multiplier + (components.whole ? Int64(-1) : Int64(1)) * components.fractional;
--components.whole;
}
Int64 fractional = components.fractional;
if (scale_multiplier > microsecond_multiplier)
fractional = fractional / (scale_multiplier / microsecond_multiplier);
else if (scale_multiplier < microsecond_multiplier)
fractional = fractional * (microsecond_multiplier / scale_multiplier);
constexpr Int64 divider = microsecond_multiplier/ millisecond_multiplier;
UInt16 millisecond = static_cast<UInt16>(fractional / divider);
UInt16 microsecond = static_cast<UInt16>(fractional % divider);
return DateTimeComponentsWithFractionalPart{time_zone.toDateTimeComponents(components.whole), millisecond, microsecond};
} }
static inline DateTimeComponentsWithFractionalPart execute(UInt32 t, const DateLUTImpl & time_zone) static inline DateLUTImpl::DateTimeComponents execute(UInt32 t, const DateLUTImpl & time_zone)
{ {
return DateTimeComponentsWithFractionalPart{time_zone.toDateTimeComponents(static_cast<DateLUTImpl::Time>(t)), 0, 0}; return time_zone.toDateTimeComponents(static_cast<DateLUTImpl::Time>(t));
} }
static inline DateTimeComponentsWithFractionalPart execute(Int32 d, const DateLUTImpl & time_zone) static inline DateLUTImpl::DateTimeComponents execute(Int32 d, const DateLUTImpl & time_zone)
{ {
return DateTimeComponentsWithFractionalPart{time_zone.toDateTimeComponents(ExtendedDayNum(d)), 0, 0}; return time_zone.toDateTimeComponents(ExtendedDayNum(d));
} }
static inline DateTimeComponentsWithFractionalPart execute(UInt16 d, const DateLUTImpl & time_zone) static inline DateLUTImpl::DateTimeComponents execute(UInt16 d, const DateLUTImpl & time_zone)
{ {
return DateTimeComponentsWithFractionalPart{time_zone.toDateTimeComponents(DayNum(d)), 0, 0}; return time_zone.toDateTimeComponents(DayNum(d));
} }
using FactorTransform = ZeroTransform; using FactorTransform = ZeroTransform;

View File

@ -5,7 +5,7 @@
namespace DB namespace DB
{ {
/** Transform-type wrapper for DateTime64, simplifies DateTime64 support for given Transform. /** Tansform-type wrapper for DateTime64, simplifies DateTime64 support for given Transform.
* *
* Depending on what overloads of Transform::execute() are available, when called with DateTime64 value, * Depending on what overloads of Transform::execute() are available, when called with DateTime64 value,
* invokes Transform::execute() with either: * invokes Transform::execute() with either:
@ -80,10 +80,7 @@ public:
} }
else else
{ {
auto components = DecimalUtils::splitWithScaleMultiplier(t, scale_multiplier); const auto components = DecimalUtils::splitWithScaleMultiplier(t, scale_multiplier);
if (t.value < 0 && components.fractional)
--components.whole;
return wrapped_transform.execute(static_cast<Int64>(components.whole), std::forward<Args>(args)...); return wrapped_transform.execute(static_cast<Int64>(components.whole), std::forward<Args>(args)...);
} }
} }

View File

@ -174,13 +174,12 @@ public:
{ {
auto res = static_cast<Int64>(transform_y.execute(y, timezone_y)) auto res = static_cast<Int64>(transform_y.execute(y, timezone_y))
- static_cast<Int64>(transform_x.execute(x, timezone_x)); - static_cast<Int64>(transform_x.execute(x, timezone_x));
DateTimeComponentsWithFractionalPart a_comp; DateLUTImpl::DateTimeComponents a_comp;
DateTimeComponentsWithFractionalPart b_comp; DateLUTImpl::DateTimeComponents b_comp;
Int64 adjust_value; Int64 adjust_value;
auto x_microseconds = TransformDateTime64<ToRelativeSubsecondNumImpl<microsecond_multiplier>>(transform_x.getScaleMultiplier()).execute(x, timezone_x); auto x_seconds = TransformDateTime64<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(transform_x.getScaleMultiplier()).execute(x, timezone_x);
auto y_microseconds = TransformDateTime64<ToRelativeSubsecondNumImpl<microsecond_multiplier>>(transform_y.getScaleMultiplier()).execute(y, timezone_y); auto y_seconds = TransformDateTime64<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(transform_y.getScaleMultiplier()).execute(y, timezone_y);
if (x_seconds <= y_seconds)
if (x_microseconds <= y_microseconds)
{ {
a_comp = TransformDateTime64<ToDateTimeComponentsImpl>(transform_x.getScaleMultiplier()).execute(x, timezone_x); a_comp = TransformDateTime64<ToDateTimeComponentsImpl>(transform_x.getScaleMultiplier()).execute(x, timezone_x);
b_comp = TransformDateTime64<ToDateTimeComponentsImpl>(transform_y.getScaleMultiplier()).execute(y, timezone_y); b_comp = TransformDateTime64<ToDateTimeComponentsImpl>(transform_y.getScaleMultiplier()).execute(y, timezone_y);
@ -193,16 +192,14 @@ public:
adjust_value = 1; adjust_value = 1;
} }
if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeYearNumImpl<ResultPrecision::Extended>>>)
{
if ((a_comp.date.month > b_comp.date.month)
|| ((a_comp.date.month == b_comp.date.month) && ((a_comp.date.day > b_comp.date.day)
|| ((a_comp.date.day == b_comp.date.day) && ((a_comp.time.hour > b_comp.time.hour)
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|| ((a_comp.time.minute == b_comp.time.minute) && ((a_comp.time.second > b_comp.time.second)
|| ((a_comp.time.second == b_comp.time.second) && ((a_comp.millisecond > b_comp.millisecond)
|| ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))))))))))))
|| ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second))))
)))))
res += adjust_value;
}
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeQuarterNumImpl<ResultPrecision::Extended>>>) else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeQuarterNumImpl<ResultPrecision::Extended>>>)
@ -213,9 +210,8 @@ public:
|| ((x_month_in_quarter == y_month_in_quarter) && ((a_comp.date.day > b_comp.date.day) || ((x_month_in_quarter == y_month_in_quarter) && ((a_comp.date.day > b_comp.date.day)
|| ((a_comp.date.day == b_comp.date.day) && ((a_comp.time.hour > b_comp.time.hour) || ((a_comp.date.day == b_comp.date.day) && ((a_comp.time.hour > b_comp.time.hour)
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute) || ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|| ((a_comp.time.minute == b_comp.time.minute) && ((a_comp.time.second > b_comp.time.second) || ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second))))
|| ((a_comp.time.second == b_comp.time.second) && ((a_comp.millisecond > b_comp.millisecond) )))))
|| ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))))))))))))
res += adjust_value; res += adjust_value;
} }
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeMonthNumImpl<ResultPrecision::Extended>>>) else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeMonthNumImpl<ResultPrecision::Extended>>>)
@ -223,9 +219,8 @@ public:
if ((a_comp.date.day > b_comp.date.day) if ((a_comp.date.day > b_comp.date.day)
|| ((a_comp.date.day == b_comp.date.day) && ((a_comp.time.hour > b_comp.time.hour) || ((a_comp.date.day == b_comp.date.day) && ((a_comp.time.hour > b_comp.time.hour)
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute) || ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|| ((a_comp.time.minute == b_comp.time.minute) && ((a_comp.time.second > b_comp.time.second) || ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second))))
|| ((a_comp.time.second == b_comp.time.second) && ((a_comp.millisecond > b_comp.millisecond) )))
|| ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))))))))))
res += adjust_value; res += adjust_value;
} }
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeWeekNumImpl<ResultPrecision::Extended>>>) else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeWeekNumImpl<ResultPrecision::Extended>>>)
@ -235,44 +230,25 @@ public:
if ((x_day_of_week > y_day_of_week) if ((x_day_of_week > y_day_of_week)
|| ((x_day_of_week == y_day_of_week) && (a_comp.time.hour > b_comp.time.hour)) || ((x_day_of_week == y_day_of_week) && (a_comp.time.hour > b_comp.time.hour))
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute) || ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|| ((a_comp.time.minute == b_comp.time.minute) && ((a_comp.time.second > b_comp.time.second) || ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second)))))
|| ((a_comp.time.second == b_comp.time.second) && ((a_comp.millisecond > b_comp.millisecond)
|| ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))))))))
res += adjust_value; res += adjust_value;
} }
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeDayNumImpl<ResultPrecision::Extended>>>) else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeDayNumImpl<ResultPrecision::Extended>>>)
{ {
if ((a_comp.time.hour > b_comp.time.hour) if ((a_comp.time.hour > b_comp.time.hour)
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute) || ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|| ((a_comp.time.minute == b_comp.time.minute) && ((a_comp.time.second > b_comp.time.second) || ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second)))))
|| ((a_comp.time.second == b_comp.time.second) && ((a_comp.millisecond > b_comp.millisecond)
|| ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))))))))
res += adjust_value; res += adjust_value;
} }
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeHourNumImpl<ResultPrecision::Extended>>>) else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeHourNumImpl<ResultPrecision::Extended>>>)
{ {
if ((a_comp.time.minute > b_comp.time.minute) if ((a_comp.time.minute > b_comp.time.minute)
|| ((a_comp.time.minute == b_comp.time.minute) && ((a_comp.time.second > b_comp.time.second) || ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second)))
|| ((a_comp.time.second == b_comp.time.second) && ((a_comp.millisecond > b_comp.millisecond)
|| ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))))))
res += adjust_value; res += adjust_value;
} }
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeMinuteNumImpl<ResultPrecision::Extended>>>) else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeMinuteNumImpl<ResultPrecision::Extended>>>)
{ {
if ((a_comp.time.second > b_comp.time.second) if (a_comp.time.second > b_comp.time.second)
|| ((a_comp.time.second == b_comp.time.second) && ((a_comp.millisecond > b_comp.millisecond)
|| ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))))
res += adjust_value;
}
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeSecondNumImpl<ResultPrecision::Extended>>>)
{
if ((a_comp.millisecond > b_comp.millisecond)
|| ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))
res += adjust_value;
}
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeSubsecondNumImpl<1000>>>)
{
if (a_comp.microsecond > b_comp.microsecond)
res += adjust_value; res += adjust_value;
} }
return res; return res;
@ -397,10 +373,6 @@ public:
impl.template dispatchForColumns<ToRelativeMinuteNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "second" || unit == "ss" || unit == "s")
impl.template dispatchForColumns<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "millisecond" || unit == "ms")
impl.template dispatchForColumns<ToRelativeSubsecondNumImpl<millisecond_multiplier>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "microsecond" || unit == "us" || unit == "u")
impl.template dispatchForColumns<ToRelativeSubsecondNumImpl<microsecond_multiplier>>(x, y, timezone_x, timezone_y, res->getData());
else
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Function {} does not support '{}' unit", getName(), unit);

View File

@ -943,7 +943,16 @@ public:
{
if constexpr (std::is_same_v<DataType, DataTypeDateTime64>)
{
const auto c = DecimalUtils::split(vec[i], scale);
auto c = DecimalUtils::split(vec[i], scale);
// -1.123 splits to -1 / 0.123
if (vec[i].value < 0 && c.fractional)
{
using F = typename DataType::FieldType;
c.fractional = DecimalUtils::scaleMultiplier<F>(scale) + (c.whole ? F(-1) : F(1)) * c.fractional;
--c.whole;
}
for (auto & instruction : instructions)
instruction.perform(pos, static_cast<Int64>(c.whole), c.fractional, scale, time_zone);
}
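A worked example of the adjustment above for DateTime64(-1.123, scale = 3), written with plain integers rather than DecimalUtils so it can be checked in isolation:

#include <cassert>
#include <cstdint>

int main()
{
    /// "-1.123 splits to -1 / 0.123"; the formatter needs a non-negative fraction,
    /// so the pair is rewritten as whole -2 plus fractional 0.877 - the same instant.
    int64_t scale_multiplier = 1000;   // 10^scale for scale = 3
    int64_t whole = -1;
    int64_t fractional = 123;

    if (/* value < 0 && */ fractional)
    {
        fractional = scale_multiplier + (whole ? -1 : 1) * fractional;
        --whole;
    }

    assert(whole == -2 && fractional == 877);   // -2 + 877/1000 == -1.123
}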

View File

@ -10,6 +10,7 @@
#include <Functions/DateTimeTransforms.h> #include <Functions/DateTimeTransforms.h>
#include <Functions/FunctionFactory.h> #include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h> #include <Functions/IFunction.h>
#include <Functions/TransformDateTime64.h>
#include <IO/WriteHelpers.h> #include <IO/WriteHelpers.h>

View File

@ -1005,8 +1005,8 @@ inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, Re
}
}
}
/// 9908870400 is time_t value for 2184-01-01 UTC (a bit over the last year supported by DateTime64)
/// 10413792000 is time_t value for 2300-01-01 UTC (a bit over the last year supported by DateTime64)
else if (whole >= 9908870400LL)
else if (whole >= 10413792000LL)
{
/// Unix timestamp with subsecond precision, already scaled to integer.
/// For disambiguation we support only time since 2001-09-09 01:46:40 UTC and less than 30 000 years in future.
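The new cut-off can be checked arithmetically: 2300-01-01 UTC is 120530 days after the epoch, and 120530 * 86400 = 10413792000. A short verification sketch (requires C++20 calendar support in <chrono>):

#include <chrono>
#include <iostream>

int main()
{
    using namespace std::chrono;
    constexpr auto days_since_epoch = sys_days{year{2300} / January / 1}.time_since_epoch();
    constexpr long long secs = duration_cast<std::chrono::seconds>(days_since_epoch).count();
    static_assert(secs == 10413792000LL);
    std::cout << secs << '\n';   // 10413792000
}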

View File

@ -822,8 +822,19 @@ void copyS3File(
ThreadPoolCallbackRunner<void> schedule,
bool for_disk_s3)
{
CopyFileHelper helper{s3_client, src_bucket, src_key, src_offset, src_size, dest_bucket, dest_key, settings, object_metadata, schedule, for_disk_s3};
helper.performCopy();
if (settings.allow_native_copy)
{
CopyFileHelper helper{s3_client, src_bucket, src_key, src_offset, src_size, dest_bucket, dest_key, settings, object_metadata, schedule, for_disk_s3};
helper.performCopy();
}
else
{
auto create_read_buffer = [&]
{
return std::make_unique<ReadBufferFromS3>(s3_client, src_bucket, src_key, "", settings, Context::getGlobalContextInstance()->getReadSettings());
};
copyDataToS3File(create_read_buffer, src_offset, src_size, s3_client, dest_bucket, dest_key, settings, object_metadata, schedule, for_disk_s3);
}
}
}
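A generic sketch of the dispatch introduced above, with the two copy strategies abstracted away; the names and the try/catch placement are illustrative only (per the note in copyS3File.h below, the retry-on-known-issue fallback lives inside the copy helpers rather than at this call site):

#include <functional>
#include <stdexcept>

void copyObject(bool allow_native_copy,
                const std::function<void()> & native_copy,      // server-side CopyObject / UploadPartCopy
                const std::function<void()> & read_write_copy)  // stream the object through the client
{
    if (allow_native_copy)
    {
        try
        {
            native_copy();
            return;
        }
        catch (const std::runtime_error &)
        {
            /// fall through to the slow path on failure
        }
    }
    read_write_copy();
}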

View File

@ -21,6 +21,11 @@ using CreateReadBuffer = std::function<std::unique_ptr<SeekableReadBuffer>()>;
/// The same functionality can be done by using the function copyData() and the classes ReadBufferFromS3 and WriteBufferFromS3 /// The same functionality can be done by using the function copyData() and the classes ReadBufferFromS3 and WriteBufferFromS3
/// however copyS3File() is faster and spends less network traffic and memory. /// however copyS3File() is faster and spends less network traffic and memory.
/// The parameters `src_offset` and `src_size` specify a part in the source to copy. /// The parameters `src_offset` and `src_size` specify a part in the source to copy.
///
/// Note that it tries to copy the file using native copy (CopyObject), but if that
/// has been disabled (with settings.allow_native_copy) or the request failed
/// because of a known issue, it falls back to a read-write copy
/// (copyDataToS3File()).
void copyS3File( void copyS3File(
const std::shared_ptr<const S3::Client> & s3_client, const std::shared_ptr<const S3::Client> & s3_client,
const String & src_bucket, const String & src_bucket,

View File

@ -84,5 +84,8 @@ void collectCrashLog(Int32 signal, UInt64 thread_id, const String & query_id, co
CrashLogElement element{static_cast<time_t>(time / 1000000000), time, signal, thread_id, query_id, trace, trace_full}; CrashLogElement element{static_cast<time_t>(time / 1000000000), time, signal, thread_id, query_id, trace, trace_full};
crash_log_owned->add(element); crash_log_owned->add(element);
/// Notify savingThreadFunction to start flushing the crash log.
/// The crash log is stored in parallel with the signal processing thread.
crash_log_owned->notifyFlush(true);
} }
} }

View File

@ -1378,10 +1378,9 @@ void SelectQueryExpressionAnalyzer::appendWindowFunctionsArguments(
void SelectQueryExpressionAnalyzer::appendExpressionsAfterWindowFunctions(ExpressionActionsChain & chain, bool /* only_types */) void SelectQueryExpressionAnalyzer::appendExpressionsAfterWindowFunctions(ExpressionActionsChain & chain, bool /* only_types */)
{ {
ExpressionActionsChain::Step & step = chain.lastStep(columns_after_window); ExpressionActionsChain::Step & step = chain.lastStep(columns_after_window);
for (const auto & expression : syntax->expressions_with_window_function) for (const auto & expression : syntax->expressions_with_window_function)
{
getRootActionsForWindowFunctions(expression->clone(), true, step.actions()); getRootActionsForWindowFunctions(expression->clone(), true, step.actions());
}
} }
void SelectQueryExpressionAnalyzer::appendGroupByModifiers(ActionsDAGPtr & before_aggregation, ExpressionActionsChain & chain, bool /* only_types */) void SelectQueryExpressionAnalyzer::appendGroupByModifiers(ActionsDAGPtr & before_aggregation, ExpressionActionsChain & chain, bool /* only_types */)
@ -1760,9 +1759,9 @@ ExpressionAnalysisResult::ExpressionAnalysisResult(
/// second_stage: Do I need to execute the second part of the pipeline - running on the initiating server during distributed processing. /// second_stage: Do I need to execute the second part of the pipeline - running on the initiating server during distributed processing.
/** First we compose a chain of actions and remember the necessary steps from it. /** First we compose a chain of actions and remember the necessary steps from it.
* Regardless of from_stage and to_stage, we will compose a complete sequence of actions to perform optimization and * Regardless of from_stage and to_stage, we will compose a complete sequence of actions to perform optimization and
* throw out unnecessary columns based on the entire query. In unnecessary parts of the query, we will not execute subqueries. * throw out unnecessary columns based on the entire query. In unnecessary parts of the query, we will not execute subqueries.
*/ */
const ASTSelectQuery & query = *query_analyzer.getSelectQuery(); const ASTSelectQuery & query = *query_analyzer.getSelectQuery();
auto context = query_analyzer.getContext(); auto context = query_analyzer.getContext();
@ -1805,7 +1804,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult(
if (storage && (query.sampleSize() || settings.parallel_replicas_count > 1)) if (storage && (query.sampleSize() || settings.parallel_replicas_count > 1))
{ {
// we evaluate sampling for Merge lazily so we need to get all the columns // we evaluate sampling for Merge lazily, so we need to get all the columns
if (storage->getName() == "Merge") if (storage->getName() == "Merge")
{ {
const auto columns = metadata_snapshot->getColumns().getAll(); const auto columns = metadata_snapshot->getColumns().getAll();

View File

@ -1,4 +1,6 @@
#include <Interpreters/GetAggregatesVisitor.h> #include <Interpreters/GetAggregatesVisitor.h>
#include <Common/checkStackSize.h>
namespace DB namespace DB
{ {
@ -13,7 +15,7 @@ struct WindowExpressionsCollectorChildInfo
bool window_function_in_subtree = false; bool window_function_in_subtree = false;
}; };
// This visitor travers AST and collects the list of expressions which depend on // This visitor traverses the AST and collects the list of expressions which depend on
// evaluation of window functions. Expression is collected only if // evaluation of window functions. Expression is collected only if
// it's not a part of another expression. // it's not a part of another expression.
// //
@ -26,15 +28,18 @@ struct WindowExpressionsCollectorMatcher
{ {
if (child->as<ASTSubquery>() || child->as<ASTSelectQuery>()) if (child->as<ASTSubquery>() || child->as<ASTSelectQuery>())
return false; return false;
if (auto * select = node->as<ASTSelectQuery>()) if (auto * select = node->as<ASTSelectQuery>())
{ {
// We don't analysis WITH statement because it might contain useless aggregates // We don't analyse the WITH statement because it might contain useless aggregates
if (child == select->with()) if (child == select->with())
return false; return false;
} }
// We procces every expression manually
// We process every expression manually
if (auto * func = node->as<ASTFunction>()) if (auto * func = node->as<ASTFunction>())
return false; return false;
return true; return true;
} }
@ -50,6 +55,8 @@ struct WindowExpressionsCollectorMatcher
ASTPtr & ast, ASTPtr & ast,
const ASTPtr & parent) const ASTPtr & parent)
{ {
checkStackSize();
if (auto * func = ast->as<ASTFunction>()) if (auto * func = ast->as<ASTFunction>())
{ {
if (func->is_window_function) if (func->is_window_function)
@ -67,7 +74,7 @@ struct WindowExpressionsCollectorMatcher
{ {
func->compute_after_window_functions = true; func->compute_after_window_functions = true;
if ((!parent || !parent->as<ASTFunction>())) if ((!parent || !parent->as<ASTFunction>()))
expressions_with_window_functions.push_back(func); expressions_with_window_functions.push_back(ast);
} }
return result; return result;
@ -75,15 +82,16 @@ struct WindowExpressionsCollectorMatcher
return {}; return {};
} }
std::vector<const ASTFunction *> expressions_with_window_functions {}; ASTs expressions_with_window_functions;
}; };
using WindowExpressionsCollectorVisitor = InDepthNodeVisitorWithChildInfo<WindowExpressionsCollectorMatcher>; using WindowExpressionsCollectorVisitor = InDepthNodeVisitorWithChildInfo<WindowExpressionsCollectorMatcher>;
std::vector<const ASTFunction *> getExpressionsWithWindowFunctions(ASTPtr & ast) ASTs getExpressionsWithWindowFunctions(ASTPtr & ast)
{ {
WindowExpressionsCollectorVisitor visitor; WindowExpressionsCollectorVisitor visitor;
visitor.visit(ast); visitor.visit(ast);
return std::move(visitor.expressions_with_window_functions); return std::move(visitor.expressions_with_window_functions);
} }

View File

@ -114,6 +114,6 @@ inline void assertNoAggregates(const ASTPtr & ast, const char * description)
GetAggregatesVisitor(data).visit(ast); GetAggregatesVisitor(data).visit(ast);
} }
std::vector<const ASTFunction *> getExpressionsWithWindowFunctions(ASTPtr & ast); ASTs getExpressionsWithWindowFunctions(ASTPtr & ast);
} }

View File

@ -3181,7 +3181,7 @@ void InterpreterSelectQuery::initSettings()
{ {
auto & query = getSelectQuery(); auto & query = getSelectQuery();
if (query.settings()) if (query.settings())
InterpreterSetQuery(query.settings(), context).executeForCurrentContext(); InterpreterSetQuery(query.settings(), context).executeForCurrentContext(options.ignore_setting_constraints);
auto & client_info = context->getClientInfo(); auto & client_info = context->getClientInfo();
auto min_major = DBMS_MIN_MAJOR_VERSION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD; auto min_major = DBMS_MIN_MAJOR_VERSION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD;

View File

@ -24,10 +24,11 @@ BlockIO InterpreterSetQuery::execute()
} }
void InterpreterSetQuery::executeForCurrentContext() void InterpreterSetQuery::executeForCurrentContext(bool ignore_setting_constraints)
{ {
const auto & ast = query_ptr->as<ASTSetQuery &>(); const auto & ast = query_ptr->as<ASTSetQuery &>();
getContext()->checkSettingsConstraints(ast.changes); if (!ignore_setting_constraints)
getContext()->checkSettingsConstraints(ast.changes);
getContext()->applySettingsChanges(ast.changes); getContext()->applySettingsChanges(ast.changes);
getContext()->resetSettingsToDefaultValue(ast.default_settings); getContext()->resetSettingsToDefaultValue(ast.default_settings);
} }

View File

@ -23,7 +23,7 @@ public:
/** Set setting for current context (query context). /** Set setting for current context (query context).
* It is used for interpretation of SETTINGS clause in SELECT query. * It is used for interpretation of SETTINGS clause in SELECT query.
*/ */
void executeForCurrentContext(); void executeForCurrentContext(bool ignore_setting_constraints = false);
bool supportsTransactions() const override { return true; } bool supportsTransactions() const override { return true; }

View File

@ -51,6 +51,8 @@ struct SelectQueryOptions
bool settings_limit_offset_done = false; bool settings_limit_offset_done = false;
bool is_explain = false; /// The value is true if it's explain statement. bool is_explain = false; /// The value is true if it's explain statement.
bool is_create_parameterized_view = false; bool is_create_parameterized_view = false;
/// Bypass setting constraints for some internal queries such as projection ASTs.
bool ignore_setting_constraints = false;
/// These two fields are used to evaluate shardNum() and shardCount() function when /// These two fields are used to evaluate shardNum() and shardCount() function when
/// prefer_localhost_replica == 1 and local instance is selected. They are needed because local /// prefer_localhost_replica == 1 and local instance is selected. They are needed because local
@ -141,6 +143,12 @@ struct SelectQueryOptions
return *this; return *this;
} }
SelectQueryOptions & ignoreSettingConstraints(bool value = true)
{
ignore_setting_constraints = value;
return *this;
}
SelectQueryOptions & setInternal(bool value = false) SelectQueryOptions & setInternal(bool value = false)
{ {
is_internal = value; is_internal = value;

View File

@ -1287,6 +1287,7 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect(
bool is_changed = replaceAliasColumnsInQuery(query, result.storage_snapshot->metadata->getColumns(), bool is_changed = replaceAliasColumnsInQuery(query, result.storage_snapshot->metadata->getColumns(),
result.array_join_result_to_source, getContext(), excluded_nodes); result.array_join_result_to_source, getContext(), excluded_nodes);
/// If query is changed, we need to redo some work to correct name resolution. /// If query is changed, we need to redo some work to correct name resolution.
if (is_changed) if (is_changed)
{ {

View File

@ -40,11 +40,10 @@ struct TreeRewriterResult
NameSet expanded_aliases; NameSet expanded_aliases;
Aliases aliases; Aliases aliases;
std::vector<const ASTFunction *> aggregates; std::vector<const ASTFunction *> aggregates;
std::vector<const ASTFunction *> window_function_asts; std::vector<const ASTFunction *> window_function_asts;
ASTs expressions_with_window_function;
std::vector<const ASTFunction *> expressions_with_window_function;
/// Which column is needed to be ARRAY-JOIN'ed to get the specified. /// Which column is needed to be ARRAY-JOIN'ed to get the specified.
/// For example, for `SELECT s.v ... ARRAY JOIN a AS s` will get "s.v" -> "a.v". /// For example, for `SELECT s.v ... ARRAY JOIN a AS s` will get "s.v" -> "a.v".

View File

@ -34,6 +34,16 @@ static std::string createDirectory(const std::string & file)
return path; return path;
} }
static std::string renderFileNameTemplate(time_t now, const std::string & file_path)
{
fs::path path{file_path};
std::tm buf;
localtime_r(&now, &buf);
std::ostringstream ss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
ss << std::put_time(&buf, file_path.c_str());
return path.replace_filename(ss.str());
}
#ifndef WITHOUT_TEXT_LOG #ifndef WITHOUT_TEXT_LOG
void Loggers::setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority) void Loggers::setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority)
{ {
@ -68,9 +78,12 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
/// The maximum (the most verbose) of those will be used as default for Poco loggers
int max_log_level = 0;
const auto log_path = config.getString("logger.log", "");
if (!log_path.empty())
time_t now = std::time({});
const auto log_path_prop = config.getString("logger.log", "");
if (!log_path_prop.empty())
{
const auto log_path = renderFileNameTemplate(now, log_path_prop);
createDirectory(log_path);
std::string ext;
@ -109,9 +122,10 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
split->addChannel(log, "log");
}
const auto errorlog_path = config.getString("logger.errorlog", "");
if (!errorlog_path.empty())
const auto errorlog_path_prop = config.getString("logger.errorlog", "");
if (!errorlog_path_prop.empty())
{
const auto errorlog_path = renderFileNameTemplate(now, errorlog_path_prop);
createDirectory(errorlog_path);
// NOTE: we don't use notice & critical in the code, so in practice error log collects fatal & error & warning.
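A usage sketch for the strftime-style log path templates enabled above; the path is hypothetical and any %-specifier understood by std::put_time can be used:

#include <ctime>
#include <filesystem>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

int main()
{
    const std::string templ = "/var/log/clickhouse-server/clickhouse-server-%Y-%m-%d.log";

    std::time_t now = std::time(nullptr);
    std::tm buf;
    localtime_r(&now, &buf);

    std::ostringstream ss;
    ss << std::put_time(&buf, templ.c_str());

    /// On 2023-07-17 this prints /var/log/clickhouse-server/clickhouse-server-2023-07-17.log
    std::cout << std::filesystem::path{templ}.replace_filename(ss.str()).string() << '\n';
}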

View File

@ -142,6 +142,14 @@ ASTPtr ASTProjectionSelectQuery::cloneToASTSelect() const
}
if (groupBy())
select_query->setExpression(ASTSelectQuery::Expression::GROUP_BY, groupBy()->clone());
auto settings_query = std::make_shared<ASTSetQuery>();
SettingsChanges settings_changes;
settings_changes.insertSetting("optimize_aggregators_of_group_by_keys", false);
settings_changes.insertSetting("optimize_group_by_function_keys", false);
settings_query->changes = std::move(settings_changes);
settings_query->is_standalone = false;
select_query->setExpression(ASTSelectQuery::Expression::SETTINGS, std::move(settings_query));
return node;
}
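For illustration only: with the settings attached above, a hypothetical projection defined as SELECT key, sum(value) GROUP BY key is cloned into a standalone query roughly equivalent to the text below (the table name is a placeholder):

#include <iostream>
#include <string>

int main()
{
    const std::string cloned =
        "SELECT key, sum(value) FROM t GROUP BY key "
        "SETTINGS optimize_aggregators_of_group_by_keys = 0, optimize_group_by_function_keys = 0";
    std::cout << cloned << '\n';
}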

View File

@ -0,0 +1,27 @@
#include "Formatters.h"
#include <format>
namespace DB
{
std::string formatKQLTimespan(const Int64 ticks)
{
static constexpr Int64 TICKS_PER_SECOND = 10000000;
static constexpr auto TICKS_PER_MINUTE = TICKS_PER_SECOND * 60;
static constexpr auto TICKS_PER_HOUR = TICKS_PER_MINUTE * 60;
static constexpr auto TICKS_PER_DAY = TICKS_PER_HOUR * 24;
const auto abs_ticks = std::abs(ticks);
std::string result = ticks < 0 ? "-" : "";
if (abs_ticks >= TICKS_PER_DAY)
result.append(std::format("{}.", abs_ticks / TICKS_PER_DAY));
result.append(std::format(
"{:02}:{:02}:{:02}", (abs_ticks / TICKS_PER_HOUR) % 24, (abs_ticks / TICKS_PER_MINUTE) % 60, (abs_ticks / TICKS_PER_SECOND) % 60));
if (const auto fractional_second = abs_ticks % TICKS_PER_SECOND)
result.append(std::format(".{:07}", fractional_second));
return result;
}
}
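A few spot checks for the new formatter; the include path is an assumption, and the expected strings follow directly from the tick arithmetic above (1 day = 864'000'000'000 ticks of 100 ns):

#include <cassert>
#include "Formatters.h"

int main()
{
    /// 1 day, 2 hours, 3 minutes, 4.1234567 seconds in 100 ns ticks.
    constexpr Int64 ticks = 864'000'000'000 + 72'000'000'000 + 1'800'000'000 + 40'000'000 + 1'234'567;
    assert(DB::formatKQLTimespan(ticks) == "1.02:03:04.1234567");
    assert(DB::formatKQLTimespan(-10'000'000) == "-00:00:01");
    assert(DB::formatKQLTimespan(0) == "00:00:00");
}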

View File

@ -0,0 +1,10 @@
#pragma once
#include <base/types.h>
#include <string>
namespace DB
{
std::string formatKQLTimespan(Int64 ticks);
}

View File

@ -115,7 +115,7 @@ CompletedPipelineExecutor::~CompletedPipelineExecutor()
} }
catch (...) catch (...)
{ {
tryLogCurrentException("PullingAsyncPipelineExecutor"); tryLogCurrentException("CompletedPipelineExecutor");
} }
} }

View File

@ -208,7 +208,7 @@ namespace DB
const String & column_name, const String & column_name,
ColumnPtr & column, ColumnPtr & column,
const DataTypePtr & column_type, const DataTypePtr & column_type,
const PaddedPODArray<UInt8> * null_bytemap, const PaddedPODArray<UInt8> *,
arrow::ArrayBuilder * array_builder, arrow::ArrayBuilder * array_builder,
String format_name, String format_name,
size_t start, size_t start,
@ -231,7 +231,9 @@ namespace DB
/// Start new array. /// Start new array.
components_status = builder.Append(); components_status = builder.Append();
checkStatus(components_status, nested_column->getName(), format_name); checkStatus(components_status, nested_column->getName(), format_name);
fillArrowArray(column_name, nested_column, nested_type, null_bytemap, value_builder, format_name, offsets[array_idx - 1], offsets[array_idx], output_string_as_string, output_fixed_string_as_fixed_byte_array, dictionary_values);
/// Pass a nullptr null_map, because fillArrowArray will decide whether nested_type is nullable; if it is, it will create a new null_map from nested_column
fillArrowArray(column_name, nested_column, nested_type, nullptr, value_builder, format_name, offsets[array_idx - 1], offsets[array_idx], output_string_as_string, output_fixed_string_as_fixed_byte_array, dictionary_values);
} }
} }

View File

@ -69,7 +69,7 @@ static AggregateProjectionInfo getAggregatingProjectionInfo(
projection.query_ast, projection.query_ast,
context, context,
Pipe(std::make_shared<SourceFromSingleChunk>(metadata_snapshot->getSampleBlock())), Pipe(std::make_shared<SourceFromSingleChunk>(metadata_snapshot->getSampleBlock())),
SelectQueryOptions{QueryProcessingStage::WithMergeableState}.ignoreASTOptimizations()); SelectQueryOptions{QueryProcessingStage::WithMergeableState}.ignoreASTOptimizations().ignoreSettingConstraints());
const auto & analysis_result = interpreter.getAnalysisResult(); const auto & analysis_result = interpreter.getAnalysisResult();
const auto & query_analyzer = interpreter.getQueryAnalyzer(); const auto & query_analyzer = interpreter.getQueryAnalyzer();

View File

@ -7,18 +7,18 @@
#include <Parsers/ASTProjectionDeclaration.h> #include <Parsers/ASTProjectionDeclaration.h>
#include <Parsers/ASTProjectionSelectQuery.h> #include <Parsers/ASTProjectionSelectQuery.h>
#include <Parsers/ParserCreateQuery.h> #include <Parsers/ParserCreateQuery.h>
#include <Parsers/formatAST.h>
#include <Parsers/parseQuery.h> #include <Parsers/parseQuery.h>
#include <Parsers/queryToString.h> #include <Parsers/queryToString.h>
#include <Parsers/formatAST.h>
#include <Core/Defines.h> #include <Core/Defines.h>
#include <Interpreters/InterpreterSelectQuery.h> #include <Interpreters/InterpreterSelectQuery.h>
#include <QueryPipeline/Pipe.h>
#include <QueryPipeline/QueryPipelineBuilder.h>
#include <Processors/Sources/SourceFromSingleChunk.h>
#include <Processors/Transforms/SquashingChunksTransform.h>
#include <Processors/Executors/PullingPipelineExecutor.h> #include <Processors/Executors/PullingPipelineExecutor.h>
#include <Processors/QueryPlan/QueryPlan.h> #include <Processors/QueryPlan/QueryPlan.h>
#include <Processors/Sources/SourceFromSingleChunk.h>
#include <Processors/Transforms/SquashingChunksTransform.h>
#include <QueryPipeline/Pipe.h>
#include <QueryPipeline/QueryPipelineBuilder.h>
#include <base/range.h> #include <base/range.h>
@ -109,9 +109,16 @@ ProjectionDescription::getProjectionFromAST(const ASTPtr & definition_ast, const
auto external_storage_holder = std::make_shared<TemporaryTableHolder>(query_context, columns, ConstraintsDescription{}); auto external_storage_holder = std::make_shared<TemporaryTableHolder>(query_context, columns, ConstraintsDescription{});
StoragePtr storage = external_storage_holder->getTable(); StoragePtr storage = external_storage_holder->getTable();
InterpreterSelectQuery select( InterpreterSelectQuery select(
result.query_ast, query_context, storage, {}, result.query_ast,
query_context,
storage,
{},
/// Here we ignore ast optimizations because otherwise aggregation keys may be removed from result header as constants. /// Here we ignore ast optimizations because otherwise aggregation keys may be removed from result header as constants.
SelectQueryOptions{QueryProcessingStage::WithMergeableState}.modify().ignoreAlias().ignoreASTOptimizations()); SelectQueryOptions{QueryProcessingStage::WithMergeableState}
.modify()
.ignoreAlias()
.ignoreASTOptimizations()
.ignoreSettingConstraints());
result.required_columns = select.getRequiredColumns(); result.required_columns = select.getRequiredColumns();
result.sample_block = select.getSampleBlock(); result.sample_block = select.getSampleBlock();
@ -220,9 +227,16 @@ ProjectionDescription ProjectionDescription::getMinMaxCountProjection(
auto external_storage_holder = std::make_shared<TemporaryTableHolder>(query_context, columns, ConstraintsDescription{}); auto external_storage_holder = std::make_shared<TemporaryTableHolder>(query_context, columns, ConstraintsDescription{});
StoragePtr storage = external_storage_holder->getTable(); StoragePtr storage = external_storage_holder->getTable();
InterpreterSelectQuery select( InterpreterSelectQuery select(
result.query_ast, query_context, storage, {}, result.query_ast,
query_context,
storage,
{},
/// Here we ignore ast optimizations because otherwise aggregation keys may be removed from result header as constants. /// Here we ignore ast optimizations because otherwise aggregation keys may be removed from result header as constants.
SelectQueryOptions{QueryProcessingStage::WithMergeableState}.modify().ignoreAlias().ignoreASTOptimizations()); SelectQueryOptions{QueryProcessingStage::WithMergeableState}
.modify()
.ignoreAlias()
.ignoreASTOptimizations()
.ignoreSettingConstraints());
result.required_columns = select.getRequiredColumns(); result.required_columns = select.getRequiredColumns();
result.sample_block = select.getSampleBlock(); result.sample_block = select.getSampleBlock();
@ -241,7 +255,8 @@ ProjectionDescription ProjectionDescription::getMinMaxCountProjection(
result.sample_block_for_keys.insert({nullptr, key.type, key.name}); result.sample_block_for_keys.insert({nullptr, key.type, key.name});
auto it = partition_column_name_to_value_index.find(key.name); auto it = partition_column_name_to_value_index.find(key.name);
if (it == partition_column_name_to_value_index.end()) if (it == partition_column_name_to_value_index.end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "minmax_count projection can only have keys about partition columns. It's a bug"); throw Exception(
ErrorCodes::LOGICAL_ERROR, "minmax_count projection can only have keys about partition columns. It's a bug");
result.partition_value_indices.push_back(it->second); result.partition_value_indices.push_back(it->second);
} }
} }
@ -282,7 +297,8 @@ Block ProjectionDescription::calculate(const Block & block, ContextPtr context)
Pipe(std::make_shared<SourceFromSingleChunk>(block)), Pipe(std::make_shared<SourceFromSingleChunk>(block)),
SelectQueryOptions{ SelectQueryOptions{
type == ProjectionDescription::Type::Normal ? QueryProcessingStage::FetchColumns type == ProjectionDescription::Type::Normal ? QueryProcessingStage::FetchColumns
: QueryProcessingStage::WithMergeableState}) : QueryProcessingStage::WithMergeableState}
.ignoreSettingConstraints())
.buildQueryPipeline(); .buildQueryPipeline();
builder.resize(1); builder.resize(1);
// Generate aggregated blocks with rows less or equal than the original block. // Generate aggregated blocks with rows less or equal than the original block.
@ -353,8 +369,8 @@ void ProjectionsDescription::add(ProjectionDescription && projection, const Stri
{ {
if (if_not_exists) if (if_not_exists)
return; return;
throw Exception(ErrorCodes::ILLEGAL_PROJECTION, "Cannot add projection {}: projection with this name already exists", throw Exception(
projection.name); ErrorCodes::ILLEGAL_PROJECTION, "Cannot add projection {}: projection with this name already exists", projection.name);
} }
auto insert_it = projections.cend(); auto insert_it = projections.cend();
@ -363,10 +379,10 @@ void ProjectionsDescription::add(ProjectionDescription && projection, const Stri
insert_it = projections.cbegin(); insert_it = projections.cbegin();
else if (!after_projection.empty()) else if (!after_projection.empty())
{ {
auto it = std::find_if(projections.cbegin(), projections.cend(), [&after_projection](const auto & projection_) auto it = std::find_if(
{ projections.cbegin(),
return projection_.name == after_projection; projections.cend(),
}); [&after_projection](const auto & projection_) { return projection_.name == after_projection; });
if (it != projections.cend()) if (it != projections.cend())
++it; ++it;
insert_it = it; insert_it = it;

View File

@ -373,7 +373,7 @@ void StorageMergeTree::alter(
/// Always execute required mutations synchronously, because alters /// Always execute required mutations synchronously, because alters
/// should be executed in sequential order. /// should be executed in sequential order.
if (!maybe_mutation_commands.empty()) if (!maybe_mutation_commands.empty())
waitForMutation(mutation_version); waitForMutation(mutation_version, false);
} }
{ {
@ -601,7 +601,7 @@ void StorageMergeTree::mutate(const MutationCommands & commands, ContextPtr quer
Int64 version = startMutation(commands, query_context); Int64 version = startMutation(commands, query_context);
if (query_context->getSettingsRef().mutations_sync > 0 || query_context->getCurrentTransaction()) if (query_context->getSettingsRef().mutations_sync > 0 || query_context->getCurrentTransaction())
waitForMutation(version); waitForMutation(version, false);
} }
bool StorageMergeTree::hasLightweightDeletedMask() const bool StorageMergeTree::hasLightweightDeletedMask() const

View File

@ -191,7 +191,7 @@ private:
/// and into in-memory structures. Wake up merge-mutation task. /// and into in-memory structures. Wake up merge-mutation task.
Int64 startMutation(const MutationCommands & commands, ContextPtr query_context); Int64 startMutation(const MutationCommands & commands, ContextPtr query_context);
/// Wait until mutation with version will finish mutation for all parts /// Wait until mutation with version will finish mutation for all parts
void waitForMutation(Int64 version, bool wait_for_another_mutation = false); void waitForMutation(Int64 version, bool wait_for_another_mutation);
void waitForMutation(const String & mutation_id, bool wait_for_another_mutation) override; void waitForMutation(const String & mutation_id, bool wait_for_another_mutation) override;
void waitForMutation(Int64 version, const String & mutation_id, bool wait_for_another_mutation = false); void waitForMutation(Int64 version, const String & mutation_id, bool wait_for_another_mutation = false);
void setMutationCSN(const String & mutation_id, CSN csn) override; void setMutationCSN(const String & mutation_id, CSN csn) override;

View File

@ -19,6 +19,8 @@
#include <Processors/Sinks/SinkToStorage.h> #include <Processors/Sinks/SinkToStorage.h>
#include <unordered_set> #include <unordered_set>
#include <DataTypes/DataTypeArray.h>
namespace DB namespace DB
{ {
@ -127,9 +129,7 @@ public:
for (const auto j : collections::range(0, num_cols)) for (const auto j : collections::range(0, num_cols))
{ {
WriteBufferFromOwnString ostr; insertValueIntoMongoDB(*document, data_names[j], *data_types[j], *columns[j], i);
data_types[j]->getDefaultSerialization()->serializeText(*columns[j], i, ostr, FormatSettings{});
document->add(data_names[j], ostr.str());
} }
documents.push_back(std::move(document)); documents.push_back(std::move(document));
@ -151,6 +151,60 @@ public:
} }
private: private:
void insertValueIntoMongoDB(
Poco::MongoDB::Document & document,
const std::string & name,
const IDataType & data_type,
const IColumn & column,
size_t idx)
{
WhichDataType which(data_type);
if (which.isArray())
{
const ColumnArray & column_array = assert_cast<const ColumnArray &>(column);
const ColumnArray::Offsets & offsets = column_array.getOffsets();
size_t offset = offsets[idx - 1];
size_t next_offset = offsets[idx];
const IColumn & nested_column = column_array.getData();
const auto * array_type = assert_cast<const DataTypeArray *>(&data_type);
const DataTypePtr & nested_type = array_type->getNestedType();
Poco::MongoDB::Array::Ptr array = new Poco::MongoDB::Array();
for (size_t i = 0; i + offset < next_offset; ++i)
{
insertValueIntoMongoDB(*array, Poco::NumberFormatter::format(i), *nested_type, nested_column, i + offset);
}
document.add(name, array);
return;
}
/// MongoDB does not support UInt64 type, so just cast it to Int64
if (which.isNativeUInt())
document.add(name, static_cast<Poco::Int64>(column.getUInt(idx)));
else if (which.isNativeInt())
document.add(name, static_cast<Poco::Int64>(column.getInt(idx)));
else if (which.isFloat32())
document.add(name, static_cast<Float64>(column.getFloat32(idx)));
else if (which.isFloat64())
document.add(name, static_cast<Float64>(column.getFloat64(idx)));
else if (which.isDate())
document.add(name, Poco::Timestamp(DateLUT::instance().fromDayNum(DayNum(column.getUInt(idx))) * 1000000));
else if (which.isDateTime())
document.add(name, Poco::Timestamp(column.getUInt(idx) * 1000000));
else
{
WriteBufferFromOwnString ostr;
data_type.getDefaultSerialization()->serializeText(column, idx, ostr, FormatSettings{});
document.add(name, ostr.str());
}
}
String collection_name;
String db_name;
StorageMetadataPtr metadata_snapshot;
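A worked example for the Date branch above, assuming the UTC time zone (fromDayNum() returns the same value there); Poco::Timestamp counts microseconds since the Unix epoch:

#include <cassert>
#include <cstdint>

int main()
{
    uint64_t date_value = 19555;                     // ClickHouse Date for 2023-07-17 (days since 1970-01-01)
    int64_t seconds = int64_t(date_value) * 86400;   // midnight of that day as a Unix timestamp
    int64_t poco_timestamp = seconds * 1000000;      // value handed to Poco::Timestamp

    assert(seconds == 1689552000);
    assert(poco_timestamp == 1689552000000000);
}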

View File

@ -182,6 +182,7 @@ S3Settings::RequestSettings::RequestSettings(const NamedCollection & collection)
max_single_read_retries = collection.getOrDefault<UInt64>("max_single_read_retries", max_single_read_retries); max_single_read_retries = collection.getOrDefault<UInt64>("max_single_read_retries", max_single_read_retries);
max_connections = collection.getOrDefault<UInt64>("max_connections", max_connections); max_connections = collection.getOrDefault<UInt64>("max_connections", max_connections);
list_object_keys_size = collection.getOrDefault<UInt64>("list_object_keys_size", list_object_keys_size); list_object_keys_size = collection.getOrDefault<UInt64>("list_object_keys_size", list_object_keys_size);
allow_native_copy = collection.getOrDefault<bool>("allow_native_copy", allow_native_copy);
throw_on_zero_files_match = collection.getOrDefault<bool>("throw_on_zero_files_match", throw_on_zero_files_match); throw_on_zero_files_match = collection.getOrDefault<bool>("throw_on_zero_files_match", throw_on_zero_files_match);
} }
@ -197,6 +198,7 @@ S3Settings::RequestSettings::RequestSettings(
max_connections = config.getUInt64(key + "max_connections", settings.s3_max_connections); max_connections = config.getUInt64(key + "max_connections", settings.s3_max_connections);
check_objects_after_upload = config.getBool(key + "check_objects_after_upload", settings.s3_check_objects_after_upload); check_objects_after_upload = config.getBool(key + "check_objects_after_upload", settings.s3_check_objects_after_upload);
list_object_keys_size = config.getUInt64(key + "list_object_keys_size", settings.s3_list_object_keys_size); list_object_keys_size = config.getUInt64(key + "list_object_keys_size", settings.s3_list_object_keys_size);
allow_native_copy = config.getBool(key + "allow_native_copy", allow_native_copy);
throw_on_zero_files_match = config.getBool(key + "throw_on_zero_files_match", settings.s3_throw_on_zero_files_match); throw_on_zero_files_match = config.getBool(key + "throw_on_zero_files_match", settings.s3_throw_on_zero_files_match);
retry_attempts = config.getUInt64(key + "retry_attempts", settings.s3_retry_attempts); retry_attempts = config.getUInt64(key + "retry_attempts", settings.s3_retry_attempts);
request_timeout_ms = config.getUInt64(key + "request_timeout_ms", settings.s3_request_timeout_ms); request_timeout_ms = config.getUInt64(key + "request_timeout_ms", settings.s3_request_timeout_ms);

View File

@ -71,6 +71,7 @@ struct S3Settings
size_t retry_attempts = 10; size_t retry_attempts = 10;
size_t request_timeout_ms = 3000; size_t request_timeout_ms = 3000;
size_t long_request_timeout_ms = 30000; // TODO: Take this from config like request_timeout_ms size_t long_request_timeout_ms = 30000; // TODO: Take this from config like request_timeout_ms
bool allow_native_copy = true;
bool throw_on_zero_files_match = false; bool throw_on_zero_files_match = false;

View File

@ -0,0 +1,198 @@
test_access_for_functions/test.py::test_access_rights_for_function
test_backward_compatibility/test_normalized_count_comparison.py::test_select_aggregate_alias_column
test_concurrent_backups_s3/test.py::test_concurrent_backups
test_distributed_ddl/test.py::test_default_database[configs]
test_distributed_ddl/test.py::test_default_database[configs_secure]
test_distributed_ddl/test.py::test_on_server_fail[configs]
test_distributed_ddl/test.py::test_on_server_fail[configs_secure]
test_distributed_insert_backward_compatibility/test.py::test_distributed_in_tuple
test_distributed_inter_server_secret/test.py::test_per_user_inline_settings_secure_cluster[default-]
test_distributed_inter_server_secret/test.py::test_per_user_inline_settings_secure_cluster[nopass-]
test_distributed_inter_server_secret/test.py::test_per_user_inline_settings_secure_cluster[pass-foo]
test_distributed_inter_server_secret/test.py::test_per_user_protocol_settings_secure_cluster[default-]
test_distributed_inter_server_secret/test.py::test_per_user_protocol_settings_secure_cluster[nopass-]
test_distributed_inter_server_secret/test.py::test_per_user_protocol_settings_secure_cluster[pass-foo]
test_distributed_inter_server_secret/test.py::test_user_insecure_cluster[default-]
test_distributed_inter_server_secret/test.py::test_user_insecure_cluster[nopass-]
test_distributed_inter_server_secret/test.py::test_user_insecure_cluster[pass-foo]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster[default-]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster[nopass-]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster[pass-foo]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster_from_backward[default-]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster_from_backward[nopass-]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster_from_backward[pass-foo]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster_with_backward[default-]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster_with_backward[nopass-]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster_with_backward[pass-foo]
test_distributed_load_balancing/test.py::test_distributed_replica_max_ignored_errors
test_distributed_load_balancing/test.py::test_load_balancing_default
test_distributed_load_balancing/test.py::test_load_balancing_priority_round_robin[dist_priority]
test_distributed_load_balancing/test.py::test_load_balancing_priority_round_robin[dist_priority_negative]
test_distributed_load_balancing/test.py::test_load_balancing_round_robin
test_backward_compatibility/test.py::test_backward_compatability1
test_backward_compatibility/test_aggregate_fixed_key.py::test_two_level_merge
test_backward_compatibility/test_aggregate_function_state.py::test_backward_compatability_for_avg
test_backward_compatibility/test_aggregate_function_state.py::test_backward_compatability_for_uniq_exact[1000]
test_backward_compatibility/test_aggregate_function_state.py::test_backward_compatability_for_uniq_exact[500000]
test_backward_compatibility/test_aggregate_function_state.py::test_backward_compatability_for_uniq_exact_variadic[1000]
test_backward_compatibility/test_aggregate_function_state.py::test_backward_compatability_for_uniq_exact_variadic[500000]
test_backward_compatibility/test_ip_types_binary_compatibility.py::test_ip_types_binary_compatibility
test_backward_compatibility/test_select_aggregate_alias_column.py::test_select_aggregate_alias_column
test_backward_compatibility/test_short_strings_aggregation.py::test_backward_compatability
test_mask_sensitive_info/test.py::test_encryption_functions
test_merge_table_over_distributed/test.py::test_global_in
test_merge_table_over_distributed/test.py::test_select_table_name_from_merge_over_distributed
test_mutations_with_merge_tree/test.py::test_mutations_with_merge_background_task
test_passing_max_partitions_to_read_remotely/test.py::test_default_database_on_cluster
test_row_policy/test.py::test_change_of_users_xml_changes_row_policies
test_row_policy/test.py::test_change_of_users_xml_changes_row_policies
test_row_policy/test.py::test_dcl_introspection
test_row_policy/test.py::test_dcl_introspection
test_row_policy/test.py::test_dcl_management
test_row_policy/test.py::test_dcl_management
test_row_policy/test.py::test_dcl_users_with_policies_from_users_xml
test_row_policy/test.py::test_dcl_users_with_policies_from_users_xml
test_row_policy/test.py::test_grant_create_row_policy
test_row_policy/test.py::test_grant_create_row_policy
test_row_policy/test.py::test_introspection
test_row_policy/test.py::test_introspection
test_row_policy/test.py::test_join
test_row_policy/test.py::test_join
test_row_policy/test.py::test_miscellaneous_engines
test_row_policy/test.py::test_miscellaneous_engines
test_row_policy/test.py::test_policy_from_users_xml_affects_only_user_assigned
test_row_policy/test.py::test_policy_from_users_xml_affects_only_user_assigned
test_row_policy/test.py::test_policy_on_distributed_table_via_role
test_row_policy/test.py::test_policy_on_distributed_table_via_role
test_row_policy/test.py::test_reload_users_xml_by_timer
test_row_policy/test.py::test_reload_users_xml_by_timer
test_row_policy/test.py::test_row_policy_filter_with_subquery
test_row_policy/test.py::test_row_policy_filter_with_subquery
test_row_policy/test.py::test_smoke
test_row_policy/test.py::test_smoke
test_row_policy/test.py::test_some_users_without_policies
test_row_policy/test.py::test_some_users_without_policies
test_row_policy/test.py::test_tags_with_db_and_table_names
test_row_policy/test.py::test_tags_with_db_and_table_names
test_row_policy/test.py::test_throwif_error_in_prewhere_with_same_condition_as_filter
test_row_policy/test.py::test_throwif_error_in_prewhere_with_same_condition_as_filter
test_row_policy/test.py::test_throwif_error_in_where_with_same_condition_as_filter
test_row_policy/test.py::test_throwif_error_in_where_with_same_condition_as_filter
test_row_policy/test.py::test_throwif_in_prewhere_doesnt_expose_restricted_data
test_row_policy/test.py::test_throwif_in_prewhere_doesnt_expose_restricted_data
test_row_policy/test.py::test_throwif_in_where_doesnt_expose_restricted_data
test_row_policy/test.py::test_throwif_in_where_doesnt_expose_restricted_data
test_row_policy/test.py::test_users_xml_is_readonly
test_row_policy/test.py::test_users_xml_is_readonly
test_row_policy/test.py::test_with_prewhere
test_row_policy/test.py::test_with_prewhere
test_settings_constraints_distributed/test.py::test_select_clamps_settings
test_backward_compatibility/test_cte_distributed.py::test_cte_distributed
test_compression_codec_read/test.py::test_default_codec_read
test_dictionaries_update_and_reload/test.py::test_reload_after_fail_in_cache_dictionary
test_distributed_type_object/test.py::test_distributed_type_object
test_materialized_mysql_database/test.py::test_select_without_columns_5_7
test_materialized_mysql_database/test.py::test_select_without_columns_8_0
test_shard_level_const_function/test.py::test_remote
test_storage_postgresql/test.py::test_postgres_select_insert
test_storage_rabbitmq/test.py::test_rabbitmq_materialized_view
test_system_merges/test.py::test_mutation_simple[]
test_system_merges/test.py::test_mutation_simple[replicated]
test_backward_compatibility/test_insert_profile_events.py::test_new_client_compatible
test_backward_compatibility/test_insert_profile_events.py::test_old_client_compatible
test_backward_compatibility/test_vertical_merges_from_compact_parts.py::test_vertical_merges_from_compact_parts
test_disk_over_web_server/test.py::test_cache[node2]
test_disk_over_web_server/test.py::test_incorrect_usage
test_disk_over_web_server/test.py::test_replicated_database
test_disk_over_web_server/test.py::test_unavailable_server
test_disk_over_web_server/test.py::test_usage[node2]
test_distributed_backward_compatability/test.py::test_distributed_in_tuple
test_executable_table_function/test.py::test_executable_function_input_python
test_groupBitmapAnd_on_distributed/test_groupBitmapAndState_on_distributed_table.py::test_groupBitmapAndState_on_different_version_nodes
test_groupBitmapAnd_on_distributed/test_groupBitmapAndState_on_distributed_table.py::test_groupBitmapAndState_on_distributed_table
test_settings_profile/test.py::test_show_profiles
test_sql_user_defined_functions_on_cluster/test.py::test_sql_user_defined_functions_on_cluster
test_backward_compatibility/test_functions.py::test_aggregate_states
test_backward_compatibility/test_functions.py::test_string_functions
test_default_compression_codec/test.py::test_default_codec_for_compact_parts
test_default_compression_codec/test.py::test_default_codec_multiple
test_default_compression_codec/test.py::test_default_codec_single
test_default_compression_codec/test.py::test_default_codec_version_update
test_postgresql_protocol/test.py::test_python_client
test_quota/test.py::test_add_remove_interval
test_quota/test.py::test_add_remove_quota
test_quota/test.py::test_consumption_of_show_clusters
test_quota/test.py::test_consumption_of_show_databases
test_quota/test.py::test_consumption_of_show_privileges
test_quota/test.py::test_consumption_of_show_processlist
test_quota/test.py::test_consumption_of_show_tables
test_quota/test.py::test_dcl_introspection
test_quota/test.py::test_dcl_management
test_quota/test.py::test_exceed_quota
test_quota/test.py::test_query_inserts
test_quota/test.py::test_quota_from_users_xml
test_quota/test.py::test_reload_users_xml_by_timer
test_quota/test.py::test_simpliest_quota
test_quota/test.py::test_tracking_quota
test_quota/test.py::test_users_xml_is_readonly
test_replicated_merge_tree_compatibility/test.py::test_replicated_merge_tree_defaults_compatibility
test_polymorphic_parts/test.py::test_different_part_types_on_replicas[polymorphic_table_wide-Wide]
test_old_versions/test.py::test_client_is_older_than_server
test_polymorphic_parts/test.py::test_polymorphic_parts_non_adaptive
test_old_versions/test.py::test_server_is_older_than_client
test_polymorphic_parts/test.py::test_compact_parts_only
test_polymorphic_parts/test.py::test_different_part_types_on_replicas[polymorphic_table_compact-Compact]
test_polymorphic_parts/test.py::test_polymorphic_parts_index
test_old_versions/test.py::test_distributed_query_initiator_is_older_than_shard
test_polymorphic_parts/test.py::test_polymorphic_parts_basics[first_node1-second_node1]
test_polymorphic_parts/test.py::test_polymorphic_parts_basics[first_node0-second_node0]
test_ttl_replicated/test.py::test_ttl_table[DELETE]
test_ttl_replicated/test.py::test_ttl_columns
test_ttl_replicated/test.py::test_ttl_compatibility[node_left2-node_right2-2]
test_ttl_replicated/test.py::test_ttl_table[]
test_version_update/test.py::test_aggregate_function_versioning_server_upgrade
test_version_update/test.py::test_aggregate_function_versioning_fetch_data_from_old_to_new_server
test_ttl_replicated/test.py::test_ttl_double_delete_rule_returns_error
test_ttl_replicated/test.py::test_ttl_alter_delete[test_ttl_alter_delete]
test_ttl_replicated/test.py::test_ttl_alter_delete[test_ttl_alter_delete_replicated]
test_ttl_replicated/test.py::test_ttl_compatibility[node_left0-node_right0-0]
test_version_update/test.py::test_modulo_partition_key_issue_23508
test_ttl_replicated/test.py::test_ttl_many_columns
test_ttl_replicated/test.py::test_modify_column_ttl
test_ttl_replicated/test.py::test_merge_with_ttl_timeout
test_ttl_replicated/test.py::test_ttl_empty_parts
test_ttl_replicated/test.py::test_ttl_compatibility[node_left1-node_right1-1]
test_version_update/test.py::test_aggregate_function_versioning_persisting_metadata
test_version_update/test.py::test_aggregate_function_versioning_issue_16587
test_ttl_replicated/test.py::test_modify_ttl
test_mysql_database_engine/test.py::test_mysql_ddl_for_mysql_database
test_profile_events_s3/test.py::test_profile_events
test_version_update_after_mutation/test.py::test_upgrade_while_mutation
test_version_update_after_mutation/test.py::test_mutate_and_upgrade
test_system_flush_logs/test.py::test_system_logs[system.text_log-0]
test_user_defined_object_persistence/test.py::test_persistence
test_settings_profile/test.py::test_show_profiles
test_sql_user_defined_functions_on_cluster/test.py::test_sql_user_defined_functions_on_cluster
test_select_access_rights/test_main.py::test_alias_columns
test_select_access_rights/test_main.py::test_select_count
test_select_access_rights/test_main.py::test_select_join
test_replicated_merge_tree_compatibility/test.py::test_replicated_merge_tree_defaults_compatibility
test_postgresql_protocol/test.py::test_python_client
test_quota/test.py::test_add_remove_interval
test_quota/test.py::test_add_remove_quota
test_quota/test.py::test_consumption_of_show_clusters
test_quota/test.py::test_consumption_of_show_databases
test_quota/test.py::test_consumption_of_show_privileges
test_quota/test.py::test_consumption_of_show_processlist
test_quota/test.py::test_consumption_of_show_tables
test_quota/test.py::test_dcl_introspection
test_quota/test.py::test_dcl_management
test_quota/test.py::test_exceed_quota
test_quota/test.py::test_query_inserts
test_quota/test.py::test_quota_from_users_xml
test_quota/test.py::test_reload_users_xml_by_timer
test_quota/test.py::test_simpliest_quota
test_quota/test.py::test_tracking_quota
test_quota/test.py::test_users_xml_is_readonly
test_replicating_constants/test.py::test_different_versions
test_merge_tree_s3/test.py::test_heavy_insert_select_check_memory[node]

View File

@ -336,6 +336,9 @@ CI_CONFIG = {
"Integration tests (asan)": { "Integration tests (asan)": {
"required_build": "package_asan", "required_build": "package_asan",
}, },
"Integration tests (asan, analyzer)": {
"required_build": "package_asan",
},
"Integration tests (tsan)": { "Integration tests (tsan)": {
"required_build": "package_tsan", "required_build": "package_tsan",
}, },

View File

@ -190,27 +190,3 @@ def prepare_tests_results_for_clickhouse(
        result.append(current_row)
    return result
-def mark_flaky_tests(
-    clickhouse_helper: ClickHouseHelper, check_name: str, test_results: TestResults
-) -> None:
-    try:
-        query = f"""SELECT DISTINCT test_name
-        FROM checks
-        WHERE
-            check_start_time BETWEEN now() - INTERVAL 3 DAY AND now()
-            AND check_name = '{check_name}'
-            AND (test_status = 'FAIL' OR test_status = 'FLAKY')
-            AND pull_request_number = 0
-        """
-        tests_data = clickhouse_helper.select_json_each_row("default", query)
-        master_failed_tests = {row["test_name"] for row in tests_data}
-        logging.info("Found flaky tests: %s", ", ".join(master_failed_tests))
-        for test_result in test_results:
-            if test_result.status == "FAIL" and test_result.name in master_failed_tests:
-                test_result.status = "FLAKY"
-    except Exception as ex:
-        logging.error("Exception happened during flaky tests fetch %s", ex)

View File

@ -13,7 +13,6 @@ from github import Github
 from build_download_helper import download_builds_filter
 from clickhouse_helper import (
     ClickHouseHelper,
-    mark_flaky_tests,
     prepare_tests_results_for_clickhouse,
 )
 from commit_status_helper import RerunHelper, get_commit, post_commit_status
@ -231,7 +230,6 @@ def main():
     )
     ch_helper = ClickHouseHelper()
-    mark_flaky_tests(ch_helper, args.check_name, test_results)
     report_url = upload_results(
         s3_helper,

View File

@ -14,7 +14,6 @@ from github import Github
 from build_check import get_release_or_pr
 from clickhouse_helper import (
     ClickHouseHelper,
-    mark_flaky_tests,
     prepare_tests_results_for_clickhouse,
 )
 from commit_status_helper import (
@ -190,7 +189,6 @@ def main():
     state, description, test_results, additional_logs = process_results(output_path)
     ch_helper = ClickHouseHelper()
-    mark_flaky_tests(ch_helper, NAME, test_results)
     s3_path_prefix = os.path.join(
         get_release_or_pr(pr_info, get_version_from_repo())[0],
         pr_info.sha,

View File

@ -16,7 +16,6 @@ from github import Github
 from build_download_helper import download_all_deb_packages
 from clickhouse_helper import (
     ClickHouseHelper,
-    mark_flaky_tests,
     prepare_tests_results_for_clickhouse,
 )
 from commit_status_helper import (
@ -368,7 +367,6 @@ def main():
     state = override_status(state, check_name, invert=validate_bugfix_check)
     ch_helper = ClickHouseHelper()
-    mark_flaky_tests(ch_helper, check_name, test_results)
     report_url = upload_results(
         s3_helper,

View File

@ -15,7 +15,6 @@ from github import Github
 from build_download_helper import download_builds_filter
 from clickhouse_helper import (
     ClickHouseHelper,
-    mark_flaky_tests,
     prepare_tests_results_for_clickhouse,
 )
 from commit_status_helper import (
@ -345,7 +344,6 @@ def main():
         return
     ch_helper = ClickHouseHelper()
-    mark_flaky_tests(ch_helper, args.check_name, test_results)
     description = format_description(description)

View File

@ -15,7 +15,6 @@ from github import Github
 from build_download_helper import download_all_deb_packages
 from clickhouse_helper import (
     ClickHouseHelper,
-    mark_flaky_tests,
     prepare_tests_results_for_clickhouse,
 )
 from commit_status_helper import (
@ -71,7 +70,7 @@ def get_json_params_dict(
     }
-def get_env_for_runner(build_path, repo_path, result_path, work_path):
+def get_env_for_runner(check_name, build_path, repo_path, result_path, work_path):
     binary_path = os.path.join(build_path, "clickhouse")
     odbc_bridge_path = os.path.join(build_path, "clickhouse-odbc-bridge")
     library_bridge_path = os.path.join(build_path, "clickhouse-library-bridge")
@ -88,6 +87,9 @@ def get_env_for_runner(build_path, repo_path, result_path, work_path):
my_env["CLICKHOUSE_TESTS_JSON_PARAMS_PATH"] = os.path.join(work_path, "params.json") my_env["CLICKHOUSE_TESTS_JSON_PARAMS_PATH"] = os.path.join(work_path, "params.json")
my_env["CLICKHOUSE_TESTS_RUNNER_RESTART_DOCKER"] = "0" my_env["CLICKHOUSE_TESTS_RUNNER_RESTART_DOCKER"] = "0"
if "analyzer" in check_name.lower():
my_env["CLICKHOUSE_USE_NEW_ANALYZER"] = "1"
return my_env return my_env
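The hunk above exports CLICKHOUSE_USE_NEW_ANALYZER whenever the check name mentions the analyzer; a minimal sketch of how a runner-side consumer might read that flag (the helper name below is an assumption for illustration, not part of this diff):

import os

def analyzer_enabled() -> bool:
    # Assumed consumer: reads the flag exported by get_env_for_runner above;
    # falls back to the old analyzer when the variable is absent.
    return os.environ.get("CLICKHOUSE_USE_NEW_ANALYZER", "0") == "1"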
@ -225,7 +227,9 @@ def main():
     else:
         download_all_deb_packages(check_name, reports_path, build_path)
-    my_env = get_env_for_runner(build_path, repo_path, result_path, work_path)
+    my_env = get_env_for_runner(
+        check_name, build_path, repo_path, result_path, work_path
+    )
     json_path = os.path.join(work_path, "params.json")
     with open(json_path, "w", encoding="utf-8") as json_params:
@ -271,7 +275,6 @@ def main():
     state = override_status(state, check_name, invert=validate_bugfix_check)
     ch_helper = ClickHouseHelper()
-    mark_flaky_tests(ch_helper, check_name, test_results)
     s3_helper = S3Helper()
     report_url = upload_results(

View File

@ -349,7 +349,7 @@ def create_test_html_report(
         has_log_urls = True
         row = "<tr>"
-        has_error = test_result.status in ("FAIL", "FLAKY", "NOT_FAILED")
+        has_error = test_result.status in ("FAIL", "NOT_FAILED")
         if has_error and test_result.raw_logs is not None:
             row = '<tr class="failed">'
         row += "<td>" + test_result.name + "</td>"

View File

@ -13,7 +13,6 @@ from github import Github
 from build_download_helper import download_all_deb_packages
 from clickhouse_helper import (
     ClickHouseHelper,
-    mark_flaky_tests,
     prepare_tests_results_for_clickhouse,
 )
 from commit_status_helper import RerunHelper, get_commit, post_commit_status
@ -168,7 +167,6 @@ def run_stress_test(docker_image_name):
         result_path, server_log_path, run_log_path
     )
     ch_helper = ClickHouseHelper()
-    mark_flaky_tests(ch_helper, check_name, test_results)
     report_url = upload_results(
         s3_helper,

Some files were not shown because too many files have changed in this diff.