Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 23:52:03 +00:00)
Merge branch 'master' into tests/test_keeper_s3_snapshot
Commit 47a33efc73
@ -23,7 +23,7 @@ curl https://clickhouse.com/ | sh

## Upcoming Events

* [**v23.6 Release Webinar**](https://clickhouse.com/company/events/v23-6-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-06) - Jun 29 - 23.6 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**v23.7 Release Webinar**](https://clickhouse.com/company/events/v23-7-community-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-07) - Jul 27 - 23.7 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18
* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19
* [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20
@ -34,13 +34,13 @@ Also, keep an eye out for upcoming meetups around the world. Somewhere else you

## Recent Recordings

* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
* **Recording available**: [**v23.4 Release Webinar**](https://www.youtube.com/watch?v=4rrf6bk_mOg) Faster Parquet Reading, Asynchronous Connections to Replicas, Trailing Comma before FROM, extractKeyValuePairs, integrations updates, and so much more! Watch it now!
* **Recording available**: [**v23.6 Release Webinar**](https://www.youtube.com/watch?v=cuf_hYn7dqU) All the features of 23.6, one convenient video! Watch it now!
* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)

## Interested in joining ClickHouse and making it your full time job?
## Interested in joining ClickHouse and making it your full-time job?

We are a globally diverse and distributed team, united behind a common goal of creating industry-leading, real-time analytics. Here, you will have an opportunity to solve some of the most cutting edge technical challenges and have direct ownership of your work and vision. If you are a contributor by nature, a thinker as well as a doer - we’ll definitely click!
We are a globally diverse and distributed team, united behind a common goal of creating industry-leading, real-time analytics. Here, you will have an opportunity to solve some of the most cutting-edge technical challenges and have direct ownership of your work and vision. If you are a contributor by nature, a thinker and a doer - we’ll definitely click!

Check out our **current openings** here: https://clickhouse.com/company/careers
@ -1,5 +1,5 @@
|
||||
if (SANITIZE OR NOT (
|
||||
((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_PPC64LE OR ARCH_RISCV64)) OR
|
||||
((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_PPC64LE OR ARCH_RISCV64 OR ARCH_S390X)) OR
|
||||
(OS_DARWIN AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
|
||||
))
|
||||
if (ENABLE_JEMALLOC)
|
||||
@ -17,17 +17,17 @@ if (NOT ENABLE_JEMALLOC)
|
||||
endif ()
|
||||
|
||||
if (NOT OS_LINUX)
|
||||
message (WARNING "jemalloc support on non-linux is EXPERIMENTAL")
|
||||
message (WARNING "jemalloc support on non-Linux is EXPERIMENTAL")
|
||||
endif()
|
||||
|
||||
if (OS_LINUX)
|
||||
# ThreadPool select job randomly, and there can be some threads that had been
|
||||
# performed some memory heavy task before and will be inactive for some time,
|
||||
# but until it will became active again, the memory will not be freed since by
|
||||
# default each thread has it's own arena, but there should be not more then
|
||||
# ThreadPool select job randomly, and there can be some threads that have been
|
||||
# performed some memory-heavy tasks before and will be inactive for some time,
|
||||
# but until it becomes active again, the memory will not be freed since, by
|
||||
# default, each thread has its arena, but there should be no more than
|
||||
# 4*CPU arenas (see opt.nareans description).
|
||||
#
|
||||
# By enabling percpu_arena number of arenas limited to number of CPUs and hence
|
||||
# By enabling percpu_arena number of arenas is limited to the number of CPUs, and hence
|
||||
# this problem should go away.
|
||||
#
|
||||
# muzzy_decay_ms -- use MADV_FREE when available on newer Linuxes, to
|
||||
@ -38,7 +38,7 @@ if (OS_LINUX)
|
||||
else()
|
||||
set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000")
|
||||
endif()
|
||||
# CACHE variable is empty, to allow changing defaults without necessity
|
||||
# CACHE variable is empty to allow changing defaults without the necessity
|
||||
# to purge cache
|
||||
set (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE "" CACHE STRING "Change default configuration string of JEMalloc" )
|
||||
if (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE)
|
||||
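The CMake logic above assembles a default configuration string (`JEMALLOC_CONFIG_MALLOC_CONF`, e.g. `percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000` on Linux) that is compiled into jemalloc, and `JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE` lets a build replace it without purging the CMake cache. As a hedged aside (not part of this diff), the options that the compiled-in string resolves to can be read back at run time through jemalloc's standard `mallctl` API; this sketch assumes the usual `<jemalloc/jemalloc.h>` header and unprefixed public symbols (as implied by `JEMALLOC_IS_MALLOC`):

```cpp
#include <jemalloc/jemalloc.h>
#include <cstdio>

int main()
{
    // Read back options that the compiled-in malloc_conf (plus any overrides) resolved to.
    const char * percpu = nullptr;   // "disabled", "percpu" or "phycpu"
    size_t sz = sizeof(percpu);
    if (mallctl("opt.percpu_arena", &percpu, &sz, nullptr, 0) == 0)
        printf("opt.percpu_arena = %s\n", percpu);

    ssize_t muzzy_decay_ms = 0;
    sz = sizeof(muzzy_decay_ms);
    if (mallctl("opt.muzzy_decay_ms", &muzzy_decay_ms, &sz, nullptr, 0) == 0)
        printf("opt.muzzy_decay_ms = %zd\n", muzzy_decay_ms);

    unsigned narenas = 0;
    sz = sizeof(narenas);
    if (mallctl("arenas.narenas", &narenas, &sz, nullptr, 0) == 0)
        printf("arenas.narenas = %u\n", narenas);   // with percpu_arena enabled this tracks the CPU count
    return 0;
}
```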
@ -148,6 +148,8 @@ elseif (ARCH_PPC64LE)
|
||||
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_ppc64le")
|
||||
elseif (ARCH_RISCV64)
|
||||
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_riscv64")
|
||||
elseif (ARCH_S390X)
|
||||
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_s390x")
|
||||
else ()
|
||||
message (FATAL_ERROR "internal jemalloc: This arch is not supported")
|
||||
endif ()
|
||||
@ -172,7 +174,7 @@ target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)
|
||||
|
||||
# jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
|
||||
# The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
|
||||
# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracing.
|
||||
# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracking.
|
||||
#
|
||||
# ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
|
||||
target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)
|
||||
|
@ -0,0 +1,435 @@
|
||||
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
|
||||
#ifndef JEMALLOC_INTERNAL_DEFS_H_
|
||||
#define JEMALLOC_INTERNAL_DEFS_H_
|
||||
/*
|
||||
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
|
||||
* public APIs to be prefixed. This makes it possible, with some care, to use
|
||||
* multiple allocators simultaneously.
|
||||
*/
|
||||
/* #undef JEMALLOC_PREFIX */
|
||||
/* #undef JEMALLOC_CPREFIX */
|
||||
|
||||
/*
|
||||
* Define overrides for non-standard allocator-related functions if they are
|
||||
* present on the system.
|
||||
*/
|
||||
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
|
||||
#define JEMALLOC_OVERRIDE___LIBC_FREE
|
||||
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
|
||||
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
|
||||
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
|
||||
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
|
||||
#define JEMALLOC_OVERRIDE___LIBC_PVALLOC
|
||||
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */
|
||||
|
||||
/*
|
||||
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
|
||||
* For shared libraries, symbol visibility mechanisms prevent these symbols
|
||||
* from being exported, but for static libraries, naming collisions are a real
|
||||
* possibility.
|
||||
*/
|
||||
#define JEMALLOC_PRIVATE_NAMESPACE je_
|
||||
|
||||
/*
|
||||
* Hyper-threaded CPUs may need a special instruction inside spin loops in
|
||||
* order to yield to another virtual CPU.
|
||||
*/
|
||||
#define CPU_SPINWAIT
|
||||
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
|
||||
#define HAVE_CPU_SPINWAIT 0
|
||||
|
||||
/*
|
||||
* Number of significant bits in virtual addresses. This may be less than the
|
||||
* total number of bits in a pointer, e.g. on x64, for which the uppermost 16
|
||||
* bits are the same as bit 47.
|
||||
*/
|
||||
#define LG_VADDR 64
|
||||
|
||||
/* Defined if C11 atomics are available. */
|
||||
#define JEMALLOC_C11_ATOMICS
|
||||
|
||||
/* Defined if GCC __atomic atomics are available. */
|
||||
#define JEMALLOC_GCC_ATOMIC_ATOMICS
|
||||
/* and the 8-bit variant support. */
|
||||
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS
|
||||
|
||||
/* Defined if GCC __sync atomics are available. */
|
||||
#define JEMALLOC_GCC_SYNC_ATOMICS
|
||||
/* and the 8-bit variant support. */
|
||||
#define JEMALLOC_GCC_U8_SYNC_ATOMICS
|
||||
|
||||
/*
|
||||
* Defined if __builtin_clz() and __builtin_clzl() are available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_BUILTIN_CLZ
|
||||
|
||||
/*
|
||||
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
|
||||
*/
|
||||
/* #undef JEMALLOC_OS_UNFAIR_LOCK */
|
||||
|
||||
/* Defined if syscall(2) is usable. */
|
||||
#define JEMALLOC_USE_SYSCALL
|
||||
|
||||
/*
|
||||
* Defined if secure_getenv(3) is available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_SECURE_GETENV
|
||||
|
||||
/*
|
||||
* Defined if issetugid(2) is available.
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_ISSETUGID */
|
||||
|
||||
/* Defined if pthread_atfork(3) is available. */
|
||||
#define JEMALLOC_HAVE_PTHREAD_ATFORK
|
||||
|
||||
/* Defined if pthread_setname_np(3) is available. */
|
||||
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP
|
||||
|
||||
/* Defined if pthread_getname_np(3) is available. */
|
||||
#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP
|
||||
|
||||
/* Defined if pthread_get_name_np(3) is available. */
|
||||
/* #undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP */
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_CLOCK_MONOTONIC
|
||||
|
||||
/*
|
||||
* Defined if mach_absolute_time() is available.
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_CLOCK_REALTIME
|
||||
|
||||
/*
|
||||
* Defined if _malloc_thread_cleanup() exists. At least in the case of
|
||||
* FreeBSD, pthread_key_create() allocates, which if used during malloc
|
||||
* bootstrapping will cause recursion into the pthreads library. Therefore, if
|
||||
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
|
||||
* malloc_tsd.
|
||||
*/
|
||||
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */
|
||||
|
||||
/*
|
||||
* Defined if threaded initialization is known to be safe on this platform.
|
||||
* Among other things, it must be possible to initialize a mutex without
|
||||
* triggering allocation in order for threaded allocation to be safe.
|
||||
*/
|
||||
#define JEMALLOC_THREADED_INIT
|
||||
|
||||
/*
|
||||
* Defined if the pthreads implementation defines
|
||||
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
|
||||
* to avoid recursive allocation during mutex initialization.
|
||||
*/
|
||||
/* #undef JEMALLOC_MUTEX_INIT_CB */
|
||||
|
||||
/* Non-empty if the tls_model attribute is supported. */
|
||||
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
|
||||
|
||||
/*
|
||||
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
|
||||
* inline functions.
|
||||
*/
|
||||
/* #undef JEMALLOC_DEBUG */
|
||||
|
||||
/* JEMALLOC_STATS enables statistics calculation. */
|
||||
#define JEMALLOC_STATS
|
||||
|
||||
/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
|
||||
/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */
|
||||
|
||||
/* JEMALLOC_PROF enables allocation profiling. */
|
||||
/* #undef JEMALLOC_PROF */
|
||||
|
||||
/* Use libunwind for profile backtracing if defined. */
|
||||
/* #undef JEMALLOC_PROF_LIBUNWIND */
|
||||
|
||||
/* Use libgcc for profile backtracing if defined. */
|
||||
/* #undef JEMALLOC_PROF_LIBGCC */
|
||||
|
||||
/* Use gcc intrinsics for profile backtracing if defined. */
|
||||
/* #undef JEMALLOC_PROF_GCC */
|
||||
|
||||
/* JEMALLOC_PAGEID enabled page id */
|
||||
/* #undef JEMALLOC_PAGEID */
|
||||
|
||||
/* JEMALLOC_HAVE_PRCTL checks prctl */
|
||||
#define JEMALLOC_HAVE_PRCTL
|
||||
|
||||
/*
|
||||
* JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
|
||||
* segment (DSS).
|
||||
*/
|
||||
#define JEMALLOC_DSS
|
||||
|
||||
/* Support memory filling (junk/zero). */
|
||||
#define JEMALLOC_FILL
|
||||
|
||||
/* Support utrace(2)-based tracing. */
|
||||
/* #undef JEMALLOC_UTRACE */
|
||||
|
||||
/* Support utrace(2)-based tracing (label based signature). */
|
||||
/* #undef JEMALLOC_UTRACE_LABEL */
|
||||
|
||||
/* Support optional abort() on OOM. */
|
||||
/* #undef JEMALLOC_XMALLOC */
|
||||
|
||||
/* Support lazy locking (avoid locking unless a second thread is launched). */
|
||||
/* #undef JEMALLOC_LAZY_LOCK */
|
||||
|
||||
/*
|
||||
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
|
||||
* classes).
|
||||
*/
|
||||
/* #undef LG_QUANTUM */
|
||||
|
||||
/* One page is 2^LG_PAGE bytes. */
|
||||
#define LG_PAGE 12
|
||||
|
||||
/* Maximum number of regions in a slab. */
|
||||
/* #undef CONFIG_LG_SLAB_MAXREGS */
|
||||
|
||||
/*
|
||||
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
|
||||
* system does not explicitly support huge pages; system calls that require
|
||||
* explicit huge page support are separately configured.
|
||||
*/
|
||||
#define LG_HUGEPAGE 20
|
||||
|
||||
/*
|
||||
* If defined, adjacent virtual memory mappings with identical attributes
|
||||
* automatically coalesce, and they fragment when changes are made to subranges.
|
||||
* This is the normal order of things for mmap()/munmap(), but on Windows
|
||||
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
|
||||
* mappings do *not* coalesce/fragment.
|
||||
*/
|
||||
#define JEMALLOC_MAPS_COALESCE
|
||||
|
||||
/*
|
||||
* If defined, retain memory for later reuse by default rather than using e.g.
|
||||
* munmap() to unmap freed extents. This is enabled on 64-bit Linux because
|
||||
* common sequences of mmap()/munmap() calls will cause virtual memory map
|
||||
* holes.
|
||||
*/
|
||||
#define JEMALLOC_RETAIN
|
||||
|
||||
/* TLS is used to map arenas and magazine caches to threads. */
|
||||
#define JEMALLOC_TLS
|
||||
|
||||
/*
|
||||
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
|
||||
* Don't use this directly; instead use unreachable() from util.h
|
||||
*/
|
||||
#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable
|
||||
|
||||
/*
|
||||
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
|
||||
* use ffs_*() from util.h.
|
||||
*/
|
||||
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
|
||||
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
|
||||
#define JEMALLOC_INTERNAL_FFS __builtin_ffs
|
||||
|
||||
/*
|
||||
* popcount*() functions to use for bitmapping.
|
||||
*/
|
||||
#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
|
||||
#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount
|
||||
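The defines above map jemalloc's internal bit-scanning helpers onto GCC/Clang builtins. A small illustrative sketch of their semantics (not part of the generated header) — the detail worth remembering is that `__builtin_ffs*` is 1-based and returns 0 for a zero argument, while `__builtin_popcount*` simply counts set bits:

```cpp
#include <cassert>

int main()
{
    // ffs = "find first set": index of the lowest set bit, counting from 1; 0 means no bits set.
    assert(__builtin_ffsll(0) == 0);
    assert(__builtin_ffsll(1) == 1);
    assert(__builtin_ffsll(0b1000) == 4);

    // popcount: number of set bits.
    assert(__builtin_popcountl(0) == 0);
    assert(__builtin_popcountl(0b1011) == 3);
    return 0;
}
```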
|
||||
/*
|
||||
* If defined, explicitly attempt to more uniformly distribute large allocation
|
||||
* pointer alignments across all cache indices.
|
||||
*/
|
||||
#define JEMALLOC_CACHE_OBLIVIOUS
|
||||
|
||||
/*
|
||||
* If defined, enable logging facilities. We make this a configure option to
|
||||
* avoid taking extra branches everywhere.
|
||||
*/
|
||||
/* #undef JEMALLOC_LOG */
|
||||
|
||||
/*
|
||||
* If defined, use readlinkat() (instead of readlink()) to follow
|
||||
* /etc/malloc_conf.
|
||||
*/
|
||||
/* #undef JEMALLOC_READLINKAT */
|
||||
|
||||
/*
|
||||
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
|
||||
*/
|
||||
/* #undef JEMALLOC_ZONE */
|
||||
|
||||
/*
|
||||
* Methods for determining whether the OS overcommits.
|
||||
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
|
||||
* /proc/sys/vm.overcommit_memory file.
|
||||
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
|
||||
*/
|
||||
/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
|
||||
#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
|
||||
|
||||
/* Defined if madvise(2) is available. */
|
||||
#define JEMALLOC_HAVE_MADVISE
|
||||
|
||||
/*
|
||||
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
|
||||
* arguments to madvise(2).
|
||||
*/
|
||||
#define JEMALLOC_HAVE_MADVISE_HUGE
|
||||
|
||||
/*
|
||||
* Methods for purging unused pages differ between operating systems.
|
||||
*
|
||||
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
|
||||
* will be discarded rather than swapped out.
|
||||
* madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
|
||||
* defined, this immediately discards pages,
|
||||
* such that new pages will be demand-zeroed if
|
||||
* the address region is later touched;
|
||||
* otherwise this behaves similarly to
|
||||
* MADV_FREE, though typically with higher
|
||||
* system overhead.
|
||||
*/
|
||||
#define JEMALLOC_PURGE_MADVISE_FREE
|
||||
#define JEMALLOC_PURGE_MADVISE_DONTNEED
|
||||
#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
|
||||
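The purging macros above select how unused pages are handed back to the kernel, as described in the comment: `MADV_DONTNEED` discards pages immediately (subsequent touches are demand-zeroed, which is what `JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS` records), while `MADV_FREE` only marks them reclaimable under memory pressure. A minimal Linux-only sketch of the difference, using plain POSIX/Linux calls rather than jemalloc internals:

```cpp
#include <sys/mman.h>
#include <cassert>
#include <cstring>
#include <cstdio>

int main()
{
    const size_t len = 1 << 20;  // 1 MiB anonymous mapping
    char * p = static_cast<char *>(mmap(nullptr, len, PROT_READ | PROT_WRITE,
                                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
    assert(p != MAP_FAILED);

    memset(p, 0xAB, len);               // dirty the pages

    // MADV_DONTNEED: pages are dropped now; the next read is demand-zeroed.
    madvise(p, len, MADV_DONTNEED);
    printf("after MADV_DONTNEED, first byte = %d\n", p[0]);   // prints 0 on Linux anonymous mappings

    // MADV_FREE (when available) merely allows lazy reclamation: cheaper, but the
    // contents may survive until the kernel actually needs the memory.
#ifdef MADV_FREE
    memset(p, 0xAB, len);
    madvise(p, len, MADV_FREE);
#endif

    munmap(p, len);
    return 0;
}
```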
|
||||
/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
|
||||
/* #undef JEMALLOC_DEFINE_MADVISE_FREE */
|
||||
|
||||
/*
|
||||
* Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
|
||||
*/
|
||||
#define JEMALLOC_MADVISE_DONTDUMP
|
||||
|
||||
/*
|
||||
* Defined if MADV_[NO]CORE is supported as an argument to madvise.
|
||||
*/
|
||||
/* #undef JEMALLOC_MADVISE_NOCORE */
|
||||
|
||||
/* Defined if mprotect(2) is available. */
|
||||
#define JEMALLOC_HAVE_MPROTECT
|
||||
|
||||
/*
|
||||
* Defined if transparent huge pages (THPs) are supported via the
|
||||
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
|
||||
*/
|
||||
/* #undef JEMALLOC_THP */
|
||||
|
||||
/* Defined if posix_madvise is available. */
|
||||
/* #undef JEMALLOC_HAVE_POSIX_MADVISE */
|
||||
|
||||
/*
|
||||
* Method for purging unused pages using posix_madvise.
|
||||
*
|
||||
* posix_madvise(..., POSIX_MADV_DONTNEED)
|
||||
*/
|
||||
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
|
||||
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */
|
||||
|
||||
/*
|
||||
* Defined if memcntl page admin call is supported
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_MEMCNTL */
|
||||
|
||||
/*
|
||||
* Defined if malloc_size is supported
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_MALLOC_SIZE */
|
||||
|
||||
/* Define if operating system has alloca.h header. */
|
||||
#define JEMALLOC_HAS_ALLOCA_H
|
||||
|
||||
/* C99 restrict keyword supported. */
|
||||
#define JEMALLOC_HAS_RESTRICT
|
||||
|
||||
/* For use by hash code. */
|
||||
#define JEMALLOC_BIG_ENDIAN
|
||||
|
||||
/* sizeof(int) == 2^LG_SIZEOF_INT. */
|
||||
#define LG_SIZEOF_INT 2
|
||||
|
||||
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
|
||||
#define LG_SIZEOF_LONG 3
|
||||
|
||||
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
|
||||
#define LG_SIZEOF_LONG_LONG 3
|
||||
|
||||
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
|
||||
#define LG_SIZEOF_INTMAX_T 3
|
||||
|
||||
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
|
||||
/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */
|
||||
|
||||
/* glibc memalign hook. */
|
||||
/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */
|
||||
|
||||
/* pthread support */
|
||||
#define JEMALLOC_HAVE_PTHREAD
|
||||
|
||||
/* dlsym() support */
|
||||
#define JEMALLOC_HAVE_DLSYM
|
||||
|
||||
/* Adaptive mutex support in pthreads. */
|
||||
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
|
||||
|
||||
/* GNU specific sched_getcpu support */
|
||||
#define JEMALLOC_HAVE_SCHED_GETCPU
|
||||
|
||||
/* GNU specific sched_setaffinity support */
|
||||
#define JEMALLOC_HAVE_SCHED_SETAFFINITY
|
||||
|
||||
/*
|
||||
* If defined, all the features necessary for background threads are present.
|
||||
*/
|
||||
#define JEMALLOC_BACKGROUND_THREAD
|
||||
|
||||
/*
|
||||
* If defined, jemalloc symbols are not exported (doesn't work when
|
||||
* JEMALLOC_PREFIX is not defined).
|
||||
*/
|
||||
/* #undef JEMALLOC_EXPORT */
|
||||
|
||||
/* config.malloc_conf options string. */
|
||||
#define JEMALLOC_CONFIG_MALLOC_CONF ""
|
||||
|
||||
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
|
||||
#define JEMALLOC_IS_MALLOC
|
||||
|
||||
/*
|
||||
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
|
||||
*/
|
||||
#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
|
||||
|
||||
/* Performs additional safety checks when defined. */
|
||||
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */
|
||||
|
||||
/* Is C++ support being built? */
|
||||
#define JEMALLOC_ENABLE_CXX
|
||||
|
||||
/* Performs additional size checks when defined. */
|
||||
/* #undef JEMALLOC_OPT_SIZE_CHECKS */
|
||||
|
||||
/* Allows sampled junk and stash for checking use-after-free when defined. */
|
||||
/* #undef JEMALLOC_UAF_DETECTION */
|
||||
|
||||
/* Darwin VM_MAKE_TAG support */
|
||||
/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */
|
||||
|
||||
/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
|
||||
#define JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
|
23
docs/changelogs/v23.3.8.21-lts.md
Normal file
@ -0,0 +1,23 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2023
|
||||
---
|
||||
|
||||
# 2023 Changelog
|
||||
|
||||
### ClickHouse release v23.3.8.21-lts (1675f2264f3) FIXME as compared to v23.3.7.5-lts (bc683c11c92)
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
|
||||
* Fix backward compatibility for IP types hashing in aggregate functions [#50551](https://github.com/ClickHouse/ClickHouse/pull/50551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||
* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Check refcount in `RemoveManyObjectStorageOperation::finalize` instead of `execute` [#51954](https://github.com/ClickHouse/ClickHouse/pull/51954) ([vdimir](https://github.com/vdimir)).
|
||||
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
|
||||
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Fix MergeTreeMarksLoader segfaulting if marks file is longer than expected [#51636](https://github.com/ClickHouse/ClickHouse/pull/51636) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
|
@ -11,7 +11,8 @@ Supported platforms:
|
||||
|
||||
- x86_64
|
||||
- AArch64
|
||||
- Power9 (experimental)
|
||||
- PowerPC 64 LE (experimental)
|
||||
- RISC-V 64 (experimental)
|
||||
|
||||
## Building on Ubuntu
|
||||
|
||||
@ -42,7 +43,7 @@ sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
|
||||
|
||||
For other Linux distributions, check the availability of LLVM's [prebuilt packages](https://releases.llvm.org/download.html).
|
||||
|
||||
As of April 2023, any version of Clang >= 15 will work.
|
||||
As of April 2023, clang-16 or higher will work.
|
||||
GCC as a compiler is not supported.
|
||||
To build with a specific Clang version:
|
||||
|
||||
@ -86,8 +87,8 @@ The build requires the following components:
|
||||
|
||||
- Git (used to checkout the sources, not needed for the build)
|
||||
- CMake 3.20 or newer
|
||||
- Compiler: Clang 15 or newer
|
||||
- Linker: lld 15 or newer
|
||||
- Compiler: clang-16 or newer
|
||||
- Linker: lld-16 or newer
|
||||
- Ninja
|
||||
- Yasm
|
||||
- Gawk
|
||||
|
@ -1201,13 +1201,58 @@ Keys:
|
||||
- `console` – Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`.
|
||||
- `stream_compress` – Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.
|
||||
|
||||
Both log and error log file names (only file names, not directories) support date and time format specifiers.
|
||||
|
||||
**Format specifiers**
|
||||
Using the following format specifiers, you can define a pattern for the resulting file name. The “Example” column shows possible results for `2023-07-06 18:32:07`.
|
||||
|
||||
| Specifier | Description | Example |
|
||||
|-------------|---------------------------------------------------------------------------------------------------------------------|--------------------------|
|
||||
| %% | Literal % | % |
|
||||
| %n | New-line character | |
|
||||
| %t | Horizontal tab character | |
|
||||
| %Y | Year as a decimal number, e.g. 2017 | 2023 |
|
||||
| %y | Last 2 digits of year as a decimal number (range [00,99]) | 23 |
|
||||
| %C | First 2 digits of year as a decimal number (range [00,99]) | 20 |
|
||||
| %G | Four-digit [ISO 8601 week-based year](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), i.e. the year that contains the specified week. Normally useful only with %V | 2023 |
|
||||
| %g | Last 2 digits of [ISO 8601 week-based year](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), i.e. the year that contains the specified week. | 23 |
|
||||
| %b | Abbreviated month name, e.g. Oct (locale dependent) | Jul |
|
||||
| %h | Synonym of %b | Jul |
|
||||
| %B | Full month name, e.g. October (locale dependent) | July |
|
||||
| %m | Month as a decimal number (range [01,12]) | 07 |
|
||||
| %U | Week of the year as a decimal number (Sunday is the first day of the week) (range [00,53]) | 27 |
|
||||
| %W | Week of the year as a decimal number (Monday is the first day of the week) (range [00,53]) | 27 |
|
||||
| %V | ISO 8601 week number (range [01,53]) | 27 |
|
||||
| %j | Day of the year as a decimal number (range [001,366]) | 187 |
|
||||
| %d | Day of the month as a zero-padded decimal number (range [01,31]). Single digit is preceded by zero. | 06 |
|
||||
| %e | Day of the month as a space-padded decimal number (range [1,31]). Single digit is preceded by a space. | 6 |
|
||||
| %a | Abbreviated weekday name, e.g. Fri (locale dependent) | Thu |
|
||||
| %A | Full weekday name, e.g. Friday (locale dependent) | Thursday |
|
||||
| %w | Weekday as an integer number with Sunday as 0 (range [0-6]) | 4 |
|
||||
| %u | Weekday as a decimal number, where Monday is 1 (ISO 8601 format) (range [1-7]) | 4 |
|
||||
| %H | Hour as a decimal number, 24 hour clock (range [00-23]) | 18 |
|
||||
| %I | Hour as a decimal number, 12 hour clock (range [01,12]) | 06 |
|
||||
| %M | Minute as a decimal number (range [00,59]) | 32 |
|
||||
| %S | Second as a decimal number (range [00,60]) | 07 |
|
||||
| %c | Standard date and time string, e.g. Sun Oct 17 04:41:13 2010 (locale dependent) | Thu Jul 6 18:32:07 2023 |
|
||||
| %x | Localized date representation (locale dependent) | 07/06/23 |
|
||||
| %X | Localized time representation, e.g. 18:40:20 or 6:40:20 PM (locale dependent) | 18:32:07 |
|
||||
| %D | Short MM/DD/YY date, equivalent to %m/%d/%y | 07/06/23 |
|
||||
| %F | Short YYYY-MM-DD date, equivalent to %Y-%m-%d | 2023-07-06 |
|
||||
| %r | Localized 12-hour clock time (locale dependent) | 06:32:07 PM |
|
||||
| %R | Equivalent to "%H:%M" | 18:32 |
|
||||
| %T | Equivalent to "%H:%M:%S" (the ISO 8601 time format) | 18:32:07 |
|
||||
| %p | Localized a.m. or p.m. designation (locale dependent) | PM |
|
||||
| %z | Offset from UTC in the ISO 8601 format (e.g. -0430), or no characters if the time zone information is not available | +0800 |
|
||||
| %Z | Locale-dependent time zone name or abbreviation, or no characters if the time zone information is not available | Z AWST |
|
||||
|
||||
**Example**
|
||||
|
||||
``` xml
|
||||
<logger>
|
||||
<level>trace</level>
|
||||
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
|
||||
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
|
||||
<log>/var/log/clickhouse-server/clickhouse-server-%F-%T.log</log>
|
||||
<errorlog>/var/log/clickhouse-server/clickhouse-server-%F-%T.err.log</errorlog>
|
||||
<size>1000M</size>
|
||||
<count>10</count>
|
||||
<stream_compress>true</stream_compress>
|
||||
|
@ -300,7 +300,7 @@ SELECT groupArrayResample(30, 75, 30)(name, age) FROM people
|
||||
|
||||
Consider the results.
|
||||
|
||||
`Jonh` is out of the sample because he’s too young. Other people are distributed according to the specified age intervals.
|
||||
`John` is out of the sample because he’s too young. Other people are distributed according to the specified age intervals.
|
||||
|
||||
Now let’s count the total number of people and their average wage in the specified age intervals.
|
||||
|
||||
|
@ -718,7 +718,7 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
|
||||
|
||||
## age
|
||||
|
||||
Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 microsecond.
|
||||
Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 second.
|
||||
E.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for `day` unit, 0 months for `month` unit, 0 years for `year` unit.
|
||||
|
||||
For an alternative to `age`, see function `date_diff`.
|
||||
@ -734,8 +734,6 @@ age('unit', startdate, enddate, [timezone])
|
||||
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
|
||||
Possible values:
|
||||
|
||||
- `microsecond` (possible abbreviations: `us`, `u`)
|
||||
- `millisecond` (possible abbreviations: `ms`)
|
||||
- `second` (possible abbreviations: `ss`, `s`)
|
||||
- `minute` (possible abbreviations: `mi`, `n`)
|
||||
- `hour` (possible abbreviations: `hh`, `h`)
|
||||
@ -811,8 +809,6 @@ Aliases: `dateDiff`, `DATE_DIFF`, `timestampDiff`, `timestamp_diff`, `TIMESTAMP_
|
||||
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
|
||||
Possible values:
|
||||
|
||||
- `microsecond` (possible abbreviations: `us`, `u`)
|
||||
- `millisecond` (possible abbreviations: `ms`)
|
||||
- `second` (possible abbreviations: `ss`, `s`)
|
||||
- `minute` (possible abbreviations: `mi`, `n`)
|
||||
- `hour` (possible abbreviations: `hh`, `h`)
|
||||
|
@ -575,14 +575,60 @@ ClickHouse поддерживает динамическое изменение
|
||||
- `errorlog` - Файл лога ошибок.
|
||||
- `size` - Размер файла. Действует для `log` и `errorlog`. Как только файл достиг размера `size`, ClickHouse архивирует и переименовывает его, а на его месте создает новый файл лога.
|
||||
- `count` - Количество заархивированных файлов логов, которые сохраняет ClickHouse.
|
||||
- `stream_compress` – Сжимать `log` и `errorlog` с помощью алгоритма `lz4`. Чтобы активировать, установите значение `1` или `true`.
|
||||
|
||||
Имена файлов `log` и `errorlog` (только имя файла, а не директорий) поддерживают спецификаторы шаблонов даты и времени.
|
||||
|
||||
**Спецификаторы форматирования**
|
||||
С помощью следующих спецификаторов, можно определить шаблон для формирования имени файла. Столбец “Пример” показывает возможные значения на момент времени `2023-07-06 18:32:07`.
|
||||
|
||||
| Спецификатор | Описание | Пример |
|
||||
|--------------|---------------------------------------------------------------------------------------------------------------------|--------------------------|
|
||||
| %% | Литерал % | % |
|
||||
| %n | Символ новой строки | |
|
||||
| %t | Символ горизонтальной табуляции | |
|
||||
| %Y | Год как десятичное число, например, 2017 | 2023 |
|
||||
| %y | Последние 2 цифры года в виде десятичного числа (диапазон [00,99]) | 23 |
|
||||
| %C | Первые 2 цифры года в виде десятичного числа (диапазон [00,99]) | 20 |
|
||||
| %G | Год по неделям согласно [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), то есть год, который содержит указанную неделю. Обычно используется вместе с %V. | 2023 |
|
||||
| %g | Последние 2 цифры [года по неделям ISO 8601](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), т.е. года, содержащего указанную неделю (диапазон [00,99]). | 23 |
|
||||
| %b | Сокращённое название месяца, например Oct (зависит от локали) | Jul |
|
||||
| %h | Синоним %b | Jul |
|
||||
| %B | Полное название месяца, например, October (зависит от локали) | July |
|
||||
| %m | Месяц в виде десятичного числа (диапазон [01,12]) | 07 |
|
||||
| %U | Неделя года в виде десятичного числа (воскресенье - первый день недели) (диапазон [00,53]) | 27 |
|
||||
| %W | Неделя года в виде десятичного числа (понедельник - первый день недели) (диапазон [00,53]) | 27 |
|
||||
| %V | Неделя года ISO 8601 (диапазон [01,53]) | 27 |
|
||||
| %j | День года в виде десятичного числа (диапазон [001,366]) | 187 |
|
||||
| %d | День месяца в виде десятичного числа (диапазон [01,31]) Перед одиночной цифрой ставится ноль. | 06 |
|
||||
| %e | День месяца в виде десятичного числа (диапазон [1,31]). Перед одиночной цифрой ставится пробел. | 6 |
|
||||
| %a | Сокращённое название дня недели, например, Fri (зависит от локали) | Thu |
|
||||
| %A | Полный день недели, например, Friday (зависит от локали) | Thursday |
|
||||
| %w | День недели в виде десятичного числа, где воскресенье равно 0 (диапазон [0-6]) | 4 |
|
||||
| %u | День недели в виде десятичного числа, где понедельник равен 1 (формат ISO 8601) (диапазон [1-7]) | 4 |
|
||||
| %H | Час в виде десятичного числа, 24-часовой формат (диапазон [00-23]) | 18 |
|
||||
| %I | Час в виде десятичного числа, 12-часовой формат (диапазон [01,12]) | 06 |
|
||||
| %M | Минуты в виде десятичного числа (диапазон [00,59]) | 32 |
|
||||
| %S | Секунды как десятичное число (диапазон [00,60]) | 07 |
|
||||
| %c | Стандартная строка даты и времени, например, Sun Oct 17 04:41:13 2010 (зависит от локали) | Thu Jul 6 18:32:07 2023 |
|
||||
| %x | Локализованное представление даты (зависит от локали) | 07/06/23 |
|
||||
| %X | Локализованное представление времени, например, 18:40:20 или 6:40:20 PM (зависит от локали) | 18:32:07 |
|
||||
| %D | Эквивалентно "%m/%d/%y" | 07/06/23 |
|
||||
| %F | Эквивалентно "%Y-%m-%d" (формат даты ISO 8601) | 2023-07-06 |
|
||||
| %r | Локализованное 12-часовое время (зависит от локали) | 06:32:07 PM |
|
||||
| %R | Эквивалентно "%H:%M" | 18:32 |
|
||||
| %T | Эквивалентно "%H:%M:%S" (формат времени ISO 8601) | 18:32:07 |
|
||||
| %p | Локализованное обозначение a.m. или p.m. (зависит от локали) | PM |
|
||||
| %z | Смещение от UTC в формате ISO 8601 (например, -0430), или без символов, если информация о часовом поясе недоступна | +0800 |
|
||||
| %Z | Зависящее от локали название или аббревиатура часового пояса, если информация о часовом поясе доступна | Z AWST |
|
||||
|
||||
**Пример**
|
||||
|
||||
``` xml
|
||||
<logger>
|
||||
<level>trace</level>
|
||||
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
|
||||
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
|
||||
<log>/var/log/clickhouse-server/clickhouse-server-%F-%T.log</log>
|
||||
<errorlog>/var/log/clickhouse-server/clickhouse-server-%F-%T.err.log</errorlog>
|
||||
<size>1000M</size>
|
||||
<count>10</count>
|
||||
</logger>
|
||||
|
@ -621,7 +621,7 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
|
||||
|
||||
## age
|
||||
|
||||
Вычисляет компонент `unit` разницы между `startdate` и `enddate`. Разница вычисляется с точностью в 1 микросекунду.
|
||||
Вычисляет компонент `unit` разницы между `startdate` и `enddate`. Разница вычисляется с точностью в 1 секунду.
|
||||
Например, разница между `2021-12-29` и `2022-01-01` 3 дня для единицы `day`, 0 месяцев для единицы `month`, 0 лет для единицы `year`.
|
||||
|
||||
**Синтаксис**
|
||||
@ -635,8 +635,6 @@ age('unit', startdate, enddate, [timezone])
|
||||
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
|
||||
Возможные значения:
|
||||
|
||||
- `microsecond` (возможные сокращения: `us`, `u`)
|
||||
- `millisecond` (возможные сокращения: `ms`)
|
||||
- `second` (возможные сокращения: `ss`, `s`)
|
||||
- `minute` (возможные сокращения: `mi`, `n`)
|
||||
- `hour` (возможные сокращения: `hh`, `h`)
|
||||
@ -710,8 +708,6 @@ date_diff('unit', startdate, enddate, [timezone])
|
||||
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
|
||||
Возможные значения:
|
||||
|
||||
- `microsecond` (возможные сокращения: `us`, `u`)
|
||||
- `millisecond` (возможные сокращения: `ms`)
|
||||
- `second` (возможные сокращения: `ss`, `s`)
|
||||
- `minute` (возможные сокращения: `mi`, `n`)
|
||||
- `hour` (возможные сокращения: `hh`, `h`)
|
||||
|
@ -643,8 +643,6 @@ date_diff('unit', startdate, enddate, [timezone])
|
||||
- `unit` — `value`对应的时间单位。类型为[String](../../sql-reference/data-types/string.md)。
|
||||
可能的值:
|
||||
|
||||
- `microsecond`
|
||||
- `millisecond`
|
||||
- `second`
|
||||
- `minute`
|
||||
- `hour`
|
||||
|
@ -35,6 +35,7 @@ public:
|
||||
std::shared_ptr<IBackupCoordination> backup_coordination;
|
||||
std::optional<UUID> backup_uuid;
|
||||
bool deduplicate_files = true;
|
||||
bool allow_s3_native_copy = true;
|
||||
};
|
||||
|
||||
static BackupFactory & instance();
|
||||
|
@ -101,14 +101,16 @@ namespace
|
||||
|
||||
|
||||
BackupReaderS3::BackupReaderS3(
|
||||
const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_)
|
||||
const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ContextPtr & context_)
|
||||
: BackupReaderDefault(&Poco::Logger::get("BackupReaderS3"), context_)
|
||||
, s3_uri(s3_uri_)
|
||||
, client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_))
|
||||
, request_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString()).request_settings)
|
||||
, data_source_description{DataSourceType::S3, s3_uri.endpoint, false, false}
|
||||
{
|
||||
request_settings.updateFromSettings(context_->getSettingsRef());
|
||||
request_settings.max_single_read_retries = context_->getSettingsRef().s3_max_single_read_retries; // FIXME: Avoid taking value for endpoint
|
||||
request_settings.allow_native_copy = allow_s3_native_copy;
|
||||
}
|
||||
|
||||
BackupReaderS3::~BackupReaderS3() = default;
|
||||
@ -141,8 +143,7 @@ void BackupReaderS3::copyFileToDisk(const String & path_in_backup, size_t file_s
|
||||
if (destination_data_source_description.sameKind(data_source_description)
|
||||
&& (destination_data_source_description.is_encrypted == encrypted_in_backup))
|
||||
{
|
||||
/// Use native copy, the more optimal way.
|
||||
LOG_TRACE(log, "Copying {} from S3 to disk {} using native copy", path_in_backup, destination_disk->getName());
|
||||
LOG_TRACE(log, "Copying {} from S3 to disk {}", path_in_backup, destination_disk->getName());
|
||||
auto write_blob_function = [&](const Strings & blob_path, WriteMode mode, const std::optional<ObjectAttributes> & object_attributes) -> size_t
|
||||
{
|
||||
/// Object storage always uses mode `Rewrite` because it simulates append using metadata and different files.
|
||||
@ -177,7 +178,7 @@ void BackupReaderS3::copyFileToDisk(const String & path_in_backup, size_t file_s
|
||||
|
||||
|
||||
BackupWriterS3::BackupWriterS3(
|
||||
const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_)
|
||||
const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ContextPtr & context_)
|
||||
: BackupWriterDefault(&Poco::Logger::get("BackupWriterS3"), context_)
|
||||
, s3_uri(s3_uri_)
|
||||
, client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_))
|
||||
@ -186,6 +187,7 @@ BackupWriterS3::BackupWriterS3(
|
||||
{
|
||||
request_settings.updateFromSettings(context_->getSettingsRef());
|
||||
request_settings.max_single_read_retries = context_->getSettingsRef().s3_max_single_read_retries; // FIXME: Avoid taking value for endpoint
|
||||
request_settings.allow_native_copy = allow_s3_native_copy;
|
||||
}
|
||||
|
||||
void BackupWriterS3::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
|
||||
@ -200,8 +202,7 @@ void BackupWriterS3::copyFileFromDisk(const String & path_in_backup, DiskPtr src
|
||||
/// In this case we can't use the native copy.
|
||||
if (auto blob_path = src_disk->getBlobPath(src_path); blob_path.size() == 2)
|
||||
{
|
||||
/// Use native copy, the more optimal way.
|
||||
LOG_TRACE(log, "Copying file {} from disk {} to S3 using native copy", src_path, src_disk->getName());
|
||||
LOG_TRACE(log, "Copying file {} from disk {} to S3", src_path, src_disk->getName());
|
||||
copyS3File(
|
||||
client,
|
||||
/* src_bucket */ blob_path[1],
|
||||
|
@ -17,7 +17,7 @@ namespace DB
|
||||
class BackupReaderS3 : public BackupReaderDefault
|
||||
{
|
||||
public:
|
||||
BackupReaderS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_);
|
||||
BackupReaderS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ContextPtr & context_);
|
||||
~BackupReaderS3() override;
|
||||
|
||||
bool fileExists(const String & file_name) override;
|
||||
@ -38,7 +38,7 @@ private:
|
||||
class BackupWriterS3 : public BackupWriterDefault
|
||||
{
|
||||
public:
|
||||
BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_);
|
||||
BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ContextPtr & context_);
|
||||
~BackupWriterS3() override;
|
||||
|
||||
bool fileExists(const String & file_name) override;
|
||||
|
@ -25,6 +25,7 @@ namespace ErrorCodes
|
||||
M(Bool, async) \
|
||||
M(Bool, decrypt_files_from_encrypted_disks) \
|
||||
M(Bool, deduplicate_files) \
|
||||
M(Bool, allow_s3_native_copy) \
|
||||
M(UInt64, shard_num) \
|
||||
M(UInt64, replica_num) \
|
||||
M(Bool, internal) \
|
||||
|
@ -38,6 +38,9 @@ struct BackupSettings
|
||||
/// Whether the BACKUP will omit similar files (within one backup only).
|
||||
bool deduplicate_files = true;
|
||||
|
||||
/// Whether native copy is allowed (optimization for cloud storages, that sometimes could have bugs)
|
||||
bool allow_s3_native_copy = true;
|
||||
|
||||
/// 1-based shard index to store in the backup. 0 means all shards.
|
||||
/// Can only be used with BACKUP ON CLUSTER.
|
||||
size_t shard_num = 0;
|
||||
|
@ -348,6 +348,7 @@ void BackupsWorker::doBackup(
|
||||
backup_create_params.backup_coordination = backup_coordination;
|
||||
backup_create_params.backup_uuid = backup_settings.backup_uuid;
|
||||
backup_create_params.deduplicate_files = backup_settings.deduplicate_files;
|
||||
backup_create_params.allow_s3_native_copy = backup_settings.allow_s3_native_copy;
|
||||
BackupMutablePtr backup = BackupFactory::instance().createBackup(backup_create_params);
|
||||
|
||||
/// Write the backup.
|
||||
@ -647,6 +648,7 @@ void BackupsWorker::doRestore(
|
||||
backup_open_params.backup_info = backup_info;
|
||||
backup_open_params.base_backup_info = restore_settings.base_backup_info;
|
||||
backup_open_params.password = restore_settings.password;
|
||||
backup_open_params.allow_s3_native_copy = restore_settings.allow_s3_native_copy;
|
||||
BackupPtr backup = BackupFactory::instance().createBackup(backup_open_params);
|
||||
|
||||
String current_database = context->getCurrentDatabase();
|
||||
|
@ -161,6 +161,7 @@ namespace
|
||||
M(RestoreAccessCreationMode, create_access) \
|
||||
M(Bool, allow_unresolved_access_dependencies) \
|
||||
M(RestoreUDFCreationMode, create_function) \
|
||||
M(Bool, allow_s3_native_copy) \
|
||||
M(Bool, internal) \
|
||||
M(String, host_id) \
|
||||
M(OptionalUUID, restore_uuid)
|
||||
|
@ -107,6 +107,9 @@ struct RestoreSettings
|
||||
/// How the RESTORE command will handle if a user-defined function which it's going to restore already exists.
|
||||
RestoreUDFCreationMode create_function = RestoreUDFCreationMode::kCreateIfNotExists;
|
||||
|
||||
/// Whether native copy is allowed (optimization for cloud storages, that sometimes could have bugs)
|
||||
bool allow_s3_native_copy = true;
|
||||
|
||||
/// Internal, should not be specified by user.
|
||||
bool internal = false;
|
||||
|
||||
|
@ -107,12 +107,12 @@ void registerBackupEngineS3(BackupFactory & factory)
|
||||
|
||||
if (params.open_mode == IBackup::OpenMode::READ)
|
||||
{
|
||||
auto reader = std::make_shared<BackupReaderS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.context);
|
||||
auto reader = std::make_shared<BackupReaderS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.allow_s3_native_copy, params.context);
|
||||
return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, reader, params.context);
|
||||
}
|
||||
else
|
||||
{
|
||||
auto writer = std::make_shared<BackupWriterS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.context);
|
||||
auto writer = std::make_shared<BackupWriterS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.allow_s3_native_copy, params.context);
|
||||
return std::make_unique<BackupImpl>(
|
||||
backup_name_for_logging,
|
||||
archive_params,
|
||||
|
@ -57,28 +57,25 @@ inline DB::UInt64 intHash64(DB::UInt64 x)
|
||||
|
||||
inline uint32_t s390x_crc32_u8(uint32_t crc, uint8_t v)
|
||||
{
|
||||
return crc32_be(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v));
|
||||
return crc32c_le_vx(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v));
|
||||
}
|
||||
|
||||
inline uint32_t s390x_crc32_u16(uint32_t crc, uint16_t v)
|
||||
{
|
||||
return crc32_be(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v));
|
||||
v = std::byteswap(v);
|
||||
return crc32c_le_vx(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v));
|
||||
}
|
||||
|
||||
inline uint32_t s390x_crc32_u32(uint32_t crc, uint32_t v)
|
||||
{
|
||||
return crc32_be(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v));
|
||||
v = std::byteswap(v);
|
||||
return crc32c_le_vx(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v));
|
||||
}
|
||||
|
||||
inline uint64_t s390x_crc32(uint64_t crc, uint64_t v)
|
||||
{
|
||||
uint64_t _crc = crc;
|
||||
uint32_t value_h, value_l;
|
||||
value_h = (v >> 32) & 0xffffffff;
|
||||
value_l = v & 0xffffffff;
|
||||
_crc = crc32_be(static_cast<uint32_t>(_crc), reinterpret_cast<unsigned char *>(&value_h), sizeof(uint32_t));
|
||||
_crc = crc32_be(static_cast<uint32_t>(_crc), reinterpret_cast<unsigned char *>(&value_l), sizeof(uint32_t));
|
||||
return _crc;
|
||||
v = std::byteswap(v);
|
||||
return crc32c_le_vx(static_cast<uint32_t>(crc), reinterpret_cast<unsigned char *>(&v), sizeof(uint64_t));
|
||||
}
|
||||
#endif
|
||||
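This hunk replaces the big-endian `crc32_be` calls with the vectorized little-endian routine `crc32c_le_vx` and byteswaps multi-byte inputs first (`std::byteswap`, C++23). The reasoning: a byte-oriented little-endian CRC sees a different byte sequence for the same integer value on a big-endian host such as s390x, so the value is swapped before hashing. A portable sketch of that idea; the `crc32c_bytes` helper below is an illustrative software stand-in, not the accelerated s390x implementation:

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>

// Plain bitwise CRC-32C over a byte stream (stand-in for the accelerated crc32c_le_vx).
static uint32_t crc32c_bytes(uint32_t crc, const unsigned char * data, size_t len)
{
    crc = ~crc;
    for (size_t i = 0; i < len; ++i)
    {
        crc ^= data[i];
        for (int bit = 0; bit < 8; ++bit)
            crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1u)));
    }
    return ~crc;
}

int main()
{
    uint64_t v = 0x0102030405060708ULL;

    // Hashing the in-memory representation directly makes the result depend on host endianness.
    uint32_t native = crc32c_bytes(0, reinterpret_cast<unsigned char *>(&v), sizeof(v));

    // On a big-endian host, byteswapping first makes the in-memory bytes match what a
    // little-endian host stores natively; that is what the patched s390x_crc32* helpers
    // do before calling crc32c_le_vx.
    uint64_t swapped = std::byteswap(v);
    uint32_t reordered = crc32c_bytes(0, reinterpret_cast<unsigned char *>(&swapped), sizeof(swapped));

    // The two values differ, showing that byte order matters to the CRC.
    printf("native=%08x reordered=%08x\n", (unsigned) native, (unsigned) reordered);
    return 0;
}
```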
|
||||
|
@ -6,17 +6,13 @@
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
struct MemoryTrackerSwitcher
|
||||
{
|
||||
explicit MemoryTrackerSwitcher(MemoryTracker * new_tracker)
|
||||
{
|
||||
/// current_thread is not initialized for the main thread, so simply do not switch anything
|
||||
if (!current_thread)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "current_thread is not initialized");
|
||||
return;
|
||||
|
||||
auto * thread_tracker = CurrentThread::getMemoryTracker();
|
||||
prev_untracked_memory = current_thread->untracked_memory;
|
||||
@ -28,6 +24,10 @@ struct MemoryTrackerSwitcher
|
||||
|
||||
~MemoryTrackerSwitcher()
|
||||
{
|
||||
/// current_thread is not initialized for the main thread, so simply do not switch anything
|
||||
if (!current_thread)
|
||||
return;
|
||||
|
||||
CurrentThread::flushUntrackedMemory();
|
||||
auto * thread_tracker = CurrentThread::getMemoryTracker();
|
||||
|
||||
@ -35,6 +35,7 @@ struct MemoryTrackerSwitcher
|
||||
thread_tracker->setParent(prev_memory_tracker_parent);
|
||||
}
|
||||
|
||||
private:
|
||||
MemoryTracker * prev_memory_tracker_parent = nullptr;
|
||||
Int64 prev_untracked_memory = 0;
|
||||
};
|
||||
|
@ -48,11 +48,7 @@ inline auto scaleMultiplier(UInt32 scale)
|
||||
|
||||
/** Components of DecimalX value:
|
||||
* whole - represents whole part of decimal, can be negative or positive.
|
||||
* fractional - for fractional part of decimal.
|
||||
*
|
||||
* 0.123 represents 0 / 0.123
|
||||
* -0.123 represents 0 / -0.123
|
||||
* -1.123 represents -1 / 0.123
|
||||
* fractional - for fractional part of decimal, always positive.
|
||||
*/
|
||||
template <typename DecimalType>
|
||||
struct DecimalComponents
|
||||
|
@ -629,7 +629,7 @@ class IColumn;
|
||||
M(Bool, database_replicated_allow_only_replicated_engine, false, "Allow to create only Replicated tables in database with engine Replicated", 0) \
|
||||
M(Bool, database_replicated_allow_replicated_engine_arguments, true, "Allow to create only Replicated tables in database with engine Replicated with explicit arguments", 0) \
|
||||
M(DistributedDDLOutputMode, distributed_ddl_output_mode, DistributedDDLOutputMode::THROW, "Format of distributed DDL query result", 0) \
|
||||
M(UInt64, distributed_ddl_entry_format_version, 3, "Compatibility version of distributed DDL (ON CLUSTER) queries", 0) \
|
||||
M(UInt64, distributed_ddl_entry_format_version, 5, "Compatibility version of distributed DDL (ON CLUSTER) queries", 0) \
|
||||
\
|
||||
M(UInt64, external_storage_max_read_rows, 0, "Limit maximum number of rows when table with external engine should flush history data. Now supported only for MySQL table engine, database engine, dictionary and MaterializedMySQL. If equal to 0, this setting is disabled", 0) \
|
||||
M(UInt64, external_storage_max_read_bytes, 0, "Limit maximum number of bytes when table with external engine should flush history data. Now supported only for MySQL table engine, database engine, dictionary and MaterializedMySQL. If equal to 0, this setting is disabled", 0) \
|
||||
|
@ -19,9 +19,6 @@
|
||||
namespace DB
|
||||
{
|
||||
|
||||
static constexpr auto microsecond_multiplier = 1000000;
|
||||
static constexpr auto millisecond_multiplier = 1000;
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
@ -1380,36 +1377,6 @@ struct ToRelativeSecondNumImpl
|
||||
using FactorTransform = ZeroTransform;
|
||||
};
|
||||
|
||||
template <Int64 scale_multiplier>
|
||||
struct ToRelativeSubsecondNumImpl
|
||||
{
|
||||
static constexpr auto name = "toRelativeSubsecondNumImpl";
|
||||
|
||||
static inline Int64 execute(const DateTime64 & t, DateTime64::NativeType scale, const DateLUTImpl &)
|
||||
{
|
||||
static_assert(scale_multiplier == 1000 || scale_multiplier == 1000000);
|
||||
if (scale == scale_multiplier)
|
||||
return t.value;
|
||||
if (scale > scale_multiplier)
|
||||
return t.value / (scale / scale_multiplier);
|
||||
return t.value * (scale_multiplier / scale);
|
||||
}
|
||||
static inline Int64 execute(UInt32 t, const DateLUTImpl &)
|
||||
{
|
||||
return t * scale_multiplier;
|
||||
}
|
||||
static inline Int64 execute(Int32 d, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return static_cast<Int64>(time_zone.fromDayNum(ExtendedDayNum(d))) * scale_multiplier;
|
||||
}
|
||||
static inline Int64 execute(UInt16 d, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return static_cast<Int64>(time_zone.fromDayNum(DayNum(d)) * scale_multiplier);
|
||||
}
|
||||
|
||||
using FactorTransform = ZeroTransform;
|
||||
};
|
||||
|
||||
struct ToYYYYMMImpl
|
||||
{
|
||||
static constexpr auto name = "toYYYYMM";
|
||||
@ -1509,47 +1476,25 @@ struct ToYYYYMMDDhhmmssImpl
|
||||
using FactorTransform = ZeroTransform;
|
||||
};
|
||||
|
||||
struct DateTimeComponentsWithFractionalPart : public DateLUTImpl::DateTimeComponents
|
||||
{
|
||||
UInt16 millisecond;
|
||||
UInt16 microsecond;
|
||||
};
|
||||
|
||||
struct ToDateTimeComponentsImpl
|
||||
{
|
||||
static constexpr auto name = "toDateTimeComponents";
|
||||
|
||||
static inline DateTimeComponentsWithFractionalPart execute(const DateTime64 & t, DateTime64::NativeType scale_multiplier, const DateLUTImpl & time_zone)
|
||||
static inline DateLUTImpl::DateTimeComponents execute(Int64 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
auto components = DecimalUtils::splitWithScaleMultiplier(t, scale_multiplier);
|
||||
|
||||
if (t.value < 0 && components.fractional)
|
||||
{
|
||||
components.fractional = scale_multiplier + (components.whole ? Int64(-1) : Int64(1)) * components.fractional;
|
||||
--components.whole;
|
||||
return time_zone.toDateTimeComponents(t);
|
||||
}
|
||||
Int64 fractional = components.fractional;
|
||||
if (scale_multiplier > microsecond_multiplier)
|
||||
fractional = fractional / (scale_multiplier / microsecond_multiplier);
|
||||
else if (scale_multiplier < microsecond_multiplier)
|
||||
fractional = fractional * (microsecond_multiplier / scale_multiplier);
|
||||
|
||||
constexpr Int64 divider = microsecond_multiplier/ millisecond_multiplier;
|
||||
UInt16 millisecond = static_cast<UInt16>(fractional / divider);
|
||||
UInt16 microsecond = static_cast<UInt16>(fractional % divider);
|
||||
return DateTimeComponentsWithFractionalPart{time_zone.toDateTimeComponents(components.whole), millisecond, microsecond};
|
||||
}
|
||||
static inline DateTimeComponentsWithFractionalPart execute(UInt32 t, const DateLUTImpl & time_zone)
|
||||
static inline DateLUTImpl::DateTimeComponents execute(UInt32 t, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return DateTimeComponentsWithFractionalPart{time_zone.toDateTimeComponents(static_cast<DateLUTImpl::Time>(t)), 0, 0};
|
||||
return time_zone.toDateTimeComponents(static_cast<DateLUTImpl::Time>(t));
|
||||
}
|
||||
static inline DateTimeComponentsWithFractionalPart execute(Int32 d, const DateLUTImpl & time_zone)
|
||||
static inline DateLUTImpl::DateTimeComponents execute(Int32 d, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return DateTimeComponentsWithFractionalPart{time_zone.toDateTimeComponents(ExtendedDayNum(d)), 0, 0};
|
||||
return time_zone.toDateTimeComponents(ExtendedDayNum(d));
|
||||
}
|
||||
static inline DateTimeComponentsWithFractionalPart execute(UInt16 d, const DateLUTImpl & time_zone)
|
||||
static inline DateLUTImpl::DateTimeComponents execute(UInt16 d, const DateLUTImpl & time_zone)
|
||||
{
|
||||
return DateTimeComponentsWithFractionalPart{time_zone.toDateTimeComponents(DayNum(d)), 0, 0};
|
||||
return time_zone.toDateTimeComponents(DayNum(d));
|
||||
}
|
||||
|
||||
using FactorTransform = ZeroTransform;
|
||||
|
@ -5,7 +5,7 @@

namespace DB
{
/** Transform-type wrapper for DateTime64, simplifies DateTime64 support for given Transform.
/** Tansform-type wrapper for DateTime64, simplifies DateTime64 support for given Transform.
 *
 * Depending on what overloads of Transform::execute() are available, when called with DateTime64 value,
 * invokes Transform::execute() with either:
@ -80,10 +80,7 @@ public:
        }
        else
        {
            auto components = DecimalUtils::splitWithScaleMultiplier(t, scale_multiplier);
            if (t.value < 0 && components.fractional)
                --components.whole;

            const auto components = DecimalUtils::splitWithScaleMultiplier(t, scale_multiplier);
            return wrapped_transform.execute(static_cast<Int64>(components.whole), std::forward<Args>(args)...);
        }
    }
@ -174,13 +174,12 @@ public:
    {
        auto res = static_cast<Int64>(transform_y.execute(y, timezone_y))
            - static_cast<Int64>(transform_x.execute(x, timezone_x));
        DateTimeComponentsWithFractionalPart a_comp;
        DateTimeComponentsWithFractionalPart b_comp;
        DateLUTImpl::DateTimeComponents a_comp;
        DateLUTImpl::DateTimeComponents b_comp;
        Int64 adjust_value;
        auto x_microseconds = TransformDateTime64<ToRelativeSubsecondNumImpl<microsecond_multiplier>>(transform_x.getScaleMultiplier()).execute(x, timezone_x);
        auto y_microseconds = TransformDateTime64<ToRelativeSubsecondNumImpl<microsecond_multiplier>>(transform_y.getScaleMultiplier()).execute(y, timezone_y);

        if (x_microseconds <= y_microseconds)
        auto x_seconds = TransformDateTime64<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(transform_x.getScaleMultiplier()).execute(x, timezone_x);
        auto y_seconds = TransformDateTime64<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(transform_y.getScaleMultiplier()).execute(y, timezone_y);
        if (x_seconds <= y_seconds)
        {
            a_comp = TransformDateTime64<ToDateTimeComponentsImpl>(transform_x.getScaleMultiplier()).execute(x, timezone_x);
            b_comp = TransformDateTime64<ToDateTimeComponentsImpl>(transform_y.getScaleMultiplier()).execute(y, timezone_y);
@ -193,16 +192,14 @@ public:
            adjust_value = 1;
        }

        if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeYearNumImpl<ResultPrecision::Extended>>>)
        {
            if ((a_comp.date.month > b_comp.date.month)
                || ((a_comp.date.month == b_comp.date.month) && ((a_comp.date.day > b_comp.date.day)
                || ((a_comp.date.day == b_comp.date.day) && ((a_comp.time.hour > b_comp.time.hour)
                || ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
                || ((a_comp.time.minute == b_comp.time.minute) && ((a_comp.time.second > b_comp.time.second)
                || ((a_comp.time.second == b_comp.time.second) && ((a_comp.millisecond > b_comp.millisecond)
                || ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))))))))))))
                || ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second))))
                )))))
                res += adjust_value;
        }
        else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeQuarterNumImpl<ResultPrecision::Extended>>>)
@ -213,9 +210,8 @@ public:
                || ((x_month_in_quarter == y_month_in_quarter) && ((a_comp.date.day > b_comp.date.day)
                || ((a_comp.date.day == b_comp.date.day) && ((a_comp.time.hour > b_comp.time.hour)
                || ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
                || ((a_comp.time.minute == b_comp.time.minute) && ((a_comp.time.second > b_comp.time.second)
                || ((a_comp.time.second == b_comp.time.second) && ((a_comp.millisecond > b_comp.millisecond)
                || ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))))))))))))
                || ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second))))
                )))))
                res += adjust_value;
        }
        else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeMonthNumImpl<ResultPrecision::Extended>>>)
@ -223,9 +219,8 @@ public:
            if ((a_comp.date.day > b_comp.date.day)
                || ((a_comp.date.day == b_comp.date.day) && ((a_comp.time.hour > b_comp.time.hour)
                || ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
                || ((a_comp.time.minute == b_comp.time.minute) && ((a_comp.time.second > b_comp.time.second)
                || ((a_comp.time.second == b_comp.time.second) && ((a_comp.millisecond > b_comp.millisecond)
                || ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))))))))))
                || ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second))))
                )))
                res += adjust_value;
        }
        else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeWeekNumImpl<ResultPrecision::Extended>>>)
@ -235,44 +230,25 @@ public:
            if ((x_day_of_week > y_day_of_week)
                || ((x_day_of_week == y_day_of_week) && (a_comp.time.hour > b_comp.time.hour))
                || ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
                || ((a_comp.time.minute == b_comp.time.minute) && ((a_comp.time.second > b_comp.time.second)
                || ((a_comp.time.second == b_comp.time.second) && ((a_comp.millisecond > b_comp.millisecond)
                || ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))))))))
                || ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second)))))
                res += adjust_value;
        }
        else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeDayNumImpl<ResultPrecision::Extended>>>)
        {
            if ((a_comp.time.hour > b_comp.time.hour)
                || ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
                || ((a_comp.time.minute == b_comp.time.minute) && ((a_comp.time.second > b_comp.time.second)
                || ((a_comp.time.second == b_comp.time.second) && ((a_comp.millisecond > b_comp.millisecond)
                || ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))))))))
                || ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second)))))
                res += adjust_value;
        }
        else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeHourNumImpl<ResultPrecision::Extended>>>)
        {
            if ((a_comp.time.minute > b_comp.time.minute)
                || ((a_comp.time.minute == b_comp.time.minute) && ((a_comp.time.second > b_comp.time.second)
                || ((a_comp.time.second == b_comp.time.second) && ((a_comp.millisecond > b_comp.millisecond)
                || ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))))))
                || ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second)))
                res += adjust_value;
        }
        else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeMinuteNumImpl<ResultPrecision::Extended>>>)
        {
            if ((a_comp.time.second > b_comp.time.second)
                || ((a_comp.time.second == b_comp.time.second) && ((a_comp.millisecond > b_comp.millisecond)
                || ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))))
                res += adjust_value;
        }
        else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeSecondNumImpl<ResultPrecision::Extended>>>)
        {
            if ((a_comp.millisecond > b_comp.millisecond)
                || ((a_comp.millisecond == b_comp.millisecond) && (a_comp.microsecond > b_comp.microsecond)))
                res += adjust_value;
        }
        else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeSubsecondNumImpl<1000>>>)
        {
            if (a_comp.microsecond > b_comp.microsecond)
            if (a_comp.time.second > b_comp.time.second)
                res += adjust_value;
        }
        return res;
@ -397,10 +373,6 @@ public:
            impl.template dispatchForColumns<ToRelativeMinuteNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
        else if (unit == "second" || unit == "ss" || unit == "s")
            impl.template dispatchForColumns<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
        else if (unit == "millisecond" || unit == "ms")
            impl.template dispatchForColumns<ToRelativeSubsecondNumImpl<millisecond_multiplier>>(x, y, timezone_x, timezone_y, res->getData());
        else if (unit == "microsecond" || unit == "us" || unit == "u")
            impl.template dispatchForColumns<ToRelativeSubsecondNumImpl<microsecond_multiplier>>(x, y, timezone_x, timezone_y, res->getData());
        else
            throw Exception(ErrorCodes::BAD_ARGUMENTS,
                "Function {} does not support '{}' unit", getName(), unit);
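The nested comparison chains above all implement one idea: take the difference in whole units, then pull it back by one when the trailing components of the later operand are still behind those of the earlier one, now down to millisecond and microsecond granularity. Below is a hedged Python sketch of that adjustment for the 'second' unit only; it is an illustration, not the implementation, and datetime stands in for DateTime64.

# Sketch of the boundary adjustment behind age('second', x, y); illustrative only.
from datetime import datetime

def trailing(d: datetime) -> tuple:
    # (millisecond, microsecond), the components compared by the nested || chains.
    return (d.microsecond // 1000, d.microsecond % 1000)

def age_seconds(x: datetime, y: datetime) -> int:
    # Whole-second difference, like the relative-second transforms on truncated values.
    res = int((y.replace(microsecond=0) - x.replace(microsecond=0)).total_seconds())
    if x <= y:
        a_comp, b_comp, adjust_value = trailing(x), trailing(y), -1
    else:
        a_comp, b_comp, adjust_value = trailing(y), trailing(x), 1
    # Tuple comparison plays the role of the millisecond/microsecond comparison chain.
    if a_comp > b_comp:
        res += adjust_value
    return res

print(age_seconds(datetime(1969, 12, 31, 23, 59, 50, 3000),
                  datetime(1969, 12, 31, 23, 59, 55, 2000)))  # -> 4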
@ -10,6 +10,7 @@
#include <Functions/DateTimeTransforms.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <Functions/TransformDateTime64.h>
#include <IO/WriteHelpers.h>

@ -822,8 +822,19 @@ void copyS3File(
    ThreadPoolCallbackRunner<void> schedule,
    bool for_disk_s3)
{
    if (settings.allow_native_copy)
    {
        CopyFileHelper helper{s3_client, src_bucket, src_key, src_offset, src_size, dest_bucket, dest_key, settings, object_metadata, schedule, for_disk_s3};
        helper.performCopy();
    }
    else
    {
        auto create_read_buffer = [&]
        {
            return std::make_unique<ReadBufferFromS3>(s3_client, src_bucket, src_key, "", settings, Context::getGlobalContextInstance()->getReadSettings());
        };
        copyDataToS3File(create_read_buffer, src_offset, src_size, s3_client, dest_bucket, dest_key, settings, object_metadata, schedule, for_disk_s3);
    }
}

}
@ -21,6 +21,11 @@ using CreateReadBuffer = std::function<std::unique_ptr<SeekableReadBuffer>()>;
/// The same functionality can be done by using the function copyData() and the classes ReadBufferFromS3 and WriteBufferFromS3
/// however copyS3File() is faster and spends less network traffic and memory.
/// The parameters `src_offset` and `src_size` specify a part in the source to copy.
///
/// Note that it tries to copy the file using native copy (CopyObject), but if native copy
/// has been disabled (with settings.allow_native_copy) or the request failed
/// because of a known issue, it falls back to read-write copy
/// (copyDataToS3File()).
void copyS3File(
    const std::shared_ptr<const S3::Client> & s3_client,
    const String & src_bucket,
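For orientation, the fallback described in the comment above is the usual two-path S3 copy pattern: a server-side CopyObject when native copy is allowed, and a download-then-upload stream otherwise. The sketch below is an illustration only, written against boto3 rather than ClickHouse's own S3 client; it assumes boto3 is installed and credentials are configured, and the bucket and key names are placeholders.

# Illustration only: the same native-copy-or-stream decision, sketched with boto3.
import boto3

s3 = boto3.client("s3")

def copy_with_fallback(src_bucket, src_key, dest_bucket, dest_key, allow_native_copy=True):
    if allow_native_copy:
        # Server-side copy (CopyObject): the data never flows through the client.
        s3.copy_object(Bucket=dest_bucket, Key=dest_key,
                       CopySource={"Bucket": src_bucket, "Key": src_key})
    else:
        # Read-write copy: download the object and upload it again.
        body = s3.get_object(Bucket=src_bucket, Key=src_key)["Body"].read()
        s3.put_object(Bucket=dest_bucket, Key=dest_key, Body=body)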
@ -34,6 +34,16 @@ static std::string createDirectory(const std::string & file)
    return path;
}

static std::string renderFileNameTemplate(time_t now, const std::string & file_path)
{
    fs::path path{file_path};
    std::tm buf;
    localtime_r(&now, &buf);
    std::ostringstream ss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
    ss << std::put_time(&buf, file_path.c_str());
    return path.replace_filename(ss.str());
}

#ifndef WITHOUT_TEXT_LOG
void Loggers::setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority)
{
@ -68,9 +78,12 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
    /// The maximum (the most verbose) of those will be used as default for Poco loggers
    int max_log_level = 0;

    const auto log_path = config.getString("logger.log", "");
    if (!log_path.empty())
    time_t now = std::time({});

    const auto log_path_prop = config.getString("logger.log", "");
    if (!log_path_prop.empty())
    {
        const auto log_path = renderFileNameTemplate(now, log_path_prop);
        createDirectory(log_path);

        std::string ext;
@ -109,9 +122,10 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
        split->addChannel(log, "log");
    }

    const auto errorlog_path = config.getString("logger.errorlog", "");
    if (!errorlog_path.empty())
    const auto errorlog_path_prop = config.getString("logger.errorlog", "");
    if (!errorlog_path_prop.empty())
    {
        const auto errorlog_path = renderFileNameTemplate(now, errorlog_path_prop);
        createDirectory(errorlog_path);

        // NOTE: we don't use notice & critical in the code, so in practice error log collects fatal & error & warning.
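renderFileNameTemplate() above treats the configured log file name as an strftime-style pattern, so a placeholder such as %Y-%m expands to the current year and month. A minimal Python sketch of the same expansion follows; the function name mirrors the C++ one and the path is illustrative.

# Minimal sketch of the template expansion done by renderFileNameTemplate().
from datetime import datetime

def render_file_name_template(now: datetime, file_path: str) -> str:
    # Treat the configured path as an strftime pattern.
    return now.strftime(file_path)

print(render_file_name_template(datetime(2023, 7, 1),
                                "/var/log/clickhouse-server/clickhouse-server-%Y-%m.log"))
# -> /var/log/clickhouse-server/clickhouse-server-2023-07.log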
@ -208,7 +208,7 @@ namespace DB
        const String & column_name,
        ColumnPtr & column,
        const DataTypePtr & column_type,
        const PaddedPODArray<UInt8> * null_bytemap,
        const PaddedPODArray<UInt8> *,
        arrow::ArrayBuilder * array_builder,
        String format_name,
        size_t start,
@ -231,7 +231,9 @@ namespace DB
            /// Start new array.
            components_status = builder.Append();
            checkStatus(components_status, nested_column->getName(), format_name);
            fillArrowArray(column_name, nested_column, nested_type, null_bytemap, value_builder, format_name, offsets[array_idx - 1], offsets[array_idx], output_string_as_string, output_fixed_string_as_fixed_byte_array, dictionary_values);

            /// Pass a null null_map, because fillArrowArray will decide whether nested_type is nullable; if it is, it will create a new null_map from nested_column
            fillArrowArray(column_name, nested_column, nested_type, nullptr, value_builder, format_name, offsets[array_idx - 1], offsets[array_idx], output_string_as_string, output_fixed_string_as_fixed_byte_array, dictionary_values);
        }
    }

@ -182,6 +182,7 @@ S3Settings::RequestSettings::RequestSettings(const NamedCollection & collection)
    max_single_read_retries = collection.getOrDefault<UInt64>("max_single_read_retries", max_single_read_retries);
    max_connections = collection.getOrDefault<UInt64>("max_connections", max_connections);
    list_object_keys_size = collection.getOrDefault<UInt64>("list_object_keys_size", list_object_keys_size);
    allow_native_copy = collection.getOrDefault<bool>("allow_native_copy", allow_native_copy);
    throw_on_zero_files_match = collection.getOrDefault<bool>("throw_on_zero_files_match", throw_on_zero_files_match);
}

@ -197,6 +198,7 @@ S3Settings::RequestSettings::RequestSettings(
    max_connections = config.getUInt64(key + "max_connections", settings.s3_max_connections);
    check_objects_after_upload = config.getBool(key + "check_objects_after_upload", settings.s3_check_objects_after_upload);
    list_object_keys_size = config.getUInt64(key + "list_object_keys_size", settings.s3_list_object_keys_size);
    allow_native_copy = config.getBool(key + "allow_native_copy", allow_native_copy);
    throw_on_zero_files_match = config.getBool(key + "throw_on_zero_files_match", settings.s3_throw_on_zero_files_match);
    retry_attempts = config.getUInt64(key + "retry_attempts", settings.s3_retry_attempts);
    request_timeout_ms = config.getUInt64(key + "request_timeout_ms", settings.s3_request_timeout_ms);

@ -71,6 +71,7 @@ struct S3Settings
    size_t retry_attempts = 10;
    size_t request_timeout_ms = 3000;
    size_t long_request_timeout_ms = 30000; // TODO: Take this from config like request_timeout_ms
    bool allow_native_copy = true;

    bool throw_on_zero_files_match = false;

@ -2,6 +2,14 @@
    <storage_configuration>
        <disks>
            <!-- s3 disks -->
            <s3_common_disk>
                <type>s3</type>
                <path>s3_common_disk/</path>
                <endpoint>http://localhost:11111/test/common/</endpoint>
                <access_key_id>clickhouse</access_key_id>
                <secret_access_key>clickhouse</secret_access_key>
                <request_timeout_ms>20000</request_timeout_ms>
            </s3_common_disk>
            <s3_disk>
                <type>s3</type>
                <path>s3_disk/</path>
@ -64,6 +64,14 @@ DEFAULT_ENV_NAME = ".env"

SANITIZER_SIGN = "=================="

CLICKHOUSE_START_COMMAND = (
    "clickhouse server --config-file=/etc/clickhouse-server/{main_config_file}"
)

CLICKHOUSE_LOG_FILE = "/var/log/clickhouse-server/clickhouse-server.log"

CLICKHOUSE_ERROR_LOG_FILE = "/var/log/clickhouse-server/clickhouse-server.err.log"


# to create docker-compose env file
def _create_env_file(path, variables):
@ -1497,6 +1505,8 @@ class ClickHouseCluster:
        with_postgres=False,
        with_postgres_cluster=False,
        with_postgresql_java_client=False,
        clickhouse_log_file=CLICKHOUSE_LOG_FILE,
        clickhouse_error_log_file=CLICKHOUSE_ERROR_LOG_FILE,
        with_hdfs=False,
        with_kerberized_hdfs=False,
        with_mongo=False,
@ -1563,6 +1573,13 @@ class ClickHouseCluster:
                "LLVM_PROFILE_FILE"
            ] = "/var/lib/clickhouse/server_%h_%p_%m.profraw"

        clickhouse_start_command = CLICKHOUSE_START_COMMAND
        if clickhouse_log_file:
            clickhouse_start_command += " --log-file=" + clickhouse_log_file
        if clickhouse_error_log_file:
            clickhouse_start_command += " --errorlog-file=" + clickhouse_error_log_file
        logging.debug(f"clickhouse_start_command: {clickhouse_start_command}")

        instance = ClickHouseInstance(
            cluster=self,
            base_path=self.base_dir,
@ -1592,10 +1609,10 @@ class ClickHouseCluster:
            with_redis=with_redis,
            with_minio=with_minio,
            with_azurite=with_azurite,
            with_cassandra=with_cassandra,
            with_jdbc_bridge=with_jdbc_bridge,
            with_hive=with_hive,
            with_coredns=with_coredns,
            with_cassandra=with_cassandra,
            server_bin_path=self.server_bin_path,
            odbc_bridge_bin_path=self.odbc_bridge_bin_path,
            library_bridge_bin_path=self.library_bridge_bin_path,
@ -1604,6 +1621,10 @@ class ClickHouseCluster:
            with_postgres=with_postgres,
            with_postgres_cluster=with_postgres_cluster,
            with_postgresql_java_client=with_postgresql_java_client,
            clickhouse_start_command=clickhouse_start_command,
            main_config_name=main_config_name,
            users_config_name=users_config_name,
            copy_common_configs=copy_common_configs,
            hostname=hostname,
            env_variables=env_variables,
            image=image,
@ -1612,9 +1633,6 @@ class ClickHouseCluster:
            ipv4_address=ipv4_address,
            ipv6_address=ipv6_address,
            with_installed_binary=with_installed_binary,
            main_config_name=main_config_name,
            users_config_name=users_config_name,
            copy_common_configs=copy_common_configs,
            external_dirs=external_dirs,
            tmpfs=tmpfs or [],
            config_root_name=config_root_name,
@ -3046,16 +3064,6 @@ class ClickHouseCluster:
            subprocess_check_call(self.base_zookeeper_cmd + ["start", n])


CLICKHOUSE_START_COMMAND = (
    "clickhouse server --config-file=/etc/clickhouse-server/{main_config_file}"
    " --log-file=/var/log/clickhouse-server/clickhouse-server.log "
    " --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
)

CLICKHOUSE_STAY_ALIVE_COMMAND = "bash -c \"trap 'pkill tail' INT TERM; {} --daemon; coproc tail -f /dev/null; wait $$!\"".format(
    CLICKHOUSE_START_COMMAND
)

DOCKER_COMPOSE_TEMPLATE = """
version: '2.3'
services:
@ -3230,6 +3238,9 @@ class ClickHouseInstance:
        self.clickhouse_start_command = clickhouse_start_command.replace(
            "{main_config_file}", self.main_config_name
        )
        self.clickhouse_stay_alive_command = "bash -c \"trap 'pkill tail' INT TERM; {} --daemon; coproc tail -f /dev/null; wait $$!\"".format(
            clickhouse_start_command
        )

        self.path = p.join(self.cluster.instances_dir, name)
        self.docker_compose_path = p.join(self.path, "docker-compose.yml")
@ -4321,7 +4332,7 @@ class ClickHouseInstance:
        entrypoint_cmd = self.clickhouse_start_command

        if self.stay_alive:
            entrypoint_cmd = CLICKHOUSE_STAY_ALIVE_COMMAND.replace(
            entrypoint_cmd = self.clickhouse_stay_alive_command.replace(
                "{main_config_file}", self.main_config_name
            )
        else:
@ -2,6 +2,7 @@ from typing import Dict, Iterable
|
||||
import pytest
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.test_tools import TSV
|
||||
import uuid
|
||||
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
@ -37,32 +38,31 @@ def new_backup_name():
|
||||
return f"backup{backup_id_counter}"
|
||||
|
||||
|
||||
def get_events(events_names: Iterable[str]) -> Dict[str, int]:
|
||||
_events = TSV(
|
||||
def get_events_for_query(query_id: str) -> Dict[str, int]:
|
||||
events = TSV(
|
||||
node.query(
|
||||
f"SELECT event, value FROM system.events WHERE event in {events_names} SETTINGS system_events_show_zero_values = 1;"
|
||||
f"""
|
||||
SYSTEM FLUSH LOGS;
|
||||
|
||||
WITH arrayJoin(ProfileEvents) as pe
|
||||
SELECT pe.1, pe.2
|
||||
FROM system.query_log
|
||||
WHERE query_id = '{query_id}'
|
||||
"""
|
||||
)
|
||||
)
|
||||
return {
|
||||
event: int(value)
|
||||
for event, value in [line.split("\t") for line in _events.lines]
|
||||
for event, value in [line.split("\t") for line in events.lines]
|
||||
}
|
||||
|
||||
|
||||
def check_backup_and_restore(
|
||||
storage_policy, backup_destination, size=1000, backup_name=None, check_events=False
|
||||
storage_policy,
|
||||
backup_destination,
|
||||
size=1000,
|
||||
backup_name=None,
|
||||
):
|
||||
s3_backup_events = (
|
||||
"WriteBufferFromS3Microseconds",
|
||||
"WriteBufferFromS3Bytes",
|
||||
"WriteBufferFromS3RequestsErrors",
|
||||
)
|
||||
s3_restore_events = (
|
||||
"ReadBufferFromS3Microseconds",
|
||||
"ReadBufferFromS3Bytes",
|
||||
"ReadBufferFromS3RequestsErrors",
|
||||
)
|
||||
|
||||
node.query(
|
||||
f"""
|
||||
DROP TABLE IF EXISTS data SYNC;
|
||||
@ -72,16 +72,17 @@ def check_backup_and_restore(
|
||||
"""
|
||||
)
|
||||
try:
|
||||
events_before_backups = get_events(s3_backup_events)
|
||||
node.query(f"BACKUP TABLE data TO {backup_destination}")
|
||||
events_after_backups = get_events(s3_backup_events)
|
||||
events_before_restore = get_events(s3_restore_events)
|
||||
backup_query_id = uuid.uuid4().hex
|
||||
node.query(
|
||||
f"BACKUP TABLE data TO {backup_destination}", query_id=backup_query_id
|
||||
)
|
||||
restore_query_id = uuid.uuid4().hex
|
||||
node.query(
|
||||
f"""
|
||||
RESTORE TABLE data AS data_restored FROM {backup_destination};
|
||||
"""
|
||||
""",
|
||||
query_id=restore_query_id,
|
||||
)
|
||||
events_after_restore = get_events(s3_restore_events)
|
||||
node.query(
|
||||
"""
|
||||
SELECT throwIf(
|
||||
@ -91,55 +92,10 @@ def check_backup_and_restore(
|
||||
);
|
||||
"""
|
||||
)
|
||||
if check_events and backup_name:
|
||||
objects = node.cluster.minio_client.list_objects(
|
||||
"root", f"data/backups/multipart/{backup_name}/"
|
||||
)
|
||||
backup_meta_size = 0
|
||||
for obj in objects:
|
||||
if ".backup" in obj.object_name:
|
||||
backup_meta_size = obj.size
|
||||
break
|
||||
backup_total_size = int(
|
||||
node.query(
|
||||
f"SELECT sum(total_size) FROM system.backups WHERE status = 'BACKUP_CREATED' AND name like '%{backup_name}%'"
|
||||
).strip()
|
||||
)
|
||||
restore_total_size = int(
|
||||
node.query(
|
||||
f"SELECT sum(total_size) FROM system.backups WHERE status = 'RESTORED' AND name like '%{backup_name}%'"
|
||||
).strip()
|
||||
)
|
||||
# backup
|
||||
# NOTE: ~35 bytes is used by .lock file, so set up 100 bytes to avoid flaky test
|
||||
assert (
|
||||
abs(
|
||||
backup_total_size
|
||||
- (
|
||||
events_after_backups["WriteBufferFromS3Bytes"]
|
||||
- events_before_backups["WriteBufferFromS3Bytes"]
|
||||
- backup_meta_size
|
||||
)
|
||||
)
|
||||
< 100
|
||||
)
|
||||
assert (
|
||||
events_after_backups["WriteBufferFromS3Microseconds"]
|
||||
> events_before_backups["WriteBufferFromS3Microseconds"]
|
||||
)
|
||||
assert events_after_backups["WriteBufferFromS3RequestsErrors"] == 0
|
||||
# restore
|
||||
assert (
|
||||
events_after_restore["ReadBufferFromS3Bytes"]
|
||||
- events_before_restore["ReadBufferFromS3Bytes"]
|
||||
- backup_meta_size
|
||||
== restore_total_size
|
||||
)
|
||||
assert (
|
||||
events_after_restore["ReadBufferFromS3Microseconds"]
|
||||
> events_before_restore["ReadBufferFromS3Microseconds"]
|
||||
)
|
||||
assert events_after_restore["ReadBufferFromS3RequestsErrors"] == 0
|
||||
return [
|
||||
get_events_for_query(backup_query_id),
|
||||
get_events_for_query(restore_query_id),
|
||||
]
|
||||
finally:
|
||||
node.query(
|
||||
"""
|
||||
@ -224,17 +180,63 @@ def test_backup_to_s3_multipart():
|
||||
storage_policy = "default"
|
||||
backup_name = new_backup_name()
|
||||
backup_destination = f"S3('http://minio1:9001/root/data/backups/multipart/{backup_name}', 'minio', 'minio123')"
|
||||
check_backup_and_restore(
|
||||
(backup_events, restore_events) = check_backup_and_restore(
|
||||
storage_policy,
|
||||
backup_destination,
|
||||
size=1000000,
|
||||
backup_name=backup_name,
|
||||
check_events=True,
|
||||
)
|
||||
assert node.contains_in_log(
|
||||
f"copyDataToS3File: Multipart upload has completed. Bucket: root, Key: data/backups/multipart/{backup_name}"
|
||||
)
|
||||
|
||||
s3_backup_events = (
|
||||
"WriteBufferFromS3Microseconds",
|
||||
"WriteBufferFromS3Bytes",
|
||||
"WriteBufferFromS3RequestsErrors",
|
||||
)
|
||||
s3_restore_events = (
|
||||
"ReadBufferFromS3Microseconds",
|
||||
"ReadBufferFromS3Bytes",
|
||||
"ReadBufferFromS3RequestsErrors",
|
||||
)
|
||||
|
||||
objects = node.cluster.minio_client.list_objects(
|
||||
"root", f"data/backups/multipart/{backup_name}/"
|
||||
)
|
||||
backup_meta_size = 0
|
||||
for obj in objects:
|
||||
if ".backup" in obj.object_name:
|
||||
backup_meta_size = obj.size
|
||||
break
|
||||
backup_total_size = int(
|
||||
node.query(
|
||||
f"SELECT sum(total_size) FROM system.backups WHERE status = 'BACKUP_CREATED' AND name like '%{backup_name}%'"
|
||||
).strip()
|
||||
)
|
||||
restore_total_size = int(
|
||||
node.query(
|
||||
f"SELECT sum(total_size) FROM system.backups WHERE status = 'RESTORED' AND name like '%{backup_name}%'"
|
||||
).strip()
|
||||
)
|
||||
# backup
|
||||
# NOTE: ~35 bytes is used by .lock file, so set up 100 bytes to avoid flaky test
|
||||
assert (
|
||||
abs(
|
||||
backup_total_size
|
||||
- (backup_events["WriteBufferFromS3Bytes"] - backup_meta_size)
|
||||
)
|
||||
< 100
|
||||
)
|
||||
assert backup_events["WriteBufferFromS3Microseconds"] > 0
|
||||
assert "WriteBufferFromS3RequestsErrors" not in backup_events
|
||||
# restore
|
||||
assert (
|
||||
restore_events["ReadBufferFromS3Bytes"] - backup_meta_size == restore_total_size
|
||||
)
|
||||
assert restore_events["ReadBufferFromS3Microseconds"] > 0
|
||||
assert "ReadBufferFromS3RequestsErrors" not in restore_events
|
||||
|
||||
|
||||
def test_backup_to_s3_native_copy():
|
||||
storage_policy = "policy_s3"
|
||||
@ -242,9 +244,12 @@ def test_backup_to_s3_native_copy():
|
||||
backup_destination = (
|
||||
f"S3('http://minio1:9001/root/data/backups/{backup_name}', 'minio', 'minio123')"
|
||||
)
|
||||
check_backup_and_restore(storage_policy, backup_destination)
|
||||
assert node.contains_in_log("BackupWriterS3.*using native copy")
|
||||
assert node.contains_in_log("BackupReaderS3.*using native copy")
|
||||
(backup_events, restore_events) = check_backup_and_restore(
|
||||
storage_policy, backup_destination
|
||||
)
|
||||
# single part upload
|
||||
assert backup_events["S3CopyObject"] > 0
|
||||
assert restore_events["S3CopyObject"] > 0
|
||||
assert node.contains_in_log(
|
||||
f"copyS3File: Single operation copy has completed. Bucket: root, Key: data/backups/{backup_name}"
|
||||
)
|
||||
@ -256,9 +261,12 @@ def test_backup_to_s3_native_copy_other_bucket():
|
||||
backup_destination = (
|
||||
f"S3('http://minio1:9001/root/data/backups/{backup_name}', 'minio', 'minio123')"
|
||||
)
|
||||
check_backup_and_restore(storage_policy, backup_destination)
|
||||
assert node.contains_in_log("BackupWriterS3.*using native copy")
|
||||
assert node.contains_in_log("BackupReaderS3.*using native copy")
|
||||
(backup_events, restore_events) = check_backup_and_restore(
|
||||
storage_policy, backup_destination
|
||||
)
|
||||
# single part upload
|
||||
assert backup_events["S3CopyObject"] > 0
|
||||
assert restore_events["S3CopyObject"] > 0
|
||||
assert node.contains_in_log(
|
||||
f"copyS3File: Single operation copy has completed. Bucket: root, Key: data/backups/{backup_name}"
|
||||
)
|
||||
@ -268,9 +276,12 @@ def test_backup_to_s3_native_copy_multipart():
|
||||
storage_policy = "policy_s3"
|
||||
backup_name = new_backup_name()
|
||||
backup_destination = f"S3('http://minio1:9001/root/data/backups/multipart/{backup_name}', 'minio', 'minio123')"
|
||||
check_backup_and_restore(storage_policy, backup_destination, size=1000000)
|
||||
assert node.contains_in_log("BackupWriterS3.*using native copy")
|
||||
assert node.contains_in_log("BackupReaderS3.*using native copy")
|
||||
(backup_events, restore_events) = check_backup_and_restore(
|
||||
storage_policy, backup_destination, size=1000000
|
||||
)
|
||||
# multi part upload
|
||||
assert backup_events["S3CreateMultipartUpload"] > 0
|
||||
assert restore_events["S3CreateMultipartUpload"] > 0
|
||||
assert node.contains_in_log(
|
||||
f"copyS3File: Multipart upload has completed. Bucket: root, Key: data/backups/multipart/{backup_name}/"
|
||||
)
|
||||
|
@ -0,0 +1,6 @@
<clickhouse>
    <logger>
        <log>/var/log/clickhouse-server/clickhouse-server-%Y-%m.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server-%Y-%m.err.log</errorlog>
    </logger>
</clickhouse>
@ -0,0 +1,54 @@
import pytest
import logging
from helpers.cluster import ClickHouseCluster
from datetime import datetime


log_dir = "/var/log/clickhouse-server/"
cluster = ClickHouseCluster(__file__)


@pytest.fixture(scope="module")
def started_cluster():
    cluster.add_instance(
        "file-names-from-config",
        main_configs=["configs/config-file-template.xml"],
        clickhouse_log_file=None,
        clickhouse_error_log_file=None,
    )
    cluster.add_instance(
        "file-names-from-params",
        clickhouse_log_file=log_dir + "clickhouse-server-%Y-%m.log",
        clickhouse_error_log_file=log_dir + "clickhouse-server-%Y-%m.err.log",
    )
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def test_check_file_names(started_cluster):
    now = datetime.now()
    log_file = log_dir + f"clickhouse-server-{now.strftime('%Y-%m')}.log"
    err_log_file = log_dir + f"clickhouse-server-{now.strftime('%Y-%m')}.err.log"
    logging.debug(f"log_file {log_file} err_log_file {err_log_file}")

    for name, instance in started_cluster.instances.items():
        files = instance.exec_in_container(
            ["bash", "-c", f"ls -lh {log_dir}"], nothrow=True
        )

        logging.debug(f"check instance '{name}': {log_dir} contains: {files}")

        assert (
            instance.exec_in_container(["bash", "-c", f"ls {log_file}"], nothrow=True)
            == log_file + "\n"
        )

        assert (
            instance.exec_in_container(
                ["bash", "-c", f"ls {err_log_file}"], nothrow=True
            )
            == err_log_file + "\n"
        )
@ -36,7 +36,7 @@ create temporary table known_short_messages (s String) as select * from (select
'Database {} doesn''t exist', 'Dictionary ({}) not found', 'Unknown table function {}',
'Unknown format {}', 'Unknown explain kind ''{}''', 'Unknown setting {}', 'Unknown input format {}',
'Unknown identifier: ''{}''', 'User name is empty', 'Expected function, got: {}',
'Attempt to read after eof', 'String size is too big ({}), maximum: {}'
'Attempt to read after eof', 'String size is too big ({}), maximum: {}', 'API mode: {}'
] as arr) array join arr;

-- Check that we don't have too many short meaningless message patterns.
@ -4,4 +4,3 @@
201707
20170721
20170721112233
19691231235959

@ -4,4 +4,3 @@ SELECT toYYYYMMDDhhmmss(toDate('2017-07-21'));
SELECT toYYYYMM(toDateTime('2017-07-21T11:22:33'));
SELECT toYYYYMMDD(toDateTime('2017-07-21T11:22:33'));
SELECT toYYYYMMDDhhmmss(toDateTime('2017-07-21T11:22:33'));
SELECT toYYYYMMDDhhmmss(toDateTime64('1969-12-31 23:59:59.900', 3));
@ -27,19 +27,19 @@ localhost 9000 57 Code: 57. Error: Table default.never_throw already exists. (TA
|
||||
localhost 9000 0 1 0
|
||||
localhost 1 \N \N 1 0
|
||||
distributed_ddl_queue
|
||||
2 localhost 9000 test_shard_localhost CREATE TABLE default.none ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 0 1 1
|
||||
2 localhost 9000 test_shard_localhost CREATE TABLE default.none ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 57 Code: 57. DB::Error: Table default.none already exists. (TABLE_ALREADY_EXISTS) 1 1
|
||||
2 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.none ON CLUSTER test_unavailable_shard 1 localhost 1 Inactive \N \N \N \N
|
||||
2 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.none ON CLUSTER test_unavailable_shard 1 localhost 9000 Finished 0 1 1
|
||||
2 localhost 9000 test_shard_localhost CREATE TABLE default.throw ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 0 1 1
|
||||
2 localhost 9000 test_shard_localhost CREATE TABLE default.throw ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 57 Code: 57. DB::Error: Table default.throw already exists. (TABLE_ALREADY_EXISTS) 1 1
|
||||
2 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.throw ON CLUSTER test_unavailable_shard 1 localhost 1 Inactive \N \N \N \N
|
||||
2 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.throw ON CLUSTER test_unavailable_shard 1 localhost 9000 Finished 0 1 1
|
||||
2 localhost 9000 test_shard_localhost CREATE TABLE default.null_status ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 0 1 1
|
||||
2 localhost 9000 test_shard_localhost CREATE TABLE default.null_status ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 57 Code: 57. DB::Error: Table default.null_status already exists. (TABLE_ALREADY_EXISTS) 1 1
|
||||
2 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.null_status ON CLUSTER test_unavailable_shard 1 localhost 1 Inactive \N \N \N \N
|
||||
2 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.null_status ON CLUSTER test_unavailable_shard 1 localhost 9000 Finished 0 1 1
|
||||
2 localhost 9000 test_shard_localhost CREATE TABLE default.never_throw ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 0 1 1
|
||||
2 localhost 9000 test_shard_localhost CREATE TABLE default.never_throw ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 57 Code: 57. DB::Error: Table default.never_throw already exists. (TABLE_ALREADY_EXISTS) 1 1
|
||||
2 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.never_throw ON CLUSTER test_unavailable_shard 1 localhost 1 Inactive \N \N \N \N
|
||||
2 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.never_throw ON CLUSTER test_unavailable_shard 1 localhost 9000 Finished 0 1 1
|
||||
5 localhost 9000 test_shard_localhost CREATE TABLE default.none ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 0 1 1
|
||||
5 localhost 9000 test_shard_localhost CREATE TABLE default.none ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 57 Code: 57. DB::Error: Table default.none already exists. (TABLE_ALREADY_EXISTS) 1 1
|
||||
5 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.none ON CLUSTER test_unavailable_shard 1 localhost 1 Inactive \N \N \N \N
|
||||
5 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.none ON CLUSTER test_unavailable_shard 1 localhost 9000 Finished 0 1 1
|
||||
5 localhost 9000 test_shard_localhost CREATE TABLE default.throw ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 0 1 1
|
||||
5 localhost 9000 test_shard_localhost CREATE TABLE default.throw ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 57 Code: 57. DB::Error: Table default.throw already exists. (TABLE_ALREADY_EXISTS) 1 1
|
||||
5 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.throw ON CLUSTER test_unavailable_shard 1 localhost 1 Inactive \N \N \N \N
|
||||
5 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.throw ON CLUSTER test_unavailable_shard 1 localhost 9000 Finished 0 1 1
|
||||
5 localhost 9000 test_shard_localhost CREATE TABLE default.null_status ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 0 1 1
|
||||
5 localhost 9000 test_shard_localhost CREATE TABLE default.null_status ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 57 Code: 57. DB::Error: Table default.null_status already exists. (TABLE_ALREADY_EXISTS) 1 1
|
||||
5 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.null_status ON CLUSTER test_unavailable_shard 1 localhost 1 Inactive \N \N \N \N
|
||||
5 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.null_status ON CLUSTER test_unavailable_shard 1 localhost 9000 Finished 0 1 1
|
||||
5 localhost 9000 test_shard_localhost CREATE TABLE default.never_throw ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 0 1 1
|
||||
5 localhost 9000 test_shard_localhost CREATE TABLE default.never_throw ON CLUSTER test_shard_localhost (`n` Int32) ENGINE = Memory 1 localhost 9000 Finished 57 Code: 57. DB::Error: Table default.never_throw already exists. (TABLE_ALREADY_EXISTS) 1 1
|
||||
5 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.never_throw ON CLUSTER test_unavailable_shard 1 localhost 1 Inactive \N \N \N \N
|
||||
5 localhost 9000 test_unavailable_shard DROP TABLE IF EXISTS default.never_throw ON CLUSTER test_unavailable_shard 1 localhost 9000 Finished 0 1 1
|
||||
|
@ -33,14 +33,4 @@ Hello
2021-01-01
1
1
86400000
172800000
86461000
86401299
701
701
800
60200201
60
10
1
@ -41,16 +41,4 @@ SELECT TIMESTAMPSUB(DATE '2022-01-01', INTERVAL 1 YEAR);
SELECT DATE_DIFF(YEAR, DATE '2021-01-01', DATE '2022-01-01');
SELECT DATEDIFF(YEAR, DATE '2021-01-01', DATE '2022-01-01');

SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-02'::Date);
SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-03'::Date32);
SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-02 00:01:01'::DateTime);
SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-02 00:00:01.299'::DateTime64);
SELECT DATEDIFF(millisecond, '2021-01-01 23:59:59.299'::DateTime64, '2021-01-02'::Date);
SELECT DATEDIFF(millisecond, '2021-01-01 23:59:59.299999'::DateTime64(6), '2021-01-02'::Date);
SELECT DATEDIFF(millisecond, '2021-01-01 23:59:59.2'::DateTime64(1), '2021-01-02'::Date);
SELECT DATEDIFF(microsecond, '2021-01-01 23:59:59.899999'::DateTime64(6), '2021-01-02 00:01:00.100200300'::DateTime64(9));

SELECT DATEDIFF(microsecond, '1969-12-31 23:59:59.999950'::DateTime64(6), '1970-01-01 00:00:00.000010'::DateTime64(6));
SELECT DATEDIFF(second, '1969-12-31 23:59:59.123'::DateTime64(6), '1970-01-01 00:00:09.123'::DateTime64(6));

SELECT EXISTS (SELECT 1);
@ -111,109 +111,3 @@ SELECT age('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), ma
|
||||
1
|
||||
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC')));
|
||||
1
|
||||
-- DateTime64 vs DateTime64 with fractional part
|
||||
SELECT age('microsecond', toDateTime64('2015-08-18 20:30:36.100200005', 9, 'UTC'), toDateTime64('2015-08-18 20:30:41.200400005', 9, 'UTC'));
|
||||
5100200
|
||||
SELECT age('microsecond', toDateTime64('2015-08-18 20:30:36.100200005', 9, 'UTC'), toDateTime64('2015-08-18 20:30:41.200400004', 9, 'UTC'));
|
||||
5100200
|
||||
SELECT age('millisecond', toDateTime64('2015-08-18 20:30:36.450299', 6, 'UTC'), toDateTime64('2015-08-18 20:30:41.550299', 6, 'UTC'));
|
||||
5100
|
||||
SELECT age('millisecond', toDateTime64('2015-08-18 20:30:36.450299', 6, 'UTC'), toDateTime64('2015-08-18 20:30:41.550298', 6, 'UTC'));
|
||||
5099
|
||||
SELECT age('second', toDateTime64('2023-03-01 19:18:36.999003', 6, 'UTC'), toDateTime64('2023-03-01 19:18:41.999002', 6, 'UTC'));
|
||||
4
|
||||
SELECT age('second', toDateTime64('2023-03-01 19:18:36.999', 3, 'UTC'), toDateTime64('2023-03-01 19:18:41.001', 3, 'UTC'));
|
||||
4
|
||||
SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 20:35:36.300', 3, 'UTC'));
|
||||
5
|
||||
SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 20:35:36.100', 3, 'UTC'));
|
||||
4
|
||||
SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-01 20:35:36.200100', 6, 'UTC'));
|
||||
4
|
||||
SELECT age('hour', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
|
||||
3
|
||||
SELECT age('hour', toDateTime64('2015-01-01 20:31:36.200', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
|
||||
2
|
||||
SELECT age('hour', toDateTime64('2015-01-01 20:30:37.200', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
|
||||
2
|
||||
SELECT age('hour', toDateTime64('2015-01-01 20:30:36.300', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
|
||||
2
|
||||
SELECT age('hour', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-01 23:30:36.200100', 6, 'UTC'));
|
||||
2
|
||||
SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:36.200', 3, 'UTC'));
|
||||
3
|
||||
SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 19:30:36.200', 3, 'UTC'));
|
||||
2
|
||||
SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:28:36.200', 3, 'UTC'));
|
||||
2
|
||||
SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:35.200', 3, 'UTC'));
|
||||
2
|
||||
SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:36.199', 3, 'UTC'));
|
||||
2
|
||||
SELECT age('day', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-04 20:30:36.200100', 6, 'UTC'));
|
||||
2
|
||||
SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:36.200', 3, 'UTC'));
|
||||
2
|
||||
SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 19:30:36.200', 3, 'UTC'));
|
||||
1
|
||||
SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:29:36.200', 3, 'UTC'));
|
||||
1
|
||||
SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:35.200', 3, 'UTC'));
|
||||
1
|
||||
SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:36.100', 3, 'UTC'));
|
||||
1
|
||||
SELECT age('week', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-15 20:30:36.200100', 6, 'UTC'));
|
||||
1
|
||||
SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:36.200', 3, 'UTC'));
|
||||
16
|
||||
SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-01 20:30:36.200', 3, 'UTC'));
|
||||
15
|
||||
SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 19:30:36.200', 3, 'UTC'));
|
||||
15
|
||||
SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:29:36.200', 3, 'UTC'));
|
||||
15
|
||||
SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:35.200', 3, 'UTC'));
|
||||
15
|
||||
SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:36.100', 3, 'UTC'));
|
||||
15
|
||||
SELECT age('month', toDateTime64('2015-01-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2016-05-02 20:30:36.200100', 6, 'UTC'));
|
||||
15
|
||||
SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:36.200', 3, 'UTC'));
|
||||
5
|
||||
SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-01 20:30:36.200', 3, 'UTC'));
|
||||
4
|
||||
SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 19:30:36.200', 3, 'UTC'));
|
||||
4
|
||||
SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:29:36.200', 3, 'UTC'));
|
||||
4
|
||||
SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:35.200', 3, 'UTC'));
|
||||
4
|
||||
SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:36.100', 3, 'UTC'));
|
||||
4
|
||||
SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2016-04-02 20:30:36.200100', 6, 'UTC'));
|
||||
4
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:36.200', 3, 'UTC'));
|
||||
8
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-01-02 20:30:36.200', 3, 'UTC'));
|
||||
7
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-01 20:30:36.200', 3, 'UTC'));
|
||||
7
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 19:30:36.200', 3, 'UTC'));
|
||||
7
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:29:36.200', 3, 'UTC'));
|
||||
7
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:35.200', 3, 'UTC'));
|
||||
7
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:36.100', 3, 'UTC'));
|
||||
7
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2023-02-02 20:30:36.200100', 6, 'UTC'));
|
||||
7
|
||||
-- DateTime64 vs DateTime64 with negative time
|
||||
SELECT age('millisecond', toDateTime64('1969-12-31 23:59:58.001', 3, 'UTC'), toDateTime64('1970-01-01 00:00:00.350', 3, 'UTC'));
|
||||
2349
|
||||
SELECT age('second', toDateTime64('1969-12-31 23:59:58.001', 3, 'UTC'), toDateTime64('1970-01-01 00:00:00.35', 3, 'UTC'));
|
||||
2
|
||||
SELECT age('second', toDateTime64('1969-12-31 23:59:50.001', 3, 'UTC'), toDateTime64('1969-12-31 23:59:55.002', 3, 'UTC'));
|
||||
5
|
||||
SELECT age('second', toDateTime64('1969-12-31 23:59:50.003', 3, 'UTC'), toDateTime64('1969-12-31 23:59:55.002', 3, 'UTC'));
|
||||
4
|
||||
|
@ -75,68 +75,3 @@ SELECT age('second', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')),
|
||||
SELECT age('second', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), materialize(toDateTime64('2015-08-18 00:00:10', 3, 'UTC')));
|
||||
SELECT age('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), materialize(toDate('2015-08-19', 'UTC')));
|
||||
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC')));
|
||||
|
||||
-- DateTime64 vs DateTime64 with fractional part
|
||||
SELECT age('microsecond', toDateTime64('2015-08-18 20:30:36.100200005', 9, 'UTC'), toDateTime64('2015-08-18 20:30:41.200400005', 9, 'UTC'));
|
||||
SELECT age('microsecond', toDateTime64('2015-08-18 20:30:36.100200005', 9, 'UTC'), toDateTime64('2015-08-18 20:30:41.200400004', 9, 'UTC'));
|
||||
|
||||
SELECT age('millisecond', toDateTime64('2015-08-18 20:30:36.450299', 6, 'UTC'), toDateTime64('2015-08-18 20:30:41.550299', 6, 'UTC'));
|
||||
SELECT age('millisecond', toDateTime64('2015-08-18 20:30:36.450299', 6, 'UTC'), toDateTime64('2015-08-18 20:30:41.550298', 6, 'UTC'));
|
||||
|
||||
SELECT age('second', toDateTime64('2023-03-01 19:18:36.999003', 6, 'UTC'), toDateTime64('2023-03-01 19:18:41.999002', 6, 'UTC'));
|
||||
SELECT age('second', toDateTime64('2023-03-01 19:18:36.999', 3, 'UTC'), toDateTime64('2023-03-01 19:18:41.001', 3, 'UTC'));
|
||||
|
||||
SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 20:35:36.300', 3, 'UTC'));
|
||||
SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 20:35:36.100', 3, 'UTC'));
|
||||
SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-01 20:35:36.200100', 6, 'UTC'));
|
||||
|
||||
SELECT age('hour', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
|
||||
SELECT age('hour', toDateTime64('2015-01-01 20:31:36.200', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
|
||||
SELECT age('hour', toDateTime64('2015-01-01 20:30:37.200', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
|
||||
SELECT age('hour', toDateTime64('2015-01-01 20:30:36.300', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
|
||||
SELECT age('hour', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-01 23:30:36.200100', 6, 'UTC'));
|
||||
|
||||
SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:36.200', 3, 'UTC'));
|
||||
SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 19:30:36.200', 3, 'UTC'));
|
||||
SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:28:36.200', 3, 'UTC'));
|
||||
SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:35.200', 3, 'UTC'));
|
||||
SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:36.199', 3, 'UTC'));
|
||||
SELECT age('day', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-04 20:30:36.200100', 6, 'UTC'));
|
||||
|
||||
SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:36.200', 3, 'UTC'));
|
||||
SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 19:30:36.200', 3, 'UTC'));
|
||||
SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:29:36.200', 3, 'UTC'));
|
||||
SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:35.200', 3, 'UTC'));
|
||||
SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:36.100', 3, 'UTC'));
|
||||
SELECT age('week', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-15 20:30:36.200100', 6, 'UTC'));
|
||||
|
||||
SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:36.200', 3, 'UTC'));
|
||||
SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-01 20:30:36.200', 3, 'UTC'));
|
||||
SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 19:30:36.200', 3, 'UTC'));
|
||||
SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:29:36.200', 3, 'UTC'));
|
||||
SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:35.200', 3, 'UTC'));
|
||||
SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:36.100', 3, 'UTC'));
|
||||
SELECT age('month', toDateTime64('2015-01-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2016-05-02 20:30:36.200100', 6, 'UTC'));
|
||||
|
||||
SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:36.200', 3, 'UTC'));
|
||||
SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-01 20:30:36.200', 3, 'UTC'));
|
||||
SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 19:30:36.200', 3, 'UTC'));
|
||||
SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:29:36.200', 3, 'UTC'));
|
||||
SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:35.200', 3, 'UTC'));
|
||||
SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:36.100', 3, 'UTC'));
|
||||
SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2016-04-02 20:30:36.200100', 6, 'UTC'));
|
||||
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:36.200', 3, 'UTC'));
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-01-02 20:30:36.200', 3, 'UTC'));
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-01 20:30:36.200', 3, 'UTC'));
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 19:30:36.200', 3, 'UTC'));
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:29:36.200', 3, 'UTC'));
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:35.200', 3, 'UTC'));
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:36.100', 3, 'UTC'));
|
||||
SELECT age('year', toDateTime64('2015-02-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2023-02-02 20:30:36.200100', 6, 'UTC'));
|
||||
|
||||
-- DateTime64 vs DateTime64 with negative time
|
||||
SELECT age('millisecond', toDateTime64('1969-12-31 23:59:58.001', 3, 'UTC'), toDateTime64('1970-01-01 00:00:00.350', 3, 'UTC'));
|
||||
SELECT age('second', toDateTime64('1969-12-31 23:59:58.001', 3, 'UTC'), toDateTime64('1970-01-01 00:00:00.35', 3, 'UTC'));
|
||||
SELECT age('second', toDateTime64('1969-12-31 23:59:50.001', 3, 'UTC'), toDateTime64('1969-12-31 23:59:55.002', 3, 'UTC'));
|
||||
SELECT age('second', toDateTime64('1969-12-31 23:59:50.003', 3, 'UTC'), toDateTime64('1969-12-31 23:59:55.002', 3, 'UTC'));
|
@ -1,4 +1,4 @@
default distributed_ddl_entry_format_version
distributed_ddl_entry_format_version=OPENTELEMETRY_ENABLED_VERSION (older then PRESERVE_INITIAL_QUERY_ID_VERSION)
DROP TABLE IF EXISTS foo ON CLUSTER test_shard_localhost
distributed_ddl_entry_format_version=PRESERVE_INITIAL_QUERY_ID_VERSION
DROP TABLE IF EXISTS default.foo

@ -4,9 +4,10 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

echo "default distributed_ddl_entry_format_version"
echo "distributed_ddl_entry_format_version=OPENTELEMETRY_ENABLED_VERSION (older then PRESERVE_INITIAL_QUERY_ID_VERSION)"
OPENTELEMETRY_ENABLED_VERSION=4
query_id="$(random_str 10)"
$CLICKHOUSE_CLIENT --query_id "$query_id" --distributed_ddl_output_mode=none -q "DROP TABLE IF EXISTS foo ON CLUSTER test_shard_localhost"
$CLICKHOUSE_CLIENT --distributed_ddl_entry_format_version=$OPENTELEMETRY_ENABLED_VERSION --query_id "$query_id" --distributed_ddl_output_mode=none -q "DROP TABLE IF EXISTS foo ON CLUSTER test_shard_localhost"
$CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS"
$CLICKHOUSE_CLIENT -q "SELECT query FROM system.query_log WHERE initial_query_id = '$query_id' AND type != 'QueryStart'"

@ -0,0 +1,4 @@
BACKUP TABLE data TO S3(s3_conn, \'backups/default/data_native_copy\') SETTINGS allow_s3_native_copy = 1 1
BACKUP TABLE data TO S3(s3_conn, \'backups/default/data_no_native_copy\') SETTINGS allow_s3_native_copy = 0 0
RESTORE TABLE data AS data_native_copy FROM S3(s3_conn, \'backups/default/data_native_copy\') SETTINGS allow_s3_native_copy = 1 1
RESTORE TABLE data AS data_no_native_copy FROM S3(s3_conn, \'backups/default/data_no_native_copy\') SETTINGS allow_s3_native_copy = 0 0
43
tests/queries/0_stateless/02801_backup_native_copy.sh
Executable file
@ -0,0 +1,43 @@
#!/usr/bin/env bash
# Tags: no-fasttest
# Tag: no-fasttest - requires S3

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

set -e

$CLICKHOUSE_CLIENT -nm -q "
drop table if exists data;
create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_common_disk';
insert into data select * from numbers(10);
"

query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "BACKUP TABLE data TO S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_native_copy') SETTINGS allow_s3_native_copy=true"
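# the check below reads ProfileEvents['S3CopyObject'] from query_log: >0 indicates the server-side (native) S3 copy path was used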
$CLICKHOUSE_CLIENT -nm -q "
SYSTEM FLUSH LOGS;
SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
"

query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "BACKUP TABLE data TO S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_no_native_copy') SETTINGS allow_s3_native_copy=false"
$CLICKHOUSE_CLIENT -nm -q "
SYSTEM FLUSH LOGS;
SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
"

query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --send_logs_level=error --format Null --query_id $query_id -q "RESTORE TABLE data AS data_native_copy FROM S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_native_copy') SETTINGS allow_s3_native_copy=true"
$CLICKHOUSE_CLIENT -nm -q "
SYSTEM FLUSH LOGS;
SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
"

query_id=$(random_str 10)
$CLICKHOUSE_CLIENT --send_logs_level=error --format Null --query_id $query_id -q "RESTORE TABLE data AS data_no_native_copy FROM S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_no_native_copy') SETTINGS allow_s3_native_copy=false"
$CLICKHOUSE_CLIENT -nm -q "
SYSTEM FLUSH LOGS;
SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id'
"
@ -0,0 +1,4 @@
s3_plain_native_copy
Single operation copy has completed.
s3_plain_no_native_copy
Single part upload has completed.
28
tests/queries/0_stateless/02802_clickhouse_disks_s3_copy.sh
Executable file
@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Tags: no-fasttest
# Tag no-fasttest: requires S3

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

config="${BASH_SOURCE[0]/.sh/.xml}"
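# the sibling .xml file serves both as the clickhouse-disks config (-C) and as the payload written to the disk (--input)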

function run_test_for_disk()
{
local disk=$1 && shift

echo "$disk"

clickhouse-disks -C "$config" --disk "$disk" write --input "$config" $CLICKHOUSE_DATABASE/test
clickhouse-disks -C "$config" --log-level test --disk "$disk" copy $CLICKHOUSE_DATABASE/test $CLICKHOUSE_DATABASE/test.copy |& {
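# the captured log line shows which path the copy took: "Single operation copy" (native server-side copy) vs "Single part upload" (re-upload)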
grep -o -e "Single part upload has completed." -e "Single operation copy has completed."
}
clickhouse-disks -C "$config" --disk "$disk" remove $CLICKHOUSE_DATABASE/test
# NOTE: this is needed because "copy" works like "cp -R from to/" rather than "cp from to"
clickhouse-disks -C "$config" --disk "$disk" remove $CLICKHOUSE_DATABASE/test.copy/test
clickhouse-disks -C "$config" --disk "$disk" remove $CLICKHOUSE_DATABASE/test.copy
}

run_test_for_disk s3_plain_native_copy
run_test_for_disk s3_plain_no_native_copy
21
tests/queries/0_stateless/02802_clickhouse_disks_s3_copy.xml
Normal file
@ -0,0 +1,21 @@
<clickhouse>
    <storage_configuration>
        <disks>
            <s3_plain_native_copy>
                <type>s3_plain</type>
                <endpoint>http://localhost:11111/test/clickhouse-disks/</endpoint>
                <access_key_id>clickhouse</access_key_id>
                <secret_access_key>clickhouse</secret_access_key>
                <s3_allow_native_copy>true</s3_allow_native_copy>
            </s3_plain_native_copy>

            <s3_plain_no_native_copy>
                <type>s3_plain</type>
                <endpoint>http://localhost:11111/test/clickhouse-disks/</endpoint>
                <access_key_id>clickhouse</access_key_id>
                <secret_access_key>clickhouse</secret_access_key>
                <s3_allow_native_copy>false</s3_allow_native_copy>
            </s3_plain_no_native_copy>
        </disks>
    </storage_configuration>
</clickhouse>
@ -12,6 +12,7 @@ ARMv
ASLR
ASOF
ASan
AWST
Actian
ActionsMenu
ActiveRecord
@ -10,6 +10,7 @@ v23.4.4.16-stable 2023-06-17
v23.4.3.48-stable 2023-06-12
v23.4.2.11-stable 2023-05-02
v23.4.1.1943-stable 2023-04-27
v23.3.8.21-lts 2023-07-13
v23.3.7.5-lts 2023-06-29
v23.3.6.7-lts 2023-06-28
v23.3.5.9-lts 2023-06-22