Merge remote-tracking branch 'origin/master' into datatype-date32

NengLiu 2021-07-05 10:15:08 +08:00
commit dc0c65ef4f
363 changed files with 5274 additions and 2244 deletions

View File

@ -184,10 +184,27 @@ endif ()
 set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -rdynamic")

 find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-12" "llvm-objcopy-11" "llvm-objcopy-10" "llvm-objcopy-9" "llvm-objcopy-8" "objcopy")
+
+if (NOT OBJCOPY_PATH AND OS_DARWIN)
+    find_program (BREW_PATH NAMES "brew")
+    if (BREW_PATH)
+        execute_process (COMMAND ${BREW_PATH} --prefix llvm ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE LLVM_PREFIX)
+        if (LLVM_PREFIX)
+            find_program (OBJCOPY_PATH NAMES "llvm-objcopy" PATHS "${LLVM_PREFIX}/bin" NO_DEFAULT_PATH)
+        endif ()
+        if (NOT OBJCOPY_PATH)
+            execute_process (COMMAND ${BREW_PATH} --prefix binutils ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE BINUTILS_PREFIX)
+            if (BINUTILS_PREFIX)
+                find_program (OBJCOPY_PATH NAMES "objcopy" PATHS "${BINUTILS_PREFIX}/bin" NO_DEFAULT_PATH)
+            endif ()
+        endif ()
+    endif ()
+endif ()
+
 if (OBJCOPY_PATH)
-    message(STATUS "Using objcopy: ${OBJCOPY_PATH}.")
+    message (STATUS "Using objcopy: ${OBJCOPY_PATH}")
 else ()
-    message(FATAL_ERROR "Cannot find objcopy.")
+    message (FATAL_ERROR "Cannot find objcopy.")
 endif ()

 if (OS_DARWIN)

View File

@ -17,7 +17,7 @@ class DateLUT : private boost::noncopyable
 {
 public:
     /// Return singleton DateLUTImpl instance for the default time zone.
-    static ALWAYS_INLINE const DateLUTImpl & instance()
+    static ALWAYS_INLINE const DateLUTImpl & instance() // -V1071
     {
         const auto & date_lut = getInstance();
         return *date_lut.default_impl.load(std::memory_order_acquire);

View File

@ -1,9 +1,9 @@
 # This strings autochanged from release_lib.sh:
-SET(VERSION_REVISION 54452)
+SET(VERSION_REVISION 54453)
 SET(VERSION_MAJOR 21)
-SET(VERSION_MINOR 7)
+SET(VERSION_MINOR 8)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH 976ccc2e908ac3bc28f763bfea8134ea0a121b40)
+SET(VERSION_GITHASH fb895056568e26200629c7d19626e92d2dedc70d)
-SET(VERSION_DESCRIBE v21.7.1.1-prestable)
+SET(VERSION_DESCRIBE v21.8.1.1-prestable)
-SET(VERSION_STRING 21.7.1.1)
+SET(VERSION_STRING 21.8.1.1)
 # end of autochange

View File

@ -33,44 +33,25 @@ macro(clickhouse_embed_binaries)
         message(FATAL_ERROR "The list of binary resources to embed may not be empty")
     endif()

-    # If cross-compiling, ensure we use the toolchain file and target the
-    # actual target architecture
-    if (CMAKE_CROSSCOMPILING)
-        set(CROSS_COMPILE_FLAGS "--target=${CMAKE_C_COMPILER_TARGET} --gcc-toolchain=${TOOLCHAIN_FILE}")
-    else()
-        set(CROSS_COMPILE_FLAGS "")
-    endif()
+    add_library("${EMBED_TARGET}" STATIC)
+    set_target_properties("${EMBED_TARGET}" PROPERTIES LINKER_LANGUAGE C)

     set(EMBED_TEMPLATE_FILE "${PROJECT_SOURCE_DIR}/programs/embed_binary.S.in")
-    set(RESOURCE_OBJS)
-    foreach(RESOURCE_FILE ${EMBED_RESOURCES})
-        set(RESOURCE_OBJ "${RESOURCE_FILE}.o")
-        list(APPEND RESOURCE_OBJS "${RESOURCE_OBJ}")

-        # Normalize the name of the resource
+    foreach(RESOURCE_FILE ${EMBED_RESOURCES})
+        set(ASSEMBLY_FILE_NAME "${RESOURCE_FILE}.S")
         set(BINARY_FILE_NAME "${RESOURCE_FILE}")
+
+        # Normalize the name of the resource.
         string(REGEX REPLACE "[\./-]" "_" SYMBOL_NAME "${RESOURCE_FILE}") # - must be last in regex
         string(REPLACE "+" "_PLUS_" SYMBOL_NAME "${SYMBOL_NAME}")
-        set(ASSEMBLY_FILE_NAME "${RESOURCE_FILE}.S")

-        # Put the configured assembly file in the output directory.
-        # This is so we can clean it up as usual, and we CD to the
-        # source directory before compiling, so that the assembly
-        # `.incbin` directive can find the file.
+        # Generate the configured assembly file in the output directory.
         configure_file("${EMBED_TEMPLATE_FILE}" "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" @ONLY)

-        # Generate the output object file by compiling the assembly, in the directory of
-        # the sources so that the resource file may also be found
-        add_custom_command(
-            OUTPUT ${RESOURCE_OBJ}
-            COMMAND cd "${EMBED_RESOURCE_DIR}" &&
-                ${CMAKE_C_COMPILER} "${CROSS_COMPILE_FLAGS}" -c -o
-                "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}"
-                "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}"
-        )
-        set_source_files_properties("${RESOURCE_OBJ}" PROPERTIES EXTERNAL_OBJECT true GENERATED true)
-    endforeach()
+        # Set the include directory for relative paths specified for `.incbin` directive.
+        set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY INCLUDE_DIRECTORIES "${EMBED_RESOURCE_DIR}")

-    add_library("${EMBED_TARGET}" STATIC ${RESOURCE_OBJS})
-    set_target_properties("${EMBED_TARGET}" PROPERTIES LINKER_LANGUAGE C)
+        target_sources("${EMBED_TARGET}" PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}")
+    endforeach()
 endmacro()

View File

@ -4,7 +4,6 @@ set (CMAKE_C_COMPILER_TARGET "aarch64-linux-gnu")
 set (CMAKE_CXX_COMPILER_TARGET "aarch64-linux-gnu")
 set (CMAKE_ASM_COMPILER_TARGET "aarch64-linux-gnu")
 set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/aarch64-linux-gnu/libc")
-get_filename_component (TOOLCHAIN_FILE "${CMAKE_TOOLCHAIN_FILE}" REALPATH)

 # We don't use compiler from toolchain because it's gcc-8, and we provide support only for gcc-9.
 set (CMAKE_AR "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/bin/aarch64-linux-gnu-ar" CACHE FILEPATH "" FORCE)

contrib/NuRaft vendored

@ -1 +1 @@
-Subproject commit 2a1bf7d87b4a03561fc66fbb49cee8a288983c5d
+Subproject commit 976874b7aa7f422bf4ea595bb7d1166c617b1c26

View File

@ -26,7 +26,7 @@ if (NOT USE_INTERNAL_CCTZ_LIBRARY)
     set_property (TARGET cctz PROPERTY IMPORTED_LOCATION ${LIBRARY_CCTZ})
     set_property (TARGET cctz PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_CCTZ})
 endif()

 set(SYSTEM_STORAGE_TZ_FILE "${CMAKE_BINARY_DIR}/src/Storages/System/StorageSystemTimeZones.generated.cpp")
 file(REMOVE ${SYSTEM_STORAGE_TZ_FILE})
 file(APPEND ${SYSTEM_STORAGE_TZ_FILE} "// autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt\n")

View File

@ -1,7 +1,7 @@
 add_library(murmurhash
-    src/murmurhash2.cpp
-    src/murmurhash3.cpp
-    include/murmurhash2.h
-    include/murmurhash3.h)
+    src/MurmurHash2.cpp
+    src/MurmurHash3.cpp
+    include/MurmurHash2.h
+    include/MurmurHash3.h)

 target_include_directories (murmurhash PUBLIC include)

View File

@ -0,0 +1,49 @@
//-----------------------------------------------------------------------------
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
#ifndef MURMURHASH2_H
#define MURMURHASH2_H
#include <stddef.h>
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER) && (_MSC_VER < 1600)
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
typedef unsigned __int64 uint64_t;
// Other compilers
#else // defined(_MSC_VER)
#include <stdint.h>
#endif // !defined(_MSC_VER)
//-----------------------------------------------------------------------------
#ifdef __cplusplus
extern "C" {
#endif
uint32_t MurmurHash2 ( const void * key, size_t len, uint32_t seed );
uint64_t MurmurHash64A ( const void * key, size_t len, uint64_t seed );
uint64_t MurmurHash64B ( const void * key, size_t len, uint64_t seed );
uint32_t MurmurHash2A ( const void * key, size_t len, uint32_t seed );
uint32_t MurmurHashNeutral2 ( const void * key, size_t len, uint32_t seed );
uint32_t MurmurHashAligned2 ( const void * key, size_t len, uint32_t seed );
#ifdef __cplusplus
}
#endif
//-----------------------------------------------------------------------------
#endif // _MURMURHASH2_H_

View File

@ -2,7 +2,10 @@
 // MurmurHash3 was written by Austin Appleby, and is placed in the public
 // domain. The author hereby disclaims copyright to this source code.

-#pragma once
+#ifndef MURMURHASH3_H
+#define MURMURHASH3_H
+
+#include <stddef.h>

 //-----------------------------------------------------------------------------
 // Platform-specific functions and macros

@ -23,20 +26,22 @@ typedef unsigned __int64 uint64_t;

 #endif // !defined(_MSC_VER)

+//-----------------------------------------------------------------------------
+
 #ifdef __cplusplus
 extern "C" {
 #endif

-//-----------------------------------------------------------------------------
-
-void MurmurHash3_x86_32  ( const void * key, int len, uint32_t seed, void * out );
-void MurmurHash3_x86_128 ( const void * key, int len, uint32_t seed, void * out );
-void MurmurHash3_x64_128 ( const void * key, int len, uint32_t seed, void * out );
+void MurmurHash3_x86_32  ( const void * key, size_t len, uint32_t seed, void * out );
+void MurmurHash3_x86_128 ( const void * key, size_t len, uint32_t seed, void * out );
+void MurmurHash3_x64_128 ( const void * key, size_t len, uint32_t seed, void * out );

 #ifdef __cplusplus
 }
 #endif
+
+//-----------------------------------------------------------------------------
+
+#endif // _MURMURHASH3_H_

View File

@ -1,31 +0,0 @@
//-----------------------------------------------------------------------------
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
#pragma once
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER) && (_MSC_VER < 1600)
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
typedef unsigned __int64 uint64_t;
// Other compilers
#else // defined(_MSC_VER)
#include <stdint.h>
#endif // !defined(_MSC_VER)
uint32_t MurmurHash2 (const void * key, int len, uint32_t seed);
uint64_t MurmurHash64A (const void * key, int len, uint64_t seed);
uint64_t MurmurHash64B (const void * key, int len, uint64_t seed);
uint32_t MurmurHash2A (const void * key, int len, uint32_t seed);
uint32_t MurmurHashNeutral2 (const void * key, int len, uint32_t seed);
uint32_t MurmurHashAligned2 (const void * key, int len, uint32_t seed);

View File

@ -0,0 +1,523 @@
//-----------------------------------------------------------------------------
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
// Note - This code makes a few assumptions about how your machine behaves -
// 1. We can read a 4-byte value from any address without crashing
// 2. sizeof(int) == 4
// And it has a few limitations -
// 1. It will not work incrementally.
// 2. It will not produce the same results on little-endian and big-endian
// machines.
#include "MurmurHash2.h"
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER)
#define BIG_CONSTANT(x) (x)
// Other compilers
#else // defined(_MSC_VER)
#define BIG_CONSTANT(x) (x##LLU)
#endif // !defined(_MSC_VER)
//-----------------------------------------------------------------------------
uint32_t MurmurHash2 ( const void * key, size_t len, uint32_t seed )
{
// 'm' and 'r' are mixing constants generated offline.
// They're not really 'magic', they just happen to work well.
const uint32_t m = 0x5bd1e995;
const int r = 24;
// Initialize the hash to a 'random' value
uint32_t h = seed ^ len;
// Mix 4 bytes at a time into the hash
const unsigned char * data = (const unsigned char *)key;
while(len >= 4)
{
uint32_t k = *(uint32_t*)data;
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
len -= 4;
}
// Handle the last few bytes of the input array
switch(len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
// Do a few final mixes of the hash to ensure the last few
// bytes are well-incorporated.
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
//-----------------------------------------------------------------------------
// MurmurHash2, 64-bit versions, by Austin Appleby
// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment
// and endian-ness issues if used across multiple platforms.
// 64-bit hash for 64-bit platforms
uint64_t MurmurHash64A ( const void * key, size_t len, uint64_t seed )
{
const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
const int r = 47;
uint64_t h = seed ^ (len * m);
const uint64_t * data = (const uint64_t *)key;
const uint64_t * end = data + (len/8);
while(data != end)
{
uint64_t k = *data++;
k *= m;
k ^= k >> r;
k *= m;
h ^= k;
h *= m;
}
const unsigned char * data2 = (const unsigned char*)data;
switch(len & 7)
{
case 7: h ^= uint64_t(data2[6]) << 48;
case 6: h ^= uint64_t(data2[5]) << 40;
case 5: h ^= uint64_t(data2[4]) << 32;
case 4: h ^= uint64_t(data2[3]) << 24;
case 3: h ^= uint64_t(data2[2]) << 16;
case 2: h ^= uint64_t(data2[1]) << 8;
case 1: h ^= uint64_t(data2[0]);
h *= m;
};
h ^= h >> r;
h *= m;
h ^= h >> r;
return h;
}
// 64-bit hash for 32-bit platforms
uint64_t MurmurHash64B ( const void * key, size_t len, uint64_t seed )
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t h1 = uint32_t(seed) ^ len;
uint32_t h2 = uint32_t(seed >> 32);
const uint32_t * data = (const uint32_t *)key;
while(len >= 8)
{
uint32_t k1 = *data++;
k1 *= m; k1 ^= k1 >> r; k1 *= m;
h1 *= m; h1 ^= k1;
len -= 4;
uint32_t k2 = *data++;
k2 *= m; k2 ^= k2 >> r; k2 *= m;
h2 *= m; h2 ^= k2;
len -= 4;
}
if(len >= 4)
{
uint32_t k1 = *data++;
k1 *= m; k1 ^= k1 >> r; k1 *= m;
h1 *= m; h1 ^= k1;
len -= 4;
}
switch(len)
{
case 3: h2 ^= ((unsigned char*)data)[2] << 16;
case 2: h2 ^= ((unsigned char*)data)[1] << 8;
case 1: h2 ^= ((unsigned char*)data)[0];
h2 *= m;
};
h1 ^= h2 >> 18; h1 *= m;
h2 ^= h1 >> 22; h2 *= m;
h1 ^= h2 >> 17; h1 *= m;
h2 ^= h1 >> 19; h2 *= m;
uint64_t h = h1;
h = (h << 32) | h2;
return h;
}
//-----------------------------------------------------------------------------
// MurmurHash2A, by Austin Appleby
// This is a variant of MurmurHash2 modified to use the Merkle-Damgard
// construction. Bulk speed should be identical to Murmur2, small-key speed
// will be 10%-20% slower due to the added overhead at the end of the hash.
// This variant fixes a minor issue where null keys were more likely to
// collide with each other than expected, and also makes the function
// more amenable to incremental implementations.
#define mmix(h,k) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
uint32_t MurmurHash2A ( const void * key, size_t len, uint32_t seed )
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t l = len;
const unsigned char * data = (const unsigned char *)key;
uint32_t h = seed;
while(len >= 4)
{
uint32_t k = *(uint32_t*)data;
mmix(h,k);
data += 4;
len -= 4;
}
uint32_t t = 0;
switch(len)
{
case 3: t ^= data[2] << 16;
case 2: t ^= data[1] << 8;
case 1: t ^= data[0];
};
mmix(h,t);
mmix(h,l);
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
//-----------------------------------------------------------------------------
// CMurmurHash2A, by Austin Appleby
// This is a sample implementation of MurmurHash2A designed to work
// incrementally.
// Usage -
// CMurmurHash2A hasher
// hasher.Begin(seed);
// hasher.Add(data1,size1);
// hasher.Add(data2,size2);
// ...
// hasher.Add(dataN,sizeN);
// uint32_t hash = hasher.End()
class CMurmurHash2A
{
public:
void Begin ( uint32_t seed = 0 )
{
m_hash = seed;
m_tail = 0;
m_count = 0;
m_size = 0;
}
void Add ( const unsigned char * data, size_t len )
{
m_size += len;
MixTail(data,len);
while(len >= 4)
{
uint32_t k = *(uint32_t*)data;
mmix(m_hash,k);
data += 4;
len -= 4;
}
MixTail(data,len);
}
uint32_t End ( void )
{
mmix(m_hash,m_tail);
mmix(m_hash,m_size);
m_hash ^= m_hash >> 13;
m_hash *= m;
m_hash ^= m_hash >> 15;
return m_hash;
}
private:
static const uint32_t m = 0x5bd1e995;
static const int r = 24;
void MixTail ( const unsigned char * & data, size_t & len )
{
while( len && ((len<4) || m_count) )
{
m_tail |= (*data++) << (m_count * 8);
m_count++;
len--;
if(m_count == 4)
{
mmix(m_hash,m_tail);
m_tail = 0;
m_count = 0;
}
}
}
uint32_t m_hash;
uint32_t m_tail;
uint32_t m_count;
uint32_t m_size;
};
//-----------------------------------------------------------------------------
// MurmurHashNeutral2, by Austin Appleby
// Same as MurmurHash2, but endian- and alignment-neutral.
// Half the speed though, alas.
uint32_t MurmurHashNeutral2 ( const void * key, size_t len, uint32_t seed )
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t h = seed ^ len;
const unsigned char * data = (const unsigned char *)key;
while(len >= 4)
{
uint32_t k;
k = data[0];
k |= data[1] << 8;
k |= data[2] << 16;
k |= data[3] << 24;
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
len -= 4;
}
switch(len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
//-----------------------------------------------------------------------------
// MurmurHashAligned2, by Austin Appleby
// Same algorithm as MurmurHash2, but only does aligned reads - should be safer
// on certain platforms.
// Performance will be lower than MurmurHash2
#define MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
uint32_t MurmurHashAligned2 ( const void * key, size_t len, uint32_t seed )
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
const unsigned char * data = (const unsigned char *)key;
uint32_t h = seed ^ len;
size_t align = (uint64_t)data & 3;
if(align && (len >= 4))
{
// Pre-load the temp registers
uint32_t t = 0, d = 0;
switch(align)
{
case 1: t |= data[2] << 16;
case 2: t |= data[1] << 8;
case 3: t |= data[0];
}
t <<= (8 * align);
data += 4-align;
len -= 4-align;
int sl = 8 * (4-align);
int sr = 8 * align;
// Mix
while(len >= 4)
{
d = *(uint32_t *)data;
t = (t >> sr) | (d << sl);
uint32_t k = t;
MIX(h,k,m);
t = d;
data += 4;
len -= 4;
}
// Handle leftover data in temp registers
d = 0;
if(len >= align)
{
switch(align)
{
case 3: d |= data[2] << 16;
case 2: d |= data[1] << 8;
case 1: d |= data[0];
}
uint32_t k = (t >> sr) | (d << sl);
MIX(h,k,m);
data += align;
len -= align;
//----------
// Handle tail bytes
switch(len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
}
else
{
switch(len)
{
case 3: d |= data[2] << 16;
case 2: d |= data[1] << 8;
case 1: d |= data[0];
case 0: h ^= (t >> sr) | (d << sl);
h *= m;
}
}
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
else
{
while(len >= 4)
{
uint32_t k = *(uint32_t *)data;
MIX(h,k,m);
data += 4;
len -= 4;
}
//----------
// Handle tail bytes
switch(len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
}
//-----------------------------------------------------------------------------

View File

@ -1,3 +1,4 @@
+//-----------------------------------------------------------------------------
 // MurmurHash3 was written by Austin Appleby, and is placed in the public
 // domain. The author hereby disclaims copyright to this source code.

@ -6,8 +7,8 @@
 // compile and run any of them on any platform, but your performance with the
 // non-native version will be less than optimal.

-#include "murmurhash3.h"
-#include <cstring>
+#include "MurmurHash3.h"
+#include <string.h>

 //-----------------------------------------------------------------------------
 // Platform-specific functions and macros

@ -93,7 +94,7 @@ FORCE_INLINE uint64_t fmix64 ( uint64_t k )

 //-----------------------------------------------------------------------------

-void MurmurHash3_x86_32 ( const void * key, int len,
+void MurmurHash3_x86_32 ( const void * key, size_t len,
                           uint32_t seed, void * out )
 {
   const uint8_t * data = (const uint8_t*)key;

@ -149,7 +150,7 @@ void MurmurHash3_x86_32 ( const void * key, int len,

 //-----------------------------------------------------------------------------

-void MurmurHash3_x86_128 ( const void * key, const int len,
+void MurmurHash3_x86_128 ( const void * key, const size_t len,
                            uint32_t seed, void * out )
 {
   const uint8_t * data = (const uint8_t*)key;

@ -254,7 +255,7 @@ void MurmurHash3_x86_128 ( const void * key, const int len,

 //-----------------------------------------------------------------------------

-void MurmurHash3_x64_128 ( const void * key, const int len,
+void MurmurHash3_x64_128 ( const void * key, const size_t len,
                            const uint32_t seed, void * out )
 {
   const uint8_t * data = (const uint8_t*)key;

@ -332,3 +333,6 @@ void MurmurHash3_x64_128 ( const void * key, const int len,
   ((uint64_t*)out)[0] = h1;
   ((uint64_t*)out)[1] = h2;
 }
+
+//-----------------------------------------------------------------------------

View File

@ -1,423 +0,0 @@
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
// Note - This code makes a few assumptions about how your machine behaves -
// 1. We can read a 4-byte value from any address without crashing
// 2. sizeof(int) == 4
// And it has a few limitations -
// 1. It will not work incrementally.
// 2. It will not produce the same results on little-endian and big-endian
// machines.
#include "murmurhash2.h"
#include <cstring>
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER)
#define BIG_CONSTANT(x) (x)
// Other compilers
#else // defined(_MSC_VER)
#define BIG_CONSTANT(x) (x##LLU)
#endif // !defined(_MSC_VER)
uint32_t MurmurHash2(const void * key, int len, uint32_t seed)
{
// 'm' and 'r' are mixing constants generated offline.
// They're not really 'magic', they just happen to work well.
const uint32_t m = 0x5bd1e995;
const int r = 24;
// Initialize the hash to a 'random' value
uint32_t h = seed ^ len;
// Mix 4 bytes at a time into the hash
const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
while (len >= 4)
{
uint32_t k;
memcpy(&k, data, sizeof(k));
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
len -= 4;
}
// Handle the last few bytes of the input array
switch (len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
// Do a few final mixes of the hash to ensure the last few
// bytes are well-incorporated.
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
// MurmurHash2, 64-bit versions, by Austin Appleby
// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment
// and endian-ness issues if used across multiple platforms.
// 64-bit hash for 64-bit platforms
uint64_t MurmurHash64A(const void * key, int len, uint64_t seed)
{
const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
const int r = 47;
uint64_t h = seed ^ (len * m);
const uint64_t * data = reinterpret_cast<const uint64_t *>(key);
const uint64_t * end = data + (len/8);
while (data != end)
{
uint64_t k = *data++;
k *= m;
k ^= k >> r;
k *= m;
h ^= k;
h *= m;
}
const unsigned char * data2 = reinterpret_cast<const unsigned char *>(data);
switch (len & 7)
{
case 7: h ^= static_cast<uint64_t>(data2[6]) << 48;
case 6: h ^= static_cast<uint64_t>(data2[5]) << 40;
case 5: h ^= static_cast<uint64_t>(data2[4]) << 32;
case 4: h ^= static_cast<uint64_t>(data2[3]) << 24;
case 3: h ^= static_cast<uint64_t>(data2[2]) << 16;
case 2: h ^= static_cast<uint64_t>(data2[1]) << 8;
case 1: h ^= static_cast<uint64_t>(data2[0]);
h *= m;
};
h ^= h >> r;
h *= m;
h ^= h >> r;
return h;
}
// 64-bit hash for 32-bit platforms
uint64_t MurmurHash64B(const void * key, int len, uint64_t seed)
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t h1 = static_cast<uint32_t>(seed) ^ len;
uint32_t h2 = static_cast<uint32_t>(seed >> 32);
const uint32_t * data = reinterpret_cast<const uint32_t *>(key);
while (len >= 8)
{
uint32_t k1 = *data++;
k1 *= m; k1 ^= k1 >> r; k1 *= m;
h1 *= m; h1 ^= k1;
len -= 4;
uint32_t k2 = *data++;
k2 *= m; k2 ^= k2 >> r; k2 *= m;
h2 *= m; h2 ^= k2;
len -= 4;
}
if (len >= 4)
{
uint32_t k1 = *data++;
k1 *= m; k1 ^= k1 >> r; k1 *= m;
h1 *= m; h1 ^= k1;
len -= 4;
}
switch (len)
{
case 3: h2 ^= reinterpret_cast<const unsigned char *>(data)[2] << 16;
case 2: h2 ^= reinterpret_cast<const unsigned char *>(data)[1] << 8;
case 1: h2 ^= reinterpret_cast<const unsigned char *>(data)[0];
h2 *= m;
};
h1 ^= h2 >> 18; h1 *= m;
h2 ^= h1 >> 22; h2 *= m;
h1 ^= h2 >> 17; h1 *= m;
h2 ^= h1 >> 19; h2 *= m;
uint64_t h = h1;
h = (h << 32) | h2;
return h;
}
// MurmurHash2A, by Austin Appleby
// This is a variant of MurmurHash2 modified to use the Merkle-Damgard
// construction. Bulk speed should be identical to Murmur2, small-key speed
// will be 10%-20% slower due to the added overhead at the end of the hash.
// This variant fixes a minor issue where null keys were more likely to
// collide with each other than expected, and also makes the function
// more amenable to incremental implementations.
#define mmix(h,k) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
uint32_t MurmurHash2A(const void * key, int len, uint32_t seed)
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t l = len;
const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
uint32_t h = seed;
while (len >= 4)
{
uint32_t k = *reinterpret_cast<const uint32_t *>(data);
mmix(h,k);
data += 4;
len -= 4;
}
uint32_t t = 0;
switch (len)
{
case 3: t ^= data[2] << 16;
case 2: t ^= data[1] << 8;
case 1: t ^= data[0];
};
mmix(h,t);
mmix(h,l);
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
// MurmurHashNeutral2, by Austin Appleby
// Same as MurmurHash2, but endian- and alignment-neutral.
// Half the speed though, alas.
uint32_t MurmurHashNeutral2(const void * key, int len, uint32_t seed)
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t h = seed ^ len;
const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
while (len >= 4)
{
uint32_t k;
k = data[0];
k |= data[1] << 8;
k |= data[2] << 16;
k |= data[3] << 24;
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
len -= 4;
}
switch (len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
//-----------------------------------------------------------------------------
// MurmurHashAligned2, by Austin Appleby
// Same algorithm as MurmurHash2, but only does aligned reads - should be safer
// on certain platforms.
// Performance will be lower than MurmurHash2
#define MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
uint32_t MurmurHashAligned2(const void * key, int len, uint32_t seed)
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
uint32_t h = seed ^ len;
int align = reinterpret_cast<uint64_t>(data) & 3;
if (align && (len >= 4))
{
// Pre-load the temp registers
uint32_t t = 0, d = 0;
switch (align)
{
case 1: t |= data[2] << 16;
case 2: t |= data[1] << 8;
case 3: t |= data[0];
}
t <<= (8 * align);
data += 4-align;
len -= 4-align;
int sl = 8 * (4-align);
int sr = 8 * align;
// Mix
while (len >= 4)
{
d = *(reinterpret_cast<const uint32_t *>(data));
t = (t >> sr) | (d << sl);
uint32_t k = t;
MIX(h,k,m);
t = d;
data += 4;
len -= 4;
}
// Handle leftover data in temp registers
d = 0;
if (len >= align)
{
switch (align)
{
case 3: d |= data[2] << 16;
case 2: d |= data[1] << 8;
case 1: d |= data[0];
}
uint32_t k = (t >> sr) | (d << sl);
MIX(h,k,m);
data += align;
len -= align;
//----------
// Handle tail bytes
switch (len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
}
else
{
switch (len)
{
case 3: d |= data[2] << 16;
case 2: d |= data[1] << 8;
case 1: d |= data[0];
case 0: h ^= (t >> sr) | (d << sl);
h *= m;
}
}
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
else
{
while (len >= 4)
{
uint32_t k = *reinterpret_cast<const uint32_t *>(data);
MIX(h,k,m);
data += 4;
len -= 4;
}
// Handle tail bytes
switch (len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
}

debian/changelog vendored
View File

@ -1,5 +1,5 @@
-clickhouse (21.7.1.1) unstable; urgency=low
+clickhouse (21.8.1.1) unstable; urgency=low

   * Modified source code

- -- clickhouse-release <clickhouse-release@yandex-team.ru> Thu, 20 May 2021 22:23:29 +0300
+ -- clickhouse-release <clickhouse-release@yandex-team.ru> Mon, 28 Jun 2021 00:50:15 +0300

View File

@ -1,7 +1,7 @@
 FROM ubuntu:18.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=21.7.1.*
+ARG version=21.8.1.*

 RUN apt-get update \
     && apt-get install --yes --no-install-recommends \

View File

@ -72,7 +72,7 @@ RUN git clone https://github.com/tpoechtrager/apple-libtapi.git \
     && cd .. \
     && rm -rf apple-libtapi

-# Build and install tools for cross-linking to Darwin
+# Build and install tools for cross-linking to Darwin (x86-64)
 RUN git clone https://github.com/tpoechtrager/cctools-port.git \
     && cd cctools-port/cctools \
     && ./configure --prefix=/cctools --with-libtapi=/cctools \

@ -81,8 +81,17 @@ RUN git clone https://github.com/tpoechtrager/cctools-port.git \

     && cd ../.. \
     && rm -rf cctools-port

-# Download toolchain for Darwin
-RUN wget -nv https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX10.15.sdk.tar.xz
+# Build and install tools for cross-linking to Darwin (aarch64)
+RUN git clone https://github.com/tpoechtrager/cctools-port.git \
+    && cd cctools-port/cctools \
+    && ./configure --prefix=/cctools --with-libtapi=/cctools \
+        --target=aarch64-apple-darwin \
+    && make install \
+    && cd ../.. \
+    && rm -rf cctools-port
+
+# Download toolchain and SDK for Darwin
+RUN wget -nv https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacOSX11.0.sdk.tar.xz

 # Download toolchain for ARM
 # It contains all required headers and libraries. Note that it's named as "gcc" but actually we are using clang for cross compiling.

View File

@ -3,7 +3,9 @@
 set -x -e

 mkdir -p build/cmake/toolchain/darwin-x86_64
-tar xJf MacOSX10.15.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1
+tar xJf MacOSX11.0.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1
+
+ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64

 mkdir -p build/cmake/toolchain/linux-aarch64
 tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolchain/linux-aarch64 --strip-components=1

View File

@ -58,6 +58,7 @@ def run_docker_image_with_env(image_name, output, env_variables, ch_root, ccache
 def parse_env_variables(build_type, compiler, sanitizer, package_type, image_type, cache, distcc_hosts, unbundled, split_binary, clang_tidy, version, author, official, alien_pkgs, with_coverage, with_binaries):
     CLANG_PREFIX = "clang"
     DARWIN_SUFFIX = "-darwin"
+    DARWIN_ARM_SUFFIX = "-darwin-aarch64"
     ARM_SUFFIX = "-aarch64"
     FREEBSD_SUFFIX = "-freebsd"

@ -66,9 +67,10 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ

     is_clang = compiler.startswith(CLANG_PREFIX)
     is_cross_darwin = compiler.endswith(DARWIN_SUFFIX)
+    is_cross_darwin_arm = compiler.endswith(DARWIN_ARM_SUFFIX)
     is_cross_arm = compiler.endswith(ARM_SUFFIX)
     is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
-    is_cross_compile = is_cross_darwin or is_cross_arm or is_cross_freebsd
+    is_cross_compile = is_cross_darwin or is_cross_darwin_arm or is_cross_arm or is_cross_freebsd

     # Explicitly use LLD with Clang by default.
     # Don't force linker for cross-compilation.

@ -82,6 +84,13 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ

         cmake_flags.append("-DCMAKE_RANLIB:FILEPATH=/cctools/bin/x86_64-apple-darwin-ranlib")
         cmake_flags.append("-DLINKER_NAME=/cctools/bin/x86_64-apple-darwin-ld")
         cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-x86_64.cmake")
+    elif is_cross_darwin_arm:
+        cc = compiler[:-len(DARWIN_ARM_SUFFIX)]
+        cmake_flags.append("-DCMAKE_AR:FILEPATH=/cctools/bin/aarch64-apple-darwin-ar")
+        cmake_flags.append("-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/aarch64-apple-darwin-install_name_tool")
+        cmake_flags.append("-DCMAKE_RANLIB:FILEPATH=/cctools/bin/aarch64-apple-darwin-ranlib")
+        cmake_flags.append("-DLINKER_NAME=/cctools/bin/aarch64-apple-darwin-ld")
+        cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-aarch64.cmake")
     elif is_cross_arm:
         cc = compiler[:-len(ARM_SUFFIX)]
         cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake")

@ -185,8 +194,8 @@ if __name__ == "__main__":

     parser.add_argument("--clickhouse-repo-path", default=os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir))
     parser.add_argument("--output-dir", required=True)
     parser.add_argument("--build-type", choices=("debug", ""), default="")
-    parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-aarch64", "clang-11-freebsd",
-                                               "gcc-10"), default="clang-11")
+    parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-darwin-aarch64", "clang-11-aarch64",
+                                               "clang-11-freebsd", "gcc-10"), default="clang-11")
     parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="")
     parser.add_argument("--unbundled", action="store_true")
     parser.add_argument("--split-binary", action="store_true")

View File

@ -1,7 +1,7 @@
 FROM ubuntu:20.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=21.7.1.*
+ARG version=21.8.1.*
 ARG gosu_ver=1.10

 # set non-empty deb_location_url url to create a docker image

View File

@ -1,7 +1,7 @@
 FROM ubuntu:18.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=21.7.1.*
+ARG version=21.8.1.*

 RUN apt-get update && \
     apt-get install -y apt-transport-https dirmngr && \

View File

@ -113,6 +113,7 @@ function start_server
echo "ClickHouse server pid '$server_pid' started and responded" echo "ClickHouse server pid '$server_pid' started and responded"
echo " echo "
set follow-fork-mode child
handle all noprint handle all noprint
handle SIGSEGV stop print handle SIGSEGV stop print
handle SIGBUS stop print handle SIGBUS stop print

View File

@ -103,6 +103,7 @@ function fuzz
     kill -0 $server_pid

     echo "
+set follow-fork-mode child
 handle all noprint
 handle SIGSEGV stop print
 handle SIGBUS stop print

View File

@ -409,10 +409,10 @@ create view right_query_log as select *
     '$(cat "right-query-log.tsv.columns")');

 create view query_logs as
-    select 0 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
+    select 0 version, query_id, ProfileEvents.keys, ProfileEvents.values,
         query_duration_ms, memory_usage from left_query_log
     union all
-    select 1 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
+    select 1 version, query_id, ProfileEvents.keys, ProfileEvents.values,
         query_duration_ms, memory_usage from right_query_log
     ;

@ -424,7 +424,7 @@ create table query_run_metric_arrays engine File(TSV, 'analyze/query-run-metric-

     with (
         -- sumMapState with the list of all keys with '-0.' values. Negative zero is because
         -- sumMap removes keys with positive zeros.
-        with (select groupUniqArrayArray(ProfileEvents.Names) from query_logs) as all_names
+        with (select groupUniqArrayArray(ProfileEvents.keys) from query_logs) as all_names
         select arrayReduce('sumMapState', [(all_names, arrayMap(x->-0., all_names))])
     ) as all_metrics
     select test, query_index, version, query_id,

@ -433,8 +433,8 @@ create table query_run_metric_arrays engine File(TSV, 'analyze/query-run-metric-

         [
             all_metrics,
             arrayReduce('sumMapState',
-                [(ProfileEvents.Names,
-                    arrayMap(x->toFloat64(x), ProfileEvents.Values))]
+                [(ProfileEvents.keys,
+                    arrayMap(x->toFloat64(x), ProfileEvents.values))]
             ),
             arrayReduce('sumMapState', [(
                 ['client_time', 'server_time', 'memory_usage'],

@ -1005,7 +1005,7 @@ create table unstable_run_metrics engine File(TSVWithNamesAndTypes,

         'unstable-run-metrics.$version.rep') as
     select
         test, query_index, query_id,
-        ProfileEvents.Values value, ProfileEvents.Names metric
+        ProfileEvents.values value, ProfileEvents.keys metric
     from query_log array join ProfileEvents
     join unstable_query_runs using (query_id)
     ;

@ -1280,7 +1280,7 @@ create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv')

     then
         echo Database for test results is not specified, will not upload them.
         return 0
     fi

     set +x # Don't show password in the log
     client=(clickhouse-client

View File

@ -561,7 +561,7 @@ if args.report == 'main':
     # Don't show mildly unstable queries, only the very unstable ones we
     # treat as errors.
     if very_unstable_queries:
-        if very_unstable_queries > 3:
+        if very_unstable_queries > 5:
             error_tests += very_unstable_queries
             status = 'failure'
         message_array.append(str(very_unstable_queries) + ' unstable')

View File

@ -55,6 +55,7 @@ function start()
     done

     echo "
+set follow-fork-mode child
 handle all noprint
 handle SIGSEGV stop print
 handle SIGBUS stop print

View File

@ -2,18 +2,16 @@
 ## TL; DR How to make ClickHouse compile and link faster?

-Developer only! This command will likely fulfill most of your needs. Run before calling `ninja`.
+Minimal ClickHouse build example:

-```cmake
+```bash
 cmake .. \
-    -DCMAKE_C_COMPILER=/bin/clang-10 \
-    -DCMAKE_CXX_COMPILER=/bin/clang++-10 \
+    -DCMAKE_C_COMPILER=$(which clang-11) \
+    -DCMAKE_CXX_COMPILER=$(which clang++-11) \
     -DCMAKE_BUILD_TYPE=Debug \
     -DENABLE_CLICKHOUSE_ALL=OFF \
     -DENABLE_CLICKHOUSE_SERVER=ON \
     -DENABLE_CLICKHOUSE_CLIENT=ON \
-    -DUSE_STATIC_LIBRARIES=OFF \
-    -DSPLIT_SHARED_LIBRARIES=ON \
     -DENABLE_LIBRARIES=OFF \
     -DUSE_UNWIND=ON \
     -DENABLE_UTILS=OFF \

View File

@ -33,7 +33,7 @@ Reboot.
 ``` bash
 brew update
-brew install cmake ninja libtool gettext llvm gcc
+brew install cmake ninja libtool gettext llvm gcc binutils
 ```
## Checkout ClickHouse Sources {#checkout-clickhouse-sources} ## Checkout ClickHouse Sources {#checkout-clickhouse-sources}

View File

@ -126,7 +126,7 @@ Builds ClickHouse in various configurations for use in further steps. You have t
- **Compiler**: `gcc-9` or `clang-10` (or `clang-10-xx` for other architectures e.g. `clang-10-freebsd`). - **Compiler**: `gcc-9` or `clang-10` (or `clang-10-xx` for other architectures e.g. `clang-10-freebsd`).
- **Build type**: `Debug` or `RelWithDebInfo` (cmake). - **Build type**: `Debug` or `RelWithDebInfo` (cmake).
- **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan). - **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan).
- **Bundled**: `bundled` build uses system libraries, and `unbundled` build uses libraries from `contrib` folder. - **Bundled**: `bundled` build uses libraries from `contrib` folder, and `unbundled` build uses system libraries.
- **Splitted** `splitted` is a [split build](build.md#split-build) - **Splitted** `splitted` is a [split build](build.md#split-build)
- **Status**: `success` or `fail` - **Status**: `success` or `fail`
- **Build log**: link to the building and files copying log, useful when build failed. - **Build log**: link to the building and files copying log, useful when build failed.

View File

@ -49,6 +49,7 @@ When working with the `MaterializeMySQL` database engine, [ReplacingMergeTree](.
 | DATE, NEWDATE | [Date](../../sql-reference/data-types/date.md) |
 | DATETIME, TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) |
 | DATETIME2, TIMESTAMP2 | [DateTime64](../../sql-reference/data-types/datetime64.md) |
+| ENUM | [Enum](../../sql-reference/data-types/enum.md) |
 | STRING | [String](../../sql-reference/data-types/string.md) |
 | VARCHAR, VAR_STRING | [String](../../sql-reference/data-types/string.md) |
 | BLOB | [String](../../sql-reference/data-types/string.md) |

View File

@ -0,0 +1,53 @@
---
toc_priority: 12
toc_title: ExternalDistributed
---
# ExternalDistributed {#externaldistributed}
The `ExternalDistributed` engine allows performing `SELECT` queries on data stored on remote MySQL or PostgreSQL servers. It accepts the [MySQL](../../../engines/table-engines/integrations/mysql.md) or [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) engine as an argument, so sharding is possible.
## Creating a Table {#creating-a-table}
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
...
) ENGINE = ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password');
```
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
The table structure can differ from the original table structure:
- Column names should be the same as in the original table, but you can use just some of these columns and in any order.
- Column types may differ from those in the original table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
**Engine Parameters**
- `engine` — The table engine `MySQL` or `PostgreSQL`.
- `host:port` — MySQL or PostgreSQL server address.
- `database` — Remote database name.
- `table` — Remote table name.
- `user` — User name.
- `password` — User password.
## Implementation Details {#implementation-details}
Supports multiple replicas, which must be listed by `|`; shards must be listed by `,`. For example:
```sql
CREATE TABLE test_shards (id UInt32, name String, age UInt32, money UInt32) ENGINE = ExternalDistributed('MySQL', `mysql{1|2}:3306,mysql{3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse');
```
When specifying replicas, one of the available replicas is selected for each of the shards when reading. If the connection fails, the next replica is selected, and so on for all the replicas. If the connection attempt fails for all the replicas, the attempt is repeated the same way several times.
You can specify any number of shards and any number of replicas for each shard.
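For illustration, a minimal query against the `test_shards` table defined above; it fans out to every shard and, per the rules just described, picks one available replica per shard (the filter is arbitrary):

```sql
SELECT id, name, age, money
FROM test_shards
WHERE age >= 18
ORDER BY id;
```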
**See Also**
- [MySQL table engine](../../../engines/table-engines/integrations/mysql.md)
- [PostgreSQL table engine](../../../engines/table-engines/integrations/postgresql.md)
- [Distributed table engine](../../../engines/table-engines/special/distributed.md)

View File

@ -28,8 +28,8 @@ See a detailed description of the [CREATE TABLE](../../../sql-reference/statemen
 The table structure can differ from the original MySQL table structure:

 - Column names should be the same as in the original MySQL table, but you can use just some of these columns and in any order.
-- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
+- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../engines/database-engines/mysql.md#data_types-support) values to the ClickHouse data types.
-- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default is true, if false - table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types.
+- The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for NULL values inside arrays.

 **Engine Parameters**

@ -55,6 +55,12 @@ Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are executed on the MySQL s

 The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes.

+Supports multiple replicas that must be listed by `|`. For example:
+
+```sql
+CREATE TABLE test_replicas (id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL(`mysql{2|3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse');
+```
+
 ## Usage Example {#usage-example}

 Table in MySQL:

View File

@ -29,7 +29,7 @@ The table structure can differ from the source table structure:
 - Column names should be the same as in the source table, but you can use just some of these columns and in any order.
 - Column types may differ from those in the source table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
-- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default is true, if false - table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types.
+- The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for NULL values inside arrays.

 **Engine Parameters**

View File

@ -23,8 +23,8 @@ See a detailed description of the [CREATE TABLE](../../../sql-reference/statemen
 The table structure can differ from the original PostgreSQL table structure:

 - Column names should be the same as in the original PostgreSQL table, but you can use just some of these columns and in any order.
-- Column types may differ from those in the original PostgreSQL table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
+- Column types may differ from those in the original PostgreSQL table. ClickHouse tries to [cast](../../../engines/database-engines/postgresql.md#data_types-support) values to the ClickHouse data types.
-- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default is 1, if 0 - table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types.
+- The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for NULL values inside arrays.

 **Engine Parameters**

@ -49,6 +49,12 @@ PostgreSQL `Array` types are converted into ClickHouse arrays.

 !!! info "Note"
     Be careful - in PostgreSQL an array data, created like a `type_name[]`, may contain multi-dimensional arrays of different dimensions in different table rows in same column. But in ClickHouse it is only allowed to have multidimensional arrays of the same count of dimensions in all table rows in same column.

+Supports multiple replicas that must be listed by `|`. For example:
+
+```sql
+CREATE TABLE test_replicas (id UInt32, name String) ENGINE = PostgreSQL(`postgres{2|3|4}:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword');
+```
+
 Replicas priority for PostgreSQL dictionary source is supported. The bigger the number in map, the less the priority. The highest priority is `0`.
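As a sketch of the `external_table_functions_use_nulls` setting mentioned above, its effect can be tried with the `postgresql` table function (the connection parameters here are hypothetical):

```sql
-- Default (1): columns that are nullable in PostgreSQL become Nullable; NULLs are preserved.
SELECT * FROM postgresql('postgres1:5432', 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword')
SETTINGS external_table_functions_use_nulls = 1;

-- 0: columns are not Nullable; NULLs arrive as default values (0, '', and so on).
SELECT * FROM postgresql('postgres1:5432', 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword')
SETTINGS external_table_functions_use_nulls = 0;
```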

View File

@ -65,7 +65,7 @@ By checking the row count:
 Query:

-``` sq;
+``` sql
 SELECT count() FROM recipes;
 ```

View File

@ -1302,6 +1302,7 @@ The table below shows supported data types and how they match ClickHouse [data t
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `UTF8` | | `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `UTF8` |
| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `UTF8` | | `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `UTF8` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` | | `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `DECIMAL256` | [Decimal256](../sql-reference/data-types/decimal.md) | `DECIMAL256` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` | | `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
Arrays can be nested and can have a value of the `Nullable` type as an argument. Arrays can be nested and can have a value of the `Nullable` type as an argument.
View File
@ -148,5 +148,11 @@ toc_title: Adopters
| <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) | | <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) |
| <a href="https://shop.okraina.ru/" class="favicon">ООО «МПЗ Богородский»</a> | Agriculture | — | — | — | [Article in Russian, November 2020](https://cloud.yandex.ru/cases/okraina) | | <a href="https://shop.okraina.ru/" class="favicon">ООО «МПЗ Богородский»</a> | Agriculture | — | — | — | [Article in Russian, November 2020](https://cloud.yandex.ru/cases/okraina) |
| <a href="https://www.tesla.com/" class="favicon">Tesla</a> | Electric vehicle and clean energy company | — | — | — | [Vacancy description, March 2021](https://news.ycombinator.com/item?id=26306170) | | <a href="https://www.tesla.com/" class="favicon">Tesla</a> | Electric vehicle and clean energy company | — | — | — | [Vacancy description, March 2021](https://news.ycombinator.com/item?id=26306170) |
| <a href="https://www.kgk-global.com/en/" class="favicon">KGK Global</a> | Vehicle monitoring | — | — | — | [Press release, June 2021](https://zoom.cnews.ru/news/item/530921) |
| <a href="https://www.bilibili.com/" class="favicon">BiliBili</a> | Video sharing | — | — | — | [Blog post, June 2021](https://chowdera.com/2021/06/20210622012241476b.html) |
| <a href="https://gigapipe.com/" class="favicon">Gigapipe</a> | Managed ClickHouse | Main product | — | — | [Official website](https://gigapipe.com/) |
| <a href="https://www.hydrolix.io/" class="favicon">Hydrolix</a> | Cloud data platform | Main product | — | — | [Documentation](https://docs.hydrolix.io/guide/query) |
| <a href="https://www.argedor.com/en/clickhouse/" class="favicon">Argedor</a> | ClickHouse support | — | — | — | [Official website](https://www.argedor.com/en/clickhouse/) |
| <a href="https://signoz.io/" class="favicon">SigNoz</a> | Observability Platform | Main Product | — | — | [Source code](https://github.com/SigNoz/signoz) |
[Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide--> [Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide-->
View File
@ -379,7 +379,7 @@ Default value: `1`.
## insert_null_as_default {#insert_null_as_default} ## insert_null_as_default {#insert_null_as_default}
Enables or disables the insertion of [default values](../../sql-reference/statements/create/table.md#create-default-values) instead of [NULL](../../sql-reference/syntax.md#null-literal) into columns with a non-[nullable](../../sql-reference/data-types/nullable.md#data_type-nullable) data type.
If column type is not nullable and this setting is disabled, then inserting `NULL` causes an exception. If column type is nullable, then `NULL` values are inserted as is, regardless of this setting. If column type is not nullable and this setting is disabled, then inserting `NULL` causes an exception. If column type is nullable, then `NULL` values are inserted as is, regardless of this setting.
This setting is applicable to [INSERT ... SELECT](../../sql-reference/statements/insert-into.md#insert_query_insert-select) queries. Note that `SELECT` subqueries may be concatenated with the `UNION ALL` clause.
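Below is a minimal sketch of the effect, assuming a hypothetical table `insert_nulls_test` with a non-Nullable column and an explicit default:

```sql
-- Hypothetical example: `n` is not Nullable, so inserted NULLs must be converted
CREATE TABLE insert_nulls_test (n UInt32 DEFAULT 42) ENGINE = Memory;

SET insert_null_as_default = 1;
INSERT INTO insert_nulls_test SELECT NULL; -- inserts the default value 42 instead of throwing
SELECT * FROM insert_nulls_test;
```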
@ -1182,7 +1182,7 @@ Possible values:
Default value: `1`. Default value: `1`.
**Additional Info** **Additional Info**
This setting is useful for replicated tables with a sampling key. A query may be processed faster if it is executed on several servers in parallel. But the query performance may degrade in the following cases: This setting is useful for replicated tables with a sampling key. A query may be processed faster if it is executed on several servers in parallel. But the query performance may degrade in the following cases:
@ -1194,21 +1194,22 @@ This setting is useful for replicated tables with a sampling key. A query may be
!!! warning "Warning" !!! warning "Warning"
This setting will produce incorrect results when joins or subqueries are involved, and all tables don't meet certain requirements. See [Distributed Subqueries and max_parallel_replicas](../../sql-reference/operators/in.md#max_parallel_replica-subqueries) for more details. This setting will produce incorrect results when joins or subqueries are involved, and all tables don't meet certain requirements. See [Distributed Subqueries and max_parallel_replicas](../../sql-reference/operators/in.md#max_parallel_replica-subqueries) for more details.
## compile {#compile} ## compile_expressions {#compile-expressions}
Enable compilation of queries. By default, 0 (disabled). Enables or disables compilation of frequently used simple functions and operators to native code with LLVM at runtime.
The compilation is only used for part of the query-processing pipeline: for the first stage of aggregation (GROUP BY). Possible values:
If this portion of the pipeline was compiled, the query may run faster due to the deployment of short cycles and inlining aggregate function calls. The maximum performance improvement (up to four times faster in rare cases) is seen for queries with multiple simple aggregate functions. Typically, the performance gain is insignificant. In very rare cases, it may slow down query execution.
## min_count_to_compile {#min-count-to-compile} - 0 — Disabled.
- 1 — Enabled.
How many times to potentially use a compiled chunk of code before running compilation. By default, 3. Default value: `1`.
For testing, the value can be set to 0: compilation runs synchronously and the query waits for the end of the compilation process before continuing execution. For all other cases, use values starting with 1. Compilation normally takes about 5-10 seconds.
If the value is 1 or more, compilation occurs asynchronously in a separate thread. The result will be used as soon as it is ready, including queries that are currently running.
Compiled code is required for each different combination of aggregate functions used in the query and the type of keys in the GROUP BY clause. ## min_count_to_compile_expression {#min-count-to-compile-expression}
The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they do not use very much space. Old results will be used after server restarts, except in the case of a server upgrade in this case, the old results are deleted.
Minimum number of times the same expression must be executed before it is compiled.
Default value: `3`.
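A minimal sketch of how these two settings might be exercised in a session (the query is an arbitrary example with a simple, compilable expression):

```sql
SET compile_expressions = 1;              -- enable runtime compilation of expressions
SET min_count_to_compile_expression = 0;  -- compile on the first execution instead of the third

SELECT count() FROM numbers(10000000) WHERE (number * 2) + 1 > 5;
```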
## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers} ## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers}
@ -1558,7 +1559,7 @@ Possible values:
- 0 — Disabled (final query processing is done on the initiator node). - 0 — Disabled (final query processing is done on the initiator node).
- 1 - Do not merge aggregation states from different servers for distributed query processing (the query is completely processed on the shard, and the initiator only proxies the data); can be used when it is certain that there are different keys on different shards.
- 2 - Same as `1` but applies `ORDER BY` and `LIMIT` (it is not possilbe when the query processed completelly on the remote node, like for `distributed_group_by_no_merge=1`) on the initiator (can be used for queries with `ORDER BY` and/or `LIMIT`). - 2 - Same as `1` but applies `ORDER BY` and `LIMIT` (which is not possible when the query is processed completely on the remote node, as with `distributed_group_by_no_merge=1`) on the initiator (can be used for queries with `ORDER BY` and/or `LIMIT`).
**Example** **Example**
@ -1622,7 +1623,7 @@ Possible values:
Default value: 0 Default value: 0
## optimize_skip_unused_shards_rewrite_in {#optimize-skip-unused-shardslrewrite-in} ## optimize_skip_unused_shards_rewrite_in {#optimize-skip-unused-shards-rewrite-in}
Rewrites `IN` in queries sent to remote shards to exclude values that do not belong to the shard (requires `optimize_skip_unused_shards`).
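A sketch of the intended usage, assuming a hypothetical [Distributed](../../engines/table-engines/special/distributed.md) table `dist` sharded by `key`:

```sql
SET optimize_skip_unused_shards = 1;            -- prerequisite
SET optimize_skip_unused_shards_rewrite_in = 1;

-- Each shard receives only the IN values that map to it
SELECT * FROM dist WHERE key IN (1, 2, 3);
```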
@ -1802,6 +1803,27 @@ Possible values:
Default value: 0. Default value: 0.
## distributed_directory_monitor_split_batch_on_failure {#distributed_directory_monitor_split_batch_on_failure}
Enables/disables splitting batches on failures.
Sometimes sending a particular batch to the remote shard may fail because of a complex pipeline after it (i.e. a `MATERIALIZED VIEW` with `GROUP BY`) producing `Memory limit exceeded` or similar errors. In this case, retrying will not help (and it will get distributed sends for the table stuck), but sending the files from that batch one by one may allow the `INSERT` to succeed.
Setting this to `1` disables batching for such failed batches (i.e. it temporarily disables `distributed_directory_monitor_batch_inserts` for them); a sketch follows after this section.
Possible values:
- 1 — Enabled.
- 0 — Disabled.
Default value: 0.
!!! note "Note"
This setting also affects broken batches (which may appear because of abnormal server (machine) termination when `fsync_after_insert`/`fsync_directories` are not enabled for the [Distributed](../../engines/table-engines/special/distributed.md) table engine).
!!! warning "Warning"
You should not rely on automatic batch splitting, since this may hurt performance.
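A minimal sketch of enabling the fallback together with batching:

```sql
SET distributed_directory_monitor_batch_inserts = 1;          -- batch sends
SET distributed_directory_monitor_split_batch_on_failure = 1; -- fall back to one-by-one sends on failure
```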
## os_thread_priority {#setting-os-thread-priority} ## os_thread_priority {#setting-os-thread-priority}
Sets the priority ([nice](https://en.wikipedia.org/wiki/Nice_(Unix))) for threads that execute queries. The OS scheduler considers this priority when choosing the next thread to run on each available CPU core. Sets the priority ([nice](https://en.wikipedia.org/wiki/Nice_(Unix))) for threads that execute queries. The OS scheduler considers this priority when choosing the next thread to run on each available CPU core.
@ -2085,7 +2107,7 @@ Default value: 128.
## background_fetches_pool_size {#background_fetches_pool_size} ## background_fetches_pool_size {#background_fetches_pool_size}
Sets the number of threads performing background fetches for [replicated](../../engines/table-engines/mergetree-family/replication.md) tables. This setting is applied at the ClickHouse server start and cant be changed in a user session. For production usage with frequent small insertions or slow ZooKeeper cluster is recomended to use default value. Sets the number of threads performing background fetches for [replicated](../../engines/table-engines/mergetree-family/replication.md) tables. This setting is applied at ClickHouse server start and can't be changed in a user session. For production usage with frequent small insertions or a slow ZooKeeper cluster, it is recommended to use the default value.
Possible values: Possible values:
@ -2672,7 +2694,7 @@ Default value: `0`.
## aggregate_functions_null_for_empty {#aggregate_functions_null_for_empty} ## aggregate_functions_null_for_empty {#aggregate_functions_null_for_empty}
Enables or disables rewriting all aggregate functions in a query, adding [-OrNull](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-ornull) suffix to them. Enable it for SQL standard compatibility. Enables or disables rewriting all aggregate functions in a query, adding [-OrNull](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-ornull) suffix to them. Enable it for SQL standard compatibility.
It is implemented via query rewrite (similar to [count_distinct_implementation](#settings-count_distinct_implementation) setting) to get consistent results for distributed queries. It is implemented via query rewrite (similar to [count_distinct_implementation](#settings-count_distinct_implementation) setting) to get consistent results for distributed queries.
Possible values: Possible values:
@ -2856,7 +2878,7 @@ Default value: `0`.
## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously} ## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}
Adds a modifier `SYNC` to all `DROP` and `DETACH` queries. Adds a modifier `SYNC` to all `DROP` and `DETACH` queries.
Possible values: Possible values:
@ -2962,7 +2984,7 @@ Enables or disables using the original column names instead of aliases in query
Possible values: Possible values:
- 0 — The column name is substituted with the alias. - 0 — The column name is substituted with the alias.
- 1 — The column name is not substituted with the alias. - 1 — The column name is not substituted with the alias.
Default value: `0`. Default value: `0`.
@ -3075,8 +3097,86 @@ SELECT
sum(a), sum(a),
sumCount(b).1, sumCount(b).1,
sumCount(b).2, sumCount(b).2,
(sumCount(b).1) / (sumCount(b).2) (sumCount(b).1) / (sumCount(b).2)
FROM fuse_tbl FROM fuse_tbl
``` ```
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide --> ## flatten_nested {#flatten-nested}
Sets the data format of [nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns.
Possible values:
- 1 — Nested column is flattened to separate arrays.
- 0 — Nested column stays a single array of tuples.
Default value: `1`.
**Usage**
If the setting is set to `0`, it is possible to use an arbitrary level of nesting.
**Examples**
Query:
``` sql
SET flatten_nested = 1;
CREATE TABLE t_nest (`n` Nested(a UInt32, b UInt32)) ENGINE = MergeTree ORDER BY tuple();
SHOW CREATE TABLE t_nest;
```
Result:
``` text
┌─statement───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_nest
(
`n.a` Array(UInt32),
`n.b` Array(UInt32)
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS index_granularity = 8192 │
└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
Query:
``` sql
SET flatten_nested = 0;
CREATE TABLE t_nest (`n` Nested(a UInt32, b UInt32)) ENGINE = MergeTree ORDER BY tuple();
SHOW CREATE TABLE t_nest;
```
Result:
``` text
┌─statement──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_nest
(
`n` Nested(a UInt32, b UInt32)
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS index_granularity = 8192 │
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
## external_table_functions_use_nulls {#external-table-functions-use-nulls}
Defines how [mysql](../../sql-reference/table-functions/mysql.md), [postgresql](../../sql-reference/table-functions/postgresql.md) and [odbc](../../sql-reference/table-functions/odbc.md) table functions use Nullable columns.
Possible values:
- 0 — The table function explicitly uses Nullable columns.
- 1 — The table function implicitly uses Nullable columns.
Default value: `1`.
**Usage**
If the setting is set to `0`, the table function does not make Nullable columns and inserts default values instead of NULL. This is also applicable for NULL values inside arrays.
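For example (the MySQL connection parameters below are hypothetical):

```sql
SET external_table_functions_use_nulls = 0;
-- NULLs from MySQL now arrive as default values of the column type
SELECT * FROM mysql('mysql1:3306', 'db', 'table', 'user', 'password');
```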
View File
@ -0,0 +1,39 @@
# system.data_skipping_indices {#system-data-skipping-indices}
Contains information about existing data skipping indices in all the tables.
Columns:
- `database` ([String](../../sql-reference/data-types/string.md)) — Database name.
- `table` ([String](../../sql-reference/data-types/string.md)) — Table name.
- `name` ([String](../../sql-reference/data-types/string.md)) — Index name.
- `type` ([String](../../sql-reference/data-types/string.md)) — Index type.
- `expr` ([String](../../sql-reference/data-types/string.md)) — Expression used to calculate the index.
- `granularity` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of granules in the block.
**Example**
```sql
SELECT * FROM system.data_skipping_indices LIMIT 2 FORMAT Vertical;
```
```text
Row 1:
──────
database: default
table: user_actions
name: clicks_idx
type: minmax
expr: clicks
granularity: 1
Row 2:
──────
database: default
table: users
name: contacts_null_idx
type: minmax
expr: assumeNotNull(contacts_null)
granularity: 1
```
View File
@ -34,14 +34,14 @@ initial_port: 47588
interface: 1 interface: 1
os_user: bharatnc os_user: bharatnc
client_hostname: tower client_hostname: tower
client_name: ClickHouse client_name: ClickHouse
client_revision: 54437 client_revision: 54437
client_version_major: 20 client_version_major: 20
client_version_minor: 7 client_version_minor: 7
client_version_patch: 2 client_version_patch: 2
http_method: 0 http_method: 0
http_user_agent: http_user_agent:
quota_key: quota_key:
elapsed: 0.000582537 elapsed: 0.000582537
is_cancelled: 0 is_cancelled: 0
read_rows: 0 read_rows: 0
@ -53,12 +53,10 @@ memory_usage: 0
peak_memory_usage: 0 peak_memory_usage: 0
query: SELECT * from system.processes LIMIT 10 FORMAT Vertical; query: SELECT * from system.processes LIMIT 10 FORMAT Vertical;
thread_ids: [67] thread_ids: [67]
ProfileEvents.Names: ['Query','SelectQuery','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','ContextLock','RWLockAcquiredReadLocks'] ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
ProfileEvents.Values: [1,1,36,1,10,1,89,16,1] Settings: {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'}
Settings.Names: ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage']
Settings.Values: ['0','in_order','1','10000000000']
1 rows in set. Elapsed: 0.002 sec. 1 rows in set. Elapsed: 0.002 sec.
``` ```
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/processes) <!--hide--> [Original article](https://clickhouse.tech/docs/en/operations/system_tables/processes) <!--hide-->
View File
@ -84,12 +84,10 @@ Columns:
- `forwarded_for` ([String](../../sql-reference/data-types/string.md)) — HTTP header `X-Forwarded-For` passed in the HTTP query. - `forwarded_for` ([String](../../sql-reference/data-types/string.md)) — HTTP header `X-Forwarded-For` passed in the HTTP query.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The `quota key` specified in the [quotas](../../operations/quotas.md) setting (see `keyed`). - `quota_key` ([String](../../sql-reference/data-types/string.md)) — The `quota key` specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision. - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — ProfileEvents that measure different metrics. Their description can be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events).
- `Settings` ([Map(String, String)](../../sql-reference/data-types/array.md)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `log_comment` ([String](../../sql-reference/data-types/string.md)) — Log comment. It can be set to arbitrary string no longer than [max_query_size](../../operations/settings/settings.md#settings-max_query_size). An empty string if it is not defined. - `log_comment` ([String](../../sql-reference/data-types/string.md)) — Log comment. It can be set to arbitrary string no longer than [max_query_size](../../operations/settings/settings.md#settings-max_query_size). An empty string if it is not defined.
- `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Thread ids that are participating in query execution. - `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Thread ids that are participating in query execution.
- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. The description of them could be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events)
- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
- `Settings.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `Settings.Values` ([Array(String)](../../sql-reference/data-types/array.md)) — Values of settings that are listed in the `Settings.Names` column.
- `used_aggregate_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions`, which were used during query execution. - `used_aggregate_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions`, which were used during query execution.
- `used_aggregate_function_combinators` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions combinators`, which were used during query execution. - `used_aggregate_function_combinators` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions combinators`, which were used during query execution.
- `used_database_engines` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `database engines`, which were used during query execution. - `used_database_engines` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `database engines`, which were used during query execution.
@ -109,68 +107,49 @@ SELECT * FROM system.query_log WHERE type = 'QueryFinish' AND (query LIKE '%toDa
``` text ``` text
Row 1: Row 1:
────── ──────
type: QueryFinish type: QueryStart
event_date: 2021-03-18 event_date: 2020-09-11
event_time: 2021-03-18 20:54:18 event_time: 2020-09-11 10:08:17
event_time_microseconds: 2021-03-18 20:54:18.676686 event_time_microseconds: 2020-09-11 10:08:17.063321
query_start_time: 2021-03-18 20:54:18 query_start_time: 2020-09-11 10:08:17
query_start_time_microseconds: 2021-03-18 20:54:18.673934 query_start_time_microseconds: 2020-09-11 10:08:17.063321
query_duration_ms: 2 query_duration_ms: 0
read_rows: 100 read_rows: 0
read_bytes: 800 read_bytes: 0
written_rows: 0 written_rows: 0
written_bytes: 0 written_bytes: 0
result_rows: 2 result_rows: 0
result_bytes: 4858 result_bytes: 0
memory_usage: 0 memory_usage: 0
current_database: default current_database: default
query: SELECT uniqArray([1, 1, 2]), SUBSTRING('Hello, world', 7, 5), flatten([[[BIT_AND(123)]], [[mod(3, 2)], [CAST('1' AS INTEGER)]]]), week(toDate('2000-12-05')), CAST(arrayJoin([NULL, NULL]) AS Nullable(TEXT)), avgOrDefaultIf(number, number % 2), sumOrNull(number), toTypeName(sumOrNull(number)), countIf(toDate('2000-12-05') + number as d, toDayOfYear(d) % 2) FROM numbers(100) query: INSERT INTO test1 VALUES
normalized_query_hash: 17858008518552525706 exception_code: 0
query_kind: Select
databases: ['_table_function']
tables: ['_table_function.numbers']
columns: ['_table_function.numbers.number']
exception_code: 0
exception: exception:
stack_trace: stack_trace:
is_initial_query: 1 is_initial_query: 1
user: default user: default
query_id: 58f3d392-0fa0-4663-ae1d-29917a1a9c9c query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef
address: ::ffff:127.0.0.1 address: ::ffff:127.0.0.1
port: 37486 port: 33452
initial_user: default initial_user: default
initial_query_id: 58f3d392-0fa0-4663-ae1d-29917a1a9c9c initial_query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef
initial_address: ::ffff:127.0.0.1 initial_address: ::ffff:127.0.0.1
initial_port: 37486 initial_port: 33452
interface: 1 interface: 1
os_user: sevirov os_user: bharatnc
client_hostname: clickhouse.ru-central1.internal client_hostname: tower
client_name: ClickHouse client_name: ClickHouse
client_revision: 54447 client_revision: 54437
client_version_major: 21 client_version_major: 20
client_version_minor: 4 client_version_minor: 7
client_version_patch: 1 client_version_patch: 2
http_method: 0 http_method: 0
http_user_agent: http_user_agent:
http_referer:
forwarded_for:
quota_key: quota_key:
revision: 54449 revision: 54440
log_comment: thread_ids: []
thread_ids: [587,11939] ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
ProfileEvents.Names: ['Query','SelectQuery','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','ArenaAllocChunks','ArenaAllocBytes','FunctionExecute','TableFunctionExecute','NetworkSendElapsedMicroseconds','SelectedRows','SelectedBytes','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SystemTimeMicroseconds','SoftPageFaults','OSCPUVirtualTimeMicroseconds','OSWriteBytes'] Settings: {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'}
ProfileEvents.Values: [1,1,36,1,10,2,1048680,1,4096,36,1,110,100,800,77,1,3137,1476,1101,8,2577,8192]
Settings.Names: ['load_balancing','max_memory_usage']
Settings.Values: ['random','10000000000']
used_aggregate_functions: ['groupBitAnd','avg','sum','count','uniq']
used_aggregate_function_combinators: ['OrDefault','If','OrNull','Array']
used_database_engines: []
used_data_type_families: ['String','Array','Int32','Nullable']
used_dictionaries: []
used_formats: []
used_functions: ['toWeek','CAST','arrayFlatten','toTypeName','toDayOfYear','addDays','array','toDate','modulo','substring','plus']
used_storages: []
used_table_functions: ['numbers']
``` ```
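Since `ProfileEvents` and `Settings` are now `Map` columns, individual entries can be read by key; a sketch (the selected keys are arbitrary examples):

```sql
SELECT
    query_id,
    ProfileEvents['SelectedRows'] AS selected_rows,
    Settings['max_memory_usage']  AS max_memory_usage
FROM system.query_log
WHERE type = 'QueryFinish'
LIMIT 5;
```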
**See Also** **See Also**
View File
@ -58,8 +58,7 @@ Columns:
- `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request. - `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](../../operations/quotas.md) setting (see `keyed`). - `quota_key` ([String](../../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision. - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics for this thread. The description of them could be found in the table [system.events](#system_tables-events). - `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — ProfileEvents that measure different metrics for this thread. The description of them could be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column.
**Example** **Example**
@ -98,17 +97,16 @@ initial_port: 33452
interface: 1 interface: 1
os_user: bharatnc os_user: bharatnc
client_hostname: tower client_hostname: tower
client_name: ClickHouse client_name: ClickHouse
client_revision: 54437 client_revision: 54437
client_version_major: 20 client_version_major: 20
client_version_minor: 7 client_version_minor: 7
client_version_patch: 2 client_version_patch: 2
http_method: 0 http_method: 0
http_user_agent: http_user_agent:
quota_key: quota_key:
revision: 54440 revision: 54440
ProfileEvents.Names: ['Query','InsertQuery','FileOpen','WriteBufferFromFileDescriptorWrite','WriteBufferFromFileDescriptorWriteBytes','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','FunctionExecute','CreatedWriteBufferOrdinary','DiskWriteElapsedMicroseconds','NetworkReceiveElapsedMicroseconds','NetworkSendElapsedMicroseconds','InsertedRows','InsertedBytes','SelectedRows','SelectedBytes','MergeTreeDataWriterRows','MergeTreeDataWriterUncompressedBytes','MergeTreeDataWriterCompressedBytes','MergeTreeDataWriterBlocks','MergeTreeDataWriterBlocksAlreadySorted','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SoftPageFaults','OSCPUVirtualTimeMicroseconds','OSWriteBytes','OSReadChars','OSWriteChars'] ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
ProfileEvents.Values: [1,1,11,11,591,148,3,71,29,6533808,1,11,72,18,47,1,12,1,12,1,12,189,1,1,10,2,70853,2748,49,2747,45056,422,1520]
``` ```
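A sketch of reading a single counter per thread from the `ProfileEvents` map (the chosen counter is an arbitrary example):

```sql
SELECT thread_id, ProfileEvents['RealTimeMicroseconds'] AS real_time_us
FROM system.query_thread_log
ORDER BY event_time DESC
LIMIT 5;
```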
**See Also** **See Also**
View File
@ -30,14 +30,6 @@ Do not disable overcommit. The value `cat /proc/sys/vm/overcommit_memory` should
$ echo 0 | sudo tee /proc/sys/vm/overcommit_memory $ echo 0 | sudo tee /proc/sys/vm/overcommit_memory
``` ```
## Huge Pages {#huge-pages}
Always disable transparent huge pages. It interferes with memory allocators, which leads to significant performance degradation.
``` bash
$ echo 'madvise' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
```
Use `perf top` to watch the time spent in the kernel for memory management. Use `perf top` to watch the time spent in the kernel for memory management.
Permanent huge pages also do not need to be allocated. Permanent huge pages also do not need to be allocated.
@ -91,6 +83,15 @@ The Linux kernel prior to 3.2 had a multitude of problems with IPv6 implementati
Use at least a 10 GB network, if possible. 1 Gb will also work, but it will be much worse for patching replicas with tens of terabytes of data, or for processing distributed queries with a large amount of intermediate data. Use at least a 10 GB network, if possible. 1 Gb will also work, but it will be much worse for patching replicas with tens of terabytes of data, or for processing distributed queries with a large amount of intermediate data.
## Huge Pages {#huge-pages}
If you are using an old Linux kernel, disable transparent huge pages. They interfere with memory allocators, which leads to significant performance degradation.
On newer Linux kernels, transparent huge pages are alright.
``` bash
$ echo 'madvise' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
```
## Hypervisor configuration ## Hypervisor configuration
If you are using OpenStack, set If you are using OpenStack, set
View File
@ -1,37 +0,0 @@
---
toc_priority: 150
---
## initializeAggregation {#initializeaggregation}
Initializes aggregation for your input rows. It is intended for the functions with the suffix `State`.
Use it for tests or to process columns of types `AggregateFunction` and `AggregatingMergeTree`.
**Syntax**
``` sql
initializeAggregation (aggregate_function, column_1, column_2)
```
**Arguments**
- `aggregate_function` — Name of the aggregate function whose state should be created. [String](../../../sql-reference/data-types/string.md#string).
- `column_n` — The column to pass to the function as its argument. [String](../../../sql-reference/data-types/string.md#string).
**Returned value(s)**
Returns the result of the aggregation for your input rows. The return type will be the same as the return type of the function that `initializeAggregation` takes as its first argument.
For example, for functions with the suffix `State`, the return type will be `AggregateFunction`.
**Example**
Query:
```sql
SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM system.numbers LIMIT 10000);
```
Result:
┌─uniqMerge(state)─┐
│ 3 │
└──────────────────┘
View File
@ -74,4 +74,26 @@ Received exception from server (version 1.1.54388):
Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not. Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not.
``` ```
[Original article](https://clickhouse.tech/docs/en/data_types/array/) <!--hide--> ## Array Size {#array-size}
It is possible to find the size of an array by using the `size0` subcolumn without reading the whole column. For multi-dimensional arrays you can use `sizeN-1`, where `N` is the desired dimension.
**Example**
Query:
```sql
CREATE TABLE t_arr (`arr` Array(Array(Array(UInt32)))) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO t_arr VALUES ([[[12, 13, 0, 1],[12]]]);
SELECT arr.size0, arr.size1, arr.size2 FROM t_arr;
```
Result:
``` text
┌─arr.size0─┬─arr.size1─┬─arr.size2─┐
│ 1 │ [2] │ [[4,1]] │
└───────────┴───────────┴───────────┘
```
View File
@ -34,7 +34,7 @@ CREATE TABLE test.visits
This example declares the `Goals` nested data structure, which contains data about conversions (goals reached). Each row in the visits table can correspond to zero or any number of conversions. This example declares the `Goals` nested data structure, which contains data about conversions (goals reached). Each row in the visits table can correspond to zero or any number of conversions.
Only a single nesting level is supported. Columns of nested structures containing arrays are equivalent to multidimensional arrays, so they have limited support (there is no support for storing these columns in tables with the MergeTree engine). When [flatten_nested](../../../operations/settings/settings.md#flatten-nested) is set to `0` (which is not the default), arbitrary levels of nesting are supported.
In most cases, when working with a nested data structure, its columns are specified with column names separated by a dot. These columns make up an array of matching types. All the column arrays of a single nested data structure have the same length. In most cases, when working with a nested data structure, its columns are specified with column names separated by a dot. These columns make up an array of matching types. All the column arrays of a single nested data structure have the same length.
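For instance, with the `Goals` structure declared above, a query might look like this (a sketch; the filter value is arbitrary):

```sql
SELECT Goals.ID, Goals.EventTime
FROM test.visits
ARRAY JOIN Goals
WHERE CounterID = 101500
LIMIT 3;
```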
View File
@ -20,6 +20,33 @@ To store `Nullable` type values in a table column, ClickHouse uses a separate fi
!!! info "Note" !!! info "Note"
Using `Nullable` almost always negatively affects performance, keep this in mind when designing your databases. Using `Nullable` almost always negatively affects performance, keep this in mind when designing your databases.
## Finding NULL {#finding-null}
It is possible to find `NULL` values in a column by using the `null` subcolumn without reading the whole column. It returns `1` if the corresponding value is `NULL` and `0` otherwise.
**Example**
Query:
``` sql
CREATE TABLE nullable (`n` Nullable(UInt32)) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO nullable VALUES (1) (NULL) (2) (NULL);
SELECT n.null FROM nullable;
```
Result:
``` text
┌─n.null─┐
│ 0 │
│ 1 │
│ 0 │
│ 1 │
└────────┘
```
## Usage Example {#usage-example} ## Usage Example {#usage-example}
``` sql ``` sql
View File
@ -47,4 +47,32 @@ SELECT tuple(1, NULL) AS x, toTypeName(x)
└──────────┴─────────────────────────────────┘ └──────────┴─────────────────────────────────┘
``` ```
## Addressing Tuple Elements {#addressing-tuple-elements}
It is possible to read elements of named tuples using indexes and names:
``` sql
CREATE TABLE named_tuples (`a` Tuple(s String, i Int64)) ENGINE = Memory;
INSERT INTO named_tuples VALUES (('y', 10)), (('x',-10));
SELECT a.s FROM named_tuples;
SELECT a.2 FROM named_tuples;
```
Result:
``` text
┌─a.s─┐
│ y │
│ x │
└─────┘
┌─tupleElement(a, 2)─┐
│ 10 │
│ -10 │
└────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/data_types/tuple/) <!--hide--> [Original article](https://clickhouse.tech/docs/en/data_types/tuple/) <!--hide-->
View File
@ -486,6 +486,7 @@ Example of settings:
<table>table_name</table> <table>table_name</table>
<where>id=10</where> <where>id=10</where>
<invalidate_query>SQL_QUERY</invalidate_query> <invalidate_query>SQL_QUERY</invalidate_query>
<fail_on_connection_loss>true</fail_on_connection_loss>
</mysql> </mysql>
</source> </source>
``` ```
@ -503,6 +504,7 @@ SOURCE(MYSQL(
table 'table_name' table 'table_name'
where 'id=10' where 'id=10'
invalidate_query 'SQL_QUERY' invalidate_query 'SQL_QUERY'
fail_on_connection_loss 'true'
)) ))
``` ```
@ -527,6 +529,8 @@ Setting fields:
- `invalidate_query` Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). - `invalidate_query` Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
- `fail_on_connection_loss` – The configuration parameter that controls the behavior of the server on connection loss. If `true`, an exception is thrown immediately if the connection between client and server is lost. If `false`, the ClickHouse server retries executing the query three times before throwing an exception. Note that retrying leads to increased response times. Default value: `false`.
MySQL can be connected on a local host via sockets. To do this, set `host` and `socket`. MySQL can be connected on a local host via sockets. To do this, set `host` and `socket`.
Example of settings: Example of settings:
@ -542,6 +546,7 @@ Example of settings:
<table>table_name</table> <table>table_name</table>
<where>id=10</where> <where>id=10</where>
<invalidate_query>SQL_QUERY</invalidate_query> <invalidate_query>SQL_QUERY</invalidate_query>
<fail_on_connection_loss>true</fail_on_connection_loss>
</mysql> </mysql>
</source> </source>
``` ```
@ -558,6 +563,7 @@ SOURCE(MYSQL(
table 'table_name' table 'table_name'
where 'id=10' where 'id=10'
invalidate_query 'SQL_QUERY' invalidate_query 'SQL_QUERY'
fail_on_connection_loss 'true'
)) ))
``` ```
View File
@ -39,13 +39,44 @@ Accepts zero arguments and returns an empty array of the appropriate type.
Accepts an empty array and returns a one-element array that is equal to the default value. Accepts an empty array and returns a one-element array that is equal to the default value.
## range(end), range(start, end \[, step\]) {#rangeend-rangestart-end-step}
Returns an array of numbers from start to end-1 by step. ## range(end), range(\[start, \] end \[, step\]) {#range}
If the argument `start` is not specified, defaults to 0.
If the argument `step` is not specified, defaults to 1. Returns an array of `UInt` numbers from `start` to `end - 1` by `step`.
It behaves almost like the Python `range`. The difference is that all argument types must be `UInt` numbers.
Just in case, an exception is thrown if arrays with a total length of more than 100,000,000 elements are created in a data block. **Syntax**
``` sql
range([start, ] end [, step])
```
**Arguments**
- `start` — The first element of the array. Optional, required if `step` is used. Default value: 0. [UInt](../data-types/int-uint.md)
- `end` — The number before which the array is constructed. Required. [UInt](../data-types/int-uint.md)
- `step` — Determines the incremental step between each element in the array. Optional. Default value: 1. [UInt](../data-types/int-uint.md)
**Returned value**
- Array of `UInt` numbers from `start` to `end - 1` by `step`.
**Implementation details**
- All arguments must be positive values: `start`, `end`, `step` are `UInt` data types, as well as elements of the returned array.
- An exception is thrown if a query results in arrays with a total length of more than 100,000,000 elements.
**Examples**
Query:
``` sql
SELECT range(5), range(1, 5), range(1, 5, 2);
```
Result:
```txt
┌─range(5)────┬─range(1, 5)─┬─range(1, 5, 2)─┐
│ [0,1,2,3,4] │ [1,2,3,4] │ [1,3] │
└─────────────┴─────────────┴────────────────┘
```
## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1} ## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1}
View File
@ -831,7 +831,7 @@ Returns 0 for the first row and the difference from the previous row for each su
!!! warning "Warning" !!! warning "Warning"
It can reach the previous row only inside the currently processed data block. It can reach the previous row only inside the currently processed data block.
The result of the function depends on the affected data blocks and the order of data in the block. The result of the function depends on the affected data blocks and the order of data in the block.
The order of rows used during the calculation of `runningDifference` can differ from the order of rows returned to the user.
@ -908,7 +908,7 @@ Same as for [runningDifference](./other-functions.md#other_functions-runningdiff
## runningConcurrency {#runningconcurrency} ## runningConcurrency {#runningconcurrency}
Calculates the number of concurrent events. Calculates the number of concurrent events.
Each event has a start time and an end time. The start time is included in the event, while the end time is excluded. Columns with a start time and an end time must be of the same data type. Each event has a start time and an end time. The start time is included in the event, while the end time is excluded. Columns with a start time and an end time must be of the same data type.
The function calculates the total number of active (concurrent) events for each event start time. The function calculates the total number of active (concurrent) events for each event start time.
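A sketch of the call, with inline `[start, end)` intervals ordered by start time (supplied here via the `values` table function; the dates are arbitrary examples):

```sql
SELECT start, runningConcurrency(start, end) AS concurrent
FROM values('start Date, end Date',
    ('2021-03-03', '2021-03-11'),
    ('2021-03-06', '2021-03-12'),
    ('2021-03-07', '2021-03-08'));
```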
@ -1424,11 +1424,83 @@ Result:
└───────────┴────────┘ └───────────┴────────┘
``` ```
## initializeAggregation {#initializeaggregation}
Calculates the result of an aggregate function based on a single value. This function is intended for initializing aggregate functions with the [-State](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-state) combinator. You can create states of aggregate functions and insert them into columns of type [AggregateFunction](../../sql-reference/data-types/aggregatefunction.md#data-type-aggregatefunction) or use initialized aggregates as default values.
**Syntax**
``` sql
initializeAggregation (aggregate_function, arg1, arg2, ..., argN)
```
**Arguments**
- `aggregate_function` — Name of the aggregation function to initialize. [String](../../sql-reference/data-types/string.md).
- `arg` — Arguments of aggregate function.
**Returned value(s)**
- Result of aggregation for every row passed to the function.
The return type is the same as the return type of the function that `initializeAggregation` takes as its first argument.
**Example**
Query:
```sql
SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM numbers(10000));
```
Result:
```text
┌─uniqMerge(state)─┐
│ 3 │
└──────────────────┘
```
Query:
```sql
SELECT finalizeAggregation(state), toTypeName(state) FROM (SELECT initializeAggregation('sumState', number % 3) AS state FROM numbers(5));
```
Result:
```text
┌─finalizeAggregation(state)─┬─toTypeName(state)─────────────┐
│ 0 │ AggregateFunction(sum, UInt8) │
│ 1 │ AggregateFunction(sum, UInt8) │
│ 2 │ AggregateFunction(sum, UInt8) │
│ 0 │ AggregateFunction(sum, UInt8) │
│ 1 │ AggregateFunction(sum, UInt8) │
└────────────────────────────┴───────────────────────────────┘
```
Example with `AggregatingMergeTree` table engine and `AggregateFunction` column:
```sql
CREATE TABLE metrics
(
key UInt64,
value AggregateFunction(sum, UInt64) DEFAULT initializeAggregation('sumState', toUInt64(0))
)
ENGINE = AggregatingMergeTree
ORDER BY key
```
```sql
INSERT INTO metrics VALUES (0, initializeAggregation('sumState', toUInt64(42)))
```
**See Also**
- [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce)
## finalizeAggregation {#function-finalizeaggregation} ## finalizeAggregation {#function-finalizeaggregation}
Takes a state of an aggregate function. Returns the result of aggregation (or the finalized state when using the [-State](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-state) combinator).
**Syntax** **Syntax**
``` sql ``` sql
finalizeAggregation(state) finalizeAggregation(state)
@ -1442,7 +1514,7 @@ finalizeAggregation(state)
- Value/values that were aggregated.
Type: Value of any type that was aggregated.
**Examples** **Examples**
@ -1474,7 +1546,7 @@ Result:
└──────────────────────────────────┘ └──────────────────────────────────┘
``` ```
Note that `NULL` values are ignored. Note that `NULL` values are ignored.
Query: Query:
@ -1520,10 +1592,9 @@ Result:
└────────┴─────────────┴────────────────┘ └────────┴─────────────┴────────────────┘
``` ```
**See Also** **See Also**
- [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce) - [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce)
- [initializeAggregation](../../sql-reference/aggregate-functions/reference/initializeAggregation.md) - [initializeAggregation](#initializeaggregation)
## runningAccumulate {#runningaccumulate} ## runningAccumulate {#runningaccumulate}
View File
@ -119,7 +119,7 @@ For manage uncompressed data cache parameters use following server level setting
## DROP COMPILED EXPRESSION CACHE {#query_language-system-drop-compiled-expression-cache} ## DROP COMPILED EXPRESSION CACHE {#query_language-system-drop-compiled-expression-cache}
Reset the compiled expression cache. Used in development of ClickHouse and performance tests. Reset the compiled expression cache. Used in development of ClickHouse and performance tests.
Complied expression cache used when query/user/profile enable option [compile](../../operations/settings/settings.md#compile) The compiled expression cache is used when the query/user/profile enables the [compile_expressions](../../operations/settings/settings.md#compile-expressions) option.
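For example:

```sql
SYSTEM DROP COMPILED EXPRESSION CACHE
```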
## FLUSH LOGS {#query_language-system-flush_logs} ## FLUSH LOGS {#query_language-system-flush_logs}
View File
@ -39,6 +39,18 @@ Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are currently executed on t
The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes. The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes.
Supports multiple replicas that must be listed by `|`. For example:
```sql
SELECT name FROM mysql(`mysql{1|2|3}:3306`, 'mysql_database', 'mysql_table', 'user', 'password');
```
or
```sql
SELECT name FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'mysql_database', 'mysql_table', 'user', 'password');
```
**Returned Value** **Returned Value**
A table object with the same columns as the original MySQL table. A table object with the same columns as the original MySQL table.
View File
@ -43,8 +43,20 @@ PostgreSQL Array types converts into ClickHouse arrays.
!!! info "Note" !!! info "Note"
Be careful, in PostgreSQL an array data type column like Integer[] may contain arrays of different dimensions in different rows, but in ClickHouse it is only allowed to have multidimensional arrays of the same dimension in all rows. Be careful, in PostgreSQL an array data type column like Integer[] may contain arrays of different dimensions in different rows, but in ClickHouse it is only allowed to have multidimensional arrays of the same dimension in all rows.
Supports multiple replicas that must be listed by `|`. For example:
Supports replicas priority for PostgreSQL dictionary source. The bigger the number in map, the less the priority. The highest priority is `0`. ```sql
SELECT name FROM postgresql(`postgres{1|2|3}:5432`, 'postgres_database', 'postgres_table', 'user', 'password');
```
or
```sql
SELECT name FROM postgresql(`postgres1:5431|postgres2:5432`, 'postgres_database', 'postgres_table', 'user', 'password');
```
Replica priority for the PostgreSQL dictionary source is supported: the bigger the number in the map, the lower the priority. The highest priority is `0`.
**Examples** **Examples**
View File
@ -817,22 +817,6 @@ load_balancing = first_or_random
For consistency (to get different parts of the same data split), this option only works when the sampling key is set.
Replica lag is not controlled.
## compile {#compile}
Enables compilation of queries. By default, 0 (disabled).
Compilation is only used for part of the query-processing pipeline.
If this part of the pipeline was compiled, the query may run faster due to the deployment of short cycles and inlining of aggregate function calls. The maximum performance improvement is seen for queries with multiple simple aggregate functions. Typically, the performance gain is insignificant. In very rare cases, it may slow down query execution.
## min_count_to_compile {#min-count-to-compile}
How many times to potentially use a compiled chunk of code before running compilation. By default, 3.
For testing, the value can be set to 0: compilation runs synchronously and the query waits for the end of the compilation process before continuing execution. For all other cases, use values starting with 1. Compilation normally takes about 5-10 seconds.
If the value is 1 or more, compilation occurs asynchronously in a separate thread. The result will be used as soon as it is ready, including queries that are currently running.
Compiled code is required for each different combination of aggregate functions used in the query and the type of keys in the GROUP BY clause.
The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they don't use very much space. Old results will be used after server restarts, except in the case of a server upgrade; in this case, the old results are deleted.
## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers}
If the value is true, integers appear in quotes when using JSON\* Int64 and UInt64 formats (for compatibility with most JavaScript implementations).
View File
@ -625,10 +625,8 @@ ClickHouse creates this table. [query_log](server-configur
- `quota_key` (String) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
- `revision` (UInt32) — ClickHouse revision.
- `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution.
- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. The description of them could be found in the table [system events](#system_tables-events) - `ProfileEvents` (Map(String, UInt64)) — ProfileEvents that measure different metrics. The description of them could be found in the table [system events](#system_tables-events)
- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` column. - `Settings` (Map(String, String)) — Settings that were changed when the client ran the query.
- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` column.
Depending on the status of the query, one or two rows are recorded in the `query_log` table for each query:
@ -698,8 +696,7 @@ ClickHouse creates this table. [query_thread_log](server-c
- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request.
- `quota_key` (String) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
- `revision` (UInt32) — ClickHouse revision.
- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [system events](#system_tables-events) - `ProfileEvents` (Map(String, UInt64)) — ProfileEvents that measure different metrics for this thread. The description of them could be found in the table [system events](#system_tables-events)
- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column.
By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query_thread_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
View File
@ -0,0 +1,53 @@
---
toc_priority: 12
toc_title: ExternalDistributed
---
# ExternalDistributed {#externaldistributed}
The `ExternalDistributed` engine allows you to run `SELECT` queries on tables stored on a remote MySQL or PostgreSQL server. It accepts the [MySQL](../../../engines/table-engines/integrations/mysql.md) or [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) table engines as an argument, so sharding is possible.
## Creating a Table {#creating-a-table}
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
...
) ENGINE = ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password');
```
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
The table structure can differ from the original table structure:
- Column names should be the same as in the original table, but you can use just some of these columns and in any order.
- Column types may differ from those in the original table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
**Engine Parameters**
- `engine` — The `MySQL` or `PostgreSQL` table engine.
- `host:port` — MySQL or PostgreSQL server address.
- `database` — Database name on the server.
- `table` — Table name.
- `user` — User name.
- `password` — User password.
## Implementation Details {#implementation-details}
Поддерживает несколько реплик, которые должны быть перечислены через `|`, а шарды — через `,`. Например:
```sql
CREATE TABLE test_shards (id UInt32, name String, age UInt32, money UInt32) ENGINE = ExternalDistributed('MySQL', `mysql{1|2}:3306,mysql{3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse');
```
When specifying replicas, one of the available replicas is selected for each shard when reading. If the connection fails, the next replica is selected, and so on for all the replicas. If all connection attempts fail for all the replicas, the ClickHouse server retries connecting to the replicas in a round-robin fashion several more times.

You can specify any number of shards and any number of replicas for each shard, as in the sketch below.
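For instance, a sketch of a two-shard layout with two replicas per shard on top of PostgreSQL (host names, database, table, and credentials here are placeholders):

``` sql
-- Shards are separated by commas, replicas within a shard by pipes.
CREATE TABLE pg_shards (id UInt32, name String)
ENGINE = ExternalDistributed('PostgreSQL', `postgres{1|2}:5432,postgres{3|4}:5432`, 'clickhouse', 'test_table', 'postgres', 'password');
```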
**See Also**

- [MySQL table engine](../../../engines/table-engines/integrations/mysql.md)
- [PostgreSQL table engine](../../../engines/table-engines/integrations/postgresql.md)
- [Distributed table engine](../../../engines/table-engines/special/distributed.md)


@ -20,11 +20,11 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.

The table structure can differ from the original MySQL table structure:

- Column names should be the same as in the original MySQL table, but you can use just some of these columns and in any order.
- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../engines/database-engines/mysql.md#data_types-support) values to the ClickHouse data types.
- The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of NULL. This also applies to NULL values inside arrays.

**Engine Parameters**
@ -50,6 +50,12 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes.

Supports multiple replicas, which must be listed with `|`. For example:
```sql
CREATE TABLE test_replicas (id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL(`mysql{2|3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse');
```
## Usage Example {#primer-ispolzovaniia}

The table in MySQL:


@ -29,7 +29,7 @@ ENGINE = ODBC(connection_settings, external_database, external_table)
- Column names should be the same as in the original table, but you can use just some of these columns and in any order.
- Column types may differ from those in the original table. ClickHouse tries to [cast](../../../engines/table-engines/integrations/odbc.md#type_conversion_function-cast) values to the ClickHouse data types.
- The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of NULL. This also applies to NULL values inside arrays.

**Engine Parameters**


@ -20,19 +20,19 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.

The table structure can differ from the original PostgreSQL table structure:

- Column names should be the same as in the original PostgreSQL table, but you can use just some of these columns and in any order.
- Column types may differ from those in the original PostgreSQL table. ClickHouse tries to [cast](../../../engines/database-engines/postgresql.md#data_types-support) values to the ClickHouse data types.
- The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of NULL. This also applies to NULL values inside arrays.

**Engine Parameters**

- `host:port` — the PostgreSQL server address.
- `database` — the database name.
- `table` — the table name.
- `user` — the PostgreSQL user name.
- `password` — the PostgreSQL user password.
- `schema` — the schema name, if the default schema is not used. Optional parameter.

## Implementation Details {#implementation-details}
@ -49,6 +49,12 @@ PostgreSQL arrays are converted into ClickHo
!!! info "Attention"
    Be careful: in PostgreSQL, arrays created as `type_name[]` are multidimensional and can contain different numbers of dimensions in different rows of the same table. In ClickHouse, only multidimensional arrays with the same number of dimensions in all rows are allowed.

Supports multiple replicas, which must be listed with `|`. For example:
```sql
CREATE TABLE test_replicas (id UInt32, name String) ENGINE = PostgreSQL(`postgres{2|3|4}:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword');
```
When using a PostgreSQL dictionary, replica priority is supported. The larger the number of the replica, the lower its priority. The highest priority belongs to the replica with number `0`.


@ -348,7 +348,7 @@ INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2;
## input_format_null_as_default {#settings-input-format-null-as-default}

Enables or disables initializing [NULL](../../sql-reference/syntax.md#null-literal) cells with [default values](../../sql-reference/statements/create/table.md#create-default-values) if the data type of the column does not allow [storing NULL](../../sql-reference/data-types/nullable.md#data_type-nullable).

If the column does not allow storing `NULL` and this setting is disabled, inserting `NULL` raises an exception. If the column allows storing `NULL`, `NULL` values are inserted regardless of this setting.

This setting is used for [INSERT ... VALUES](../../sql-reference/statements/insert-into.md) queries with text input formats.
@ -361,7 +361,7 @@ INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2;
## insert_null_as_default {#insert_null_as_default}

Enables or disables inserting [default values](../../sql-reference/statements/create/table.md#create-default-values) instead of [NULL](../../sql-reference/syntax.md#null-literal) into columns that do not allow [storing NULL](../../sql-reference/data-types/nullable.md#data_type-nullable).

If the column does not allow storing `NULL` and this setting is disabled, inserting `NULL` raises an exception. If the column allows storing `NULL`, `NULL` values are inserted regardless of this setting.

This setting is used for [INSERT ... SELECT](../../sql-reference/statements/insert-into.md#insert_query_insert-select) queries. Note that the `SELECT` subqueries may be concatenated with `UNION ALL`.
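A minimal sketch of the effect (the table and values are hypothetical):

``` sql
CREATE TABLE dst (x UInt32) ENGINE = Memory;

SET insert_null_as_default = 1;
-- The NULL produced by the SELECT is replaced with the default value
-- for UInt32 (0) instead of raising an exception.
INSERT INTO dst SELECT NULL UNION ALL SELECT 1;
```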
@ -1181,22 +1181,22 @@ load_balancing = round_robin
!!! warning "Warning"
    Parallel query execution can produce an incorrect result if the query contains a join or subqueries and the tables do not meet certain requirements. See [Distributed subqueries and max_parallel_replicas](../../sql-reference/operators/in.md#max_parallel_replica-subqueries) for details.
## compile_expressions {#compile-expressions}

Enables or disables compilation of frequently used functions and operators. Compilation to the platform's native code is performed with LLVM at runtime.

Possible values:

- 0 — compilation is disabled.
- 1 — compilation is enabled.

Default value: `1`.

## min_count_to_compile_expression {#min-count-to-compile-expression}

The minimum number of executions of one and the same expression before it is compiled.

Default value: `3`.
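A quick sketch of how the two settings interact (the query itself is arbitrary):

``` sql
SET compile_expressions = 1;
SET min_count_to_compile_expression = 3;

-- The filter expression is JIT-compiled to native code with LLVM only after
-- it has been executed min_count_to_compile_expression times.
SELECT count() FROM numbers(1000000) WHERE number * 2 + 1 > 100;
```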
## input_format_skip_unknown_fields {#input-format-skip-unknown-fields}
@ -2721,7 +2721,7 @@ SELECT * FROM test2;
- 0 — an `INSERT` query appends new data to the end of the file.
- 1 — an `INSERT` query replaces the existing contents of the file with new data.

Default value: `0`.
## allow_experimental_geo_types {#allow-experimental-geo-types}
@ -2735,7 +2735,7 @@ SELECT * FROM test2;
## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}

Adds the `SYNC` modifier to all `DROP` and `DETACH` queries.

Possible values:
@ -2813,7 +2813,7 @@ SELECT * FROM test2;
**Example**

The changes brought by enabling and disabling the setting:

Query:
@ -2957,4 +2957,83 @@ SELECT
FROM fuse_tbl
```
## flatten_nested {#flatten-nested}

Sets the data format of [nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns.

Possible values:

- 1 — the nested column is flattened to separate arrays.
- 0 — the nested column stays a single array of tuples.

Default value: `1`.

**Usage**

If the setting is set to `0`, it is possible to use an arbitrary level of nesting.

**Examples**

Query:
``` sql
SET flatten_nested = 1;
CREATE TABLE t_nest (`n` Nested(a UInt32, b UInt32)) ENGINE = MergeTree ORDER BY tuple();
SHOW CREATE TABLE t_nest;
```
Result:
``` text
┌─statement───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_nest
(
`n.a` Array(UInt32),
`n.b` Array(UInt32)
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS index_granularity = 8192 │
└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
Query:
``` sql
SET flatten_nested = 0;
CREATE TABLE t_nest (`n` Nested(a UInt32, b UInt32)) ENGINE = MergeTree ORDER BY tuple();
SHOW CREATE TABLE t_nest;
```
Result:
``` text
┌─statement──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ CREATE TABLE default.t_nest
(
`n` Nested(a UInt32, b UInt32)
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS index_granularity = 8192 │
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
## external_table_functions_use_nulls {#external-table-functions-use-nulls}
Defines how the [mysql](../../sql-reference/table-functions/mysql.md), [postgresql](../../sql-reference/table-functions/postgresql.md) and [odbc](../../sql-reference/table-functions/odbc.md) table functions use Nullable columns.

Possible values:

- 0 — the table function explicitly uses Nullable columns.
- 1 — the table function implicitly uses Nullable columns.

Default value: `1`.

**Usage**

If the setting is set to `0`, the table function does not make Nullable columns and inserts default values instead of NULL. This also applies to NULL values inside arrays.
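As a sketch with the mysql table function (the host, database, table, and credentials below are placeholders):

``` sql
SET external_table_functions_use_nulls = 0;
-- A nullable MySQL column `a` is now mapped to a non-Nullable ClickHouse type;
-- NULL values come back as the scalar default (0 for numbers, '' for strings).
SELECT a FROM mysql('localhost:3306', 'test', 'some_table', 'user', 'password');
```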


@ -84,12 +84,10 @@ ClickHouse does not delete data from the table automati
- `forwarded_for` ([String](../../sql-reference/data-types/string.md)) — the `X-Forwarded-For` HTTP header.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — the `quota key` from the [quotas](quotas.md) settings (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — counters that measure different metrics. Their description can be found in the table [system.events](#system_tables-events).
- `Settings` ([Map(String, String)](../../sql-reference/data-types/array.md)) — settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `log_comment` ([String](../../sql-reference/data-types/string.md)) — comment on the log entry. An arbitrary string no longer than [max_query_size](../../operations/settings/settings.md#settings-max_query_size). An empty string if the comment is not defined.
- `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — IDs of the threads that participate in query processing.
- `used_aggregate_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — canonical names of the `aggregate functions` used during query execution.
- `used_aggregate_function_combinators` ([Array(String)](../../sql-reference/data-types/array.md)) — canonical names of the `aggregate function combinators` used during query execution.
- `used_database_engines` ([Array(String)](../../sql-reference/data-types/array.md)) — canonical names of the `database engines` used during query execution.
@ -109,68 +107,49 @@ SELECT * FROM system.query_log WHERE type = 'QueryFinish' AND (query LIKE '%toDa
``` text
Row 1:
──────
type: QueryStart
event_date: 2020-09-11
event_time: 2020-09-11 10:08:17
event_time_microseconds: 2020-09-11 10:08:17.063321
query_start_time: 2020-09-11 10:08:17
query_start_time_microseconds: 2020-09-11 10:08:17.063321
query_duration_ms: 0
read_rows: 0
read_bytes: 0
written_rows: 0
written_bytes: 0
result_rows: 0
result_bytes: 0
memory_usage: 0
current_database: default
query: INSERT INTO test1 VALUES
exception_code: 0
exception:
stack_trace:
is_initial_query: 1
user: default
query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef
address: ::ffff:127.0.0.1
port: 33452
initial_user: default
initial_query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef
initial_address: ::ffff:127.0.0.1
initial_port: 33452
interface: 1
os_user: bharatnc
client_hostname: tower
client_name: ClickHouse
client_revision: 54437
client_version_major: 20
client_version_minor: 7
client_version_patch: 2
http_method: 0
http_user_agent:
quota_key:
revision: 54440
thread_ids: []
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
Settings: {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'}
```
**See Also**


@ -57,8 +57,7 @@ ClickHouse does not delete data from the table automati
- `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — the `UserAgent` HTTP header.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — the “quota key” from the [quotas](quotas.md) settings (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — counters that measure different metrics for this thread. Their description can be found in the table [system.events](#system_tables-events).

**Example**
@ -97,17 +96,16 @@ initial_port: 33452
interface: 1
os_user: bharatnc
client_hostname: tower
client_name: ClickHouse
client_revision: 54437
client_version_major: 20
client_version_minor: 7
client_version_patch: 2
http_method: 0
http_user_agent:
quota_key:
revision: 54440
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
```

**See Also**


@ -1,40 +0,0 @@
---
toc_priority: 150
---
## initializeAggregation {#initializeaggregation}
Initializes aggregation for the input rows. Intended for functions with the `State` suffix.
It helps you run tests or work with columns of the `AggregateFunction` and `AggregatingMergeTree` types.

**Syntax**
``` sql
initializeAggregation (aggregate_function, column_1, column_2)
```
**Arguments**

- `aggregate_function` — the name of the aggregate function whose state is to be created. [String](../../../sql-reference/data-types/string.md#string).
- `column_n` — a column that is passed to the aggregate function as an argument. [String](../../../sql-reference/data-types/string.md#string).

**Returned value(s)**

Returns the result of aggregating the input data. The return type is the same as that of the function passed as the first argument to `initializeAggregation`.

For example, the return type of functions with the `State` suffix is `AggregateFunction`.

**Example**

Query:
```sql
SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM system.numbers LIMIT 10000);
```
Result:

┌─uniqMerge(state)─┐
│ 3 │
└──────────────────┘


@ -5,11 +5,9 @@ toc_title: Array(T)
# Array(T) {#data-type-array}
An array of `T`-type items. `T` can be any type, including an array. Thus, multidimensional arrays are supported.

## Creating an Array {#creating-an-array}

An array can be created with a function:
@ -45,7 +43,7 @@ SELECT [1, 2] AS x, toTypeName(x)
└───────┴────────────────────┘
```
## Working with Data Types {#working-with-data-types}

The maximum size of an array is limited to one million elements.
@ -76,3 +74,26 @@ Received exception from server (version 1.1.54388):
Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not.
```
## Array Size {#array-size}

It is possible to find the size of an array by using the `size0` subcolumn without reading the whole column. For multidimensional arrays, you can use `sizeN-1`, where `N` is the required dimension.

**Example**

Query:
```sql
CREATE TABLE t_arr (`arr` Array(Array(Array(UInt32)))) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO t_arr VALUES ([[[12, 13, 0, 1],[12]]]);
SELECT arr.size0, arr.size1, arr.size2 FROM t_arr;
```
Result:
``` text
┌─arr.size0─┬─arr.size1─┬─arr.size2─┐
│ 1 │ [2] │ [[4,1]] │
└───────────┴───────────┴───────────┘
```


@ -29,7 +29,7 @@ CREATE TABLE test.visits
This example declares the `Goals` nested data structure, which contains data about conversions (goals reached). Each row in the visits table can correspond to zero or any number of conversions.

If the [flatten_nested](../../../operations/settings/settings.md#flatten-nested) setting is set to `0` (which is not the default), arbitrary levels of nesting are supported, as in the sketch after this paragraph.

In most cases, when working with a nested data structure, its individual columns are specified. To do this, the column names are separated by a dot. These columns make up an array of matching types. All the column arrays of a single nested data structure have the same length.
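As a brief sketch, with [flatten_nested](../../../operations/settings/settings.md#flatten-nested) set to `0` a deeper structure can be declared (the table here is hypothetical):

``` sql
SET flatten_nested = 0;

-- Two levels of nesting are accepted only when flatten_nested = 0.
CREATE TABLE nested_demo
(
    n Nested(a UInt32, b Nested(c UInt32, d String))
) ENGINE = Memory;
```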


@ -13,7 +13,7 @@ toc_title: Nullable
`NULL` is the default value for the `Nullable` type, unless specified otherwise in the ClickHouse server configuration.

## Storage Features {#storage-features}

To store `Nullable` type values in a table column, ClickHouse uses:
@ -27,7 +27,34 @@ toc_title: Nullable
!!! info "Info"
    Using `Nullable` almost always negatively affects performance; keep this in mind when designing your databases.

## Finding NULL {#finding-null}

It is possible to find `NULL` values in a column by using the `null` subcolumn without reading the whole column. It returns `1` if the corresponding value is `NULL` and `0` otherwise.

**Example**

Query:
``` sql
CREATE TABLE nullable (`n` Nullable(UInt32)) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO nullable VALUES (1) (NULL) (2) (NULL);
SELECT n.null FROM nullable;
```
Result:
``` text
┌─n.null─┐
│ 0 │
│ 1 │
│ 0 │
│ 1 │
└────────┘
```
## Usage Example {#usage-example}
``` sql
CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE TinyLog
@ -47,4 +74,3 @@ SELECT x + y from t_null
│ 5 │
└────────────┘
```


@ -47,3 +47,30 @@ SELECT tuple(1,NULL) AS x, toTypeName(x)
└──────────┴─────────────────────────────────┘
```
## Addressing Tuple Elements {#addressing-tuple-elements}

Tuple elements can be addressed by index or by name:
``` sql
CREATE TABLE named_tuples (`a` Tuple(s String, i Int64)) ENGINE = Memory;
INSERT INTO named_tuples VALUES (('y', 10)), (('x',-10));
SELECT a.s FROM named_tuples;
SELECT a.2 FROM named_tuples;
```
Result:
``` text
┌─a.s─┐
│ y │
│ x │
└─────┘
┌─tupleElement(a, 2)─┐
│ 10 │
│ -10 │
└────────────────────┘
```


@ -486,6 +486,7 @@ LIFETIME(MIN 300 MAX 360)
<table>table_name</table>
<where>id=10</where>
<invalidate_query>SQL_QUERY</invalidate_query>
<fail_on_connection_loss>true</fail_on_connection_loss>
</mysql>
</source>
```
@ -503,6 +504,7 @@ SOURCE(MYSQL(
table 'table_name'
where 'id=10'
invalidate_query 'SQL_QUERY'
fail_on_connection_loss 'true'
))
```
@ -527,6 +529,8 @@ SOURCE(MYSQL(
- `invalidate_query` — a query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](external-dicts-dict-lifetime.md).
- `fail_on_connection_loss` — a configuration parameter that controls the server's behavior on connection loss. If `true`, an exception is thrown immediately when the connection between the client and the server is lost. If `false`, the server retries the query three times before throwing an exception. Note that retrying leads to longer query execution times. Default value: `false`.

MySQL can be connected to on a local host via sockets; to do this, set `host` and `socket`.

Example of settings:
@ -542,6 +546,7 @@ MySQL can be connected to on a local host via
<table>table_name</table>
<where>id=10</where>
<invalidate_query>SQL_QUERY</invalidate_query>
<fail_on_connection_loss>true</fail_on_connection_loss>
</mysql>
</source>
```
@ -558,6 +563,7 @@ SOURCE(MYSQL(
table 'table_name'
where 'id=10'
invalidate_query 'SQL_QUERY'
fail_on_connection_loss 'true'
))
```


@ -39,10 +39,49 @@ toc_title: "Arrays"
Accepts an empty array and returns a one-element array that is equal to the default value.

## range(end), range(\[start, \] end \[, step\]) {#range}

Returns an array of numbers from `start` to `end - 1` by `step`.
**Syntax**
``` sql
range([start, ] end [, step])
```
**Arguments**

- `start` — the start of the range. Required if `step` is specified. Default value: `0`. Type: [UInt](../data-types/int-uint.md)
- `end` — the end of the range. Required argument. Must be greater than `start`. Type: [UInt](../data-types/int-uint.md)
- `step` — the increment between elements. Optional argument. Default value: `1`. Type: [UInt](../data-types/int-uint.md)

**Returned values**

- an array of `UInt` numbers from `start` to `end - 1` by `step`

**Implementation specifics**

- Negative argument values are not supported: `start`, `end` and `step` are of the `UInt` type.
- An exception is thrown if a query results in arrays with a total length of more than 100,000,000 elements.

**Examples**

Query:
``` sql
SELECT range(5), range(1, 5), range(1, 5, 2);
```
Result:
```txt
┌─range(5)────┬─range(1, 5)─┬─range(1, 5, 2)─┐
│ [0,1,2,3,4] │ [1,2,3,4] │ [1,3] │
└─────────────┴─────────────┴────────────────┘
```
## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1}
@ -1576,4 +1615,4 @@ SELECT arrayProduct([toDecimal64(1,8), toDecimal64(2,8), toDecimal64(3,8)]) as r
┌─res─┬─toTypeName(arrayProduct(array(toDecimal64(1, 8), toDecimal64(2, 8), toDecimal64(3, 8))))─┐
│ 6 │ Float64 │
└─────┴──────────────────────────────────────────────────────────────────────────────────────────┘
```


@ -13,7 +13,7 @@ toc_title: "Other Functions"
Returns a named value from the [macros](../../operations/server-configuration-parameters/settings.md#macros) section of the server configuration.

**Syntax**

```sql
getMacro(name)
@ -854,8 +854,8 @@ WHERE diff != 1
## runningConcurrency {#runningconcurrency}

Calculates the number of concurrent events.
Each event has a start time and an end time. The start time is included in the event, while the end time is excluded. Columns with a start time and an end time must be of the same data type.
The function calculates the number of events running simultaneously at the start time of each event in the sample.

!!! warning "Warning"
    Events must be ordered by the start time in ascending order. If this requirement is violated, the function raises an exception.
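As a sketch (the `events` table is hypothetical; its rows are already sorted by start time):

``` sql
CREATE TABLE events (start Date, end Date) ENGINE = Memory;
INSERT INTO events VALUES ('2021-03-03', '2021-03-08'), ('2021-03-06', '2021-03-12'), ('2021-03-11', '2021-03-20');

-- For each event, count how many events are running at its start time.
SELECT start, runningConcurrency(start, end) FROM events;
```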
@ -1371,11 +1371,84 @@ SELECT formatReadableSize(filesystemCapacity()) AS "Capacity", toTypeName(filesy
└───────────┴────────┘
```
## initializeAggregation {#initializeaggregation}

Calculates the result of an aggregate function for each row. Intended to initialize aggregate functions with the [-State](../../sql-reference/aggregate-functions/combinators.md#state) combinator. Can be useful for creating aggregate function states so you can insert them into columns of the [AggregateFunction](../../sql-reference/data-types/aggregatefunction.md#data-type-aggregatefunction) type or use them as default values.
**Syntax**
``` sql
initializeAggregation (aggregate_function, arg1, arg2, ..., argN)
```
**Arguments**

- `aggregate_function` — the name of the aggregate function whose state is to be created. [String](../../sql-reference/data-types/string.md#string).
- `arg` — the arguments that are passed to the aggregate function.

**Returned value(s)**

- In each row, the result of the aggregate function applied to the arguments from that row.

The return type is the same as that of the function passed as the first argument.

**Example**

Query:
```sql
SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM numbers(10000));
```
Result:
```text
┌─uniqMerge(state)─┐
│ 3 │
└──────────────────┘
```
Query:
```sql
SELECT finalizeAggregation(state), toTypeName(state) FROM (SELECT initializeAggregation('sumState', number % 3) AS state FROM numbers(5));
```
Result:
```text
┌─finalizeAggregation(state)─┬─toTypeName(state)─────────────┐
│ 0 │ AggregateFunction(sum, UInt8) │
│ 1 │ AggregateFunction(sum, UInt8) │
│ 2 │ AggregateFunction(sum, UInt8) │
│ 0 │ AggregateFunction(sum, UInt8) │
│ 1 │ AggregateFunction(sum, UInt8) │
└────────────────────────────┴───────────────────────────────┘
```
An example with the `AggregatingMergeTree` table engine and a column of the `AggregateFunction` type:
```sql
CREATE TABLE metrics
(
key UInt64,
value AggregateFunction(sum, UInt64) DEFAULT initializeAggregation('sumState', toUInt64(0))
)
ENGINE = AggregatingMergeTree
ORDER BY key
```
```sql
INSERT INTO metrics VALUES (0, initializeAggregation('sumState', toUInt64(42)))
```
**See Also**

- [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce)
## finalizeAggregation {#function-finalizeaggregation}

Takes a state of an aggregate function. Returns the result of aggregation (or the finalized state when using the [-State](../../sql-reference/aggregate-functions/combinators.md#state) combinator).

**Syntax**

``` sql
finalizeAggregation(state)
@ -1421,7 +1494,7 @@ SELECT finalizeAggregation(( SELECT sumState(number) FROM numbers(10)));
└──────────────────────────────────┘
```
Note that `NULL` values are ignored.

Query:
@ -1470,7 +1543,7 @@ FROM numbers(10);
**See Also**

- [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce)
- [initializeAggregation](#initializeaggregation)
## runningAccumulate {#runningaccumulate}
@ -1537,13 +1610,13 @@ SELECT k, runningAccumulate(sum_k) AS res FROM (SELECT number as k, sumState(k)
Query:
```sql
SELECT
    grouping,
    item,
    runningAccumulate(state, grouping) AS res
FROM
(
    SELECT
        toInt8(number / 4) AS grouping,
        number AS item,
        sumState(number) AS state
@ -1732,7 +1805,7 @@ SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers
randomString(length)
```

**Arguments**

- `length` — the string length. Positive integer.
@ -1831,13 +1904,13 @@ randomStringUTF8(length)
Query:

```sql
SELECT randomStringUTF8(13)
```

Result:

```text
┌─randomStringUTF8(13)─┐
│ 𘤗𙉝д兠庇󡅴󱱎󦐪􂕌𔊹𓰛 │
└──────────────────────┘ └──────────────────────┘
@ -1848,13 +1921,13 @@ SELECT randomStringUTF8(13)
Returns the current value of a [custom setting](../../operations/settings/index.md#custom_settings).

**Syntax**

```sql
getSetting('custom_setting')
```

**Parameter**

- `custom_setting` — the setting name. [String](../../sql-reference/data-types/string.md).
@ -1866,7 +1939,7 @@ getSetting('custom_setting')
```sql
SET custom_a = 123;
SELECT getSetting('custom_a');
```
**Result**
@ -1875,7 +1948,7 @@ SELECT getSetting('custom_a');
123
```

**See Also**

- [Custom settings](../../operations/settings/index.md#custom_settings)
@ -1889,10 +1962,10 @@ SELECT getSetting('custom_a');
isDecimalOverflow(d, [p])
```

**Arguments**

- `d` — the number. [Decimal](../../sql-reference/data-types/decimal.md).
- `p` — the precision. Optional parameter. If omitted, the initial precision of the first argument is used. This parameter can be helpful when migrating data to another database or file. [UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges).

**Returned value**
@ -1926,7 +1999,7 @@ SELECT isDecimalOverflow(toDecimal32(1000000000, 0), 9),
countDigits(x)
```

**Arguments**

- `x` — an [integer](../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64) or [decimal](../../sql-reference/data-types/decimal.md) number.


@ -5,14 +5,14 @@ toc_title: SYSTEM
# SYSTEM Queries {#query-language-system}

- [RELOAD EMBEDDED DICTIONARIES](#query_language-system-reload-emdedded-dictionaries)
- [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries)
- [RELOAD DICTIONARY](#query_language-system-reload-dictionary)
- [RELOAD MODELS](#query_language-system-reload-models)
- [RELOAD MODEL](#query_language-system-reload-model)
- [DROP DNS CACHE](#query_language-system-drop-dns-cache)
- [DROP MARK CACHE](#query_language-system-drop-mark-cache)
- [DROP UNCOMPRESSED CACHE](#query_language-system-drop-uncompressed-cache)
- [DROP COMPILED EXPRESSION CACHE](#query_language-system-drop-compiled-expression-cache)
- [DROP REPLICA](#query_language-system-drop-replica)
- [FLUSH LOGS](#query_language-system-flush_logs)
@ -24,10 +24,10 @@ toc_title: SYSTEM
- [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends)
- [STOP MERGES](#query_language-system-stop-merges)
- [START MERGES](#query_language-system-start-merges)
- [STOP TTL MERGES](#query_language-stop-ttl-merges)
- [START TTL MERGES](#query_language-start-ttl-merges)
- [STOP MOVES](#query_language-stop-moves)
- [START MOVES](#query_language-start-moves)
- [STOP FETCHES](#query_language-system-stop-fetches)
- [START FETCHES](#query_language-system-start-fetches)
- [STOP REPLICATED SENDS](#query_language-system-start-replicated-sends)
@ -36,13 +36,13 @@ toc_title: SYSTEM
- [START REPLICATION QUEUES](#query_language-system-start-replication-queues)
- [SYNC REPLICA](#query_language-system-sync-replica)
- [RESTART REPLICA](#query_language-system-restart-replica)
- [RESTART REPLICAS](#query_language-system-restart-replicas)
## RELOAD EMBEDDED DICTIONARIES {#query_language-system-reload-emdedded-dictionaries}

Reloads all [internal dictionaries](../dictionaries/internal-dicts.md).
By default, internal dictionaries are disabled.
Always returns `Ok.` regardless of the result of the internal dictionary update.
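For example, to trigger the reload manually:

``` sql
SYSTEM RELOAD EMBEDDED DICTIONARIES;
```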
## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries}

Reloads all dictionaries that have already been loaded successfully.
@ -115,7 +115,7 @@ SYSTEM DROP REPLICA 'replica_name' FROM ZKPATH '/path/to/table/in/zk';
## DROP COMPILED EXPRESSION CACHE {#query_language-system-drop-compiled-expression-cache}

Resets the compiled expression cache. Used in development of ClickHouse and performance tests.
Compiled expressions are used when the query/user/profile-level setting [compile-expressions](../../operations/settings/settings.md#compile-expressions) is enabled.
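For example, after changing the compilation settings you can reset the cache manually:

``` sql
SYSTEM DROP COMPILED EXPRESSION CACHE;
```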
## FLUSH LOGS {#query_language-system-flush_logs}
@ -194,7 +194,7 @@ SYSTEM START MERGES [ON VOLUME <volume_name> | [db.]merge_tree_family_table_name
SYSTEM STOP TTL MERGES [[db.]merge_tree_family_table_name]
```

### START TTL MERGES {#query_language-start-ttl-merges}

Starts background delete-old-data processes based on [TTL expressions](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) for tables in the MergeTree family.
Returns `Ok.` even if the table does not exist or has an engine other than MergeTree. Returns an error if the specified database does not exist:
@ -203,7 +203,7 @@ SYSTEM STOP TTL MERGES [[db.]merge_tree_family_table_name]
SYSTEM START TTL MERGES [[db.]merge_tree_family_table_name]
```

### STOP MOVES {#query_language-stop-moves}

Stops background move-data processes based on [table TTL expressions with TO VOLUME or TO DISK clauses](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) for tables in the MergeTree family.
Returns `Ok.` even if the table does not exist or has an engine other than MergeTree. Returns an error if the specified database does not exist:
@ -212,7 +212,7 @@ SYSTEM START TTL MERGES [[db.]merge_tree_family_table_name]
SYSTEM STOP MOVES [[db.]merge_tree_family_table_name]
```

### START MOVES {#query_language-start-moves}

Starts background processes that move data according to [table TTL expressions with TO VOLUME or TO DISK](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) for tables in the MergeTree family:

Returns `Ok.` even if the table does not exist or its engine is not MergeTree. Returns an error if a non-existent database is specified:
@ -261,7 +261,7 @@ SYSTEM START REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name]
### STOP REPLICATION QUEUES {#query_language-system-stop-replication-queues}

Stops background processing of tasks from the replication queue stored in ZooKeeper for tables in the `ReplicatedMergeTree` family. Possible task types are merges, fetches, mutations, and DDL queries with ON CLUSTER:

``` sql
SYSTEM STOP REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]
@ -269,7 +269,7 @@ SYSTEM STOP REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]
### START REPLICATION QUEUES {#query_language-system-start-replication-queues}

Starts background processing of tasks from the replication queue stored in ZooKeeper for tables in the `ReplicatedMergeTree` family. Possible task types are merges, fetches, mutations, and DDL queries with ON CLUSTER:

``` sql
SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]
@ -277,7 +277,7 @@ SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]
### SYNC REPLICA {#query_language-system-sync-replica}

Waits until a table in the `ReplicatedMergeTree` family is synchronized with the other replicas in the cluster; if synchronization is currently disabled for the table, it runs until `receive_timeout` is reached:

``` sql
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name
@ -38,6 +38,18 @@ mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_
Any remaining conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes.

Supports multiple replicas, which must be listed with `|`. For example:

```sql
SELECT name FROM mysql(`mysql{1|2|3}:3306`, 'mysql_database', 'mysql_table', 'user', 'password');
```

or

```sql
SELECT name FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'mysql_database', 'mysql_table', 'user', 'password');
```

**Returned value**

A table object with the same columns as the source MySQL table.
@ -43,6 +43,18 @@ PostgreSQL arrays are converted into ClickHo
!!! info "Note"
    Be careful: in PostgreSQL, array columns created as `type_name[]` are multidimensional and can contain different numbers of dimensions in different rows of the same table. In ClickHouse, only multidimensional arrays with the same number of dimensions in all rows of a table are allowed.

Supports multiple replicas, which must be listed with `|`. For example:

```sql
SELECT name FROM postgresql(`postgres{1|2|3}:5432`, 'postgres_database', 'postgres_table', 'user', 'password');
```

or

```sql
SELECT name FROM postgresql(`postgres1:5431|postgres2:5432`, 'postgres_database', 'postgres_table', 'user', 'password');
```

When a PostgreSQL dictionary source is used, replica priority is supported. The bigger the replica number, the lower its priority. Replica `0` has the highest priority.
@ -47,6 +47,13 @@ When all prerequisites are installed, running `build.py` without args (there are
The easiest way to see the result is to use the `--livereload=8888` argument of build.py. Alternatively, you can manually launch an HTTP server to serve the docs, for example by running `cd ClickHouse/docs/build && python3 -m http.server 8888`. Then go to http://localhost:8888 in a browser. Feel free to use any other port instead of 8888.
## How to change code highlighting? {#how-to-change-code-hl}
ClickHouse does not use the mkdocs `highlightjs` feature. It uses modified Pygments styles instead.
If you want to change code highlighting, edit the `website/css/highlight.css` file.
Currently, an [eighties](https://github.com/idleberg/base16-pygments/blob/master/css/base16-eighties.dark.css) theme is used.
## How to subscribe to documentation changes? {#how-to-subscribe-on-documentation-changes}

At the moment there's no easy way to do just that, but you can consider:
@ -87,6 +87,7 @@ def build_for_lang(lang, args):
    website_url = 'https://clickhouse.tech'
    site_name = site_names.get(lang, site_names['en']) % ''
    site_name = site_name.replace('  ', ' ')

    raw_config = dict(
        site_name=site_name,
        site_url=f'{website_url}/docs/{lang}/',
@ -1,4 +1,4 @@
# Set {#set}

A data set that is always in RAM. It is intended for use on the right side of the IN operator (see the section «IN operators»).
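
A minimal sketch of the intended usage; the `userids` and `events` names below are invented for the example:

``` sql
CREATE TABLE userids (uid UInt64) ENGINE = Set;
INSERT INTO userids VALUES (1), (2), (3);

-- A Set table can only appear on the right side of IN;
-- 'events' is a hypothetical table with a 'uid' column.
SELECT count() FROM events WHERE uid IN userids;
```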
@ -817,21 +817,22 @@ load_balancing = first_or_random
For consistency (to get different parts of the same data split), this option works only when a sampling key is set.

Replica lag is not controlled.
## compile_expressions {#compile-expressions}

Enables or disables compiling frequently used simple functions and operators to native code with LLVM at runtime.

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: `1`.

## min_count_to_compile_expression {#min-count-to-compile-expression}

The minimum number of executions of the same expression before it is compiled.

Default value: `3`.
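
A hedged sketch showing both settings together; the `numbers` query is only a stand-in workload:

``` sql
SET compile_expressions = 1;
SET min_count_to_compile_expression = 3;

-- Once the same expression has been executed min_count_to_compile_expression
-- times, it becomes a candidate for runtime (JIT) compilation.
SELECT count() FROM numbers(1000000) WHERE number % 3 = 0;
```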
## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers}
@ -76,14 +76,14 @@ ClickHouse does not delete data from the table automatically. For details, see [introduction](
- `client_version_patch` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Patch component of the [clickhouse-client](../../interfaces/cli.md) or another TCP client version.
- `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
    - 0 — The query was launched from the TCP interface.
    - 1 — `GET`
    - 2 — `POST`
- `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. Their descriptions can be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events).
- `Settings` ([Map(String, String)](../../sql-reference/data-types/array.md)) — Settings that were changed when the client ran the query. To enable logging of setting changes, set the `log_query_settings` parameter to 1.
- `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — IDs of the threads that participate in query execution.
- `Settings.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging of setting changes, set the `log_query_settings` parameter to 1.
- `Settings.Values` ([Array(String)](../../sql-reference/data-types/array.md)) — Values of the settings listed in the `Settings.Names` column.

**Example**
@ -133,10 +133,8 @@ http_user_agent:
quota_key:
revision: 54434
thread_ids: []
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
Settings: {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'}
```
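
Since `ProfileEvents` and `Settings` are now maps, individual counters can be read by key; a small sketch (the keys shown are just examples):

``` sql
SELECT
    query_id,
    ProfileEvents['SelectQuery'] AS select_queries,
    Settings['load_balancing'] AS load_balancing
FROM system.query_log
WHERE type = 'QueryFinish'
LIMIT 5;
```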
**See Also**
@ -61,8 +61,7 @@ ClickHouse does not delete data from the table automatically. See [Introduction](
- `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics for this thread. Their descriptions can be found in the table [system.events](#system_tables-events).

**Example**
@ -108,8 +107,7 @@ http_method: 0
http_user_agent:
quota_key:
revision: 54434
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
...
```
@ -95,7 +95,7 @@ SYSTEM DROP REPLICA 'replica_name' FROM ZKPATH '/path/to/table/in/zk';
## DROP COMPILED EXPRESSION CACHE {#query_language-system-drop-compiled-expression-cache}

Resets the compiled expression cache. Used in ClickHouse development and performance testing.
The compiled expression cache is enabled when the query/user/profile-level setting [compile-expressions](../../operations/settings/settings.md#compile-expressions) is enabled.

## FLUSH LOGS {#query_language-system-flush_logs}
@ -209,7 +209,7 @@ SYSTEM STOP MOVES [[db.]merge_tree_family_table_name]
### STOP FETCHES {#query_language-system-stop-fetches}

Stops background fetches of inserted data blocks for tables in the `ReplicatedMergeTree` family.
Returns `Ok.` regardless of the table engine and even if the table or database does not exist.

``` sql
@ -218,7 +218,7 @@ SYSTEM STOP FETCHES [[db.]replicated_merge_tree_family_table_name]
### START FETCHES {#query_language-system-start-fetches}

Starts background fetches of inserted data blocks for tables in the `ReplicatedMergeTree` family.
Returns `Ok.` regardless of the table engine and even if the table or database does not exist.

``` sql
@ -227,7 +227,7 @@ SYSTEM START FETCHES [[db.]replicated_merge_tree_family_table_name]
### STOP REPLICATED SENDS {#query_language-system-start-replicated-sends}

Stops background sending of newly inserted data blocks from tables in the `ReplicatedMergeTree` family to the other replicas in the cluster.

``` sql
SYSTEM STOP REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name]
@ -235,7 +235,7 @@ SYSTEM STOP REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name]
### START REPLICATED SENDS {#query_language-system-start-replicated-sends}

Starts background sending of newly inserted data blocks from tables in the `ReplicatedMergeTree` family to the other replicas in the cluster.

``` sql
SYSTEM START REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name]
@ -0,0 +1,120 @@
---
toc_priority: 42
toc_title: postgresql
---
# postgresql {#postgresql}
Allows `SELECT` and `INSERT` queries on data stored on a remote PostgreSQL server.

**Syntax**
``` sql
postgresql('host:port', 'database', 'table', 'user', 'password'[, `schema`])
```
**Arguments**

- `host:port` — PostgreSQL server address.
- `database` — Remote database name.
- `table` — Remote table name.
- `user` — PostgreSQL user.
- `password` — User password.
- `schema` — Non-default table schema. Optional.

**Returned value**

A table object with the same columns as the source PostgreSQL table.

!!! info "Note"
    In `INSERT` queries, to distinguish the table function `postgresql(...)` from a table name with a list of column names, you must use the keywords `FUNCTION` or `TABLE FUNCTION`. See the examples below.

## Implementation Details {#implementation-details}
`SELECT` queries run on the PostgreSQL side as `COPY (SELECT ...) TO STDOUT` inside a read-only PostgreSQL transaction, with a commit after each `SELECT` query.

Simple `WHERE` clauses such as `=`, `!=`, `>`, `>=`, `<`, `<=` and `IN` are executed on the PostgreSQL server.

All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to PostgreSQL finishes.

`INSERT` queries run on the PostgreSQL side as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside a PostgreSQL transaction, with an auto-commit after each `INSERT` statement.

PostgreSQL array types are converted into ClickHouse arrays.

!!! info "Note"
    Be careful: in PostgreSQL, an array column such as Integer[] may contain arrays with different numbers of dimensions in different rows, while in ClickHouse only multidimensional arrays with the same number of dimensions in all rows are allowed.

Replica priority for the PostgreSQL dictionary source is supported. The bigger the number in the map, the lower the priority. `0` has the highest priority.

**Examples**

Table in PostgreSQL:
``` text
postgres=# CREATE TABLE "public"."test" (
"int_id" SERIAL,
"int_nullable" INT NULL DEFAULT NULL,
"float" FLOAT NOT NULL,
"str" VARCHAR(100) NOT NULL DEFAULT '',
"float_nullable" FLOAT NULL DEFAULT NULL,
PRIMARY KEY (int_id));
CREATE TABLE
postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2);
INSERT 0 1
postgresql> SELECT * FROM test;
int_id | int_nullable | float | str | float_nullable
--------+--------------+-------+------+----------------
1 | | 2 | test |
(1 row)
```
从 ClickHouse 检索数据:
```sql
SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'password') WHERE str IN ('test');
```
``` text
┌─int_id─┬─int_nullable─┬─float─┬─str──┬─float_nullable─┐
│ 1 │ ᴺᵁᴸᴸ │ 2 │ test │ ᴺᵁᴸᴸ │
└────────┴──────────────┴───────┴──────┴────────────────┘
```
Inserting data:
```sql
INSERT INTO TABLE FUNCTION postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'password') (int_id, float) VALUES (2, 3);
SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'password');
```
``` text
┌─int_id─┬─int_nullable─┬─float─┬─str──┬─float_nullable─┐
│ 1 │ ᴺᵁᴸᴸ │ 2 │ test │ ᴺᵁᴸᴸ │
│ 2 │ ᴺᵁᴸᴸ │ 3 │ │ ᴺᵁᴸᴸ │
└────────┴──────────────┴───────┴──────┴────────────────┘
```
Using a non-default schema:
```text
postgres=# CREATE SCHEMA "nice.schema";
postgres=# CREATE TABLE "nice.schema"."nice.table" (a integer);
postgres=# INSERT INTO "nice.schema"."nice.table" SELECT i FROM generate_series(0, 99) as t(i)
```
```sql
CREATE TABLE pg_table_schema_with_dots (a UInt32)
ENGINE PostgreSQL('localhost:5432', 'clickhouse', 'nice.table', 'postgresql_user', 'password', 'nice.schema');
```
**See Also**

- [The PostgreSQL table engine](../../engines/table-engines/integrations/postgresql.md)
- [Using PostgreSQL as a dictionary source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)

[Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/postgresql/) <!--hide-->
@ -577,7 +577,18 @@ private:
}

if (!history_file.empty() && !fs::exists(history_file))
{
/// Avoid TOCTOU issue.
try
{
FS::createFile(history_file);
}
catch (const ErrnoException & e)
{
if (e.getErrno() != EEXIST)
throw;
}
}
LineReader::Patterns query_extenders = {"\\"};
LineReader::Patterns query_delimiters = {";", "\\G"};
@ -1369,9 +1380,19 @@ private:
have_error = true;
}
const auto * exception = server_exception ? server_exception.get() : client_exception.get();
// Sometimes you may get TOO_DEEP_RECURSION from the server,
// and TOO_DEEP_RECURSION should not fail the fuzzer check.
if (have_error && exception->code() == ErrorCodes::TOO_DEEP_RECURSION)
{
have_error = false;
server_exception.reset();
client_exception.reset();
return true;
}
if (have_error)
{
fmt::print(stderr, "Error on processing query '{}': {}\n", ast_to_process->formatForErrorMessage(), exception->message());

// Try to reconnect after errors, for two reasons:
@ -2435,6 +2456,8 @@ public:
{
/// param_name value
++arg_num;
if (arg_num >= argc)
throw Exception("Parameter requires value", ErrorCodes::BAD_ARGUMENTS);
arg = argv[arg_num];
query_parameters.emplace(String(param_continuation), String(arg));
}
@ -115,6 +115,8 @@ void ODBCBlockInputStream::insertValue(
assert_cast<ColumnFloat64 &>(column).insertValue(row.get<double>(idx));
break;
case ValueType::vtFixedString: [[fallthrough]];
case ValueType::vtEnum8:
case ValueType::vtEnum16:
case ValueType::vtString:
assert_cast<ColumnString &>(column).insert(row.get<std::string>(idx));
break;
@ -324,6 +324,13 @@ Poco::Net::SocketAddress Server::socketBindListen(Poco::Net::ServerSocket & sock
socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config().getBool("listen_reuse_port", false));
#endif
/// If caller requests any available port from the OS, discover it after binding.
if (port == 0)
{
address = socket.address();
LOG_DEBUG(&logger(), "Requested any available port (port == 0), actual port is {:d}", address.port());
}
socket.listen(/* backlog = */ config().getUInt("listen_backlog", 64));

return address;
@ -390,7 +397,7 @@ void Server::initialize(Poco::Util::Application & self)
BaseDaemon::initialize(self);
logger().information("starting up");

LOG_INFO(&logger(), "OS name: {}, version: {}, architecture: {}",
Poco::Environment::osName(),
Poco::Environment::osVersion(),
Poco::Environment::osArchitecture());
@ -30,16 +30,16 @@ static IAggregateFunction * createWithNumericOrTimeType(const IDataType & argume
template <typename Trait, typename ... TArgs>
inline AggregateFunctionPtr createAggregateFunctionGroupArrayImpl(const DataTypePtr & argument_type, const Array & parameters, TArgs ... args)
{
if (auto res = createWithNumericOrTimeType<GroupArrayNumericImpl, Trait>(*argument_type, argument_type, parameters, std::forward<TArgs>(args)...))
return AggregateFunctionPtr(res);

WhichDataType which(argument_type);
if (which.idx == TypeIndex::String)
return std::make_shared<GroupArrayGeneralImpl<GroupArrayNodeString, Trait>>(argument_type, parameters, std::forward<TArgs>(args)...);

return std::make_shared<GroupArrayGeneralImpl<GroupArrayNodeGeneral, Trait>>(argument_type, parameters, std::forward<TArgs>(args)...);

// Linked list implementation doesn't show noticeable performance improvement
// if (which.idx == TypeIndex::String)
@ -79,9 +79,9 @@ AggregateFunctionPtr createAggregateFunctionGroupArray(
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

if (!limit_size)
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait<false, Sampler::NONE>>(argument_types[0], parameters);
else
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait<true, Sampler::NONE>>(argument_types[0], parameters, max_elems);
}

AggregateFunctionPtr createAggregateFunctionGroupArraySample(
@ -114,7 +114,7 @@ AggregateFunctionPtr createAggregateFunctionGroupArraySample(
else
seed = thread_local_rng();

return createAggregateFunctionGroupArrayImpl<GroupArrayTrait<true, Sampler::RNG>>(argument_types[0], parameters, max_elems, seed);
}
}
@ -119,9 +119,9 @@ class GroupArrayNumericImpl final
public:
explicit GroupArrayNumericImpl(
const DataTypePtr & data_type_, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max(), UInt64 seed_ = 123456)
: IAggregateFunctionDataHelper<GroupArrayNumericData<T, Trait::sampler != Sampler::NONE>, GroupArrayNumericImpl<T, Trait>>(
{data_type_}, parameters_)
, max_elems(max_elems_)
, seed(seed_)
{
@ -421,9 +421,9 @@ class GroupArrayGeneralImpl final
UInt64 seed;

public:
GroupArrayGeneralImpl(const DataTypePtr & data_type_, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max(), UInt64 seed_ = 123456)
: IAggregateFunctionDataHelper<GroupArrayGeneralData<Node, Trait::sampler != Sampler::NONE>, GroupArrayGeneralImpl<Node, Trait>>(
{data_type_}, parameters_)
, data_type(this->argument_types[0])
, max_elems(max_elems_)
, seed(seed_)
@ -696,8 +696,8 @@ class GroupArrayGeneralListImpl final
UInt64 max_elems;

public:
GroupArrayGeneralListImpl(const DataTypePtr & data_type_, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
: IAggregateFunctionDataHelper<GroupArrayGeneralListData<Node>, GroupArrayGeneralListImpl<Node, Trait>>({data_type_}, parameters_)
, data_type(this->argument_types[0])
, max_elems(max_elems_)
{
@ -1,6 +1,7 @@
#pragma once

#include <algorithm>
#include <memory>
#include <boost/noncopyable.hpp>

#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
@ -43,7 +44,7 @@ private:
void toLarge()
{
rb = std::make_unique<RoaringBitmap>();
for (const auto & x : small)
rb->add(static_cast<Value>(x.getValue()));
small.clear();
@ -113,7 +114,7 @@ public:
readVarUInt(size, in);
std::unique_ptr<char[]> buf(new char[size]);
in.readStrict(buf.get(), size);
rb = std::make_unique<RoaringBitmap>(RoaringBitmap::read(buf.get()));
}
}
@ -140,7 +141,7 @@ public:
*/
std::shared_ptr<RoaringBitmap> getNewRoaringBitmapFromSmall() const
{
std::shared_ptr<RoaringBitmap> ret = std::make_unique<RoaringBitmap>();
for (const auto & x : small)
ret->add(static_cast<Value>(x.getValue()));
return ret;
@ -1,6 +1,7 @@
#pragma once

#include <string>
#include <optional>
#include <vector>
#include <boost/noncopyable.hpp>
#include <unordered_map>
@ -87,18 +87,4 @@ FilterDescription::FilterDescription(const IColumn & column_)
ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER);
}

}
@ -32,7 +32,4 @@ struct FilterDescription
struct ColumnWithTypeAndName;

}
@ -555,6 +555,8 @@
M(585, CANNOT_PARSE_YAML) \
M(586, CANNOT_CREATE_FILE) \
M(587, CONCURRENT_ACCESS_NOT_SUPPORTED) \
M(588, DISTRIBUTED_BROKEN_BATCH_INFO) \
M(589, DISTRIBUTED_BROKEN_BATCH_FILES) \
\
M(998, POSTGRESQL_CONNECTION_FAILURE) \
M(999, KEEPER_EXCEPTION) \
@ -60,7 +60,8 @@ struct ThreadStack
void * getData() const { return data; }

private:
/// 16 KiB - not too big but enough to handle error.
static constexpr size_t size = std::max<size_t>(16 << 10, MINSIGSTKSZ);

void * data;
};
@ -44,7 +44,7 @@ size_t getStackSize(void ** out_address)
size = pthread_main_np() ? (8 * 1024 * 1024) : pthread_get_stacksize_np(thread);
// The stack address points to the start of the stack, not to the end, which is what pthread_get_stackaddr_np returns.
address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(pthread_get_stackaddr_np(thread)) - size);
#else
pthread_attr_t attr;
# if defined(__FreeBSD__) || defined(OS_SUNOS)
@ -59,7 +59,7 @@ static void validateChecksum(char * data, size_t size, const Checksum expected_c
"or bad RAM on host (look at dmesg or kern.log for enormous amount of EDAC errors, "
"ECC-related reports, Machine Check Exceptions, mcelog; note that ECC memory can fail "
"if the number of errors is huge) or bad CPU on host. If you read data from disk, "
"this can be caused by disk bit rot. This exception protects ClickHouse "
"from data corruption due to hardware failures.";

auto flip_bit = [](char * buf, size_t pos)
@ -23,9 +23,10 @@ using IndexToLogEntry = std::unordered_map<uint64_t, LogEntryPtr>;
enum class ChangelogVersion : uint8_t
{
V0 = 0,
V1 = 1, /// with 64 bit buffer header
};

static constexpr auto CURRENT_CHANGELOG_VERSION = ChangelogVersion::V1;

struct ChangelogRecordHeader
{
@ -204,7 +204,7 @@ SnapshotMetadataPtr KeeperStorageSnapshot::deserialize(KeeperStorage & storage,
uint8_t version;
readBinary(version, in);
SnapshotVersion current_version = static_cast<SnapshotVersion>(version);
if (current_version > CURRENT_SNAPSHOT_VERSION)
throw Exception(ErrorCodes::UNKNOWN_FORMAT_VERSION, "Unsupported snapshot version {}", version);

SnapshotMetadataPtr result = deserializeSnapshotMetadata(in);
@ -14,8 +14,11 @@ enum SnapshotVersion : uint8_t
{
V0 = 0,
V1 = 1, /// with ACL map
V2 = 2, /// with 64 bit buffer header
};

static constexpr auto CURRENT_SNAPSHOT_VERSION = SnapshotVersion::V2;

struct KeeperStorageSnapshot
{
public:
@ -30,7 +33,7 @@ public:
KeeperStorage * storage;

SnapshotVersion version = CURRENT_SNAPSHOT_VERSION;
SnapshotMetadataPtr snapshot_meta;
int64_t session_id;
size_t snapshot_container_size;
@ -69,9 +69,9 @@ void ExternalResultDescription::init(const Block & sample_block_)
else if (which.isUUID())
types.emplace_back(ValueType::vtUUID, is_nullable);
else if (which.isEnum8())
types.emplace_back(ValueType::vtEnum8, is_nullable);
else if (which.isEnum16())
types.emplace_back(ValueType::vtEnum16, is_nullable);
else if (which.isDateTime64())
types.emplace_back(ValueType::vtDateTime64, is_nullable);
else if (which.isDecimal32())
@ -22,6 +22,8 @@ struct ExternalResultDescription
vtInt64,
vtFloat32,
vtFloat64,
vtEnum8,
vtEnum16,
vtString,
vtDate,
vtDate32,
Some files were not shown because too many files have changed in this diff.