Merge branch 'master' into add_conversion_stream

alesapin 2020-04-07 16:35:40 +03:00
commit 33c2587213
91 changed files with 1205 additions and 392 deletions

View File

@ -228,7 +228,7 @@ else ()
set(NOT_UNBUNDLED 1)
endif ()
if (UNBUNDLED OR NOT (OS_LINUX OR OS_DARWIN) OR ARCH_32)
if (UNBUNDLED OR NOT (OS_LINUX OR OS_DARWIN))
# Using system libs can cause a lot of warnings in includes (on macro expansion).
option (WERROR "Enable -Werror compiler option" OFF)
else ()
@ -251,6 +251,8 @@ if (OS_LINUX)
include(cmake/linux/default_libs.cmake)
elseif (OS_DARWIN)
include(cmake/darwin/default_libs.cmake)
elseif (OS_FREEBSD)
include(cmake/freebsd/default_libs.cmake)
endif ()
######################################
@ -316,7 +318,6 @@ include (cmake/find/poco.cmake)
include (cmake/find/lz4.cmake)
include (cmake/find/xxhash.cmake)
include (cmake/find/sparsehash.cmake)
include (cmake/find/execinfo.cmake)
include (cmake/find/re2.cmake)
include (cmake/find/libgsasl.cmake)
include (cmake/find/rdkafka.cmake)

View File

@ -4,4 +4,6 @@
#if defined (OS_DARWIN)
# define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
#elif defined (OS_FREEBSD)
# define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC_FAST
#endif
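
The new OS_FREEBSD branch maps CLOCK_MONOTONIC_COARSE onto FreeBSD's CLOCK_MONOTONIC_FAST, so callers never need their own platform checks. A minimal sketch of such a call site (not part of the patch):

```cpp
#include <cstdio>
#include <ctime>

// With the aliases above, the same call compiles everywhere and always names the
// cheap, low-resolution monotonic clock: CLOCK_MONOTONIC_COARSE on Linux,
// CLOCK_MONOTONIC on macOS, CLOCK_MONOTONIC_FAST on FreeBSD.
int main()
{
    timespec ts{};
    if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0)
        std::printf("%lld.%09ld\n", static_cast<long long>(ts.tv_sec), ts.tv_nsec);
    return 0;
}
```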

View File

@ -11,7 +11,6 @@ if (CMAKE_LIBRARY_ARCHITECTURE MATCHES "i386")
set (ARCH_I386 1)
endif ()
if ((ARCH_ARM AND NOT ARCH_AARCH64) OR ARCH_I386)
set (ARCH_32 1)
message (FATAL_ERROR "32bit platforms are not supported")
endif ()

View File

@ -1,8 +0,0 @@
if (OS_FREEBSD)
find_library (EXECINFO_LIBRARY execinfo)
find_library (ELF_LIBRARY elf)
set (EXECINFO_LIBRARIES ${EXECINFO_LIBRARY} ${ELF_LIBRARY})
message (STATUS "Using execinfo: ${EXECINFO_LIBRARIES}")
else ()
set (EXECINFO_LIBRARIES "")
endif ()

View File

@ -1,6 +1,4 @@
if (NOT ARCH_32)
option (USE_INTERNAL_LIBGSASL_LIBRARY "Set to FALSE to use system libgsasl library instead of bundled" ${NOT_UNBUNDLED})
endif ()
option (USE_INTERNAL_LIBGSASL_LIBRARY "Set to FALSE to use system libgsasl library instead of bundled" ${NOT_UNBUNDLED})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libgsasl/src/gsasl.h")
if (USE_INTERNAL_LIBGSASL_LIBRARY)
@ -16,7 +14,7 @@ if (NOT USE_INTERNAL_LIBGSASL_LIBRARY)
endif ()
if (LIBGSASL_LIBRARY AND LIBGSASL_INCLUDE_DIR)
elseif (NOT MISSING_INTERNAL_LIBGSASL_LIBRARY AND NOT ARCH_32)
elseif (NOT MISSING_INTERNAL_LIBGSASL_LIBRARY)
set (LIBGSASL_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libgsasl/src ${ClickHouse_SOURCE_DIR}/contrib/libgsasl/linux_x86_64/include)
set (USE_INTERNAL_LIBGSASL_LIBRARY 1)
set (LIBGSASL_LIBRARY libgsasl)

View File

@ -1,5 +1,5 @@
# Freebsd: contrib/cppkafka/include/cppkafka/detail/endianness.h:53:23: error: 'betoh16' was not declared in this scope
if (NOT ARCH_ARM AND NOT ARCH_32 AND NOT OS_FREEBSD AND OPENSSL_FOUND)
if (NOT ARCH_ARM AND NOT OS_FREEBSD AND OPENSSL_FOUND)
option (ENABLE_RDKAFKA "Enable kafka" ${ENABLE_LIBRARIES})
endif ()

View File

@ -2,9 +2,7 @@ option(ENABLE_SSL "Enable ssl" ${ENABLE_LIBRARIES})
if(ENABLE_SSL)
if(NOT ARCH_32)
option(USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead of bundled" ${NOT_UNBUNDLED})
endif()
option(USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead of bundled" ${NOT_UNBUNDLED})
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/openssl/README")
if(USE_INTERNAL_SSL_LIBRARY)

View File

@ -1,14 +1,5 @@
option (USE_UNWIND "Enable libunwind (better stacktraces)" ${ENABLE_LIBRARIES})
if (NOT CMAKE_SYSTEM MATCHES "Linux" OR ARCH_ARM OR ARCH_32)
set (USE_UNWIND OFF)
endif ()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libunwind/CMakeLists.txt")
message(WARNING "submodule contrib/libunwind is missing. to fix try run: \n git submodule update --init --recursive")
set (USE_UNWIND OFF)
endif ()
if (USE_UNWIND)
add_subdirectory(contrib/libunwind-cmake)
set (UNWIND_LIBRARIES unwind)

View File

@ -1,6 +1,4 @@
if (NOT OS_FREEBSD AND NOT ARCH_32)
option (USE_INTERNAL_ZLIB_LIBRARY "Set to FALSE to use system zlib library instead of bundled" ${NOT_UNBUNDLED})
endif ()
option (USE_INTERNAL_ZLIB_LIBRARY "Set to FALSE to use system zlib library instead of bundled" ${NOT_UNBUNDLED})
if (NOT MSVC)
set (INTERNAL_ZLIB_NAME "zlib-ng" CACHE INTERNAL "")

View File

@ -0,0 +1,40 @@
set (DEFAULT_LIBS "-nodefaultlibs")
if (NOT COMPILER_CLANG)
message (FATAL_ERROR "FreeBSD build is supported only for Clang")
endif ()
execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-${CMAKE_SYSTEM_PROCESSOR}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread")
message(STATUS "Default libraries: ${DEFAULT_LIBS}")
set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})
# Global libraries
add_library(global-libs INTERFACE)
# Unfortunately '-pthread' doesn't work with '-nodefaultlibs'.
# Just make sure we have pthreads at all.
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
include (cmake/find/unwind.cmake)
include (cmake/find/cxx.cmake)
add_library(global-group INTERFACE)
target_link_libraries(global-group INTERFACE
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
)
link_libraries(global-group)
# FIXME: remove when all contribs will get custom cmake lists
install(
TARGETS global-group global-libs
EXPORT global
)

View File

@ -0,0 +1,19 @@
set (CMAKE_SYSTEM_NAME "FreeBSD")
set (CMAKE_SYSTEM_PROCESSOR "x86_64")
set (CMAKE_C_COMPILER_TARGET "x86_64-pc-freebsd12.1")
set (CMAKE_CXX_COMPILER_TARGET "x86_64-pc-freebsd12.1")
set (CMAKE_ASM_COMPILER_TARGET "x86_64-pc-freebsd12.1")
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/freebsd-x86_64")
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
set (LINKER_NAME "lld" CACHE STRING "" FORCE)
set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

View File

@ -24,6 +24,9 @@ if (CMAKE_CROSSCOMPILING)
set (ENABLE_PARQUET OFF CACHE INTERNAL "")
set (ENABLE_MYSQL OFF CACHE INTERNAL "")
endif ()
elseif (OS_FREEBSD)
# FIXME: broken dependencies
set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")
else ()
message (FATAL_ERROR "Trying to cross-compile to unsupported system: ${CMAKE_SYSTEM_NAME}!")
endif ()

View File

@ -65,5 +65,8 @@ RUN wget https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/M
# It contains all required headers and libraries. Note that it's named as "gcc" but actually we are using clang for cross compiling.
RUN wget "https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en" -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz
# Download toolchain for FreeBSD 12.1
RUN wget https://clickhouse-datasets.s3.yandex.net/toolchains/toolchains/freebsd-12.1-toolchain.tar.xz
COPY build.sh /
CMD ["/bin/bash", "/build.sh"]

View File

@ -8,6 +8,9 @@ tar xJf MacOSX10.14.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-co
mkdir -p build/cmake/toolchain/linux-aarch64
tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolchain/linux-aarch64 --strip-components=1
mkdir -p build/cmake/toolchain/freebsd-x86_64
tar xJf freebsd-12.1-toolchain.tar.xz -C build/cmake/toolchain/freebsd-x86_64 --strip-components=1
mkdir -p build/build_docker
cd build/build_docker
ccache --show-stats ||:

View File

@ -107,6 +107,7 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
CLANG_PREFIX = "clang"
DARWIN_SUFFIX = "-darwin"
ARM_SUFFIX = "-aarch64"
FREEBSD_SUFFIX = "-freebsd"
result = []
cmake_flags = ['$CMAKE_FLAGS', '-DADD_GDB_INDEX_FOR_GOLD=1']
@ -114,7 +115,8 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
is_clang = compiler.startswith(CLANG_PREFIX)
is_cross_darwin = compiler.endswith(DARWIN_SUFFIX)
is_cross_arm = compiler.endswith(ARM_SUFFIX)
is_cross_compile = is_cross_darwin or is_cross_arm
is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
is_cross_compile = is_cross_darwin or is_cross_arm or is_cross_freebsd
# Explicitly use LLD with Clang by default.
# Don't force linker for cross-compilation.
@ -131,6 +133,9 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
elif is_cross_arm:
cc = compiler[:-len(ARM_SUFFIX)]
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake")
elif is_cross_freebsd:
cc = compiler[:-len(FREEBSD_SUFFIX)]
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/freebsd/toolchain-x86_64.cmake")
else:
cc = compiler

View File

@ -48,6 +48,7 @@ ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/con
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/; \

View File

@ -274,7 +274,17 @@ private:
comment_column->aliases.push_back("ALTER COMMENT COLUMN");
auto clear_column = std::make_unique<Node>("CLEAR COLUMN", next_flag++, COLUMN);
clear_column->aliases.push_back("ALTER CLEAR COLUMN");
auto alter_column = std::make_unique<Node>("ALTER COLUMN", std::move(add_column), std::move(modify_column), std::move(drop_column), std::move(comment_column), std::move(clear_column));
auto rename_column = std::make_unique<Node>("RENAME COLUMN", next_flag++, COLUMN);
rename_column->aliases.push_back("ALTER RENAME COLUMN");
auto alter_column = std::make_unique<Node>(
"ALTER COLUMN",
std::move(add_column),
std::move(modify_column),
std::move(drop_column),
std::move(comment_column),
std::move(clear_column),
std::move(rename_column));
auto alter_order_by = std::make_unique<Node>("ALTER ORDER BY", next_flag++, TABLE);
alter_order_by->aliases.push_back("MODIFY ORDER BY");

View File

@ -30,6 +30,7 @@ enum class AccessType
MODIFY_COLUMN,
COMMENT_COLUMN,
CLEAR_COLUMN,
RENAME_COLUMN,
ALTER_COLUMN, /// allow to execute ALTER {ADD|DROP|MODIFY...} COLUMN
ALTER_ORDER_BY,
@ -197,6 +198,7 @@ namespace impl
ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_COLUMN);
ACCESS_TYPE_TO_KEYWORD_CASE(COMMENT_COLUMN);
ACCESS_TYPE_TO_KEYWORD_CASE(CLEAR_COLUMN);
ACCESS_TYPE_TO_KEYWORD_CASE(RENAME_COLUMN);
ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_COLUMN);
ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_ORDER_BY);

View File

@ -274,10 +274,6 @@ set_source_files_properties(Common/malloc.cpp PROPERTIES COMPILE_FLAGS "-fno-bui
add_library (clickhouse_new_delete STATIC Common/new_delete.cpp)
target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io jemalloc)
if (OS_FREEBSD)
target_compile_definitions (clickhouse_common_io PUBLIC CLOCK_MONOTONIC_COARSE=CLOCK_MONOTONIC_FAST)
endif ()
add_subdirectory(Common/ZooKeeper)
add_subdirectory(Common/Config)

View File

@ -23,7 +23,9 @@ namespace DB
namespace
{
#if defined(OS_LINUX)
thread_local size_t write_trace_iteration = 0;
#endif
void writeTraceInfo(TraceType trace_type, int /* sig */, siginfo_t * info, void * context)
{
@ -53,7 +55,6 @@ namespace
}
#else
UNUSED(info);
UNUSED(write_trace_iteration);
#endif
const auto signal_context = *reinterpret_cast<ucontext_t *>(context);
@ -110,7 +111,7 @@ QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(const UInt64 thread_id, const
sev.sigev_notify = SIGEV_THREAD_ID;
sev.sigev_signo = pause_signal;
# if defined(__FreeBSD__)
# if defined(OS_FREEBSD)
sev._sigev_un._threadid = thread_id;
# else
sev._sigev_un._tid = thread_id;
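
For context, a condensed sketch of the timer setup this hunk adjusts, with simplified names and error handling (not the patch's exact code); only the spelling of the thread-id field in the sigevent union differs between FreeBSD and Linux, which is what the OS_FREEBSD branch selects:

```cpp
#include <csignal>
#include <ctime>
#include <stdexcept>

// Arm a timer that delivers `pause_signal` to one specific thread.
timer_t createProfilingTimer(clockid_t clock_type, int pause_signal, pid_t thread_id)
{
    struct sigevent sev {};
    sev.sigev_notify = SIGEV_THREAD_ID;
    sev.sigev_signo = pause_signal;
#if defined(OS_FREEBSD)
    sev._sigev_un._threadid = thread_id;   // FreeBSD spelling of the thread-id field
#else
    sev._sigev_un._tid = thread_id;        // glibc spelling
#endif
    timer_t timer_id;
    if (timer_create(clock_type, &sev, &timer_id))
        throw std::runtime_error("Failed to create profiling timer");
    return timer_id;
}
```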

View File

@ -2,7 +2,7 @@
#include <time.h>
#include <sys/time.h>
#if defined(OS_LINUX)
#include <sys/sysinfo.h>
# include <sys/sysinfo.h>
#endif
#include <sched.h>
@ -20,7 +20,7 @@
/// We will also wrap some thread synchronization functions to inject sleep/migration before or after.
#if defined(OS_LINUX)
#define FOR_EACH_WRAPPED_FUNCTION(M) \
# define FOR_EACH_WRAPPED_FUNCTION(M) \
M(int, pthread_mutex_lock, pthread_mutex_t * arg) \
M(int, pthread_mutex_unlock, pthread_mutex_t * arg)
#endif
@ -67,20 +67,20 @@ static void initFromEnv(std::atomic<T> & what, const char * name)
static std::atomic<int> num_cpus = 0;
#if defined(OS_LINUX)
#define DEFINE_WRAPPER_PARAMS(RET, NAME, ...) \
static std::atomic<double> NAME ## _before_yield_probability = 0; \
static std::atomic<double> NAME ## _before_migrate_probability = 0; \
static std::atomic<double> NAME ## _before_sleep_probability = 0; \
static std::atomic<double> NAME ## _before_sleep_time_us = 0; \
\
static std::atomic<double> NAME ## _after_yield_probability = 0; \
static std::atomic<double> NAME ## _after_migrate_probability = 0; \
static std::atomic<double> NAME ## _after_sleep_probability = 0; \
static std::atomic<double> NAME ## _after_sleep_time_us = 0; \
# define DEFINE_WRAPPER_PARAMS(RET, NAME, ...) \
static std::atomic<double> NAME##_before_yield_probability = 0; \
static std::atomic<double> NAME##_before_migrate_probability = 0; \
static std::atomic<double> NAME##_before_sleep_probability = 0; \
static std::atomic<double> NAME##_before_sleep_time_us = 0; \
\
static std::atomic<double> NAME##_after_yield_probability = 0; \
static std::atomic<double> NAME##_after_migrate_probability = 0; \
static std::atomic<double> NAME##_after_sleep_probability = 0; \
static std::atomic<double> NAME##_after_sleep_time_us = 0;
FOR_EACH_WRAPPED_FUNCTION(DEFINE_WRAPPER_PARAMS)
#undef DEFINE_WRAPPER_PARAMS
# undef DEFINE_WRAPPER_PARAMS
#endif
void ThreadFuzzer::initConfiguration()
@ -98,20 +98,20 @@ void ThreadFuzzer::initConfiguration()
initFromEnv(sleep_time_us, "THREAD_FUZZER_SLEEP_TIME_US");
#if defined(OS_LINUX)
#define INIT_WRAPPER_PARAMS(RET, NAME, ...) \
initFromEnv(NAME ## _before_yield_probability, "THREAD_FUZZER_" #NAME "_BEFORE_YIELD_PROBABILITY"); \
initFromEnv(NAME ## _before_migrate_probability, "THREAD_FUZZER_" #NAME "_BEFORE_MIGRATE_PROBABILITY"); \
initFromEnv(NAME ## _before_sleep_probability, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_PROBABILITY"); \
initFromEnv(NAME ## _before_sleep_time_us, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_TIME_US"); \
\
initFromEnv(NAME ## _after_yield_probability, "THREAD_FUZZER_" #NAME "_AFTER_YIELD_PROBABILITY"); \
initFromEnv(NAME ## _after_migrate_probability, "THREAD_FUZZER_" #NAME "_AFTER_MIGRATE_PROBABILITY"); \
initFromEnv(NAME ## _after_sleep_probability, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_PROBABILITY"); \
initFromEnv(NAME ## _after_sleep_time_us, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_TIME_US"); \
# define INIT_WRAPPER_PARAMS(RET, NAME, ...) \
initFromEnv(NAME##_before_yield_probability, "THREAD_FUZZER_" #NAME "_BEFORE_YIELD_PROBABILITY"); \
initFromEnv(NAME##_before_migrate_probability, "THREAD_FUZZER_" #NAME "_BEFORE_MIGRATE_PROBABILITY"); \
initFromEnv(NAME##_before_sleep_probability, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_PROBABILITY"); \
initFromEnv(NAME##_before_sleep_time_us, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_TIME_US"); \
\
initFromEnv(NAME##_after_yield_probability, "THREAD_FUZZER_" #NAME "_AFTER_YIELD_PROBABILITY"); \
initFromEnv(NAME##_after_migrate_probability, "THREAD_FUZZER_" #NAME "_AFTER_MIGRATE_PROBABILITY"); \
initFromEnv(NAME##_after_sleep_probability, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_PROBABILITY"); \
initFromEnv(NAME##_after_sleep_time_us, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_TIME_US");
FOR_EACH_WRAPPED_FUNCTION(INIT_WRAPPER_PARAMS)
#undef INIT_WRAPPER_PARAMS
# undef INIT_WRAPPER_PARAMS
#endif
}
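
These hunks are largely a re-indentation of the wrapper macros, but the token pasting they rely on is easy to miss. A small self-contained illustration, with a simplified initFromEnv body (the real implementation is not shown in this diff):

```cpp
#include <atomic>
#include <cstdlib>

// `NAME##_...` pastes tokens to reach the per-function atomic, while
// "..." #NAME "..." builds the matching environment-variable name.
static std::atomic<double> pthread_mutex_lock_before_sleep_probability{0.0};

template <typename T>
static void initFromEnv(std::atomic<T> & what, const char * name)
{
    if (const char * value = std::getenv(name))
        what.store(static_cast<T>(std::atof(value)), std::memory_order_relaxed);
}

#define INIT_ONE(NAME) \
    initFromEnv(NAME##_before_sleep_probability, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_PROBABILITY")

void initExample()
{
    /// Expands to: initFromEnv(pthread_mutex_lock_before_sleep_probability,
    ///                         "THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY");
    INIT_ONE(pthread_mutex_lock);
}
```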
@ -119,20 +119,28 @@ void ThreadFuzzer::initConfiguration()
bool ThreadFuzzer::isEffective() const
{
#if defined(OS_LINUX)
#define CHECK_WRAPPER_PARAMS(RET, NAME, ...) \
if (NAME ## _before_yield_probability.load(std::memory_order_relaxed)) return true; \
if (NAME ## _before_migrate_probability.load(std::memory_order_relaxed)) return true; \
if (NAME ## _before_sleep_probability.load(std::memory_order_relaxed)) return true; \
if (NAME ## _before_sleep_time_us.load(std::memory_order_relaxed)) return true; \
\
if (NAME ## _after_yield_probability.load(std::memory_order_relaxed)) return true; \
if (NAME ## _after_migrate_probability.load(std::memory_order_relaxed)) return true; \
if (NAME ## _after_sleep_probability.load(std::memory_order_relaxed)) return true; \
if (NAME ## _after_sleep_time_us.load(std::memory_order_relaxed)) return true; \
# define CHECK_WRAPPER_PARAMS(RET, NAME, ...) \
if (NAME##_before_yield_probability.load(std::memory_order_relaxed)) \
return true; \
if (NAME##_before_migrate_probability.load(std::memory_order_relaxed)) \
return true; \
if (NAME##_before_sleep_probability.load(std::memory_order_relaxed)) \
return true; \
if (NAME##_before_sleep_time_us.load(std::memory_order_relaxed)) \
return true; \
\
if (NAME##_after_yield_probability.load(std::memory_order_relaxed)) \
return true; \
if (NAME##_after_migrate_probability.load(std::memory_order_relaxed)) \
return true; \
if (NAME##_after_sleep_probability.load(std::memory_order_relaxed)) \
return true; \
if (NAME##_after_sleep_time_us.load(std::memory_order_relaxed)) \
return true;
FOR_EACH_WRAPPED_FUNCTION(CHECK_WRAPPER_PARAMS)
#undef INIT_WRAPPER_PARAMS
# undef INIT_WRAPPER_PARAMS
#endif
return cpu_time_period_us != 0
@ -229,30 +237,29 @@ void ThreadFuzzer::setup()
/// NOTE We cannot use dlsym(... RTLD_NEXT), because it will call pthread_mutex_lock and it will lead to infinite recursion.
#if defined(OS_LINUX)
#define MAKE_WRAPPER(RET, NAME, ...) \
extern "C" RET __ ## NAME(__VA_ARGS__); /* NOLINT */ \
# define MAKE_WRAPPER(RET, NAME, ...) \
extern "C" RET __##NAME(__VA_ARGS__); /* NOLINT */ \
extern "C" RET NAME(__VA_ARGS__) /* NOLINT */ \
{ \
injection( \
NAME ## _before_yield_probability.load(std::memory_order_relaxed), \
NAME ## _before_migrate_probability.load(std::memory_order_relaxed), \
NAME ## _before_sleep_probability.load(std::memory_order_relaxed), \
NAME ## _before_sleep_time_us.load(std::memory_order_relaxed)); \
\
auto && ret{__ ## NAME(arg)}; \
\
NAME##_before_yield_probability.load(std::memory_order_relaxed), \
NAME##_before_migrate_probability.load(std::memory_order_relaxed), \
NAME##_before_sleep_probability.load(std::memory_order_relaxed), \
NAME##_before_sleep_time_us.load(std::memory_order_relaxed)); \
\
auto && ret{__##NAME(arg)}; \
\
injection( \
NAME ## _after_yield_probability.load(std::memory_order_relaxed), \
NAME ## _after_migrate_probability.load(std::memory_order_relaxed), \
NAME ## _after_sleep_probability.load(std::memory_order_relaxed), \
NAME ## _after_sleep_time_us.load(std::memory_order_relaxed)); \
\
NAME##_after_yield_probability.load(std::memory_order_relaxed), \
NAME##_after_migrate_probability.load(std::memory_order_relaxed), \
NAME##_after_sleep_probability.load(std::memory_order_relaxed), \
NAME##_after_sleep_time_us.load(std::memory_order_relaxed)); \
\
return ret; \
} \
}
FOR_EACH_WRAPPED_FUNCTION(MAKE_WRAPPER)
FOR_EACH_WRAPPED_FUNCTION(MAKE_WRAPPER)
#undef MAKE_WRAPPER
# undef MAKE_WRAPPER
#endif
}
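
A stripped-down sketch of the interposition pattern MAKE_WRAPPER expands to, using a hypothetical function pair rather than the real pthread symbols:

```cpp
// Defining a function with the libc symbol's name shadows it at link time, while
// the real implementation stays reachable through an internal "__"-prefixed alias
// (glibc exposes __pthread_mutex_lock for exactly this purpose). As the NOTE in
// the hunk says, dlsym(RTLD_NEXT) is avoided because it would itself take a
// mutex and recurse.
extern "C" int __locked_operation(int arg);  // the "real" implementation, e.g. in libc

extern "C" int locked_operation(int arg)     // wrapper that user code ends up calling
{
    // injection(before-probabilities...) would run here
    int ret = __locked_operation(arg);
    // injection(after-probabilities...) would run here
    return ret;
}
```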

View File

@ -29,11 +29,10 @@ void setThreadName(const char * name)
throw DB::Exception("Thread name cannot be longer than 15 bytes", DB::ErrorCodes::PTHREAD_ERROR);
#endif
#if defined(__FreeBSD__)
#if defined(OS_FREEBSD)
pthread_set_name_np(pthread_self(), name);
return;
#elif defined(__APPLE__)
if ((false))
#elif defined(OS_DARWIN)
if (0 != pthread_setname_np(name))
#else
if (0 != prctl(PR_SET_NAME, name, 0, 0, 0))
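
The switch from __FreeBSD__/__APPLE__ to OS_FREEBSD/OS_DARWIN does not change behaviour; a hedged sketch of the resulting per-platform calls (the helper name is illustrative, error handling dropped):

```cpp
#include <pthread.h>
#if defined(__linux__)
#    include <sys/prctl.h>
#elif defined(__FreeBSD__)
#    include <pthread_np.h>
#endif

// FreeBSD names a thread via pthread_set_name_np, macOS can only rename the
// calling thread with the one-argument pthread_setname_np, and Linux uses
// prctl(PR_SET_NAME).
inline void setCurrentThreadName(const char * name)
{
#if defined(__FreeBSD__)
    pthread_set_name_np(pthread_self(), name);
#elif defined(__APPLE__)
    pthread_setname_np(name);
#else
    prctl(PR_SET_NAME, name, 0, 0, 0);
#endif
}
```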

View File

@ -3,7 +3,8 @@
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>
#include <Interpreters/Context.h>
#include <Interpreters/Join.h>
#include <Interpreters/HashJoin.h>
#include <Columns/ColumnString.h>
#include <Storages/StorageJoin.h>

View File

@ -6,8 +6,8 @@ namespace DB
{
class Context;
class Join;
using HashJoinPtr = std::shared_ptr<Join>;
class HashJoin;
using HashJoinPtr = std::shared_ptr<HashJoin>;
class ExecutableFunctionJoinGet final : public IExecutableFunctionImpl
{

View File

@ -1,12 +1,12 @@
#if defined(__linux__)
#include <boost/noncopyable.hpp>
#include <Common/Exception.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <IO/AIO.h>
#if defined(OS_LINUX)
# include <Common/Exception.h>
# include <sys/syscall.h>
# include <unistd.h>
/** Small wrappers for asynchronous I/O.
*/
@ -53,17 +53,10 @@ AIOContext::~AIOContext()
io_destroy(ctx);
}
#elif defined(__FreeBSD__)
#elif defined(OS_FREEBSD)
# include <aio.h>
# include <boost/noncopyable.hpp>
# include <sys/event.h>
# include <sys/time.h>
# include <sys/types.h>
# include <Common/Exception.h>
# include <IO/AIO.h>
/** Small wrappers for asynchronous I/O.
*/
@ -123,7 +116,7 @@ int io_submit(int ctx, long nr, struct iocb * iocbpp[])
int io_getevents(int ctx, long, long max_nr, struct kevent * events, struct timespec * timeout)
{
return kevent(ctx, NULL, 0, events, max_nr, timeout);
return kevent(ctx, nullptr, 0, events, max_nr, timeout);
}
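
A hedged usage sketch of the FreeBSD shim: the `ctx` handle is expected to be a kqueue descriptor, so collecting completed requests is a single kevent() poll. This assumes the submit path registered each request for SIGEV_KEVENT notification on that queue, which is what the wrapper's io_submit is expected to do.

```cpp
#include <sys/event.h>
#include <sys/time.h>
#include <vector>

// Poll the kqueue-backed AIO context without blocking and return whatever
// completion events are ready.
std::vector<struct kevent> reapCompletions(int ctx, int max_events)
{
    std::vector<struct kevent> events(static_cast<size_t>(max_events));
    struct timespec timeout = {0, 0};   // poll, do not block
    int received = kevent(ctx, nullptr, 0, events.data(), max_events, &timeout);
    events.resize(received > 0 ? static_cast<size_t>(received) : 0);
    return events;
}
```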

View File

@ -2,20 +2,20 @@
#include <boost/noncopyable.hpp>
#if defined(__linux__)
#if defined(OS_LINUX)
/// https://stackoverflow.com/questions/20759750/resolving-redefinition-of-timespec-in-time-h
#define timespec linux_timespec
#define timeval linux_timeval
#define itimerspec linux_itimerspec
#define sigset_t linux_sigset_t
# define timespec linux_timespec
# define timeval linux_timeval
# define itimerspec linux_itimerspec
# define sigset_t linux_sigset_t
#include <linux/aio_abi.h>
# include <linux/aio_abi.h>
#undef timespec
#undef timeval
#undef itimerspec
#undef sigset_t
# undef timespec
# undef timeval
# undef itimerspec
# undef sigset_t
/** Small wrappers for asynchronous I/O.
@ -39,12 +39,12 @@ struct AIOContext : private boost::noncopyable
~AIOContext();
};
#elif defined(__FreeBSD__)
#elif defined(OS_FREEBSD)
#include <aio.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
# include <aio.h>
# include <sys/event.h>
# include <sys/time.h>
# include <sys/types.h>
typedef struct kevent io_event;
typedef int aio_context_t;

View File

@ -2,7 +2,7 @@
#include <Interpreters/CollectJoinOnKeysVisitor.h>
#include <Interpreters/IdentifierSemantic.h>
#include <Interpreters/AnalyzedJoin.h>
#include <Interpreters/TableJoin.h>
namespace DB
{

View File

@ -11,7 +11,7 @@ namespace DB
{
class ASTIdentifier;
class AnalyzedJoin;
class TableJoin;
namespace ASOF
{
@ -25,7 +25,7 @@ public:
struct Data
{
AnalyzedJoin & analyzed_join;
TableJoin & analyzed_join;
const TableWithColumnNames & left_table;
const TableWithColumnNames & right_table;
const Aliases & aliases;

View File

@ -4,7 +4,7 @@
#include <Common/SipHash.h>
#include <Interpreters/ExpressionActions.h>
#include <Interpreters/ExpressionJIT.h>
#include <Interpreters/AnalyzedJoin.h>
#include <Interpreters/TableJoin.h>
#include <Columns/ColumnsNumber.h>
#include <Common/typeid_cast.h>
#include <DataTypes/DataTypeArray.h>
@ -147,7 +147,7 @@ ExpressionAction ExpressionAction::arrayJoin(const NameSet & array_joined_column
return a;
}
ExpressionAction ExpressionAction::ordinaryJoin(std::shared_ptr<AnalyzedJoin> table_join, JoinPtr join)
ExpressionAction ExpressionAction::ordinaryJoin(std::shared_ptr<TableJoin> table_join, JoinPtr join)
{
ExpressionAction a;
a.type = JOIN;
@ -1206,7 +1206,7 @@ bool ExpressionAction::operator==(const ExpressionAction & other) const
&& result_name == other.result_name
&& argument_names == other.argument_names
&& same_array_join
&& AnalyzedJoin::sameJoin(table_join.get(), other.table_join.get())
&& TableJoin::sameJoin(table_join.get(), other.table_join.get())
&& projection == other.projection
&& is_function_compiled == other.is_function_compiled;
}

View File

@ -22,7 +22,7 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
}
class AnalyzedJoin;
class TableJoin;
class IJoin;
using JoinPtr = std::shared_ptr<IJoin>;
@ -97,7 +97,7 @@ public:
std::shared_ptr<ArrayJoinAction> array_join;
/// For JOIN
std::shared_ptr<const AnalyzedJoin> table_join;
std::shared_ptr<const TableJoin> table_join;
JoinPtr join;
/// For PROJECT.
@ -114,7 +114,7 @@ public:
static ExpressionAction project(const Names & projected_columns_);
static ExpressionAction addAliases(const NamesWithAliases & aliased_columns_);
static ExpressionAction arrayJoin(const NameSet & array_joined_columns, bool array_join_is_left, const Context & context);
static ExpressionAction ordinaryJoin(std::shared_ptr<AnalyzedJoin> table_join, JoinPtr join);
static ExpressionAction ordinaryJoin(std::shared_ptr<TableJoin> table_join, JoinPtr join);
/// Which columns necessary to perform this action.
Names getNeededColumns() const;

View File

@ -27,9 +27,9 @@
#include <Interpreters/LogicalExpressionsOptimizer.h>
#include <Interpreters/ExternalDictionariesLoader.h>
#include <Interpreters/Set.h>
#include <Interpreters/AnalyzedJoin.h>
#include <Interpreters/TableJoin.h>
#include <Interpreters/JoinSwitcher.h>
#include <Interpreters/Join.h>
#include <Interpreters/HashJoin.h>
#include <Interpreters/MergeJoin.h>
#include <AggregateFunctions/AggregateFunctionFactory.h>
@ -502,7 +502,7 @@ bool SelectQueryExpressionAnalyzer::appendJoin(ExpressionActionsChain & chain, b
return true;
}
static JoinPtr tryGetStorageJoin(const ASTTablesInSelectQueryElement & join_element, std::shared_ptr<AnalyzedJoin> analyzed_join,
static JoinPtr tryGetStorageJoin(const ASTTablesInSelectQueryElement & join_element, std::shared_ptr<TableJoin> analyzed_join,
const Context & context)
{
const auto & table_to_join = join_element.table_expression->as<ASTTableExpression &>();
@ -524,19 +524,19 @@ static JoinPtr tryGetStorageJoin(const ASTTablesInSelectQueryElement & join_elem
return {};
}
static ExpressionActionsPtr createJoinedBlockActions(const Context & context, const AnalyzedJoin & analyzed_join)
static ExpressionActionsPtr createJoinedBlockActions(const Context & context, const TableJoin & analyzed_join)
{
ASTPtr expression_list = analyzed_join.rightKeysList();
auto syntax_result = SyntaxAnalyzer(context).analyze(expression_list, analyzed_join.columnsFromJoinedTable());
return ExpressionAnalyzer(expression_list, syntax_result, context).getActions(true, false);
}
static std::shared_ptr<IJoin> makeJoin(std::shared_ptr<AnalyzedJoin> analyzed_join, const Block & sample_block)
static std::shared_ptr<IJoin> makeJoin(std::shared_ptr<TableJoin> analyzed_join, const Block & sample_block)
{
bool allow_merge_join = analyzed_join->allowMergeJoin();
if (analyzed_join->forceHashJoin() || (analyzed_join->preferMergeJoin() && !allow_merge_join))
return std::make_shared<Join>(analyzed_join, sample_block);
return std::make_shared<HashJoin>(analyzed_join, sample_block);
else if (analyzed_join->forceMergeJoin() || (analyzed_join->preferMergeJoin() && allow_merge_join))
return std::make_shared<MergeJoin>(analyzed_join, sample_block);
return std::make_shared<JoinSwitcher>(analyzed_join, sample_block);

View File

@ -123,7 +123,7 @@ protected:
SyntaxAnalyzerResultPtr syntax;
const ConstStoragePtr & storage() const { return syntax->storage; } /// The main table in FROM clause, if exists.
const AnalyzedJoin & analyzedJoin() const { return *syntax->analyzed_join; }
const TableJoin & analyzedJoin() const { return *syntax->analyzed_join; }
const NamesAndTypesList & sourceColumns() const { return syntax->required_source_columns; }
const std::vector<const ASTFunction *> & aggregates() const { return syntax->aggregates; }
NamesAndTypesList sourceWithJoinedColumns() const;

View File

@ -9,9 +9,9 @@
#include <DataTypes/DataTypeNullable.h>
#include <Interpreters/Join.h>
#include <Interpreters/HashJoin.h>
#include <Interpreters/join_common.h>
#include <Interpreters/AnalyzedJoin.h>
#include <Interpreters/TableJoin.h>
#include <Interpreters/joinDispatch.h>
#include <Interpreters/NullableUtils.h>
@ -189,7 +189,7 @@ static void changeColumnRepresentation(const ColumnPtr & src_column, ColumnPtr &
}
Join::Join(std::shared_ptr<AnalyzedJoin> table_join_, const Block & right_sample_block, bool any_take_last_row_)
HashJoin::HashJoin(std::shared_ptr<TableJoin> table_join_, const Block & right_sample_block, bool any_take_last_row_)
: table_join(table_join_)
, kind(table_join->kind())
, strictness(table_join->strictness())
@ -199,13 +199,13 @@ Join::Join(std::shared_ptr<AnalyzedJoin> table_join_, const Block & right_sample
, any_take_last_row(any_take_last_row_)
, asof_inequality(table_join->getAsofInequality())
, data(std::make_shared<RightTableData>())
, log(&Logger::get("Join"))
, log(&Logger::get("HashJoin"))
{
setSampleBlock(right_sample_block);
}
Join::Type Join::chooseMethod(const ColumnRawPtrs & key_columns, Sizes & key_sizes)
HashJoin::Type HashJoin::chooseMethod(const ColumnRawPtrs & key_columns, Sizes & key_sizes)
{
size_t keys_size = key_columns.size();
@ -282,47 +282,47 @@ static KeyGetter createKeyGetter(const ColumnRawPtrs & key_columns, const Sizes
return KeyGetter(key_columns, key_sizes, nullptr);
}
template <Join::Type type, typename Value, typename Mapped>
template <HashJoin::Type type, typename Value, typename Mapped>
struct KeyGetterForTypeImpl;
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<Join::Type::key8, Value, Mapped>
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<HashJoin::Type::key8, Value, Mapped>
{
using Type = ColumnsHashing::HashMethodOneNumber<Value, Mapped, UInt8, false>;
};
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<Join::Type::key16, Value, Mapped>
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<HashJoin::Type::key16, Value, Mapped>
{
using Type = ColumnsHashing::HashMethodOneNumber<Value, Mapped, UInt16, false>;
};
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<Join::Type::key32, Value, Mapped>
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<HashJoin::Type::key32, Value, Mapped>
{
using Type = ColumnsHashing::HashMethodOneNumber<Value, Mapped, UInt32, false>;
};
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<Join::Type::key64, Value, Mapped>
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<HashJoin::Type::key64, Value, Mapped>
{
using Type = ColumnsHashing::HashMethodOneNumber<Value, Mapped, UInt64, false>;
};
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<Join::Type::key_string, Value, Mapped>
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<HashJoin::Type::key_string, Value, Mapped>
{
using Type = ColumnsHashing::HashMethodString<Value, Mapped, true, false>;
};
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<Join::Type::key_fixed_string, Value, Mapped>
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<HashJoin::Type::key_fixed_string, Value, Mapped>
{
using Type = ColumnsHashing::HashMethodFixedString<Value, Mapped, true, false>;
};
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<Join::Type::keys128, Value, Mapped>
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<HashJoin::Type::keys128, Value, Mapped>
{
using Type = ColumnsHashing::HashMethodKeysFixed<Value, UInt128, Mapped, false, false, false>;
};
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<Join::Type::keys256, Value, Mapped>
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<HashJoin::Type::keys256, Value, Mapped>
{
using Type = ColumnsHashing::HashMethodKeysFixed<Value, UInt256, Mapped, false, false, false>;
};
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<Join::Type::hashed, Value, Mapped>
template <typename Value, typename Mapped> struct KeyGetterForTypeImpl<HashJoin::Type::hashed, Value, Mapped>
{
using Type = ColumnsHashing::HashMethodHashed<Value, Mapped, false>;
};
template <Join::Type type, typename Data>
template <HashJoin::Type type, typename Data>
struct KeyGetterForType
{
using Value = typename Data::value_type;
@ -332,7 +332,7 @@ struct KeyGetterForType
};
void Join::init(Type type_)
void HashJoin::init(Type type_)
{
data->type = type_;
@ -342,7 +342,7 @@ void Join::init(Type type_)
joinDispatch(kind, strictness, data->maps, [&](auto, auto, auto & map) { map.create(data->type); });
}
size_t Join::getTotalRowCount() const
size_t HashJoin::getTotalRowCount() const
{
size_t res = 0;
@ -359,7 +359,7 @@ size_t Join::getTotalRowCount() const
return res;
}
size_t Join::getTotalByteCount() const
size_t HashJoin::getTotalByteCount() const
{
size_t res = 0;
@ -377,7 +377,7 @@ size_t Join::getTotalByteCount() const
return res;
}
void Join::setSampleBlock(const Block & block)
void HashJoin::setSampleBlock(const Block & block)
{
/// You have to restore this lock if you call the function outside of ctor.
//std::unique_lock lock(rwlock);
@ -441,7 +441,7 @@ namespace
template <typename Map, typename KeyGetter>
struct Inserter
{
static ALWAYS_INLINE void insertOne(const Join & join, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i,
static ALWAYS_INLINE void insertOne(const HashJoin & join, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i,
Arena & pool)
{
auto emplace_result = key_getter.emplaceKey(map, i, pool);
@ -450,7 +450,7 @@ namespace
new (&emplace_result.getMapped()) typename Map::mapped_type(stored_block, i);
}
static ALWAYS_INLINE void insertAll(const Join &, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool)
static ALWAYS_INLINE void insertAll(const HashJoin &, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool)
{
auto emplace_result = key_getter.emplaceKey(map, i, pool);
@ -463,7 +463,7 @@ namespace
}
}
static ALWAYS_INLINE void insertAsof(Join & join, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool,
static ALWAYS_INLINE void insertAsof(HashJoin & join, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool,
const IColumn * asof_column)
{
auto emplace_result = key_getter.emplaceKey(map, i, pool);
@ -478,7 +478,7 @@ namespace
template <ASTTableJoin::Strictness STRICTNESS, typename KeyGetter, typename Map, bool has_null_map>
void NO_INLINE insertFromBlockImplTypeCase(
Join & join, Map & map, size_t rows, const ColumnRawPtrs & key_columns,
HashJoin & join, Map & map, size_t rows, const ColumnRawPtrs & key_columns,
const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, Arena & pool)
{
[[maybe_unused]] constexpr bool mapped_one = std::is_same_v<typename Map::mapped_type, JoinStuff::MappedOne> ||
@ -508,7 +508,7 @@ namespace
template <ASTTableJoin::Strictness STRICTNESS, typename KeyGetter, typename Map>
void insertFromBlockImplType(
Join & join, Map & map, size_t rows, const ColumnRawPtrs & key_columns,
HashJoin & join, Map & map, size_t rows, const ColumnRawPtrs & key_columns,
const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, Arena & pool)
{
if (null_map)
@ -520,17 +520,17 @@ namespace
template <ASTTableJoin::Strictness STRICTNESS, typename Maps>
void insertFromBlockImpl(
Join & join, Join::Type type, Maps & maps, size_t rows, const ColumnRawPtrs & key_columns,
HashJoin & join, HashJoin::Type type, Maps & maps, size_t rows, const ColumnRawPtrs & key_columns,
const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, Arena & pool)
{
switch (type)
{
case Join::Type::EMPTY: break;
case Join::Type::CROSS: break; /// Do nothing. We have already saved block, and it is enough.
case HashJoin::Type::EMPTY: break;
case HashJoin::Type::CROSS: break; /// Do nothing. We have already saved block, and it is enough.
#define M(TYPE) \
case Join::Type::TYPE: \
insertFromBlockImplType<STRICTNESS, typename KeyGetterForType<Join::Type::TYPE, std::remove_reference_t<decltype(*maps.TYPE)>>::Type>(\
case HashJoin::Type::TYPE: \
insertFromBlockImplType<STRICTNESS, typename KeyGetterForType<HashJoin::Type::TYPE, std::remove_reference_t<decltype(*maps.TYPE)>>::Type>(\
join, *maps.TYPE, rows, key_columns, key_sizes, stored_block, null_map, pool); \
break;
APPLY_FOR_JOIN_VARIANTS(M)
@ -539,7 +539,7 @@ namespace
}
}
void Join::initRequiredRightKeys()
void HashJoin::initRequiredRightKeys()
{
const Names & left_keys = table_join->keyNamesLeft();
const Names & right_keys = table_join->keyNamesRight();
@ -558,7 +558,7 @@ void Join::initRequiredRightKeys()
}
}
void Join::initRightBlockStructure(Block & saved_block_sample)
void HashJoin::initRightBlockStructure(Block & saved_block_sample)
{
/// We could remove key columns for LEFT | INNER HashJoin but we should keep them for JoinSwitcher (if any).
bool save_key_columns = !table_join->forceHashJoin() || isRightOrFull(kind);
@ -580,7 +580,7 @@ void Join::initRightBlockStructure(Block & saved_block_sample)
JoinCommon::convertColumnsToNullable(saved_block_sample, (isFull(kind) ? right_table_keys.columns() : 0));
}
Block Join::structureRightBlock(const Block & block) const
Block HashJoin::structureRightBlock(const Block & block) const
{
Block structured_block;
for (auto & sample_column : savedBlockSample().getColumnsWithTypeAndName())
@ -594,10 +594,10 @@ Block Join::structureRightBlock(const Block & block) const
return structured_block;
}
bool Join::addJoinedBlock(const Block & source_block, bool check_limits)
bool HashJoin::addJoinedBlock(const Block & source_block, bool check_limits)
{
if (empty())
throw Exception("Logical error: Join was not initialized", ErrorCodes::LOGICAL_ERROR);
throw Exception("Logical error: HashJoin was not initialized", ErrorCodes::LOGICAL_ERROR);
/// There's no optimization for right side const columns. Remove constness if any.
Block block = materializeBlock(source_block);
@ -666,7 +666,7 @@ public:
const Block & block,
const Block & saved_block_sample,
const ColumnsWithTypeAndName & extras,
const Join & join_,
const HashJoin & join_,
const ColumnRawPtrs & key_columns_,
const Sizes & key_sizes_)
: join(join_)
@ -729,7 +729,7 @@ public:
}
}
const Join & join;
const HashJoin & join;
const ColumnRawPtrs & key_columns;
const Sizes & key_sizes;
size_t rows_to_add;
@ -839,7 +839,7 @@ NO_INLINE IColumn::Filter joinRightColumns(const Map & map, AddedColumns & added
if constexpr (is_asof_join)
{
const Join & join = added_columns.join;
const HashJoin & join = added_columns.join;
if (const RowRef * found = mapped.findAsof(join.getAsofType(), join.getAsofInequality(), asof_column, i))
{
setUsed<need_filter>(filter, i);
@ -924,14 +924,14 @@ IColumn::Filter joinRightColumnsSwitchNullability(const Map & map, AddedColumns
}
template <ASTTableJoin::Kind KIND, ASTTableJoin::Strictness STRICTNESS, typename Maps>
IColumn::Filter switchJoinRightColumns(const Maps & maps_, AddedColumns & added_columns, Join::Type type, const ConstNullMapPtr & null_map)
IColumn::Filter switchJoinRightColumns(const Maps & maps_, AddedColumns & added_columns, HashJoin::Type type, const ConstNullMapPtr & null_map)
{
switch (type)
{
#define M(TYPE) \
case Join::Type::TYPE: \
case HashJoin::Type::TYPE: \
return joinRightColumnsSwitchNullability<KIND, STRICTNESS,\
typename KeyGetterForType<Join::Type::TYPE, const std::remove_reference_t<decltype(*maps_.TYPE)>>::Type>(\
typename KeyGetterForType<HashJoin::Type::TYPE, const std::remove_reference_t<decltype(*maps_.TYPE)>>::Type>(\
*maps_.TYPE, added_columns, null_map);\
break;
APPLY_FOR_JOIN_VARIANTS(M)
@ -946,7 +946,7 @@ IColumn::Filter switchJoinRightColumns(const Maps & maps_, AddedColumns & added_
template <ASTTableJoin::Kind KIND, ASTTableJoin::Strictness STRICTNESS, typename Maps>
void Join::joinBlockImpl(
void HashJoin::joinBlockImpl(
Block & block,
const Names & key_names_left,
const Block & block_with_columns_to_add,
@ -1065,7 +1065,7 @@ void Join::joinBlockImpl(
}
}
void Join::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) const
void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) const
{
size_t max_joined_block_rows = table_join->maxJoinedBlockRows();
size_t start_left_row = 0;
@ -1158,7 +1158,7 @@ static void checkTypeOfKey(const Block & block_left, const Block & block_right)
}
DataTypePtr Join::joinGetReturnType(const String & column_name) const
DataTypePtr HashJoin::joinGetReturnType(const String & column_name) const
{
std::shared_lock lock(data->rwlock);
@ -1169,7 +1169,7 @@ DataTypePtr Join::joinGetReturnType(const String & column_name) const
template <typename Maps>
void Join::joinGetImpl(Block & block, const String & column_name, const Maps & maps_) const
void HashJoin::joinGetImpl(Block & block, const String & column_name, const Maps & maps_) const
{
joinBlockImpl<ASTTableJoin::Kind::Left, ASTTableJoin::Strictness::RightAny>(
block, {block.getByPosition(0).name}, {sample_block_with_columns_to_add.getByName(column_name)}, maps_);
@ -1179,7 +1179,7 @@ void Join::joinGetImpl(Block & block, const String & column_name, const Maps & m
// TODO: support composite key
// TODO: return multiple columns as named tuple
// TODO: return array of values when strictness == ASTTableJoin::Strictness::All
void Join::joinGet(Block & block, const String & column_name) const
void HashJoin::joinGet(Block & block, const String & column_name) const
{
std::shared_lock lock(data->rwlock);
@ -1198,7 +1198,7 @@ void Join::joinGet(Block & block, const String & column_name) const
}
void Join::joinBlock(Block & block, ExtraBlockPtr & not_processed)
void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed)
{
std::shared_lock lock(data->rwlock);
@ -1219,7 +1219,7 @@ void Join::joinBlock(Block & block, ExtraBlockPtr & not_processed)
}
void Join::joinTotals(Block & block) const
void HashJoin::joinTotals(Block & block) const
{
JoinCommon::joinTotals(totals, sample_block_with_columns_to_add, key_names_right, block);
}
@ -1268,7 +1268,7 @@ struct AdderNonJoined
class NonJoinedBlockInputStream : public IBlockInputStream
{
public:
NonJoinedBlockInputStream(const Join & parent_, const Block & result_sample_block_, UInt64 max_block_size_)
NonJoinedBlockInputStream(const HashJoin & parent_, const Block & result_sample_block_, UInt64 max_block_size_)
: parent(parent_)
, max_block_size(max_block_size_)
, result_sample_block(materializeBlock(result_sample_block_))
@ -1342,7 +1342,7 @@ protected:
}
private:
const Join & parent;
const HashJoin & parent;
UInt64 max_block_size;
Block result_sample_block;
@ -1359,7 +1359,7 @@ private:
std::vector<std::pair<size_t, ColumnPtr>> right_lowcard_changes;
std::any position;
std::optional<Join::BlockNullmapList::const_iterator> nulls_position;
std::optional<HashJoin::BlockNullmapList::const_iterator> nulls_position;
void setRightIndex(size_t right_pos, size_t result_position)
{
@ -1452,7 +1452,7 @@ private:
switch (parent.data->type)
{
#define M(TYPE) \
case Join::Type::TYPE: \
case HashJoin::Type::TYPE: \
return fillColumns<STRICTNESS>(*maps.TYPE, columns_keys_and_right);
APPLY_FOR_JOIN_VARIANTS(M)
#undef M
@ -1523,7 +1523,7 @@ private:
};
BlockInputStreamPtr Join::createStreamWithNonJoinedRows(const Block & result_sample_block, UInt64 max_block_size) const
BlockInputStreamPtr HashJoin::createStreamWithNonJoinedRows(const Block & result_sample_block, UInt64 max_block_size) const
{
if (table_join->strictness() == ASTTableJoin::Strictness::Asof ||
table_join->strictness() == ASTTableJoin::Strictness::Semi)
@ -1535,7 +1535,7 @@ BlockInputStreamPtr Join::createStreamWithNonJoinedRows(const Block & result_sam
}
bool Join::hasStreamWithNonJoinedRows() const
bool HashJoin::hasStreamWithNonJoinedRows() const
{
if (table_join->strictness() == ASTTableJoin::Strictness::Asof ||
table_join->strictness() == ASTTableJoin::Strictness::Semi)

View File

@ -26,7 +26,7 @@
namespace DB
{
class AnalyzedJoin;
class TableJoin;
namespace JoinStuff
{
@ -143,10 +143,10 @@ using MappedAsof = WithFlags<AsofRowRefs, false>;
* If it is true, we always generate Nullable column and substitute NULLs for non-joined rows,
* as in standard SQL.
*/
class Join : public IJoin
class HashJoin : public IJoin
{
public:
Join(std::shared_ptr<AnalyzedJoin> table_join_, const Block & right_sample_block, bool any_take_last_row_ = false);
HashJoin(std::shared_ptr<TableJoin> table_join_, const Block & right_sample_block, bool any_take_last_row_ = false);
bool empty() { return data->type == Type::EMPTY; }
@ -315,7 +315,7 @@ public:
Arena pool;
};
void reuseJoinedData(const Join & join)
void reuseJoinedData(const HashJoin & join)
{
data = join.data;
}
@ -329,7 +329,7 @@ private:
friend class NonJoinedBlockInputStream;
friend class JoinSource;
std::shared_ptr<AnalyzedJoin> table_join;
std::shared_ptr<TableJoin> table_join;
ASTTableJoin::Kind kind;
ASTTableJoin::Strictness strictness;

View File

@ -280,6 +280,11 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS
required_access.emplace_back(AccessType::REFRESH_VIEW, database, table);
break;
}
case ASTAlterCommand::RENAME_COLUMN:
{
required_access.emplace_back(AccessType::RENAME_COLUMN, database, table, column_name());
break;
}
case ASTAlterCommand::NO_TYPE: break;
}

View File

@ -48,8 +48,8 @@
#include <Interpreters/getTableExpressions.h>
#include <Interpreters/JoinToSubqueryTransformVisitor.h>
#include <Interpreters/CrossToInnerJoinVisitor.h>
#include <Interpreters/AnalyzedJoin.h>
#include <Interpreters/Join.h>
#include <Interpreters/TableJoin.h>
#include <Interpreters/HashJoin.h>
#include <Interpreters/JoinedTables.h>
#include <Interpreters/QueryAliasesVisitor.h>
@ -897,7 +897,7 @@ void InterpreterSelectQuery::executeImpl(TPipeline & pipeline, const BlockInputS
if (join)
{
inflating_join = true;
if (auto * hash_join = typeid_cast<Join *>(join.get()))
if (auto * hash_join = typeid_cast<HashJoin *>(join.get()))
inflating_join = isCross(hash_join->getKind());
}

View File

@ -1,6 +1,6 @@
#include <Common/typeid_cast.h>
#include <Interpreters/JoinSwitcher.h>
#include <Interpreters/Join.h>
#include <Interpreters/HashJoin.h>
#include <Interpreters/MergeJoin.h>
#include <Interpreters/join_common.h>
@ -17,13 +17,13 @@ static ColumnWithTypeAndName correctNullability(ColumnWithTypeAndName && column,
return std::move(column);
}
JoinSwitcher::JoinSwitcher(std::shared_ptr<AnalyzedJoin> table_join_, const Block & right_sample_block_)
JoinSwitcher::JoinSwitcher(std::shared_ptr<TableJoin> table_join_, const Block & right_sample_block_)
: limits(table_join_->sizeLimits())
, switched(false)
, table_join(table_join_)
, right_sample_block(right_sample_block_.cloneEmpty())
{
join = std::make_shared<Join>(table_join, right_sample_block);
join = std::make_shared<HashJoin>(table_join, right_sample_block);
if (!limits.hasLimits())
limits.max_bytes = table_join->defaultMaxBytes();
@ -50,7 +50,7 @@ bool JoinSwitcher::addJoinedBlock(const Block & block, bool)
void JoinSwitcher::switchJoin()
{
std::shared_ptr<Join::RightTableData> joined_data = static_cast<const Join &>(*join).getJoinedData();
std::shared_ptr<HashJoin::RightTableData> joined_data = static_cast<const HashJoin &>(*join).getJoinedData();
BlocksList right_blocks = std::move(joined_data->blocks);
/// Destroy old join & create new one. Early destroy for memory saving.

View File

@ -4,7 +4,7 @@
#include <Core/Block.h>
#include <Interpreters/IJoin.h>
#include <Interpreters/AnalyzedJoin.h>
#include <Interpreters/TableJoin.h>
namespace DB
{
@ -15,7 +15,7 @@ namespace DB
class JoinSwitcher : public IJoin
{
public:
JoinSwitcher(std::shared_ptr<AnalyzedJoin> table_join_, const Block & right_sample_block_);
JoinSwitcher(std::shared_ptr<TableJoin> table_join_, const Block & right_sample_block_);
/// Add block of data from right hand of JOIN into current join object.
/// If join-in-memory memory limit exceeded switches to join-on-disk and continue with it.
@ -72,7 +72,7 @@ private:
SizeLimits limits;
bool switched;
mutable std::mutex switch_mutex;
std::shared_ptr<AnalyzedJoin> table_join;
std::shared_ptr<TableJoin> table_join;
const Block right_sample_block;
/// Change join-in-memory to join-on-disk moving right hand JOIN data from one to another.

View File

@ -4,7 +4,7 @@
#include <Core/SortCursor.h>
#include <Columns/ColumnNullable.h>
#include <Interpreters/MergeJoin.h>
#include <Interpreters/AnalyzedJoin.h>
#include <Interpreters/TableJoin.h>
#include <Interpreters/sortBlock.h>
#include <Interpreters/join_common.h>
#include <DataStreams/materializeBlock.h>
@ -445,7 +445,7 @@ void MiniLSM::merge(std::function<void(const Block &)> callback)
}
MergeJoin::MergeJoin(std::shared_ptr<AnalyzedJoin> table_join_, const Block & right_sample_block_)
MergeJoin::MergeJoin(std::shared_ptr<TableJoin> table_join_, const Block & right_sample_block_)
: table_join(table_join_)
, size_limits(table_join->sizeLimits())
, right_sample_block(right_sample_block_)

View File

@ -13,7 +13,7 @@
namespace DB
{
class AnalyzedJoin;
class TableJoin;
class MergeJoinCursor;
struct MergeJoinEqualRange;
@ -48,7 +48,7 @@ struct MiniLSM
class MergeJoin : public IJoin
{
public:
MergeJoin(std::shared_ptr<AnalyzedJoin> table_join_, const Block & right_sample_block);
MergeJoin(std::shared_ptr<TableJoin> table_join_, const Block & right_sample_block);
bool addJoinedBlock(const Block & block, bool check_limits) override;
void joinBlock(Block &, ExtraBlockPtr & not_processed) override;
@ -76,7 +76,7 @@ private:
using Cache = LRUCache<size_t, Block, std::hash<size_t>, BlockByteWeight>;
mutable std::shared_mutex rwlock;
std::shared_ptr<AnalyzedJoin> table_join;
std::shared_ptr<TableJoin> table_join;
SizeLimits size_limits;
SortDescription left_sort_description;
SortDescription right_sort_description;

View File

@ -0,0 +1,12 @@
#include <Interpreters/RenameColumnVisitor.h>
#include <Interpreters/IdentifierSemantic.h>
namespace DB
{
void RenameColumnData::visit(ASTIdentifier & identifier, ASTPtr &)
{
std::optional<String> identifier_column_name = IdentifierSemantic::getColumnName(identifier);
if (identifier_column_name && identifier_column_name == column_name)
identifier.name = rename_to;
}
}

View File

@ -0,0 +1,22 @@
#pragma once
#include <Interpreters/InDepthNodeVisitor.h>
#include <Parsers/ASTIdentifier.h>
namespace DB
{
/// Data for RenameColumnVisitor which traverse tree and rename all columns with
/// name column_name to rename_to
struct RenameColumnData
{
using TypeToVisit = ASTIdentifier;
String column_name;
String rename_to;
void visit(ASTIdentifier & identifier, ASTPtr & ast);
};
using RenameColumnMatcher = OneTypeMatcher<RenameColumnData>;
using RenameColumnVisitor = InDepthNodeVisitor<RenameColumnMatcher, true>;
}
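
A hedged usage sketch of the new visitor (the function and variable names here are illustrative, not from the patch): rewrite every identifier in an AST that names the old column, which is how the new RENAME COLUMN support can fix up dependent expressions.

```cpp
#include <Interpreters/RenameColumnVisitor.h>

namespace DB
{

// Walk `ast` in depth and replace references to `old_name` with `new_name`.
void renameColumnInAst(ASTPtr & ast, const String & old_name, const String & new_name)
{
    RenameColumnData rename_data{old_name, new_name};
    RenameColumnVisitor rename_visitor(rename_data);
    rename_visitor.visit(ast);
}

}
```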

View File

@ -1,6 +1,6 @@
#include <Interpreters/SubqueryForSet.h>
#include <Interpreters/InterpreterSelectWithUnionQuery.h>
#include <Interpreters/Join.h>
#include <Interpreters/IJoin.h>
#include <Interpreters/MergeJoin.h>
#include <Interpreters/ExpressionActions.h>
#include <DataStreams/LazyBlockInputStream.h>

View File

@ -17,7 +17,7 @@
#include <Interpreters/OptimizeIfWithConstantConditionVisitor.h>
#include <Interpreters/RequiredSourceColumnsVisitor.h>
#include <Interpreters/GetAggregatesVisitor.h>
#include <Interpreters/AnalyzedJoin.h>
#include <Interpreters/TableJoin.h>
#include <Interpreters/ExpressionActions.h> /// getSmallestColumn()
#include <Interpreters/getTableExpressions.h>
#include <Interpreters/OptimizeIfChains.h>
@ -520,7 +520,7 @@ void setJoinStrictness(ASTSelectQuery & select_query, JoinStrictness join_defaul
}
/// Find the columns that are obtained by JOIN.
void collectJoinedColumns(AnalyzedJoin & analyzed_join, const ASTSelectQuery & select_query,
void collectJoinedColumns(TableJoin & analyzed_join, const ASTSelectQuery & select_query,
const std::vector<TableWithColumnNames> & tables, const Aliases & aliases)
{
const ASTTablesInSelectQueryElement * node = select_query.join();
@ -795,7 +795,7 @@ SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyzeSelect(
const auto & settings = context.getSettingsRef();
const NameSet & source_columns_set = result.source_columns_set;
result.analyzed_join = std::make_shared<AnalyzedJoin>(settings, context.getTemporaryVolume());
result.analyzed_join = std::make_shared<TableJoin>(settings, context.getTemporaryVolume());
if (remove_duplicates)
renameDuplicatedColumns(select_query);

View File

@ -11,7 +11,7 @@ namespace DB
{
class ASTFunction;
class AnalyzedJoin;
class TableJoin;
class Context;
struct Settings;
struct SelectQueryOptions;
@ -20,7 +20,7 @@ using Scalars = std::map<String, Block>;
struct SyntaxAnalyzerResult
{
ConstStoragePtr storage;
std::shared_ptr<AnalyzedJoin> analyzed_join;
std::shared_ptr<TableJoin> analyzed_join;
NamesAndTypesList source_columns;
NameSet source_columns_set; /// Set of names of source_columns.

View File

@ -1,4 +1,4 @@
#include <Interpreters/AnalyzedJoin.h>
#include <Interpreters/TableJoin.h>
#include <Parsers/ASTExpressionList.h>
@ -15,7 +15,7 @@ namespace ErrorCodes
{
}
AnalyzedJoin::AnalyzedJoin(const Settings & settings, VolumePtr tmp_volume_)
TableJoin::TableJoin(const Settings & settings, VolumePtr tmp_volume_)
: size_limits(SizeLimits{settings.max_rows_in_join, settings.max_bytes_in_join, settings.join_overflow_mode})
, default_max_bytes(settings.default_max_bytes_in_join)
, join_use_nulls(settings.join_use_nulls)
@ -29,7 +29,7 @@ AnalyzedJoin::AnalyzedJoin(const Settings & settings, VolumePtr tmp_volume_)
join_algorithm = JoinAlgorithm::PREFER_PARTIAL_MERGE;
}
void AnalyzedJoin::addUsingKey(const ASTPtr & ast)
void TableJoin::addUsingKey(const ASTPtr & ast)
{
key_names_left.push_back(ast->getColumnName());
key_names_right.push_back(ast->getAliasOrColumnName());
@ -42,7 +42,7 @@ void AnalyzedJoin::addUsingKey(const ASTPtr & ast)
right_key = renames[right_key];
}
void AnalyzedJoin::addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast)
void TableJoin::addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast)
{
key_names_left.push_back(left_table_ast->getColumnName());
key_names_right.push_back(right_table_ast->getAliasOrColumnName());
@ -52,7 +52,7 @@ void AnalyzedJoin::addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast)
}
/// @return how many times right key appears in ON section.
size_t AnalyzedJoin::rightKeyInclusion(const String & name) const
size_t TableJoin::rightKeyInclusion(const String & name) const
{
if (hasUsing())
return 0;
@ -64,7 +64,7 @@ size_t AnalyzedJoin::rightKeyInclusion(const String & name) const
return count;
}
void AnalyzedJoin::deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix)
void TableJoin::deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix)
{
NameSet joined_columns;
NamesAndTypesList dedup_columns;
@ -90,7 +90,7 @@ void AnalyzedJoin::deduplicateAndQualifyColumnNames(const NameSet & left_table_c
columns_from_joined_table.swap(dedup_columns);
}
NameSet AnalyzedJoin::getQualifiedColumnsSet() const
NameSet TableJoin::getQualifiedColumnsSet() const
{
NameSet out;
for (const auto & names : original_names)
@ -98,7 +98,7 @@ NameSet AnalyzedJoin::getQualifiedColumnsSet() const
return out;
}
NamesWithAliases AnalyzedJoin::getNamesWithAliases(const NameSet & required_columns) const
NamesWithAliases TableJoin::getNamesWithAliases(const NameSet & required_columns) const
{
NamesWithAliases out;
for (const auto & column : required_columns)
@ -110,14 +110,14 @@ NamesWithAliases AnalyzedJoin::getNamesWithAliases(const NameSet & required_colu
return out;
}
ASTPtr AnalyzedJoin::leftKeysList() const
ASTPtr TableJoin::leftKeysList() const
{
ASTPtr keys_list = std::make_shared<ASTExpressionList>();
keys_list->children = key_asts_left;
return keys_list;
}
ASTPtr AnalyzedJoin::rightKeysList() const
ASTPtr TableJoin::rightKeysList() const
{
ASTPtr keys_list = std::make_shared<ASTExpressionList>();
if (hasOn())
@ -125,7 +125,7 @@ ASTPtr AnalyzedJoin::rightKeysList() const
return keys_list;
}
Names AnalyzedJoin::requiredJoinedNames() const
Names TableJoin::requiredJoinedNames() const
{
NameSet required_columns_set(key_names_right.begin(), key_names_right.end());
for (const auto & joined_column : columns_added_by_join)
@ -134,7 +134,7 @@ Names AnalyzedJoin::requiredJoinedNames() const
return Names(required_columns_set.begin(), required_columns_set.end());
}
NameSet AnalyzedJoin::requiredRightKeys() const
NameSet TableJoin::requiredRightKeys() const
{
NameSet required;
for (const auto & name : key_names_right)
@ -144,7 +144,7 @@ NameSet AnalyzedJoin::requiredRightKeys() const
return required;
}
NamesWithAliases AnalyzedJoin::getRequiredColumns(const Block & sample, const Names & action_required_columns) const
NamesWithAliases TableJoin::getRequiredColumns(const Block & sample, const Names & action_required_columns) const
{
NameSet required_columns(action_required_columns.begin(), action_required_columns.end());
@ -155,7 +155,7 @@ NamesWithAliases AnalyzedJoin::getRequiredColumns(const Block & sample, const Na
return getNamesWithAliases(required_columns);
}
void AnalyzedJoin::addJoinedColumn(const NameAndTypePair & joined_column)
void TableJoin::addJoinedColumn(const NameAndTypePair & joined_column)
{
if (join_use_nulls && isLeftOrFull(table_join.kind))
{
@ -166,7 +166,7 @@ void AnalyzedJoin::addJoinedColumn(const NameAndTypePair & joined_column)
columns_added_by_join.push_back(joined_column);
}
void AnalyzedJoin::addJoinedColumnsAndCorrectNullability(Block & sample_block) const
void TableJoin::addJoinedColumnsAndCorrectNullability(Block & sample_block) const
{
bool right_or_full_join = isRightOrFull(table_join.kind);
bool left_or_full_join = isLeftOrFull(table_join.kind);
@ -198,7 +198,7 @@ void AnalyzedJoin::addJoinedColumnsAndCorrectNullability(Block & sample_block) c
}
}
bool AnalyzedJoin::sameJoin(const AnalyzedJoin * x, const AnalyzedJoin * y)
bool TableJoin::sameJoin(const TableJoin * x, const TableJoin * y)
{
if (!x && !y)
return true;
@ -212,7 +212,7 @@ bool AnalyzedJoin::sameJoin(const AnalyzedJoin * x, const AnalyzedJoin * y)
&& x->columns_added_by_join == y->columns_added_by_join;
}
bool AnalyzedJoin::sameStrictnessAndKind(ASTTableJoin::Strictness strictness_, ASTTableJoin::Kind kind_) const
bool TableJoin::sameStrictnessAndKind(ASTTableJoin::Strictness strictness_, ASTTableJoin::Kind kind_) const
{
if (strictness_ == strictness() && kind_ == kind())
return true;
@ -228,7 +228,7 @@ bool AnalyzedJoin::sameStrictnessAndKind(ASTTableJoin::Strictness strictness_, A
return false;
}
bool AnalyzedJoin::allowMergeJoin() const
bool TableJoin::allowMergeJoin() const
{
bool is_any = (strictness() == ASTTableJoin::Strictness::Any);
bool is_all = (strictness() == ASTTableJoin::Strictness::All);

View File

@ -25,7 +25,7 @@ struct Settings;
class Volume;
using VolumePtr = std::shared_ptr<Volume>;
class AnalyzedJoin
class TableJoin
{
/** Query of the form `SELECT expr(x) AS k FROM t1 ANY LEFT JOIN (SELECT expr(x) AS k FROM t2) USING k`
* The join is made by column k.
@ -69,10 +69,10 @@ class AnalyzedJoin
VolumePtr tmp_volume;
public:
AnalyzedJoin(const Settings &, VolumePtr tmp_volume);
TableJoin(const Settings &, VolumePtr tmp_volume);
/// for StorageJoin
AnalyzedJoin(SizeLimits limits, bool use_nulls, ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness,
TableJoin(SizeLimits limits, bool use_nulls, ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness,
const Names & key_names_right_)
: size_limits(limits)
, default_max_bytes(0)
@ -133,7 +133,7 @@ public:
/// StorageJoin overrides key names (because of different name qualification)
void setRightKeys(const Names & keys) { key_names_right = keys; }
static bool sameJoin(const AnalyzedJoin * x, const AnalyzedJoin * y);
static bool sameJoin(const TableJoin * x, const TableJoin * y);
};
}

View File

@ -3,7 +3,7 @@
#include <array>
#include <common/constexpr_helpers.h>
#include <Interpreters/Join.h>
#include <Interpreters/HashJoin.h>
/** Used in implementation of Join to process different data structures.
@ -15,37 +15,37 @@ namespace DB
template <ASTTableJoin::Kind kind, typename ASTTableJoin::Strictness>
struct MapGetter;
template <> struct MapGetter<ASTTableJoin::Kind::Left, ASTTableJoin::Strictness::RightAny> { using Map = Join::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Inner, ASTTableJoin::Strictness::RightAny> { using Map = Join::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Right, ASTTableJoin::Strictness::RightAny> { using Map = Join::MapsOneFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Full, ASTTableJoin::Strictness::RightAny> { using Map = Join::MapsOneFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Left, ASTTableJoin::Strictness::RightAny> { using Map = HashJoin::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Inner, ASTTableJoin::Strictness::RightAny> { using Map = HashJoin::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Right, ASTTableJoin::Strictness::RightAny> { using Map = HashJoin::MapsOneFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Full, ASTTableJoin::Strictness::RightAny> { using Map = HashJoin::MapsOneFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Left, ASTTableJoin::Strictness::Any> { using Map = Join::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Inner, ASTTableJoin::Strictness::Any> { using Map = Join::MapsOneFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Right, ASTTableJoin::Strictness::Any> { using Map = Join::MapsAllFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Full, ASTTableJoin::Strictness::Any> { using Map = Join::MapsAllFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Left, ASTTableJoin::Strictness::Any> { using Map = HashJoin::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Inner, ASTTableJoin::Strictness::Any> { using Map = HashJoin::MapsOneFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Right, ASTTableJoin::Strictness::Any> { using Map = HashJoin::MapsAllFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Full, ASTTableJoin::Strictness::Any> { using Map = HashJoin::MapsAllFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Left, ASTTableJoin::Strictness::All> { using Map = Join::MapsAll; };
template <> struct MapGetter<ASTTableJoin::Kind::Inner, ASTTableJoin::Strictness::All> { using Map = Join::MapsAll; };
template <> struct MapGetter<ASTTableJoin::Kind::Right, ASTTableJoin::Strictness::All> { using Map = Join::MapsAllFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Full, ASTTableJoin::Strictness::All> { using Map = Join::MapsAllFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Left, ASTTableJoin::Strictness::All> { using Map = HashJoin::MapsAll; };
template <> struct MapGetter<ASTTableJoin::Kind::Inner, ASTTableJoin::Strictness::All> { using Map = HashJoin::MapsAll; };
template <> struct MapGetter<ASTTableJoin::Kind::Right, ASTTableJoin::Strictness::All> { using Map = HashJoin::MapsAllFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Full, ASTTableJoin::Strictness::All> { using Map = HashJoin::MapsAllFlagged; };
/// Only SEMI LEFT and SEMI RIGHT are valid. INNER and FULL are here for template instantiation.
template <> struct MapGetter<ASTTableJoin::Kind::Left, ASTTableJoin::Strictness::Semi> { using Map = Join::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Inner, ASTTableJoin::Strictness::Semi> { using Map = Join::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Right, ASTTableJoin::Strictness::Semi> { using Map = Join::MapsAllFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Full, ASTTableJoin::Strictness::Semi> { using Map = Join::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Left, ASTTableJoin::Strictness::Semi> { using Map = HashJoin::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Inner, ASTTableJoin::Strictness::Semi> { using Map = HashJoin::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Right, ASTTableJoin::Strictness::Semi> { using Map = HashJoin::MapsAllFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Full, ASTTableJoin::Strictness::Semi> { using Map = HashJoin::MapsOne; };
/// Only ANTI LEFT and ANTI RIGHT are valid. INNER and FULL are here for template instantiation.
template <> struct MapGetter<ASTTableJoin::Kind::Left, ASTTableJoin::Strictness::Anti> { using Map = Join::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Inner, ASTTableJoin::Strictness::Anti> { using Map = Join::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Right, ASTTableJoin::Strictness::Anti> { using Map = Join::MapsAllFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Full, ASTTableJoin::Strictness::Anti> { using Map = Join::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Left, ASTTableJoin::Strictness::Anti> { using Map = HashJoin::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Inner, ASTTableJoin::Strictness::Anti> { using Map = HashJoin::MapsOne; };
template <> struct MapGetter<ASTTableJoin::Kind::Right, ASTTableJoin::Strictness::Anti> { using Map = HashJoin::MapsAllFlagged; };
template <> struct MapGetter<ASTTableJoin::Kind::Full, ASTTableJoin::Strictness::Anti> { using Map = HashJoin::MapsOne; };
template <ASTTableJoin::Kind kind>
struct MapGetter<kind, ASTTableJoin::Strictness::Asof>
{
using Map = Join::MapsAsof;
using Map = HashJoin::MapsAsof;
};
@ -66,7 +66,7 @@ static constexpr std::array<ASTTableJoin::Kind, 4> KINDS = {
};
/// Init specified join map
inline bool joinDispatchInit(ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness, Join::MapsVariant & maps)
inline bool joinDispatchInit(ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness, HashJoin::MapsVariant & maps)
{
return static_for<0, KINDS.size() * STRICTNESSES.size()>([&](auto ij)
{

View File

@ -56,6 +56,11 @@ ASTPtr ASTAlterCommand::clone() const
res->values = values->clone();
res->children.push_back(res->values);
}
if (rename_to)
{
res->rename_to = rename_to->clone();
res->children.push_back(res->rename_to);
}
return res;
}
@ -285,6 +290,15 @@ void ASTAlterCommand::formatImpl(
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "REFRESH " << (settings.hilite ? hilite_none : "");
}
else if (type == ASTAlterCommand::RENAME_COLUMN)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "RENAME COLUMN " << (if_exists ? "IF EXISTS " : "")
<< (settings.hilite ? hilite_none : "");
column->formatImpl(settings, state, frame);
settings.ostr << (settings.hilite ? hilite_keyword : "") << " TO ";
rename_to->formatImpl(settings, state, frame);
}
else
throw Exception("Unexpected type of ALTER", ErrorCodes::UNEXPECTED_AST_STRUCTURE);
}

View File

@ -29,6 +29,7 @@ public:
DROP_COLUMN,
MODIFY_COLUMN,
COMMENT_COLUMN,
RENAME_COLUMN,
MODIFY_ORDER_BY,
MODIFY_TTL,
MATERIALIZE_TTL,
@ -69,6 +70,7 @@ public:
/** The ADD COLUMN query here optionally stores the name of the column following AFTER
* The DROP query stores the column name for deletion here
* Also used for RENAME COLUMN.
*/
ASTPtr column;
@ -155,6 +157,9 @@ public:
String to_database;
String to_table;
/// Target column name
ASTPtr rename_to;
String getID(char delim) const override { return "AlterCommand" + (delim + std::to_string(static_cast<int>(type))); }
ASTPtr clone() const override;

View File

@ -27,6 +27,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
ParserKeyword s_drop_column("DROP COLUMN");
ParserKeyword s_clear_column("CLEAR COLUMN");
ParserKeyword s_modify_column("MODIFY COLUMN");
ParserKeyword s_rename_column("RENAME COLUMN");
ParserKeyword s_comment_column("COMMENT COLUMN");
ParserKeyword s_modify_order_by("MODIFY ORDER BY");
ParserKeyword s_modify_ttl("MODIFY TTL");
@ -77,6 +78,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
ParserKeyword s_delete_where("DELETE WHERE");
ParserKeyword s_update("UPDATE");
ParserKeyword s_where("WHERE");
ParserKeyword s_to("TO");
ParserCompoundIdentifier parser_name;
ParserStringLiteral parser_string_literal;
@ -121,6 +123,22 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
command->type = ASTAlterCommand::ADD_COLUMN;
}
else if (s_rename_column.ignore(pos, expected))
{
if (s_if_exists.ignore(pos, expected))
command->if_exists = true;
if (!parser_name.parse(pos, command->column, expected))
return false;
if (!s_to.ignore(pos, expected))
return false;
if (!parser_name.parse(pos, command->rename_to, expected))
return false;
command->type = ASTAlterCommand::RENAME_COLUMN;
}
else if (s_drop_partition.ignore(pos, expected))
{
if (!parser_partition.parse(pos, command->partition, expected))

View File

@ -12,6 +12,7 @@ namespace DB
* [DROP COLUMN [IF EXISTS] col_to_drop, ...]
* [CLEAR COLUMN [IF EXISTS] col_to_clear [IN PARTITION partition],]
* [MODIFY COLUMN [IF EXISTS] col_to_modify type, ...]
* [RENAME COLUMN [IF EXISTS] col_name TO new_col_name]
* [MODIFY PRIMARY KEY (a, b, c...)]
* [MODIFY SETTING setting_name=setting_value, ...]
* [COMMENT COLUMN [IF EXISTS] col_name string]

View File

@ -5,7 +5,7 @@
#include <DataStreams/IBlockOutputStream.h>
#include <Interpreters/Set.h>
#include <Interpreters/Join.h>
#include <Interpreters/IJoin.h>
#include <Storages/IStorage.h>
#include <iomanip>

View File

@ -12,6 +12,7 @@
#include <Interpreters/addTypeConversionToAST.h>
#include <Interpreters/ExpressionAnalyzer.h>
#include <Interpreters/SyntaxAnalyzer.h>
#include <Interpreters/RenameColumnVisitor.h>
#include <Parsers/ASTAlterQuery.h>
#include <Parsers/ASTColumnDeclaration.h>
#include <Parsers/ASTConstraintDeclaration.h>
@ -39,6 +40,7 @@ namespace ErrorCodes
extern const int NOT_FOUND_COLUMN_IN_BLOCK;
extern const int LOGICAL_ERROR;
extern const int DUPLICATE_COLUMN;
extern const int NOT_IMPLEMENTED;
}
@ -231,10 +233,21 @@ std::optional<AlterCommand> AlterCommand::parse(const ASTAlterCommand * command_
else if (command_ast->type == ASTAlterCommand::MODIFY_QUERY)
{
AlterCommand command;
command.ast = command_ast->clone();
command.type = AlterCommand::MODIFY_QUERY;
command.select = command_ast->select;
return command;
}
else if (command_ast->type == ASTAlterCommand::RENAME_COLUMN)
{
AlterCommand command;
command.ast = command_ast->clone();
command.type = AlterCommand::RENAME_COLUMN;
command.column_name = command_ast->column->as<ASTIdentifier &>().name;
command.rename_to = command_ast->rename_to->as<ASTIdentifier &>().name;
command.if_exists = command_ast->if_exists;
return command;
}
else
return {};
}
@ -437,6 +450,24 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata) const
settings_from_storage.push_back(change);
}
}
else if (type == RENAME_COLUMN)
{
metadata.columns.rename(column_name, rename_to);
RenameColumnData rename_data{column_name, rename_to};
RenameColumnVisitor rename_visitor(rename_data);
for (auto & column : metadata.columns)
{
metadata.columns.modify(column.name, [&](ColumnDescription & column_to_modify)
{
if (column_to_modify.default_desc.expression)
rename_visitor.visit(column_to_modify.default_desc.expression);
if (column_to_modify.ttl)
rename_visitor.visit(column_to_modify.ttl);
});
}
if (metadata.ttl_for_table_ast)
rename_visitor.visit(metadata.ttl_for_table_ast);
}
else
throw Exception("Wrong parameter type in ALTER query", ErrorCodes::LOGICAL_ERROR);
}
@ -519,7 +550,7 @@ bool AlterCommand::isRequireMutationStage(const StorageInMemoryMetadata & metada
if (ignore)
return false;
if (type == DROP_COLUMN || type == DROP_INDEX)
if (type == DROP_COLUMN || type == DROP_INDEX || type == RENAME_COLUMN)
return true;
if (type != MODIFY_COLUMN || data_type == nullptr)
@ -585,6 +616,12 @@ std::optional<MutationCommand> AlterCommand::tryConvertToMutationCommand(const S
result.predicate = nullptr;
}
else if (type == RENAME_COLUMN)
{
result.type = MutationCommand::Type::RENAME_COLUMN;
result.column_name = column_name;
result.rename_to = rename_to;
}
result.ast = ast->clone();
return result;
@ -619,6 +656,8 @@ String alterTypeToString(const AlterCommand::Type type)
return "MODIFY SETTING";
case AlterCommand::Type::MODIFY_QUERY:
return "MODIFY QUERY";
case AlterCommand::Type::RENAME_COLUMN:
return "RENAME COLUMN";
}
__builtin_unreachable();
}
@ -666,7 +705,8 @@ void AlterCommands::prepare(const StorageInMemoryMetadata & metadata)
command.ignore = true;
}
else if (command.type == AlterCommand::DROP_COLUMN
|| command.type == AlterCommand::COMMENT_COLUMN)
|| command.type == AlterCommand::COMMENT_COLUMN
|| command.type == AlterCommand::RENAME_COLUMN)
{
if (!has_column && command.if_exists)
command.ignore = true;
@ -680,6 +720,7 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, const Con
auto all_columns = metadata.columns;
/// Default expression for all added/modified columns
ASTPtr default_expr_list = std::make_shared<ASTExpressionList>();
NameToNameMap renames_map;
for (size_t i = 0; i < size(); ++i)
{
auto & command = (*this)[i];
@ -753,6 +794,52 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, const Con
if (metadata.settings_ast == nullptr)
throw Exception{"Cannot alter settings, because table engine doesn't support settings changes", ErrorCodes::BAD_ARGUMENTS};
}
else if (command.type == AlterCommand::RENAME_COLUMN)
{
/// TODO Implement nested rename
if (metadata.columns.hasNested(command.column_name))
{
throw Exception{"Cannot rename whole Nested struct", ErrorCodes::NOT_IMPLEMENTED};
}
if (!metadata.columns.has(command.column_name))
{
if (!command.if_exists)
throw Exception{"Wrong column name. Cannot find column " + backQuote(command.column_name) + " to rename",
ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK};
}
if (metadata.columns.has(command.rename_to))
throw Exception{"Cannot rename to " + backQuote(command.rename_to) + ": column with this name already exists",
ErrorCodes::DUPLICATE_COLUMN};
if (renames_map.count(command.column_name))
throw Exception{"Cannot rename column '" + backQuote(command.column_name) + "' to two different names in a single ALTER query", ErrorCodes::BAD_ARGUMENTS};
if (renames_map.count(command.rename_to))
throw Exception{"Rename loop detected in ALTER query",
ErrorCodes::BAD_ARGUMENTS};
String from_nested_table_name = Nested::extractTableName(command.column_name);
String to_nested_table_name = Nested::extractTableName(command.rename_to);
bool from_nested = from_nested_table_name != command.column_name;
bool to_nested = to_nested_table_name != command.rename_to;
if (from_nested && to_nested)
{
if (from_nested_table_name != to_nested_table_name)
throw Exception{"Cannot rename column from one nested name to another", ErrorCodes::BAD_ARGUMENTS};
}
else if (!from_nested && !to_nested)
{
renames_map[command.column_name] = command.rename_to;
}
else
{
throw Exception{"Cannot rename column from nested struct to normal column and vice versa", ErrorCodes::BAD_ARGUMENTS};
}
}
/// Collect default expressions for MODIFY and ADD commands
if (command.type == AlterCommand::MODIFY_COLUMN || command.type == AlterCommand::ADD_COLUMN)

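For illustration only (not part of this change), a minimal standalone sketch of the nested-name rule checked above, using a simplified stand-in for Nested::extractTableName (the part before the first dot): a rename is accepted when both names stay inside the same Nested table, or when neither side is nested.
#include <iostream>
#include <stdexcept>
#include <string>

// Simplified stand-in for Nested::extractTableName: the prefix before the first dot.
static std::string extractTableName(const std::string & name)
{
    auto pos = name.find('.');
    return pos == std::string::npos ? name : name.substr(0, pos);
}

static void checkRename(const std::string & from, const std::string & to)
{
    bool from_nested = extractTableName(from) != from;
    bool to_nested = extractTableName(to) != to;

    if (from_nested && to_nested)
    {
        if (extractTableName(from) != extractTableName(to))
            throw std::runtime_error("Cannot rename column from one nested name to another");
    }
    else if (from_nested != to_nested)
        throw std::runtime_error("Cannot rename column from nested struct to normal column and vice versa");
}

int main()
{
    checkRename("n.x", "n.renamed_x");        // ok: same Nested table
    checkRename("value1", "renamed_value1");  // ok: both plain columns
    try { checkRename("n.x", "not_nested_x"); }
    catch (const std::exception & e) { std::cout << e.what() << '\n'; }
}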
View File

@ -35,6 +35,7 @@ struct AlterCommand
MODIFY_TTL,
MODIFY_SETTING,
MODIFY_QUERY,
RENAME_COLUMN,
};
Type type;
@ -96,6 +97,9 @@ struct AlterCommand
/// For MODIFY_QUERY
ASTPtr select = nullptr;
/// Target column name
String rename_to;
static std::optional<AlterCommand> parse(const ASTAlterCommand * command);
void apply(StorageInMemoryMetadata & metadata) const;

View File

@ -1,4 +1,5 @@
#include <Storages/ColumnsDescription.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/ExpressionElementParsers.h>
#include <Parsers/ExpressionListParsers.h>
@ -36,6 +37,7 @@ namespace ErrorCodes
extern const int ILLEGAL_COLUMN;
extern const int CANNOT_PARSE_TEXT;
extern const int THERE_IS_NO_DEFAULT_VALUE;
extern const int LOGICAL_ERROR;
}
ColumnDescription::ColumnDescription(String name_, DataTypePtr type_, bool is_virtual_)
@ -195,6 +197,18 @@ void ColumnsDescription::remove(const String & column_name)
list_it = columns.get<0>().erase(list_it);
}
void ColumnsDescription::rename(const String & column_from, const String & column_to)
{
auto it = columns.get<1>().find(column_from);
if (it == columns.get<1>().end())
throw Exception("Cannot find column " + column_from + " in ColumnsDescription", ErrorCodes::LOGICAL_ERROR);
columns.get<1>().modify_key(it, [&column_to] (String & old_name)
{
old_name = column_to;
});
}
void ColumnsDescription::flattenNested()
{

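The rename above relies on Boost.MultiIndex: modify_key rewrites the name of an existing element in place while the other indices stay consistent, so declaration order is preserved. A minimal self-contained sketch with a simplified container and hypothetical field set (not the real ColumnsDescription):
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/sequenced_index.hpp>
#include <iostream>
#include <string>

struct Column { std::string name; std::string type; };

using Columns = boost::multi_index_container<
    Column,
    boost::multi_index::indexed_by<
        boost::multi_index::sequenced<>,                                      // index 0: declaration order
        boost::multi_index::ordered_unique<                                   // index 1: lookup by name
            boost::multi_index::member<Column, std::string, &Column::name>>>>;

int main()
{
    Columns columns;
    columns.get<0>().push_back({"value1", "String"});

    auto it = columns.get<1>().find(std::string("value1"));
    columns.get<1>().modify_key(it, [](std::string & old_name) { old_name = "renamed_value1"; });

    std::cout << columns.get<0>().begin()->name << '\n';  // renamed_value1
}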
View File

@ -57,6 +57,10 @@ public:
/// `column_name` can be a Nested column name;
void remove(const String & column_name);
/// Rename column. column_from and column_to cannot be nested columns.
/// TODO add ability to rename nested columns
void rename(const String & column_from, const String & column_to);
void flattenNested(); /// TODO: remove, insert already flattened Nested columns.
bool operator==(const ColumnsDescription & other) const { return columns == other.columns; }

View File

@ -30,6 +30,7 @@ IMergeTreeReader::IMergeTreeReader(const MergeTreeData::DataPartPtr & data_part_
, columns(columns_), uncompressed_cache(uncompressed_cache_), mark_cache(mark_cache_)
, settings(settings_), storage(data_part_->storage)
, all_mark_ranges(all_mark_ranges_)
, alter_conversions(storage.getAlterConversionsForPart(data_part))
{
}

View File

@ -78,6 +78,8 @@ protected:
MarkRanges all_mark_ranges;
friend class MergeTreeRangeReader::DelayedStream;
/// Alter conversions that must be applied on the fly if required
MergeTreeData::AlterConversions alter_conversions;
};
}

View File

@ -192,7 +192,7 @@ MergeTreeData::MergeTreeData(
min_format_version = MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING;
}
setTTLExpressions(metadata.columns.getColumnTTLs(), metadata.ttl_for_table_ast);
setTTLExpressions(metadata.columns, metadata.ttl_for_table_ast);
/// format_file always contained on any data path
PathWithDisk version_file;
@ -610,14 +610,17 @@ void checkTTLExpression(const ExpressionActionsPtr & ttl_expression, const Strin
}
void MergeTreeData::setTTLExpressions(const ColumnsDescription::ColumnTTLs & new_column_ttls,
void MergeTreeData::setTTLExpressions(const ColumnsDescription & new_columns,
const ASTPtr & new_ttl_table_ast, bool only_check)
{
auto create_ttl_entry = [this](ASTPtr ttl_ast)
auto new_column_ttls = new_columns.getColumnTTLs();
auto create_ttl_entry = [this, &new_columns](ASTPtr ttl_ast)
{
TTLEntry result;
auto syntax_result = SyntaxAnalyzer(global_context).analyze(ttl_ast, getColumns().getAllPhysical());
auto syntax_result = SyntaxAnalyzer(global_context).analyze(ttl_ast, new_columns.getAllPhysical());
result.expression = ExpressionAnalyzer(ttl_ast, syntax_result, global_context).getActions(false);
result.destination_type = PartDestinationType::DELETE;
result.result_column = ttl_ast->getColumnName();
@ -1457,6 +1460,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S
for (const auto & column : getColumns().getAllPhysical())
old_types.emplace(column.name, column.type.get());
for (const AlterCommand & command : commands)
{
if (command.type == AlterCommand::MODIFY_ORDER_BY && !is_custom_partitioned)
@ -1471,6 +1475,15 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S
"ALTER ADD INDEX is not supported for tables with the old syntax",
ErrorCodes::BAD_ARGUMENTS);
}
if (command.type == AlterCommand::RENAME_COLUMN)
{
if (columns_alter_type_forbidden.count(command.column_name) || columns_alter_type_metadata_only.count(command.column_name))
{
throw Exception(
"Trying to ALTER RENAME key " + backQuoteIfNeed(command.column_name) + " column which is a part of key expression",
ErrorCodes::ILLEGAL_COLUMN);
}
}
else if (command.isModifyingData())
{
if (columns_alter_type_forbidden.count(command.column_name))
@ -1490,7 +1503,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S
setProperties(metadata, /* only_check = */ true);
setTTLExpressions(metadata.columns.getColumnTTLs(), metadata.ttl_for_table_ast, /* only_check = */ true);
setTTLExpressions(metadata.columns, metadata.ttl_for_table_ast, /* only_check = */ true);
if (settings_ast)
{
@ -3589,4 +3602,18 @@ bool MergeTreeData::canUsePolymorphicParts(const MergeTreeSettings & settings, S
return true;
}
MergeTreeData::AlterConversions MergeTreeData::getAlterConversionsForPart(const MergeTreeDataPartPtr part) const
{
MutationCommands commands = getFirstAlterMutationCommandsForPart(part);
AlterConversions result{};
for (const auto & command : commands)
/// Currently we need explicit conversions only for the RENAME alter:
/// all other conversions can be deduced from the diff between part columns
/// and columns in storage.
if (command.type == MutationCommand::Type::RENAME_COLUMN)
result.rename_map[command.rename_to] = command.column_name;
return result;
}
}

View File

@ -33,6 +33,7 @@ namespace DB
class MergeListEntry;
class AlterCommands;
class MergeTreePartsMover;
class MutationCommands;
class ExpressionActions;
using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>;
@ -124,6 +125,20 @@ public:
STRONG_TYPEDEF(String, PartitionID)
/// Alter conversions that should be applied on the fly for a part. Built from
/// the most recent mutation commands for the part. Currently only rename_map
/// is stored here (from the ALTER RENAME command), because for all other types
/// of alters we can deduce the conversions for a part from the difference between
/// part->getColumns() and storage->getColumns().
struct AlterConversions
{
/// Rename map new_name -> old_name
std::unordered_map<String, String> rename_map;
bool isColumnRenamed(const String & new_name) const { return rename_map.count(new_name) > 0; }
String getColumnOldName(const String & new_name) const { return rename_map.at(new_name); }
};
struct LessDataPart
{
using is_transparent = void;
@ -535,9 +550,8 @@ public:
broken_part_callback(name);
}
/** Get the key expression AST as an ASTExpressionList.
* It can be specified in the tuple: (CounterID, Date),
* or as one column: CounterID.
/** Get the key expression AST as an ASTExpressionList. It can be specified
* in the tuple: (CounterID, Date), or as one column: CounterID.
*/
static ASTPtr extractKeyExpressionList(const ASTPtr & node);
@ -647,6 +661,9 @@ public:
/// Reserves 0 bytes
ReservationPtr makeEmptyReservationOnLargestDisk() { return getStoragePolicy()->makeEmptyReservationOnLargestDisk(); }
/// Return alter conversions for the part which must be applied on the fly.
AlterConversions getAlterConversionsForPart(const MergeTreeDataPartPtr part) const;
MergeTreeDataFormatVersion format_version;
Context & global_context;
@ -856,14 +873,14 @@ protected:
std::mutex grab_old_parts_mutex;
/// The same for clearOldTemporaryDirectories.
std::mutex clear_old_temporary_directories_mutex;
/// Mutex for settings usage
void setProperties(const StorageInMemoryMetadata & metadata, bool only_check = false);
void initPartitionKey();
void setTTLExpressions(const ColumnsDescription::ColumnTTLs & new_column_ttls,
void setTTLExpressions(const ColumnsDescription & columns,
const ASTPtr & new_ttl_table_ast, bool only_check = false);
void checkStoragePolicy(const StoragePolicyPtr & new_storage_policy);
void setStoragePolicy(const String & new_storage_policy_name, bool only_check = false);
@ -908,6 +925,11 @@ protected:
/// mechanisms for parts locking
virtual bool partIsAssignedToBackgroundOperation(const DataPartPtr & part) const = 0;
/// Return the most recent mutation commands for the part which weren't applied yet.
/// Used to receive AlterConversions for the part and apply them on the fly. This
/// method has different implementations for replicated and non-replicated
/// MergeTree, because they store mutations in different ways.
virtual MutationCommands getFirstAlterMutationCommandsForPart(const DataPartPtr & part) const = 0;
/// Moves part to specified space, used in ALTER ... MOVE ... queries
bool movePartsToSpace(const DataPartsVector & parts, SpacePtr space);

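As a reading aid (illustration only, with simplified types and a hypothetical helper name), this is the resolution step that the reader changes further down in this diff perform with AlterConversions: a column requested under its new name is read from old parts under its previous name.
#include <iostream>
#include <string>
#include <unordered_map>

// Simplified stand-in for MergeTreeData::AlterConversions.
struct AlterConversions
{
    std::unordered_map<std::string, std::string> rename_map; // new_name -> old_name
    bool isColumnRenamed(const std::string & new_name) const { return rename_map.count(new_name) > 0; }
    std::string getColumnOldName(const std::string & new_name) const { return rename_map.at(new_name); }
};

// Hypothetical helper: resolve the on-disk name for a column requested by its current name.
std::string resolveOnDiskName(const AlterConversions & conversions, const std::string & name)
{
    if (conversions.isColumnRenamed(name))
        return conversions.getColumnOldName(name); // old parts still store the column under its previous name
    return name;
}

int main()
{
    AlterConversions conversions;
    conversions.rename_map["renamed_value1"] = "value1";

    std::cout << resolveOnDiskName(conversions, "renamed_value1") << '\n'; // value1
    std::cout << resolveOnDiskName(conversions, "key") << '\n';            // key
}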
View File

@ -26,11 +26,14 @@
#include <Common/SimpleIncrement.h>
#include <Common/interpolate.h>
#include <Common/typeid_cast.h>
#include <Common/escapeForFileName.h>
#include <cmath>
#include <numeric>
#include <iomanip>
#include <boost/algorithm/string/replace.hpp>
namespace ProfileEvents
{
extern const Event MergedRows;
@ -988,6 +991,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
splitMutationCommands(source_part, commands_for_part, for_interpreter, for_file_renames);
UInt64 watch_prev_elapsed = 0;
MergeStageProgress stage_progress(1.0);
@ -1056,7 +1060,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
auto indices_to_recalc = getIndicesToRecalculate(in, storage_from_source_part, updated_header.getNamesAndTypesList(), context);
NameSet files_to_skip = collectFilesToSkip(updated_header, indices_to_recalc, mrk_extension);
NameSet files_to_remove = collectFilesToRemove(source_part, for_file_renames, mrk_extension);
NameToNameMap files_to_rename = collectFilesForRenames(source_part, for_file_renames, mrk_extension);
if (need_remove_expired_values)
files_to_skip.insert("ttl.txt");
@ -1064,10 +1068,21 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
/// Create hardlinks for unchanged files
for (auto it = disk->iterateDirectory(source_part->getFullRelativePath()); it->isValid(); it->next())
{
if (files_to_skip.count(it->name()) || files_to_remove.count(it->name()))
if (files_to_skip.count(it->name()))
continue;
String destination = new_part_tmp_path + "/" + it->name();
String destination = new_part_tmp_path + "/";
auto rename_it = files_to_rename.find(it->name());
if (rename_it != files_to_rename.end())
{
if (rename_it->second.empty())
continue;
destination += rename_it->second;
}
else
{
destination += it->name();
}
disk->createHardLink(it->path(), destination);
}
@ -1090,9 +1105,19 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
need_remove_expired_values);
}
for (const String & removed_file : files_to_remove)
if (new_data_part->checksums.files.count(removed_file))
new_data_part->checksums.files.erase(removed_file);
for (const auto & [rename_from, rename_to] : files_to_rename)
{
if (rename_to.empty() && new_data_part->checksums.files.count(rename_from))
{
new_data_part->checksums.files.erase(rename_from);
}
else if (new_data_part->checksums.files.count(rename_from))
{
new_data_part->checksums.files[rename_to] = new_data_part->checksums.files[rename_from];
new_data_part->checksums.files.erase(rename_from);
}
}
finalizeMutatedPart(source_part, new_data_part, need_remove_expired_values);
}
@ -1236,6 +1261,20 @@ void MergeTreeDataMergerMutator::splitMutationCommands(
{
removed_columns_from_compact_part.emplace(command.column_name);
}
else if (command.type == MutationCommand::Type::RENAME_COLUMN)
{
if (is_compact_part)
{
for_interpreter.push_back(
{
.type = MutationCommand::Type::READ_COLUMN,
.column_name = command.rename_to,
});
already_changed_columns.emplace(command.column_name);
}
else
for_file_renames.push_back(command);
}
else
{
for_file_renames.push_back(command);
@ -1248,7 +1287,8 @@ void MergeTreeDataMergerMutator::splitMutationCommands(
/// we just don't read dropped columns
for (const auto & column : part->getColumns())
{
if (!removed_columns_from_compact_part.count(column.name) && !already_changed_columns.count(column.name))
if (!removed_columns_from_compact_part.count(column.name)
&& !already_changed_columns.count(column.name))
{
for_interpreter.emplace_back(MutationCommand
{
@ -1262,7 +1302,7 @@ void MergeTreeDataMergerMutator::splitMutationCommands(
}
NameSet MergeTreeDataMergerMutator::collectFilesToRemove(
NameToNameMap MergeTreeDataMergerMutator::collectFilesForRenames(
MergeTreeData::DataPartPtr source_part, const MutationCommands & commands_for_removes, const String & mrk_extension)
{
/// Collect counts for shared streams of different columns. As an example, Nested columns have a shared stream with array sizes.
@ -1277,14 +1317,14 @@ NameSet MergeTreeDataMergerMutator::collectFilesToRemove(
{});
}
NameSet remove_files;
NameToNameMap rename_map;
/// Remove old indices
for (const auto & command : commands_for_removes)
{
if (command.type == MutationCommand::Type::DROP_INDEX)
{
remove_files.emplace("skp_idx_" + command.column_name + ".idx");
remove_files.emplace("skp_idx_" + command.column_name + mrk_extension);
rename_map.emplace("skp_idx_" + command.column_name + ".idx", "");
rename_map.emplace("skp_idx_" + command.column_name + mrk_extension, "");
}
else if (command.type == MutationCommand::Type::DROP_COLUMN)
{
@ -1294,8 +1334,8 @@ NameSet MergeTreeDataMergerMutator::collectFilesToRemove(
/// Delete files if they are no longer shared with another column.
if (--stream_counts[stream_name] == 0)
{
remove_files.emplace(stream_name + ".bin");
remove_files.emplace(stream_name + mrk_extension);
rename_map.emplace(stream_name + ".bin", "");
rename_map.emplace(stream_name + mrk_extension, "");
}
};
@ -1304,9 +1344,31 @@ NameSet MergeTreeDataMergerMutator::collectFilesToRemove(
if (column)
column->type->enumerateStreams(callback, stream_path);
}
else if (command.type == MutationCommand::Type::RENAME_COLUMN)
{
String escaped_name_from = escapeForFileName(command.column_name);
String escaped_name_to = escapeForFileName(command.rename_to);
IDataType::StreamCallback callback = [&](const IDataType::SubstreamPath & substream_path)
{
String stream_from = IDataType::getFileNameForStream(command.column_name, substream_path);
String stream_to = boost::replace_first_copy(stream_from, escaped_name_from, escaped_name_to);
if (stream_from != stream_to)
{
rename_map.emplace(stream_from + ".bin", stream_to + ".bin");
rename_map.emplace(stream_from + mrk_extension, stream_to + mrk_extension);
}
};
IDataType::SubstreamPath stream_path;
auto column = source_part->getColumns().tryGetByName(command.column_name);
if (column)
column->type->enumerateStreams(callback, stream_path);
}
}
return remove_files;
return rename_map;
}
NameSet MergeTreeDataMergerMutator::collectFilesToSkip(
@ -1344,10 +1406,13 @@ NamesAndTypesList MergeTreeDataMergerMutator::getColumnsForNewDataPart(
const MutationCommands & commands_for_removes)
{
NameSet removed_columns;
NameToNameMap renamed_columns;
for (const auto & command : commands_for_removes)
{
if (command.type == MutationCommand::DROP_COLUMN)
removed_columns.insert(command.column_name);
if (command.type == MutationCommand::RENAME_COLUMN)
renamed_columns.emplace(command.rename_to, command.column_name);
}
Names source_column_names = source_part->getColumns().getNames();
NameSet source_columns_name_set(source_column_names.begin(), source_column_names.end());
@ -1364,6 +1429,10 @@ NamesAndTypesList MergeTreeDataMergerMutator::getColumnsForNewDataPart(
{
++it;
}
else if (renamed_columns.count(it->name) && source_columns_name_set.count(renamed_columns[it->name]))
{
++it;
}
else
it = all_columns.erase(it);
}

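To make the renaming rule above concrete, here is a small standalone sketch (illustration only); in the real code the names additionally go through escapeForFileName(), and the marks extension depends on the part format.
#include <boost/algorithm/string/replace.hpp>
#include <iostream>
#include <string>

int main()
{
    // Plain ASCII names are unchanged by escapeForFileName(), so they are used as-is here.
    std::string escaped_from = "value1";
    std::string escaped_to = "renamed_value1";
    std::string mrk_extension = ".mrk2";

    for (const std::string & suffix : {std::string(".bin"), mrk_extension})
    {
        std::string stream_from = escaped_from + suffix;
        std::string stream_to = boost::replace_first_copy(stream_from, escaped_from, escaped_to);
        std::cout << stream_from << " -> " << stream_to << '\n'; // value1.bin -> renamed_value1.bin, etc.
    }
}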
View File

@ -147,7 +147,7 @@ private:
/// Apply commands to source_part, i.e. remove or rename some columns in source_part,
/// and return a map of file renames; an empty target name means the file has to be removed from the filesystem and checksums.
static NameSet collectFilesToRemove(MergeTreeData::DataPartPtr source_part, const MutationCommands & commands_for_removes, const String & mrk_extension);
static NameToNameMap collectFilesForRenames(MergeTreeData::DataPartPtr source_part, const MutationCommands & commands_for_removes, const String & mrk_extension);
/// Files that we don't need to remove and don't need to hardlink, for example columns.txt and checksums.txt,
/// because we will generate new versions of them after we perform the mutation.

View File

@ -81,14 +81,21 @@ MergeTreeReaderCompact::MergeTreeReaderCompact(
const auto & [name, type] = *name_and_type;
auto position = data_part->getColumnPosition(name);
/// If the array of a Nested column is missing in the part,
/// we have to read its offsets if they exist.
if (!position && alter_conversions.isColumnRenamed(name))
{
String old_name = alter_conversions.getColumnOldName(name);
position = data_part->getColumnPosition(old_name);
}
if (!position && typeid_cast<const DataTypeArray *>(type.get()))
{
/// If the array of a Nested column is missing in the part,
/// we have to read its offsets if they exist.
position = findColumnForOffsets(name);
read_only_offsets[i] = (position != std::nullopt);
}
column_positions[i] = std::move(position);
}
@ -125,7 +132,15 @@ size_t MergeTreeReaderCompact::readRows(size_t from_mark, bool continue_reading,
if (!res_columns[pos])
continue;
const auto & [name, type] = *name_and_type;
auto [name, type] = *name_and_type;
if (alter_conversions.isColumnRenamed(name))
{
String old_name = alter_conversions.getColumnOldName(name);
if (!data_part->getColumnPosition(name) && data_part->getColumnPosition(old_name))
name = old_name;
}
auto & column = mutable_columns[pos];
try

View File

@ -42,18 +42,29 @@ MergeTreeReaderWide::MergeTreeReaderWide(
try
{
for (const NameAndTypePair & column_from_part : data_part->getColumns())
{
columns_from_part[column_from_part.name] = column_from_part.type;
}
for (const NameAndTypePair & column : columns)
{
if (columns_from_part.count(column.name))
{
addStreams(column.name, *columns_from_part[column.name], profile_callback_, clock_type_);
}
else
{
if (alter_conversions.isColumnRenamed(column.name))
{
String old_name = alter_conversions.getColumnOldName(column.name);
if (columns_from_part.count(old_name))
addStreams(old_name, *columns_from_part[old_name], profile_callback_, clock_type_);
}
else
{
addStreams(column.name, *column.type, profile_callback_, clock_type_);
}
}
}
}
catch (...)
{
storage.reportBrokenPart(data_part->name);
@ -82,7 +93,14 @@ size_t MergeTreeReaderWide::readRows(size_t from_mark, bool continue_reading, si
auto name_and_type = columns.begin();
for (size_t pos = 0; pos < num_columns; ++pos, ++name_and_type)
{
String & name = name_and_type->name;
String name = name_and_type->name;
if (alter_conversions.isColumnRenamed(name))
{
String original_name = alter_conversions.getColumnOldName(name);
if (!columns_from_part.count(name) && columns_from_part.count(original_name))
name = original_name;
}
DataTypePtr type;
if (columns_from_part.count(name))
type = columns_from_part[name];

View File

@ -1309,6 +1309,21 @@ ReplicatedMergeTreeMergePredicate ReplicatedMergeTreeQueue::getMergePredicate(zk
}
MutationCommands ReplicatedMergeTreeQueue::getFirstAlterMutationCommandsForPart(const MergeTreeData::DataPartPtr & part) const
{
std::lock_guard lock(state_mutex);
auto in_partition = mutations_by_partition.find(part->info.partition_id);
if (in_partition == mutations_by_partition.end())
return MutationCommands{};
Int64 part_version = part->info.getDataVersion();
for (auto [mutation_version, mutation_status] : in_partition->second)
if (mutation_version > part_version && mutation_status->entry->alter_version != -1)
return mutation_status->entry->commands;
return MutationCommands{};
}
MutationCommands ReplicatedMergeTreeQueue::getMutationCommands(
const MergeTreeData::DataPartPtr & part, Int64 desired_mutation_version) const
{

View File

@ -331,6 +331,11 @@ public:
MutationCommands getMutationCommands(const MergeTreeData::DataPartPtr & part, Int64 desired_mutation_version) const;
/// Return mutation commands for the part with the smallest mutation version bigger
/// than the data part version. Used when we apply alter commands on the fly,
/// without actual data modification on disk.
MutationCommands getFirstAlterMutationCommandsForPart(const MergeTreeData::DataPartPtr & part) const;
/// Mark finished mutations as done. If the function needs to be called again at some later time
/// (because some mutations are probably done but we are not sure yet), returns true.
bool tryFinalizeMutations(zkutil::ZooKeeperPtr zookeeper);

View File

@ -52,12 +52,10 @@ public:
return part->storage.getInMemoryMetadata();
}
bool hasSortingKey() const { return part->storage.hasSortingKey(); }
Names getSortingKeyColumns() const override { return part->storage.getSortingKeyColumns(); }
protected:
StorageFromMergeTreeDataPart(const MergeTreeData::DataPartPtr & part_)
: IStorage(getIDFromPart(part_), part_->storage.getVirtuals())

View File

@ -94,6 +94,15 @@ std::optional<MutationCommand> MutationCommand::parse(ASTAlterCommand * command,
res.clear = true;
return res;
}
else if (parse_alter_commands && command->type == ASTAlterCommand::RENAME_COLUMN)
{
MutationCommand res;
res.ast = command->ptr();
res.type = MutationCommand::Type::RENAME_COLUMN;
res.column_name = command->column->as<ASTIdentifier &>().name;
res.rename_to = command->rename_to->as<ASTIdentifier &>().name;
return res;
}
else if (command->type == ASTAlterCommand::MATERIALIZE_TTL)
{
MutationCommand res;

View File

@ -31,7 +31,8 @@ struct MutationCommand
READ_COLUMN,
DROP_COLUMN,
DROP_INDEX,
MATERIALIZE_TTL
MATERIALIZE_TTL,
RENAME_COLUMN,
};
Type type = EMPTY;
@ -53,6 +54,9 @@ struct MutationCommand
/// We need just clear column, not drop from metadata.
bool clear = false;
/// Target column name for RENAME_COLUMN
String rename_to;
/// If parse_alter_commands, then consider more Alter commands as mutation commands
static std::optional<MutationCommand> parse(ASTAlterCommand * command, bool parse_alter_commands = false);
};

View File

@ -1,7 +1,7 @@
#include <Storages/ReadInOrderOptimizer.h>
#include <Storages/MergeTree/MergeTreeData.h>
#include <Storages/MergeTree/StorageFromMergeTreeDataPart.h>
#include <Interpreters/AnalyzedJoin.h>
#include <Interpreters/TableJoin.h>
#include <Functions/IFunction.h>
namespace DB

View File

@ -1,6 +1,6 @@
#include <Storages/StorageJoin.h>
#include <Storages/StorageFactory.h>
#include <Interpreters/Join.h>
#include <Interpreters/HashJoin.h>
#include <Interpreters/Context.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTSetQuery.h>
@ -9,7 +9,7 @@
#include <DataStreams/IBlockInputStream.h>
#include <DataTypes/NestedUtils.h>
#include <Interpreters/joinDispatch.h>
#include <Interpreters/AnalyzedJoin.h>
#include <Interpreters/TableJoin.h>
#include <Common/assert_cast.h>
#include <Common/quoteString.h>
@ -57,8 +57,8 @@ StorageJoin::StorageJoin(
if (!getColumns().hasPhysical(key))
throw Exception{"Key column (" + key + ") does not exist in table declaration.", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE};
table_join = std::make_shared<AnalyzedJoin>(limits, use_nulls, kind, strictness, key_names);
join = std::make_shared<Join>(table_join, getSampleBlock().sortColumns(), overwrite);
table_join = std::make_shared<TableJoin>(limits, use_nulls, kind, strictness, key_names);
join = std::make_shared<HashJoin>(table_join, getSampleBlock().sortColumns(), overwrite);
restore();
}
@ -70,11 +70,11 @@ void StorageJoin::truncate(const ASTPtr &, const Context &, TableStructureWriteL
Poco::File(path + "tmp/").createDirectories();
increment = 0;
join = std::make_shared<Join>(table_join, getSampleBlock().sortColumns(), overwrite);
join = std::make_shared<HashJoin>(table_join, getSampleBlock().sortColumns(), overwrite);
}
HashJoinPtr StorageJoin::getJoin(std::shared_ptr<AnalyzedJoin> analyzed_join) const
HashJoinPtr StorageJoin::getJoin(std::shared_ptr<TableJoin> analyzed_join) const
{
if (!analyzed_join->sameStrictnessAndKind(strictness, kind))
throw Exception("Table " + getStorageID().getNameForLogs() + " has incompatible type of JOIN.", ErrorCodes::INCOMPATIBLE_TYPE_OF_JOIN);
@ -89,7 +89,7 @@ HashJoinPtr StorageJoin::getJoin(std::shared_ptr<AnalyzedJoin> analyzed_join) co
/// Some HACK to remove wrong name qualifiers: table.column -> column.
analyzed_join->setRightKeys(key_names);
HashJoinPtr join_clone = std::make_shared<Join>(analyzed_join, getSampleBlock().sortColumns());
HashJoinPtr join_clone = std::make_shared<HashJoin>(analyzed_join, getSampleBlock().sortColumns());
join_clone->reuseJoinedData(*join);
return join_clone;
}
@ -244,7 +244,7 @@ size_t rawSize(const StringRef & t)
class JoinSource : public SourceWithProgress
{
public:
JoinSource(const Join & parent_, UInt64 max_block_size_, Block sample_block_)
JoinSource(const HashJoin & parent_, UInt64 max_block_size_, Block sample_block_)
: SourceWithProgress(sample_block_)
, parent(parent_)
, lock(parent.data->rwlock)
@ -287,7 +287,7 @@ protected:
}
private:
const Join & parent;
const HashJoin & parent;
std::shared_lock<std::shared_mutex> lock;
UInt64 max_block_size;
Block sample_block;
@ -326,7 +326,7 @@ private:
switch (parent.data->type)
{
#define M(TYPE) \
case Join::Type::TYPE: \
case HashJoin::Type::TYPE: \
rows_added = fillColumns<KIND, STRICTNESS>(*maps.TYPE); \
break;
APPLY_FOR_JOIN_VARIANTS_LIMITED(M)

View File

@ -9,9 +9,9 @@
namespace DB
{
class AnalyzedJoin;
class Join;
using HashJoinPtr = std::shared_ptr<Join>;
class TableJoin;
class HashJoin;
using HashJoinPtr = std::shared_ptr<HashJoin>;
/** Allows you to save the state for later use on the right side of the JOIN.
@ -31,7 +31,7 @@ public:
/// Access the innards.
HashJoinPtr & getJoin() { return join; }
HashJoinPtr getJoin(std::shared_ptr<AnalyzedJoin> analyzed_join) const;
HashJoinPtr getJoin(std::shared_ptr<TableJoin> analyzed_join) const;
/// Verify that the data structure is suitable for implementing this type of JOIN.
void assertCompatible(ASTTableJoin::Kind kind_, ASTTableJoin::Strictness strictness_) const;
@ -53,7 +53,7 @@ private:
ASTTableJoin::Strictness strictness; /// ANY | ALL
bool overwrite;
std::shared_ptr<AnalyzedJoin> table_join;
std::shared_ptr<TableJoin> table_join;
HashJoinPtr join;
void insertBlock(const Block & block) override;

View File

@ -237,10 +237,15 @@ void StorageMergeTree::alter(
/// Reinitialize primary key because primary key column types might have changed.
setProperties(metadata);
setTTLExpressions(metadata.columns.getColumnTTLs(), metadata.ttl_for_table_ast);
setTTLExpressions(metadata.columns, metadata.ttl_for_table_ast);
DatabaseCatalog::instance().getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, metadata);
String mutation_file_name;
Int64 mutation_version = -1;
if (!maybe_mutation_commands.empty())
mutation_version = startMutation(maybe_mutation_commands, mutation_file_name);
/// We release all locks except alter_intention_lock, which allows
/// alter queries to be executed sequentially
table_lock_holder.releaseAllExceptAlterIntention();
@ -248,7 +253,7 @@ void StorageMergeTree::alter(
/// Always execute required mutations synchronously, because alters
/// should be executed in sequential order.
if (!maybe_mutation_commands.empty())
mutateImpl(maybe_mutation_commands, /* mutations_sync = */ 1);
waitForMutation(mutation_version, mutation_file_name);
}
}
@ -351,43 +356,42 @@ public:
};
void StorageMergeTree::mutateImpl(const MutationCommands & commands, size_t mutations_sync)
Int64 StorageMergeTree::startMutation(const MutationCommands & commands, String & mutation_file_name)
{
/// Choose any disk, because when we load mutations we search for them on each disk
/// where the storage can be placed. See loadMutations().
auto disk = getStoragePolicy()->getAnyDisk();
String file_name;
Int64 version;
{
std::lock_guard lock(currently_processing_in_background_mutex);
MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get());
version = increment.get();
entry.commit(version);
file_name = entry.file_name;
auto insertion = current_mutations_by_id.emplace(file_name, std::move(entry));
mutation_file_name = entry.file_name;
auto insertion = current_mutations_by_id.emplace(mutation_file_name, std::move(entry));
current_mutations_by_version.emplace(version, insertion.first->second);
LOG_INFO(log, "Added mutation: " << file_name);
LOG_INFO(log, "Added mutation: " << mutation_file_name);
merging_mutating_task_handle->wake();
}
return version;
}
/// We have to wait for the mutation to end
if (mutations_sync > 0)
{
void StorageMergeTree::waitForMutation(Int64 version, const String & file_name)
{
LOG_INFO(log, "Waiting mutation: " << file_name);
auto check = [version, this]() { return shutdown_called || isMutationDone(version); };
std::unique_lock lock(mutation_wait_mutex);
mutation_wait_event.wait(lock, check);
LOG_INFO(log, "Mutation " << file_name << " done");
}
}
void StorageMergeTree::mutate(const MutationCommands & commands, const Context & query_context)
{
mutateImpl(commands, query_context.getSettingsRef().mutations_sync);
String mutation_file_name;
Int64 version = startMutation(commands, mutation_file_name);
if (query_context.getSettingsRef().mutations_sync > 0)
waitForMutation(version, mutation_file_name);
}
namespace
@ -688,11 +692,17 @@ bool StorageMergeTree::tryMutatePart()
MutationCommands commands_for_size_validation;
for (const auto & command : it->second.commands)
{
if (command.type != MutationCommand::Type::DROP_COLUMN && command.type != MutationCommand::Type::DROP_INDEX)
if (command.type != MutationCommand::Type::DROP_COLUMN
&& command.type != MutationCommand::Type::DROP_INDEX
&& command.type != MutationCommand::Type::RENAME_COLUMN)
{
commands_for_size_validation.push_back(command);
}
else
{
commands_size += command.ast->size();
}
}
if (!commands_for_size_validation.empty())
{
@ -1254,4 +1264,15 @@ CheckResults StorageMergeTree::checkData(const ASTPtr & query, const Context & c
return results;
}
MutationCommands StorageMergeTree::getFirstAlterMutationCommandsForPart(const DataPartPtr & part) const
{
std::lock_guard lock(currently_processing_in_background_mutex);
auto it = current_mutations_by_version.upper_bound(part->info.getDataVersion());
if (it == current_mutations_by_version.end())
return {};
return it->second.commands;
}
}

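To make the version lookup in getFirstAlterMutationCommandsForPart concrete, a standalone sketch (illustration only, simplified types): upper_bound on the part's data version selects the first mutation whose commands are not yet materialized in the part and therefore have to be applied on the fly.
#include <iostream>
#include <map>
#include <string>

int main()
{
    // version -> a human-readable stand-in for the mutation's commands
    std::map<long, std::string> current_mutations_by_version = {
        {5, "RENAME COLUMN value1 TO renamed_value1"},
        {9, "DROP COLUMN value2"},
    };

    long part_data_version = 5; // the part already incorporates mutation 5

    auto it = current_mutations_by_version.upper_bound(part_data_version);
    if (it == current_mutations_by_version.end())
        std::cout << "nothing to apply on the fly\n";
    else
        std::cout << "apply on the fly: " << it->second << '\n'; // mutation 9
}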
View File

@ -120,7 +120,11 @@ private:
BackgroundProcessingPoolTaskResult movePartsTask();
void mutateImpl(const MutationCommands & commands, size_t mutations_sync);
/// Allocate block number for new mutation, write mutation to disk
/// and into in-memory structures. Wake up merge-mutation task.
Int64 startMutation(const MutationCommands & commands, String & mutation_file_name);
/// Wait until the mutation with the given version finishes for all parts
void waitForMutation(Int64 version, const String & file_name);
/// Try and find a single part to mutate and mutate it. If some part was successfully mutated, return true.
bool tryMutatePart();
@ -165,6 +169,8 @@ protected:
const MergingParams & merging_params_,
std::unique_ptr<MergeTreeSettings> settings_,
bool has_force_restore_data_flag);
MutationCommands getFirstAlterMutationCommandsForPart(const DataPartPtr & part) const override;
};
}

View File

@ -447,7 +447,6 @@ void StorageReplicatedMergeTree::checkTableStructure(const String & zookeeper_pr
}
}
void StorageReplicatedMergeTree::setTableStructure(ColumnsDescription new_columns, const ReplicatedMergeTreeTableMetadata::Diff & metadata_diff)
{
StorageInMemoryMetadata metadata = getInMemoryMetadata();
@ -497,7 +496,7 @@ void StorageReplicatedMergeTree::setTableStructure(ColumnsDescription new_column
/// Even if the primary/sorting keys didn't change, we must reinitialize them
/// because primary key column types might have changed.
setProperties(metadata);
setTTLExpressions(new_columns.getColumnTTLs(), metadata.ttl_for_table_ast);
setTTLExpressions(new_columns, metadata.ttl_for_table_ast);
}
@ -5293,32 +5292,8 @@ bool StorageReplicatedMergeTree::canUseAdaptiveGranularity() const
}
StorageInMemoryMetadata
StorageReplicatedMergeTree::getMetadataFromSharedZookeeper(const String & metadata_str, const String & columns_str) const
MutationCommands StorageReplicatedMergeTree::getFirstAlterMutationCommandsForPart(const DataPartPtr & part) const
{
auto replicated_metadata = ReplicatedMergeTreeTableMetadata::parse(metadata_str);
StorageInMemoryMetadata result = getInMemoryMetadata();
result.columns = ColumnsDescription::parse(columns_str);
result.constraints = ConstraintsDescription::parse(replicated_metadata.constraints);
result.indices = IndicesDescription::parse(replicated_metadata.skip_indices);
ParserExpression expression_p;
/// The only thing that can be changed is the TTL expression
if (replicated_metadata.primary_key.empty())
throw Exception("Primary key cannot be empty" , ErrorCodes::LOGICAL_ERROR);
if (!replicated_metadata.sorting_key.empty())
{
result.order_by_ast = parseQuery(expression_p, "(" + replicated_metadata.sorting_key + ")", 0);
result.primary_key_ast = parseQuery(expression_p, "(" + replicated_metadata.primary_key + ")", 0);
}
else
{
result.order_by_ast = parseQuery(expression_p, "(" + replicated_metadata.primary_key + ")", 0);
}
return result;
return queue.getFirstAlterMutationCommandsForPart(part);
}
}

View File

@ -526,7 +526,7 @@ private:
void waitMutationToFinishOnReplicas(
const Strings & replicas, const String & mutation_id) const;
StorageInMemoryMetadata getMetadataFromSharedZookeeper(const String & metadata_str, const String & columns_str) const;
MutationCommands getFirstAlterMutationCommandsForPart(const DataPartPtr & part) const override;
protected:
/** If not 'attach', either creates a new table in ZK, or adds a replica to an existing table.
@ -543,6 +543,7 @@ protected:
const MergingParams & merging_params_,
std::unique_ptr<MergeTreeSettings> settings_,
bool has_force_restore_data_flag);
};

View File

@ -0,0 +1,7 @@
1
1
date key renamed_value1 value2 value3
2019-10-02 1 1 1 1
7 7
date key renamed_value1 renamed_value2 renamed_value3
2019-10-02 7 7 7 7

View File

@ -0,0 +1,38 @@
DROP TABLE IF EXISTS table_for_rename;
CREATE TABLE table_for_rename
(
date Date,
key UInt64,
value1 String,
value2 String,
value3 String
)
ENGINE = MergeTree()
PARTITION BY date
ORDER BY key;
INSERT INTO table_for_rename SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) from numbers(9);
SELECT value1 FROM table_for_rename WHERE key = 1;
ALTER TABLE table_for_rename RENAME COLUMN value1 to renamed_value1;
SELECT renamed_value1 FROM table_for_rename WHERE key = 1;
SELECT * FROM table_for_rename WHERE key = 1 FORMAT TSVWithNames;
ALTER TABLE table_for_rename RENAME COLUMN value3 to value2; --{serverError 15}
ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN value3 TO r2; --{serverError 36}
ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN r1 TO value1; --{serverError 10}
ALTER TABLE table_for_rename RENAME COLUMN value2 TO renamed_value2, RENAME COLUMN value3 TO renamed_value3;
SELECT renamed_value2, renamed_value3 FROM table_for_rename WHERE key = 7;
SELECT * FROM table_for_rename WHERE key = 7 FORMAT TSVWithNames;
ALTER TABLE table_for_rename RENAME COLUMN value100 to renamed_value100; --{serverError 10}
ALTER TABLE table_for_rename RENAME COLUMN IF EXISTS value100 to renamed_value100;
DROP TABLE IF EXISTS table_for_rename;

View File

@ -0,0 +1,8 @@
1
CREATE TABLE default.table_for_rename_replicated (`date` Date, `key` UInt64, `value1` String, `value2` String, `value3` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\') PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
CREATE TABLE default.table_for_rename_replicated (`date` Date, `key` UInt64, `renamed_value1` String, `value2` String, `value3` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\') PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
1
date key renamed_value1 value2 value3
2019-10-02 1 1 1 1
date key renamed_value1 value2 value3
2019-10-02 1 1 1 1

View File

@ -0,0 +1,52 @@
#!/usr/bin/env bash
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_rename_replicated"
$CLICKHOUSE_CLIENT -n --query "
CREATE TABLE table_for_rename_replicated
(
date Date,
key UInt64,
value1 String,
value2 String,
value3 String
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_replicated', '1')
PARTITION BY date
ORDER BY key;
"
$CLICKHOUSE_CLIENT --query "INSERT INTO table_for_rename_replicated SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) from numbers(9);"
$CLICKHOUSE_CLIENT --query "SELECT value1 FROM table_for_rename_replicated WHERE key = 1;"
$CLICKHOUSE_CLIENT --query "SYSTEM STOP MERGES;"
$CLICKHOUSE_CLIENT --query "SHOW CREATE TABLE table_for_rename_replicated;"
$CLICKHOUSE_CLIENT --query "ALTER TABLE table_for_rename_replicated RENAME COLUMN value1 to renamed_value1" --replication_alter_partitions_sync=0
while [[ -z $($CLICKHOUSE_CLIENT --query "SELECT name FROM system.columns WHERE name = 'renamed_value1' and table = 'table_for_rename_replicated'" 2>/dev/null) ]]; do
sleep 0.5
done
# RENAME on the fly works
$CLICKHOUSE_CLIENT --query "SHOW CREATE TABLE table_for_rename_replicated;"
$CLICKHOUSE_CLIENT --query "SELECT renamed_value1 FROM table_for_rename_replicated WHERE key = 1;"
$CLICKHOUSE_CLIENT --query "SELECT * FROM table_for_rename_replicated WHERE key = 1 FORMAT TSVWithNames;"
$CLICKHOUSE_CLIENT --query "SYSTEM START MERGES;"
$CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA table_for_rename_replicated;"
$CLICKHOUSE_CLIENT --query "SELECT * FROM table_for_rename_replicated WHERE key = 1 FORMAT TSVWithNames;"
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_rename_replicated;"

View File

@ -0,0 +1,7 @@
1
1
date key renamed_value1 value2 value3
2019-10-02 1 1 1 1
7 7
date key renamed_value1 renamed_value2 renamed_value3
2019-10-02 7 7 7 7

View File

@ -0,0 +1,33 @@
DROP TABLE IF EXISTS table_with_compact_parts;
CREATE TABLE table_with_compact_parts
(
date Date,
key UInt64,
value1 String,
value2 String,
value3 String
)
ENGINE = MergeTree()
PARTITION BY date
ORDER BY key
SETTINGS index_granularity = 8,
min_rows_for_wide_part = 10;
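-- the parts written below stay compact: each holds fewer rows than min_rows_for_wide_part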
INSERT INTO table_with_compact_parts SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) from numbers(9);
SELECT value1 FROM table_with_compact_parts WHERE key = 1;
ALTER TABLE table_with_compact_parts RENAME COLUMN value1 TO renamed_value1;
SELECT renamed_value1 FROM table_with_compact_parts WHERE key = 1;
SELECT * FROM table_with_compact_parts WHERE key = 1 FORMAT TSVWithNames;
ALTER TABLE table_with_compact_parts RENAME COLUMN value2 TO renamed_value2, RENAME COLUMN value3 TO renamed_value3;
SELECT renamed_value2, renamed_value3 FROM table_with_compact_parts WHERE key = 7;
SELECT * FROM table_with_compact_parts WHERE key = 7 FORMAT TSVWithNames;
DROP TABLE IF EXISTS table_with_compact_parts;

View File

@@ -0,0 +1,10 @@
[8,9,10]
['a','b','c']
CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.x` Array(UInt32), `n.y` Array(String), `value1` Array(Array(LowCardinality(String)))) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.renamed_x` Array(UInt32), `n.renamed_y` Array(String), `value1` Array(Array(LowCardinality(String)))) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
7 [8,9,10]
7 ['a','b','c']
[['7']]
CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.renamed_x` Array(UInt32), `n.renamed_y` Array(String), `renamed_value1` Array(Array(LowCardinality(String)))) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
date key n.renamed_x n.renamed_y renamed_value1
2019-10-01 7 [8,9,10] ['a','b','c'] [['7']]

View File

@@ -0,0 +1,42 @@
DROP TABLE IF EXISTS table_for_rename_nested;
CREATE TABLE table_for_rename_nested
(
date Date,
key UInt64,
n Nested(x UInt32, y String),
value1 Array(Array(LowCardinality(String))) -- column with several files
)
ENGINE = MergeTree()
PARTITION BY date
ORDER BY key;
INSERT INTO table_for_rename_nested (date, key, n.x, n.y, value1) SELECT toDate('2019-10-01'), number, [number + 1, number + 2, number + 3], ['a', 'b', 'c'], [[toString(number)]] FROM numbers(10);
SELECT n.x FROM table_for_rename_nested WHERE key = 7;
SELECT n.y FROM table_for_rename_nested WHERE key = 7;
SHOW CREATE TABLE table_for_rename_nested;
ALTER TABLE table_for_rename_nested RENAME COLUMN n.x TO n.renamed_x;
ALTER TABLE table_for_rename_nested RENAME COLUMN n.y TO n.renamed_y;
SHOW CREATE TABLE table_for_rename_nested;
SELECT key, n.renamed_x FROM table_for_rename_nested WHERE key = 7;
SELECT key, n.renamed_y FROM table_for_rename_nested WHERE key = 7;
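-- A nested column cannot be renamed out of its Nested structure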
ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO not_nested_x; --{serverError 36}
-- Currently not implemented
ALTER TABLE table_for_rename_nested RENAME COLUMN n TO renamed_n; --{serverError 48}
ALTER TABLE table_for_rename_nested RENAME COLUMN value1 TO renamed_value1;
SELECT renamed_value1 FROM table_for_rename_nested WHERE key = 7;
SHOW CREATE TABLE table_for_rename_nested;
SELECT * FROM table_for_rename_nested WHERE key = 7 FORMAT TSVWithNames;
DROP TABLE IF EXISTS table_for_rename_nested;

View File

@@ -0,0 +1,55 @@
DROP TABLE IF EXISTS table_for_rename_pk;
CREATE TABLE table_for_rename_pk
(
date Date,
key1 UInt64,
key2 UInt64,
key3 UInt64,
value1 String,
value2 String
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_pk', '1')
PARTITION BY date
ORDER BY (key1, pow(key2, 2), key3);
INSERT INTO table_for_rename_pk SELECT toDate('2019-10-01') + number % 3, number, number, number, toString(number), toString(number) from numbers(9);
SELECT key1, value1 FROM table_for_rename_pk WHERE key1 = 1 AND key2 = 1 AND key3 = 1;
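-- Columns used in the sorting key cannot be renamed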
ALTER TABLE table_for_rename_pk RENAME COLUMN key1 TO renamed_key1; --{serverError 44}
ALTER TABLE table_for_rename_pk RENAME COLUMN key3 TO renamed_key3; --{serverError 44}
ALTER TABLE table_for_rename_pk RENAME COLUMN key2 TO renamed_key2; --{serverError 44}
DROP TABLE IF EXISTS table_for_rename_pk;
DROP TABLE IF EXISTS table_for_rename_with_primary_key;
CREATE TABLE table_for_rename_with_primary_key
(
date Date,
key1 UInt64,
key2 UInt64,
key3 UInt64,
value1 String,
value2 String,
INDEX idx (value1) TYPE set(1) GRANULARITY 1
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_pk', '1')
PARTITION BY date
ORDER BY (key1, key2, key3)
PRIMARY KEY (key1, key2);
INSERT INTO table_for_rename_with_primary_key SELECT toDate('2019-10-01') + number % 3, number, number, number, toString(number), toString(number) from numbers(9);
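-- Columns used in the primary key, the sorting key or a secondary index cannot be renamed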
ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key1 TO renamed_key1; --{serverError 44}
ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key2 TO renamed_key2; --{serverError 44}
ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key3 TO renamed_key3; --{serverError 44}
ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN value1 TO renamed_value1; --{serverError 44}
DROP TABLE IF EXISTS table_for_rename_with_primary_key;

View File

@@ -0,0 +1,17 @@
date key value1 value2
2019-10-02 1 1 Hello 1
CREATE TABLE default.table_rename_with_default (`date` Date, `key` UInt64, `value1` String, `value2` String DEFAULT concat(\'Hello \', value1), `value3` String ALIAS concat(\'Word \', value1)) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
date key renamed_value1 value2
2019-10-02 1 1 Hello 1
CREATE TABLE default.table_rename_with_default (`date` Date, `key` UInt64, `renamed_value1` String, `value2` String DEFAULT concat(\'Hello \', renamed_value1), `value3` String ALIAS concat(\'Word \', renamed_value1)) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
Hello 1
Word 1
date1 date2 value1 value2
2019-10-02 2018-10-02 1 1
CREATE TABLE default.table_rename_with_ttl (`date1` Date, `date2` Date, `value1` String, `value2` String TTL date1 + toIntervalMonth(10000)) ENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\') ORDER BY tuple() TTL date2 + toIntervalMonth(10000) SETTINGS index_granularity = 8192
renamed_date1 date2 value1 value2
2019-10-02 2018-10-02 1 1
CREATE TABLE default.table_rename_with_ttl (`renamed_date1` Date, `date2` Date, `value1` String, `value2` String TTL renamed_date1 + toIntervalMonth(10000)) ENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\') ORDER BY tuple() TTL date2 + toIntervalMonth(10000) SETTINGS index_granularity = 8192
renamed_date1 renamed_date2 value1 value2
2019-10-02 2018-10-02 1 1
CREATE TABLE default.table_rename_with_ttl (`renamed_date1` Date, `renamed_date2` Date, `value1` String, `value2` String TTL renamed_date1 + toIntervalMonth(10000)) ENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\') ORDER BY tuple() TTL renamed_date2 + toIntervalMonth(10000) SETTINGS index_granularity = 8192

View File

@@ -0,0 +1,63 @@
DROP TABLE IF EXISTS table_rename_with_default;
CREATE TABLE table_rename_with_default
(
date Date,
key UInt64,
value1 String,
value2 String DEFAULT concat('Hello ', value1),
value3 String ALIAS concat('Word ', value1)
)
ENGINE = MergeTree()
PARTITION BY date
ORDER BY key;
INSERT INTO table_rename_with_default (date, key, value1) SELECT toDate('2019-10-01') + number % 3, number, toString(number) from numbers(9);
SELECT * FROM table_rename_with_default WHERE key = 1 FORMAT TSVWithNames;
SHOW CREATE TABLE table_rename_with_default;
ALTER TABLE table_rename_with_default RENAME COLUMN value1 TO renamed_value1;
SELECT * FROM table_rename_with_default WHERE key = 1 FORMAT TSVWithNames;
SHOW CREATE TABLE table_rename_with_default;
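-- The DEFAULT and ALIAS expressions now reference the renamed column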
SELECT value2 FROM table_rename_with_default WHERE key = 1;
SELECT value3 FROM table_rename_with_default WHERE key = 1;
DROP TABLE IF EXISTS table_rename_with_default;
DROP TABLE IF EXISTS table_rename_with_ttl;
CREATE TABLE table_rename_with_ttl
(
date1 Date,
date2 Date,
value1 String,
value2 String TTL date1 + INTERVAL 10000 MONTH
)
ENGINE = ReplicatedMergeTree('/clickhouse/test/table_rename_with_ttl', '1')
ORDER BY tuple()
TTL date2 + INTERVAL 10000 MONTH;
INSERT INTO table_rename_with_ttl SELECT toDate('2019-10-01') + number % 3, toDate('2018-10-01') + number % 3, toString(number), toString(number) from numbers(9);
SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames;
SHOW CREATE TABLE table_rename_with_ttl;
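-- Both the column TTL and the table TTL expressions must follow the renames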
ALTER TABLE table_rename_with_ttl RENAME COLUMN date1 TO renamed_date1;
SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames;
SHOW CREATE TABLE table_rename_with_ttl;
ALTER TABLE table_rename_with_ttl RENAME COLUMN date2 TO renamed_date2;
SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames;
SHOW CREATE TABLE table_rename_with_ttl;
DROP TABLE IF EXISTS table_rename_with_ttl;

View File

@@ -0,0 +1,6 @@
[8,9,10]
['a','b','c']
CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.x` Array(UInt32), `n.y` Array(String), `value1` String) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.renamed_x` Array(UInt32), `n.renamed_y` Array(String), `value1` String) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
7 [8,9,10]
7 ['a','b','c']

View File

@@ -0,0 +1,38 @@
DROP TABLE IF EXISTS table_for_rename_nested;
CREATE TABLE table_for_rename_nested
(
date Date,
key UInt64,
n Nested(x UInt32, y String),
value1 String
)
ENGINE = MergeTree()
PARTITION BY date
ORDER BY key;
INSERT INTO table_for_rename_nested (date, key, n.x, n.y, value1) SELECT toDate('2019-10-01'), number, [number + 1, number + 2, number + 3], ['a', 'b', 'c'], toString(number) FROM numbers(10);
SELECT n.x FROM table_for_rename_nested WHERE key = 7;
SELECT n.y FROM table_for_rename_nested WHERE key = 7;
SHOW CREATE TABLE table_for_rename_nested;
ALTER TABLE table_for_rename_nested RENAME COLUMN n.x TO n.renamed_x;
ALTER TABLE table_for_rename_nested RENAME COLUMN n.y TO n.renamed_y;
SHOW CREATE TABLE table_for_rename_nested;
SELECT key, n.renamed_x FROM table_for_rename_nested WHERE key = 7;
SELECT key, n.renamed_y FROM table_for_rename_nested WHERE key = 7;
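-- Renaming a column into or out of a Nested structure is not allowed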
ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO not_nested_x; --{serverError 36}
ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO q.renamed_x; --{serverError 36}
ALTER TABLE table_for_rename_nested RENAME COLUMN value1 TO q.renamed_x; --{serverError 36}
-- Currently not implemented
ALTER TABLE table_for_rename_nested RENAME COLUMN n TO renamed_n; --{serverError 48}
DROP TABLE IF EXISTS table_for_rename_nested;