Merge branch 'master' of github.com:yandex/ClickHouse

Ivan Blinkov, 2019-03-29 11:56:58 +03:00, commit 907a2a2159
140 changed files with 1949 additions and 974 deletions

View File

@@ -26,7 +26,7 @@
 * Fixed undefined behaviour in the `dictIsIn` function for dictionaries of the `cache` type. [#4515](https://github.com/yandex/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
 * Fixed a deadlock when a SELECT query locks the same table several times (for example, from different threads, or while executing different subqueries) and a DDL query is executed at the same time. [#4535](https://github.com/yandex/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
 * The `compile_expressions` setting is disabled by default until we pin the sources of the `LLVM` library in use and start checking it under `ASan` (currently the LLVM library is taken from the system). [#4579](https://github.com/yandex/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin))
-* Fixed a crash via `std::terminate` when `invalidate_query` for external dictionaries with the `clickhouse` source returned an incorrect result (empty; more than one row; more than one column). Also fixed a bug that caused `invalidate_query` to run every five seconds regardless of the specified `lifetime`. [#4583](https://github.com/yandex/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fixed a crash via `std::terminate` when `invalidate_query` for external dictionaries with the `clickhouse` source returned an incorrect result (empty; more than one row; more than one column). Also fixed a bug that caused `invalidate_query` to run every five seconds regardless of the specified `lifetime`. (This hunk only corrects a spelling error in the Russian original: «истоником» → «источником».) [#4583](https://github.com/yandex/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
 * Fixed a deadlock when the `invalidate_query` for an external dictionary with the `clickhouse` source used the `system.dictionaries` table or a database of type `Dictionary` (a rare case). [#4599](https://github.com/yandex/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov))
 * Fixed CROSS JOIN with an empty WHERE. [#4598](https://github.com/yandex/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2))
 * Fixed a segfault in the `replicate` function with a constant argument. [#4603](https://github.com/yandex/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov))

View File

@@ -1,8 +1,11 @@
 project(ClickHouse)
 cmake_minimum_required(VERSION 3.3)
 cmake_policy(SET CMP0023 NEW)
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/")
+set(CMAKE_EXPORT_COMPILE_COMMANDS 1) # Write compile_commands.json
+set(CMAKE_LINK_DEPENDS_NO_SHARED 1) # Do not relink all depended targets on .so
+set(CMAKE_CONFIGURATION_TYPES "RelWithDebInfo;Debug;Release;MinSizeRel" CACHE STRING "" FORCE)
+set(CMAKE_DEBUG_POSTFIX "d" CACHE STRING "Generate debug library name with a postfix.") # To be consistent with CMakeLists from contrib libs.
 option(ENABLE_IPO "Enable inter-procedural optimization (aka LTO)" OFF) # need cmake 3.9+
 if(ENABLE_IPO)
@@ -38,9 +41,6 @@ if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git" AND NOT EXISTS "${ClickHouse_SOURC
     message (FATAL_ERROR "Submodules are not initialized. Run\n\tgit submodule update --init --recursive")
 endif ()
-# Write compile_commands.json
-set(CMAKE_EXPORT_COMPILE_COMMANDS 1)
 include (cmake/find_ccache.cmake)
 if (NOT CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "None")
@@ -50,8 +50,6 @@ endif ()
 string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC)
 message (STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")
-set (CMAKE_CONFIGURATION_TYPES "RelWithDebInfo;Debug;Release;MinSizeRel" CACHE STRING "" FORCE)
-set (CMAKE_DEBUG_POSTFIX "d" CACHE STRING "Generate debug library name with a postfix.") # To be consistent with CMakeLists from contrib libs.
 option (USE_STATIC_LIBRARIES "Set to FALSE to use shared libraries" ON)
 option (MAKE_STATIC_LIBRARIES "Set to FALSE to make shared libraries" ${USE_STATIC_LIBRARIES})

contrib/librdkafka (vendored)

@@ -1 +1 @@
-Subproject commit 73295a702cd1c85c11749ade500d713db7099cca
+Subproject commit 8695b9d63ac0fe1b891b511d5b36302ffc84d4e2

View File

@@ -93,6 +93,7 @@ if (CLICKHOUSE_ONE_SHARED)
     target_link_libraries(clickhouse-lib ${CLICKHOUSE_SERVER_LINK} ${CLICKHOUSE_CLIENT_LINK} ${CLICKHOUSE_LOCAL_LINK} ${CLICKHOUSE_BENCHMARK_LINK} ${CLICKHOUSE_PERFORMANCE_TEST_LINK} ${CLICKHOUSE_COPIER_LINK} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_LINK} ${CLICKHOUSE_COMPRESSOR_LINK} ${CLICKHOUSE_FORMAT_LINK} ${CLICKHOUSE_OBFUSCATOR_LINK} ${CLICKHOUSE_COMPILER_LINK} ${CLICKHOUSE_ODBC_BRIDGE_LINK})
     target_include_directories(clickhouse-lib ${CLICKHOUSE_SERVER_INCLUDE} ${CLICKHOUSE_CLIENT_INCLUDE} ${CLICKHOUSE_LOCAL_INCLUDE} ${CLICKHOUSE_BENCHMARK_INCLUDE} ${CLICKHOUSE_PERFORMANCE_TEST_INCLUDE} ${CLICKHOUSE_COPIER_INCLUDE} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_INCLUDE} ${CLICKHOUSE_COMPRESSOR_INCLUDE} ${CLICKHOUSE_FORMAT_INCLUDE} ${CLICKHOUSE_OBFUSCATOR_INCLUDE} ${CLICKHOUSE_COMPILER_INCLUDE} ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE})
     set_target_properties(clickhouse-lib PROPERTIES SOVERSION ${VERSION_MAJOR}.${VERSION_MINOR} VERSION ${VERSION_SO} OUTPUT_NAME clickhouse DEBUG_POSTFIX "")
+    install (TARGETS clickhouse-lib LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT clickhouse)
 endif()
 if (CLICKHOUSE_SPLIT_BINARY)

View File

@@ -1,5 +1,11 @@
 #!/bin/sh
+# Helper for split build mode.
+# Allows to run commands like
+# clickhouse client
+# clickhouse server
+# ...
 set -e
 CMD=$1
 shift

View File

@@ -42,6 +42,7 @@
 #include <IO/ReadBufferFromString.h>
 #include <IO/ReadHelpers.h>
 #include <IO/WriteHelpers.h>
+#include <IO/Operators.h>
 #include <IO/UseSSL.h>
 #include <DataStreams/AsynchronousBlockInputStream.h>
 #include <DataStreams/AddingDefaultsBlockInputStream.h>
@@ -1314,6 +1315,9 @@ private:
         /// Received data block is immediately displayed to the user.
         block_out_stream->flush();
+
+        /// Restore progress bar after data block.
+        writeProgress();
     }
@@ -1353,8 +1357,8 @@ private:
     void clearProgress()
     {
-        std::cerr << RESTORE_CURSOR_POSITION CLEAR_TO_END_OF_LINE;
         written_progress_chars = 0;
+        std::cerr << RESTORE_CURSOR_POSITION CLEAR_TO_END_OF_LINE;
     }
@@ -1363,6 +1367,9 @@ private:
        if (!need_render_progress)
            return;
+
+        /// Output all progress bar commands to stderr at once to avoid flicker.
+        WriteBufferFromFileDescriptor message(STDERR_FILENO, 1024);
        static size_t increment = 0;
        static const char * indicators[8] =
        {
@@ -1377,13 +1384,15 @@ private:
        };
        if (written_progress_chars)
-           clearProgress();
+           message << RESTORE_CURSOR_POSITION CLEAR_TO_END_OF_LINE;
        else
-           std::cerr << SAVE_CURSOR_POSITION;
+           message << SAVE_CURSOR_POSITION;
+        message << DISABLE_LINE_WRAPPING;
+        size_t prefix_size = message.count();
-        std::stringstream message;
        message << indicators[increment % 8]
-            << std::fixed << std::setprecision(3)
            << " Progress: ";
        message
@@ -1398,8 +1407,7 @@ private:
        else
            message << ". ";
-        written_progress_chars = message.str().size() - (increment % 8 == 7 ? 10 : 13);
+        written_progress_chars = message.count() - prefix_size - (increment % 8 == 7 ? 10 : 13);    /// Don't count invisible output (escape sequences).
-        std::cerr << DISABLE_LINE_WRAPPING << message.rdbuf();
        /// If the approximate number of rows to process is known, we can display a progress bar and percentage.
        if (progress.total_rows > 0)
@@ -1421,19 +1429,21 @@ private:
                if (width_of_progress_bar > 0)
                {
                    std::string bar = UnicodeBar::render(UnicodeBar::getWidth(progress.rows, 0, total_rows_corrected, width_of_progress_bar));
-                   std::cerr << "\033[0;32m" << bar << "\033[0m";
+                   message << "\033[0;32m" << bar << "\033[0m";
                    if (width_of_progress_bar > static_cast<ssize_t>(bar.size() / UNICODE_BAR_CHAR_SIZE))
-                       std::cerr << std::string(width_of_progress_bar - bar.size() / UNICODE_BAR_CHAR_SIZE, ' ');
+                       message << std::string(width_of_progress_bar - bar.size() / UNICODE_BAR_CHAR_SIZE, ' ');
                }
            }
        }
        /// Underestimate percentage a bit to avoid displaying 100%.
-       std::cerr << ' ' << (99 * progress.rows / total_rows_corrected) << '%';
+       message << ' ' << (99 * progress.rows / total_rows_corrected) << '%';
    }
-   std::cerr << ENABLE_LINE_WRAPPING;
+   message << ENABLE_LINE_WRAPPING;
    ++increment;
+
+   message.next();
 }
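The idea behind this change is worth spelling out: render the entire progress line, escape sequences included, into one in-memory buffer and hand it to the terminal in a single write, so the terminal never repaints a half-drawn line. A minimal standalone sketch of the same idea, using plain POSIX write() and literal ANSI/DEC escape sequences instead of ClickHouse's WriteBufferFromFileDescriptor and macros (names and the message layout here are illustrative):

#include <cstddef>
#include <string>
#include <unistd.h>

// Build the complete progress line in memory, then emit it with one
// write(2). Interleaving many small writes lets the terminal display a
// half-drawn line, which is visible as flicker.
void renderProgress(size_t rows, size_t total_rows)
{
    std::string out;
    out += "\0338";     // restore saved cursor position (DECRC)
    out += "\033[K";    // clear to end of line
    out += "\033[?7l";  // disable line wrapping
    out += "Progress: " + std::to_string(rows) + " / " + std::to_string(total_rows) + " rows.";
    out += "\033[?7h";  // re-enable line wrapping
    (void)::write(STDERR_FILENO, out.data(), out.size());  // single syscall
}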

View File

@@ -39,7 +39,7 @@ private:
         "DATABASES", "LIKE", "PROCESSLIST", "CASE", "WHEN", "THEN", "ELSE", "END", "DESCRIBE", "DESC", "USE", "SET", "OPTIMIZE", "FINAL", "DEDUPLICATE",
         "INSERT", "VALUES", "SELECT", "DISTINCT", "SAMPLE", "ARRAY", "JOIN", "GLOBAL", "LOCAL", "ANY", "ALL", "INNER", "LEFT", "RIGHT", "FULL", "OUTER",
         "CROSS", "USING", "PREWHERE", "WHERE", "GROUP", "BY", "WITH", "TOTALS", "HAVING", "ORDER", "COLLATE", "LIMIT", "UNION", "AND", "OR", "ASC", "IN",
-        "KILL", "QUERY", "SYNC", "ASYNC", "TEST", "BETWEEN"
+        "KILL", "QUERY", "SYNC", "ASYNC", "TEST", "BETWEEN", "TRUNCATE"
     };
     /// Words are fetched asynchonously.

View File

@@ -67,10 +67,10 @@ struct UniqVariadicHash<false, true>
     {
         UInt64 hash;
-        const Columns & tuple_columns = static_cast<const ColumnTuple *>(columns[0])->getColumns();
-        const ColumnPtr * column = tuple_columns.data();
-        const ColumnPtr * columns_end = column + num_args;
+        const auto & tuple_columns = static_cast<const ColumnTuple *>(columns[0])->getColumns();
+        const auto * column = tuple_columns.data();
+        const auto * columns_end = column + num_args;
         {
             StringRef value = column->get()->getDataAt(row_num);
@@ -116,10 +116,10 @@ struct UniqVariadicHash<true, true>
 {
     static inline UInt128 apply(size_t num_args, const IColumn ** columns, size_t row_num)
     {
-        const Columns & tuple_columns = static_cast<const ColumnTuple *>(columns[0])->getColumns();
-        const ColumnPtr * column = tuple_columns.data();
-        const ColumnPtr * columns_end = column + num_args;
+        const auto & tuple_columns = static_cast<const ColumnTuple *>(columns[0])->getColumns();
+        const auto * column = tuple_columns.data();
+        const auto * columns_end = column + num_args;
         SipHash hash;

View File

@@ -576,7 +576,7 @@ ColumnPtr ColumnArray::filterTuple(const Filter & filt, ssize_t result_size_hint
     /// Make temporary arrays for each components of Tuple, then filter and collect back.
-    size_t tuple_size = tuple.getColumns().size();
+    size_t tuple_size = tuple.tupleSize();
     if (tuple_size == 0)
         throw Exception("Logical error: empty tuple", ErrorCodes::LOGICAL_ERROR);
@@ -941,7 +941,7 @@ ColumnPtr ColumnArray::replicateTuple(const Offsets & replicate_offsets) const
     /// Make temporary arrays for each components of Tuple. In the same way as for Nullable.
-    size_t tuple_size = tuple.getColumns().size();
+    size_t tuple_size = tuple.tupleSize();
     if (tuple_size == 0)
         throw Exception("Logical error: empty tuple", ErrorCodes::LOGICAL_ERROR);

View File

@@ -81,15 +81,15 @@ public:
     bool hasEqualOffsets(const ColumnArray & other) const;
     /** More efficient methods of manipulation */
-    IColumn & getData() { return data->assumeMutableRef(); }
+    IColumn & getData() { return *data; }
     const IColumn & getData() const { return *data; }
-    IColumn & getOffsetsColumn() { return offsets->assumeMutableRef(); }
+    IColumn & getOffsetsColumn() { return *offsets; }
     const IColumn & getOffsetsColumn() const { return *offsets; }
     Offsets & ALWAYS_INLINE getOffsets()
     {
-        return static_cast<ColumnOffsets &>(offsets->assumeMutableRef()).getData();
+        return static_cast<ColumnOffsets &>(*offsets).getData();
     }
     const Offsets & ALWAYS_INLINE getOffsets() const
@@ -124,8 +124,8 @@ public:
     }
 private:
-    ColumnPtr data;
-    ColumnPtr offsets;
+    WrappedPtr data;
+    WrappedPtr offsets;
     size_t ALWAYS_INLINE offsetAt(ssize_t i) const { return getOffsets()[i - 1]; }
     size_t ALWAYS_INLINE sizeAt(ssize_t i) const { return getOffsets()[i] - getOffsets()[i - 1]; }

View File

@@ -23,7 +23,7 @@ class ColumnConst final : public COWPtrHelper<IColumn, ColumnConst>
 private:
     friend class COWPtrHelper<IColumn, ColumnConst>;
-    ColumnPtr data;
+    WrappedPtr data;
     size_t s;
     ColumnConst(const ColumnPtr & data, size_t s);
@@ -141,9 +141,8 @@
     const char * deserializeAndInsertFromArena(const char * pos) override
     {
-        auto & mutable_data = data->assumeMutableRef();
-        auto res = mutable_data.deserializeAndInsertFromArena(pos);
-        mutable_data.popBack(1);
+        auto res = data->deserializeAndInsertFromArena(pos);
+        data->popBack(1);
         ++s;
         return res;
     }
@@ -208,11 +207,9 @@
     /// Not part of the common interface.
-    IColumn & getDataColumn() { return data->assumeMutableRef(); }
+    IColumn & getDataColumn() { return *data; }
     const IColumn & getDataColumn() const { return *data; }
-    //MutableColumnPtr getDataColumnMutablePtr() { return data; }
     const ColumnPtr & getDataColumnPtr() const { return data; }
-    //ColumnPtr & getDataColumnPtr() { return data; }
     Field getField() const { return getDataColumn()[0]; }

View File

@@ -522,7 +522,7 @@ void ColumnLowCardinality::Index::insertPosition(UInt64 position)
     while (position > getMaxPositionForCurrentType())
         expandType();
-    positions->assumeMutableRef().insert(position);
+    positions->insert(position);
     checkSizeOfType();
 }
@@ -540,7 +540,7 @@ void ColumnLowCardinality::Index::insertPositionsRange(const IColumn & column, U
         convertPositions<ColumnType>();
     if (size_of_type == sizeof(ColumnType))
-        positions->assumeMutableRef().insertRangeFrom(column, offset, limit);
+        positions->insertRangeFrom(column, offset, limit);
     else
     {
         auto copy = [&](auto cur_type)

View File

@@ -149,10 +149,10 @@ public:
     const IColumnUnique & getDictionary() const { return dictionary.getColumnUnique(); }
     const ColumnPtr & getDictionaryPtr() const { return dictionary.getColumnUniquePtr(); }
-    /// IColumnUnique & getUnique() { return static_cast<IColumnUnique &>(*column_unique->assumeMutable()); }
+    /// IColumnUnique & getUnique() { return static_cast<IColumnUnique &>(*column_unique); }
     /// ColumnPtr getUniquePtr() const { return column_unique; }
-    /// IColumn & getIndexes() { return idx.getPositions()->assumeMutableRef(); }
+    /// IColumn & getIndexes() { return *idx.getPositions(); }
     const IColumn & getIndexes() const { return *idx.getPositions(); }
     const ColumnPtr & getIndexesPtr() const { return idx.getPositions(); }
     size_t getSizeOfIndexType() const { return idx.getSizeOfIndexType(); }
@@ -202,13 +202,13 @@ public:
         explicit Index(ColumnPtr positions);
         const ColumnPtr & getPositions() const { return positions; }
-        ColumnPtr & getPositionsPtr() { return positions; }
+        WrappedPtr & getPositionsPtr() { return positions; }
         size_t getPositionAt(size_t row) const;
         void insertPosition(UInt64 position);
         void insertPositionsRange(const IColumn & column, UInt64 offset, UInt64 limit);
-        void popBack(size_t n) { positions->assumeMutableRef().popBack(n); }
-        void reserve(size_t n) { positions->assumeMutableRef().reserve(n); }
+        void popBack(size_t n) { positions->popBack(n); }
+        void reserve(size_t n) { positions->reserve(n); }
         UInt64 getMaxPositionForCurrentType() const;
@@ -224,7 +224,7 @@ public:
         void countKeys(ColumnUInt64::Container & counts) const;
     private:
-        ColumnPtr positions;
+        WrappedPtr positions;
         size_t size_of_type = 0;
         void updateSizeOfType() { size_of_type = getSizeOfIndexType(*positions, size_of_type); }
@@ -252,10 +252,10 @@ private:
         explicit Dictionary(ColumnPtr column_unique, bool is_shared);
         const ColumnPtr & getColumnUniquePtr() const { return column_unique; }
-        ColumnPtr & getColumnUniquePtr() { return column_unique; }
+        WrappedPtr & getColumnUniquePtr() { return column_unique; }
         const IColumnUnique & getColumnUnique() const { return static_cast<const IColumnUnique &>(*column_unique); }
-        IColumnUnique & getColumnUnique() { return static_cast<IColumnUnique &>(column_unique->assumeMutableRef()); }
+        IColumnUnique & getColumnUnique() { return static_cast<IColumnUnique &>(*column_unique); }
         /// Dictionary may be shared for several mutable columns.
         /// Immutable columns may have the same column unique, which isn't necessarily shared dictionary.
@@ -266,7 +266,7 @@ private:
         void compact(ColumnPtr & positions);
     private:
-        ColumnPtr column_unique;
+        WrappedPtr column_unique;
         bool shared = false;
         void checkColumn(const IColumn & column);

View File

@@ -106,16 +106,15 @@ public:
     /// Return the column that represents values.
-    IColumn & getNestedColumn() { return nested_column->assumeMutableRef(); }
+    IColumn & getNestedColumn() { return *nested_column; }
     const IColumn & getNestedColumn() const { return *nested_column; }
     const ColumnPtr & getNestedColumnPtr() const { return nested_column; }
     /// Return the column that represents the byte map.
-    //ColumnPtr & getNullMapColumnPtr() { return null_map; }
     const ColumnPtr & getNullMapColumnPtr() const { return null_map; }
-    ColumnUInt8 & getNullMapColumn() { return static_cast<ColumnUInt8 &>(null_map->assumeMutableRef()); }
+    ColumnUInt8 & getNullMapColumn() { return static_cast<ColumnUInt8 &>(*null_map); }
     const ColumnUInt8 & getNullMapColumn() const { return static_cast<const ColumnUInt8 &>(*null_map); }
     NullMap & getNullMapData() { return getNullMapColumn().getData(); }
@@ -134,8 +133,8 @@ public:
     void checkConsistency() const;
 private:
-    ColumnPtr nested_column;
-    ColumnPtr null_map;
+    WrappedPtr nested_column;
+    WrappedPtr null_map;
     template <bool negative>
     void applyNullMapImpl(const ColumnUInt8 & map);

View File

@@ -47,6 +47,18 @@ ColumnTuple::ColumnTuple(MutableColumns && mutable_columns)
 }
 ColumnTuple::Ptr ColumnTuple::create(const Columns & columns)
+{
+    for (const auto & column : columns)
+        if (column->isColumnConst())
+            throw Exception{"ColumnTuple cannot have ColumnConst as its element", ErrorCodes::ILLEGAL_COLUMN};
+
+    auto column_tuple = ColumnTuple::create(MutableColumns());
+    column_tuple->columns.assign(columns.begin(), columns.end());
+
+    return column_tuple;
+}
+
+ColumnTuple::Ptr ColumnTuple::create(const TupleColumns & columns)
 {
     for (const auto & column : columns)
         if (column->isColumnConst())
@@ -101,7 +113,7 @@ void ColumnTuple::insert(const Field & x)
         throw Exception("Cannot insert value of different size into tuple", ErrorCodes::CANNOT_INSERT_VALUE_OF_DIFFERENT_SIZE_INTO_TUPLE);
     for (size_t i = 0; i < tuple_size; ++i)
-        columns[i]->assumeMutableRef().insert(tuple[i]);
+        columns[i]->insert(tuple[i]);
 }
 void ColumnTuple::insertFrom(const IColumn & src_, size_t n)
@@ -113,19 +125,19 @@ void ColumnTuple::insertFrom(const IColumn & src_, size_t n)
         throw Exception("Cannot insert value of different size into tuple", ErrorCodes::CANNOT_INSERT_VALUE_OF_DIFFERENT_SIZE_INTO_TUPLE);
     for (size_t i = 0; i < tuple_size; ++i)
-        columns[i]->assumeMutableRef().insertFrom(*src.columns[i], n);
+        columns[i]->insertFrom(*src.columns[i], n);
 }
 void ColumnTuple::insertDefault()
 {
     for (auto & column : columns)
-        column->assumeMutableRef().insertDefault();
+        column->insertDefault();
 }
 void ColumnTuple::popBack(size_t n)
 {
     for (auto & column : columns)
-        column->assumeMutableRef().popBack(n);
+        column->popBack(n);
 }
 StringRef ColumnTuple::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
@@ -140,7 +152,7 @@ StringRef ColumnTuple::serializeValueIntoArena(size_t n, Arena & arena, char con
 const char * ColumnTuple::deserializeAndInsertFromArena(const char * pos)
 {
     for (auto & column : columns)
-        pos = column->assumeMutableRef().deserializeAndInsertFromArena(pos);
+        pos = column->deserializeAndInsertFromArena(pos);
     return pos;
 }
@@ -155,7 +167,7 @@ void ColumnTuple::insertRangeFrom(const IColumn & src, size_t start, size_t leng
 {
     const size_t tuple_size = columns.size();
     for (size_t i = 0; i < tuple_size; ++i)
-        columns[i]->assumeMutableRef().insertRangeFrom(
+        columns[i]->insertRangeFrom(
             *static_cast<const ColumnTuple &>(src).columns[i],
             start, length);
 }
@@ -238,21 +250,19 @@ int ColumnTuple::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_dire
 template <bool positive>
 struct ColumnTuple::Less
 {
-    ColumnRawPtrs plain_columns;
+    TupleColumns columns;
     int nan_direction_hint;
-    Less(const Columns & columns, int nan_direction_hint_)
-        : nan_direction_hint(nan_direction_hint_)
+    Less(const TupleColumns & columns, int nan_direction_hint_)
+        : columns(columns), nan_direction_hint(nan_direction_hint_)
     {
-        for (const auto & column : columns)
-            plain_columns.push_back(column.get());
     }
     bool operator() (size_t a, size_t b) const
     {
-        for (ColumnRawPtrs::const_iterator it = plain_columns.begin(); it != plain_columns.end(); ++it)
+        for (const auto & column : columns)
         {
-            int res = (*it)->compareAt(a, b, **it, nan_direction_hint);
+            int res = column->compareAt(a, b, *column, nan_direction_hint);
             if (res < 0)
                 return positive;
             else if (res > 0)
@@ -319,7 +329,7 @@ size_t ColumnTuple::allocatedBytes() const
 void ColumnTuple::protect()
 {
     for (auto & column : columns)
-        column->assumeMutableRef().protect();
+        column->protect();
 }
 void ColumnTuple::getExtremes(Field & min, Field & max) const

View File

@@ -17,7 +17,8 @@ class ColumnTuple final : public COWPtrHelper<IColumn, ColumnTuple>
 private:
     friend class COWPtrHelper<IColumn, ColumnTuple>;
-    Columns columns;
+    using TupleColumns = std::vector<WrappedPtr>;
+    TupleColumns columns;
     template <bool positive>
     struct Less;
@@ -31,6 +32,7 @@ public:
      */
     using Base = COWPtrHelper<IColumn, ColumnTuple>;
     static Ptr create(const Columns & columns);
+    static Ptr create(const TupleColumns & columns);
     static Ptr create(Columns && arg) { return create(arg); }
     template <typename Arg, typename = typename std::enable_if<std::is_rvalue_reference<Arg &&>::value>::type>
@@ -78,9 +80,10 @@ public:
     size_t tupleSize() const { return columns.size(); }
     const IColumn & getColumn(size_t idx) const { return *columns[idx]; }
-    IColumn & getColumn(size_t idx) { return columns[idx]->assumeMutableRef(); }
+    IColumn & getColumn(size_t idx) { return *columns[idx]; }
-    const Columns & getColumns() const { return columns; }
+    const TupleColumns & getColumns() const { return columns; }
+    Columns getColumnsCopy() const { return {columns.begin(), columns.end()}; }
     const ColumnPtr & getColumnPtr(size_t idx) const { return columns[idx]; }
 };

View File

@@ -80,7 +80,7 @@ public:
     bool isNumeric() const override { return column_holder->isNumeric(); }
     size_t byteSize() const override { return column_holder->byteSize(); }
-    void protect() override { column_holder->assumeMutableRef().protect(); }
+    void protect() override { column_holder->protect(); }
     size_t allocatedBytes() const override
     {
         return column_holder->allocatedBytes()
@@ -108,14 +108,14 @@
 private:
-    ColumnPtr column_holder;
+    IColumn::WrappedPtr column_holder;
     bool is_nullable;
     size_t size_of_value_if_fixed = 0;
     ReverseIndex<UInt64, ColumnType> index;
     /// For DataTypeNullable, stores null map.
-    ColumnPtr nested_null_mask;
-    ColumnPtr nested_column_nullable;
+    IColumn::WrappedPtr nested_null_mask;
+    IColumn::WrappedPtr nested_column_nullable;
     class IncrementalHash
     {
@@ -138,7 +138,7 @@ private:
     static size_t numSpecialValues(bool is_nullable) { return is_nullable ? 2 : 1; }
     size_t numSpecialValues() const { return numSpecialValues(is_nullable); }
-    ColumnType * getRawColumnPtr() { return static_cast<ColumnType *>(column_holder->assumeMutable().get()); }
+    ColumnType * getRawColumnPtr() { return static_cast<ColumnType *>(column_holder.get()); }
     const ColumnType * getRawColumnPtr() const { return static_cast<const ColumnType *>(column_holder.get()); }
     template <typename IndexType>
@@ -230,10 +230,7 @@ void ColumnUnique<ColumnType>::updateNullMask()
         size_t size = getRawColumnPtr()->size();
         if (nested_null_mask->size() != size)
-        {
-            IColumn & null_mask = nested_null_mask->assumeMutableRef();
-            static_cast<ColumnUInt8 &>(null_mask).getData().resize_fill(size);
-        }
+            static_cast<ColumnUInt8 &>(*nested_null_mask).getData().resize_fill(size);
     }
 }

View File

@@ -259,7 +259,7 @@ public:
     /// If the column contains subcolumns (such as Array, Nullable, etc), do callback on them.
     /// Shallow: doesn't do recursive calls; don't do call for itself.
-    using ColumnCallback = std::function<void(Ptr&)>;
+    using ColumnCallback = std::function<void(WrappedPtr&)>;
     virtual void forEachSubcolumn(ColumnCallback) {}
     /// Columns have equal structure.
@@ -272,8 +272,8 @@ public:
     MutablePtr mutate() const &&
     {
-        MutablePtr res = COWPtr<IColumn>::mutate();
-        res->forEachSubcolumn([](Ptr & subcolumn) { subcolumn = (*std::move(subcolumn)).mutate(); });
+        MutablePtr res = shallowMutate();
+        res->forEachSubcolumn([](WrappedPtr & subcolumn) { subcolumn = std::move(*subcolumn).mutate(); });
         return res;
     }

View File

@@ -50,7 +50,7 @@
     /// Change value of x.
     {
         /// Creating mutable ptr. It can clone an object under the hood if it was shared.
-        Column::MutablePtr mutate_x = x->mutate();
+        Column::MutablePtr mutate_x = std::move(*x).mutate();
         /// Using non-const methods of an object.
         mutate_x->set(2);
         /// Assigning pointer 'x' to mutated object.
@@ -175,7 +175,8 @@ public:
     Ptr getPtr() const { return static_cast<Ptr>(derived()); }
     MutablePtr getPtr() { return static_cast<MutablePtr>(derived()); }
-    MutablePtr mutate() const
+protected:
+    MutablePtr shallowMutate() const
     {
         if (this->use_count() > 1)
             return derived()->clone();
@@ -183,6 +184,12 @@
         return assumeMutable();
     }
+public:
+    MutablePtr mutate() const &&
+    {
+        return shallowMutate();
+    }
     MutablePtr assumeMutable() const
     {
         return const_cast<COWPtr*>(this)->getPtr();
@@ -192,6 +199,56 @@
     {
         return const_cast<Derived &>(*derived());
     }
+protected:
+    /// It works as immutable_ptr if it is const and as mutable_ptr if it is non const.
+    template <typename T>
+    class chameleon_ptr
+    {
+    private:
+        immutable_ptr<T> value;
+
+    public:
+        template <typename... Args>
+        chameleon_ptr(Args &&... args) : value(std::forward<Args>(args)...) {}
+
+        template <typename U>
+        chameleon_ptr(std::initializer_list<U> && arg) : value(std::forward<std::initializer_list<U>>(arg)) {}
+
+        const T * get() const { return value.get(); }
+        T * get() { return value->assumeMutable().get(); }
+
+        const T * operator->() const { return get(); }
+        T * operator->() { return get(); }
+
+        const T & operator*() const { return *value; }
+        T & operator*() { return value->assumeMutableRef(); }
+
+        operator const immutable_ptr<T> & () const { return value; }
+        operator immutable_ptr<T> & () { return value; }
+
+        operator bool() const { return value != nullptr; }
+        bool operator! () const { return value == nullptr; }
+
+        bool operator== (const chameleon_ptr & rhs) const { return value == rhs.value; }
+        bool operator!= (const chameleon_ptr & rhs) const { return value != rhs.value; }
+    };
+
+public:
+    /** Use this type in class members for compositions.
+      *
+      * NOTE:
+      * For classes with WrappedPtr members,
+      * you must reimplement 'mutate' method, so it will call 'mutate' of all subobjects (do deep mutate).
+      * It will guarantee, that mutable object have all subobjects unshared.
+      *
+      * NOTE:
+      * If you override 'mutate' method in inherited classes, don't forget to make it virtual in base class or to make it call a virtual method.
+      * (COWPtr itself doesn't force any methods to be virtual).
+      *
+      * See example in "cow_compositions.cpp".
+      */
+    using WrappedPtr = chameleon_ptr<Derived>;
 };
@@ -217,6 +274,8 @@
   * IColumn
   * CowPtr<IColumn>
   * boost::intrusive_ref_counter<IColumn>
+  *
+  * See example in "cow_columns.cpp".
   */
 template <typename Base, typename Derived>
 class COWPtrHelper : public Base
@@ -236,25 +295,7 @@ public:
     static MutablePtr create(std::initializer_list<T> && arg) { return create(std::forward<std::initializer_list<T>>(arg)); }
     typename Base::MutablePtr clone() const override { return typename Base::MutablePtr(new Derived(*derived())); }
+protected:
+    MutablePtr shallowMutate() const { return MutablePtr(static_cast<Derived *>(Base::shallowMutate().get())); }
 };
-/** Compositions.
-  *
-  * Sometimes your objects contain another objects, and you have tree-like structure.
-  * And you want non-const methods of your object to also modify your subobjects.
-  *
-  * There are the following possible solutions:
-  *
-  * 1. Store subobjects as immutable ptrs. Call mutate method of subobjects inside non-const methods of your objects; modify them and assign back.
-  *    Drawback: additional checks inside methods: CPU overhead on atomic ops.
-  *
-  * 2. Store subobjects as mutable ptrs. Subobjects cannot be shared in another objects.
-  *    Drawback: it's not possible to share subobjects.
-  *
-  * 3. Store subobjects as immutable ptrs. Implement copy-constructor to do shallow copy.
-  *    But reimplement 'mutate' method, so it will call 'mutate' of all subobjects (do deep mutate).
-  *    It will guarantee, that mutable object have all subobjects unshared.
-  *    From non-const method, you can modify subobjects with 'assumeMutableRef' method.
-  *    Drawback: it's more complex than other solutions.
-  */

View File

@@ -420,6 +420,7 @@ namespace ErrorCodes
     extern const int NO_COMMON_COLUMNS_WITH_PROTOBUF_SCHEMA = 443;
     extern const int UNKNOWN_PROTOBUF_FORMAT = 444;
     extern const int CANNOT_MPROTECT = 445;
+    extern const int FUNCTION_NOT_ALLOWED = 446;
     extern const int KEEPER_EXCEPTION = 999;
     extern const int POCO_EXCEPTION = 1000;

View File

@@ -1,17 +1,14 @@
+#include "Exception.h"
 #include <string.h>
 #include <cxxabi.h>
 #include <Poco/String.h>
 #include <common/logger_useful.h>
 #include <IO/WriteHelpers.h>
 #include <IO/Operators.h>
 #include <IO/ReadBufferFromString.h>
-#include <Common/Exception.h>
 #include <common/demangle.h>
+#include <Common/config_version.h>
 namespace DB
 {
@@ -24,6 +21,10 @@ namespace ErrorCodes
     extern const int CANNOT_TRUNCATE_FILE;
 }
+const char * getVersion()
+{
+    return VERSION_STRING;
+}
 std::string errnoToString(int code, int e)
 {
@@ -81,13 +82,13 @@
     }
     catch (const Exception & e)
     {
-        stream << getExceptionMessage(e, with_stacktrace, check_embedded_stacktrace);
+        stream << "(version " << getVersion() << ") " << getExceptionMessage(e, with_stacktrace, check_embedded_stacktrace);
     }
     catch (const Poco::Exception & e)
     {
         try
         {
-            stream << "Poco::Exception. Code: " << ErrorCodes::POCO_EXCEPTION << ", e.code() = " << e.code()
+            stream << "(version " << getVersion() << ") " << "Poco::Exception. Code: " << ErrorCodes::POCO_EXCEPTION << ", e.code() = " << e.code()
                 << ", e.displayText() = " << e.displayText();
         }
         catch (...) {}
@@ -102,7 +103,7 @@
             if (status)
                 name += " (demangling status: " + toString(status) + ")";
-            stream << "std::exception. Code: " << ErrorCodes::STD_EXCEPTION << ", type: " << name << ", e.what() = " << e.what();
+            stream << "(version " << getVersion() << ") " << "std::exception. Code: " << ErrorCodes::STD_EXCEPTION << ", type: " << name << ", e.what() = " << e.what();
         }
         catch (...) {}
     }
@@ -116,7 +117,7 @@
             if (status)
                 name += " (demangling status: " + toString(status) + ")";
-            stream << "Unknown exception. Code: " << ErrorCodes::UNKNOWN_EXCEPTION << ", type: " << name;
+            stream << "(version " << getVersion() << ") " << "Unknown exception. Code: " << ErrorCodes::UNKNOWN_EXCEPTION << ", type: " << name;
         }
         catch (...) {}
     }

View File

@@ -82,5 +82,8 @@ target_link_libraries (allocator PRIVATE clickhouse_common_io)
 add_executable (cow_columns cow_columns.cpp)
 target_link_libraries (cow_columns PRIVATE clickhouse_common_io)
+add_executable (cow_compositions cow_compositions.cpp)
+target_link_libraries (cow_compositions PRIVATE clickhouse_common_io)
 add_executable (stopwatch stopwatch.cpp)
 target_link_libraries (stopwatch PRIVATE clickhouse_common_io)

View File

@@ -53,7 +53,7 @@ int main(int, char **)
     std::cerr << "addresses: " << x.get() << ", " << y.get() << "\n";
     {
-        MutableColumnPtr mut = y->mutate();
+        MutableColumnPtr mut = std::move(*y).mutate();
         mut->set(2);
         std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << ", " << mut->use_count() << "\n";
@@ -72,7 +72,7 @@
     std::cerr << "addresses: " << x.get() << ", " << y.get() << "\n";
     {
-        MutableColumnPtr mut = y->mutate();
+        MutableColumnPtr mut = std::move(*y).mutate();
         mut->set(3);
         std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << ", " << mut->use_count() << "\n";

View File

@@ -0,0 +1,107 @@
+#include <Common/COWPtr.h>
+#include <iostream>
+
+class IColumn : public COWPtr<IColumn>
+{
+private:
+    friend class COWPtr<IColumn>;
+    virtual MutablePtr clone() const = 0;
+    virtual MutablePtr deepMutate() const { return shallowMutate(); }
+
+public:
+    IColumn() = default;
+    IColumn(const IColumn &) = default;
+    virtual ~IColumn() = default;
+
+    virtual int get() const = 0;
+    virtual void set(int value) = 0;
+
+    MutablePtr mutate() const && { return deepMutate(); }
+};
+
+using ColumnPtr = IColumn::Ptr;
+using MutableColumnPtr = IColumn::MutablePtr;
+
+class ConcreteColumn : public COWPtrHelper<IColumn, ConcreteColumn>
+{
+private:
+    friend class COWPtrHelper<IColumn, ConcreteColumn>;
+
+    int data;
+    ConcreteColumn(int data) : data(data) {}
+    ConcreteColumn(const ConcreteColumn &) = default;
+
+public:
+    int get() const override { return data; }
+    void set(int value) override { data = value; }
+};
+
+class ColumnComposition : public COWPtrHelper<IColumn, ColumnComposition>
+{
+private:
+    friend class COWPtrHelper<IColumn, ColumnComposition>;
+
+    ConcreteColumn::WrappedPtr wrapped;
+
+    ColumnComposition(int data) : wrapped(ConcreteColumn::create(data)) {}
+    ColumnComposition(const ColumnComposition &) = default;
+
+    IColumn::MutablePtr deepMutate() const override
+    {
+        std::cerr << "Mutating\n";
+        auto res = shallowMutate();
+        res->wrapped = std::move(*wrapped).mutate();
+        return res;
+    }
+
+public:
+    int get() const override { return wrapped->get(); }
+    void set(int value) override { wrapped->set(value); }
+};
+
+int main(int, char **)
+{
+    ColumnPtr x = ColumnComposition::create(1);
+    ColumnPtr y = x;
+
+    std::cerr << "values: " << x->get() << ", " << y->get() << "\n";
+    std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << "\n";
+    std::cerr << "addresses: " << x.get() << ", " << y.get() << "\n";
+
+    {
+        MutableColumnPtr mut = std::move(*y).mutate();
+        mut->set(2);
+
+        std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << ", " << mut->use_count() << "\n";
+        std::cerr << "addresses: " << x.get() << ", " << y.get() << ", " << mut.get() << "\n";
+        y = std::move(mut);
+    }
+
+    std::cerr << "values: " << x->get() << ", " << y->get() << "\n";
+    std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << "\n";
+    std::cerr << "addresses: " << x.get() << ", " << y.get() << "\n";
+
+    x = ColumnComposition::create(0);
+
+    std::cerr << "values: " << x->get() << ", " << y->get() << "\n";
+    std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << "\n";
+    std::cerr << "addresses: " << x.get() << ", " << y.get() << "\n";
+
+    {
+        MutableColumnPtr mut = std::move(*y).mutate();
+        mut->set(3);
+
+        std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << ", " << mut->use_count() << "\n";
+        std::cerr << "addresses: " << x.get() << ", " << y.get() << ", " << mut.get() << "\n";
+        y = std::move(mut);
+    }
+
+    std::cerr << "values: " << x->get() << ", " << y->get() << "\n";
+    std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << "\n";
+    return 0;
+}

View File

@@ -306,6 +306,8 @@ struct Settings
     M(SettingBool, cancel_http_readonly_queries_on_client_close, false, "Cancel HTTP readonly queries when a client closes the connection without waiting for response.") \
     M(SettingBool, external_table_functions_use_nulls, true, "If it is set to true, external table functions will implicitly use Nullable type if needed. Otherwise NULLs will be substituted with default values. Currently supported only for 'mysql' table function.") \
     M(SettingBool, allow_experimental_data_skipping_indices, false, "If it is set to true, data skipping indices can be used in CREATE TABLE/ALTER TABLE queries.") \
+    \
+    M(SettingBool, allow_hyperscan, true, "Allow functions that use Hyperscan library. Disable to avoid potentially long compilation times and excessive resource usage.") \
 #define DECLARE(TYPE, NAME, DEFAULT, DESCRIPTION) \
     TYPE NAME {DEFAULT};
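Together with the new FUNCTION_NOT_ALLOWED error code (446) introduced above, this setting follows a common gating pattern: a per-query boolean that makes an otherwise valid function refuse to run. A self-contained sketch of that pattern (plain structs instead of ClickHouse's Settings/Exception machinery; the wiring to the actual Hyperscan-based functions is an assumption, it is not shown in this diff):

#include <iostream>
#include <stdexcept>
#include <string>

// Mirrors the error code added in this commit.
namespace ErrorCodes { constexpr int FUNCTION_NOT_ALLOWED = 446; }

// Stand-in for the per-query settings object.
struct Settings { bool allow_hyperscan = true; };

// A gated function implementation would call this before doing any work.
void checkHyperscanAllowed(const Settings & settings)
{
    if (!settings.allow_hyperscan)
        throw std::runtime_error(
            "Hyperscan functions are disabled (error code "
            + std::to_string(ErrorCodes::FUNCTION_NOT_ALLOWED) + ")");
}

int main()
{
    Settings settings;
    settings.allow_hyperscan = false;  // e.g. the user ran: SET allow_hyperscan = 0
    try
    {
        checkHyperscanAllowed(settings);
    }
    catch (const std::exception & e)
    {
        std::cerr << e.what() << "\n";  // query fails fast instead of compiling patterns
    }
    return 0;
}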

View File

@@ -80,9 +80,9 @@ std::ostream & operator<<(std::ostream & stream, const IColumn & what)
     stream << "{";
     for (size_t i = 0; i < what.size(); ++i)
     {
-        stream << applyVisitor(FieldVisitorDump(), what[i]);
         if (i)
             stream << ", ";
+        stream << applyVisitor(FieldVisitorDump(), what[i]);
     }
     stream << "}";

View File

@@ -60,12 +60,11 @@ Block ColumnGathererStream::readImpl()
     if (!source_to_fully_copy && row_sources_buf.eof())
         return Block();
+    MutableColumnPtr output_column = column.column->cloneEmpty();
     output_block = Block{column.cloneEmpty()};
-    MutableColumnPtr output_column = output_block.getByPosition(0).column->assumeMutable();
     output_column->gather(*this);
     if (!output_column->empty())
         output_block.getByPosition(0).column = std::move(output_column);
     return output_block;
 }

View File

@@ -69,7 +69,7 @@ ColumnPtr recursiveRemoveLowCardinality(const ColumnPtr & column)
     if (const auto * column_tuple = typeid_cast<const ColumnTuple *>(column.get()))
     {
-        Columns columns = column_tuple->getColumns();
+        auto columns = column_tuple->getColumns();
         for (auto & element : columns)
             element = recursiveRemoveLowCardinality(element);
         return ColumnTuple::create(columns);
@@ -142,7 +142,7 @@ ColumnPtr recursiveLowCardinalityConversion(const ColumnPtr & column, const Data
             throw Exception("Unexpected column " + column->getName() + " for type " + from_type->getName(),
                             ErrorCodes::ILLEGAL_COLUMN);
-        Columns columns = column_tuple->getColumns();
+        auto columns = column_tuple->getColumns();
         auto & from_elements = from_tuple_type->getElements();
         auto & to_elements = to_tuple_type->getElements();

View File

@@ -101,7 +101,7 @@ Block flatten(const Block & block)
             const ColumnPtr & column_offsets = column_array->getOffsetsPtr();
             const ColumnTuple & column_tuple = typeid_cast<const ColumnTuple &>(column_array->getData());
-            const Columns & element_columns = column_tuple.getColumns();
+            const auto & element_columns = column_tuple.getColumns();
             for (size_t i = 0; i < tuple_size; ++i)
             {

View File

@@ -206,28 +206,42 @@ CapnProtoRowInputStream::CapnProtoRowInputStream(ReadBuffer & istr_, const Block
     createActions(list, root);
 }
+kj::Array<capnp::word> CapnProtoRowInputStream::readMessage()
+{
+    uint32_t segment_count;
+    istr.readStrict(reinterpret_cast<char*>(&segment_count), sizeof(uint32_t));
+
+    // one for segmentCount and one because segmentCount starts from 0
+    const auto prefix_size = (2 + segment_count) * sizeof(uint32_t);
+    const auto words_prefix_size = (segment_count + 1) / 2 + 1;
+    auto prefix = kj::heapArray<capnp::word>(words_prefix_size);
+    auto prefix_chars = prefix.asChars();
+    ::memcpy(prefix_chars.begin(), &segment_count, sizeof(uint32_t));
+
+    // read size of each segment
+    for (size_t i = 0; i <= segment_count; ++i)
+        istr.readStrict(prefix_chars.begin() + ((i + 1) * sizeof(uint32_t)), sizeof(uint32_t));
+
+    // calculate size of message
+    const auto expected_words = capnp::expectedSizeInWordsFromPrefix(prefix);
+    const auto expected_bytes = expected_words * sizeof(capnp::word);
+    const auto data_size = expected_bytes - prefix_size;
+    auto msg = kj::heapArray<capnp::word>(expected_words);
+    auto msg_chars = msg.asChars();
+
+    // read full message
+    ::memcpy(msg_chars.begin(), prefix_chars.begin(), prefix_size);
+    istr.readStrict(msg_chars.begin() + prefix_size, data_size);
+
+    return msg;
+}
+
 bool CapnProtoRowInputStream::read(MutableColumns & columns, RowReadExtension &)
 {
     if (istr.eof())
         return false;
-    // Read from underlying buffer directly
-    auto buf = istr.buffer();
-    auto base = reinterpret_cast<const capnp::word *>(istr.position());
-
-    // Check if there's enough bytes in the buffer to read the full message
-    kj::Array<capnp::word> heap_array;
-    auto array = kj::arrayPtr(base, buf.size() - istr.offset());
-    auto expected_words = capnp::expectedSizeInWordsFromPrefix(array);
-    if (expected_words * sizeof(capnp::word) > array.size())
-    {
-        // We'll need to reassemble the message in a contiguous buffer
-        heap_array = kj::heapArray<capnp::word>(expected_words);
-        istr.readStrict(heap_array.asChars().begin(), heap_array.asChars().size());
-        array = heap_array.asPtr();
-    }
+    auto array = readMessage();
 #if CAPNP_VERSION >= 8000
     capnp::UnalignedFlatArrayMessageReader msg(array);
@@ -281,13 +295,6 @@ bool CapnProtoRowInputStream::read(MutableColumns & columns, RowReadExtension &)
         }
     }
-    // Advance buffer position if used directly
-    if (heap_array.size() == 0)
-    {
-        auto parsed = (msg.getEnd() - base) * sizeof(capnp::word);
-        istr.position() += parsed;
-    }
-
     return true;
 }

View File

@@ -38,6 +38,8 @@ public:
     bool read(MutableColumns & columns, RowReadExtension &) override;
 private:
+    kj::Array<capnp::word> readMessage();
+
     // Build a traversal plan from a sorted list of fields
     void createActions(const NestedFieldList & sortedFields, capnp::StructSchema reader);
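The size arithmetic in readMessage() follows the Cap'n Proto stream framing: the message starts with a little-endian uint32 holding (number of segments - 1), followed by one uint32 per segment giving its length in 8-byte words, with the table rounded up to a whole word. A small self-contained sketch of just that arithmetic (illustrative values; capnp::word is 8 bytes):

#include <cstdint>
#include <cstdio>

int main()
{
    // Raw value read from the stream; per the Cap'n Proto encoding it
    // stores the segment count minus one, so 1 means two segments.
    uint32_t segment_count = 1;

    // One uint32 for the count itself plus (segment_count + 1) segment
    // sizes -- hence "2 + segment_count" in readMessage() above.
    size_t prefix_bytes = (2 + segment_count) * sizeof(uint32_t);

    // The same table measured in 8-byte words, rounded up; this is the
    // "(segment_count + 1) / 2 + 1" expression in the diff.
    size_t prefix_words = (segment_count + 1) / 2 + 1;

    std::printf("segment table: %zu bytes, %zu words (%zu bytes padded)\n",
                prefix_bytes, prefix_words, prefix_words * 8);
    return 0;
}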

View File

@@ -33,7 +33,7 @@ const ColumnConst * checkAndGetColumnConstStringOrFixedString(const IColumn * co
 Columns convertConstTupleToConstantElements(const ColumnConst & column)
 {
     const ColumnTuple & src_tuple = static_cast<const ColumnTuple &>(column.getDataColumn());
-    const Columns & src_tuple_columns = src_tuple.getColumns();
+    const auto & src_tuple_columns = src_tuple.getColumns();
     size_t tuple_size = src_tuple_columns.size();
     size_t rows = column.size();

View File

@@ -932,12 +932,12 @@ private:
        if (x_const)
            x_columns = convertConstTupleToConstantElements(*x_const);
        else
-            x_columns = static_cast<const ColumnTuple &>(*c0.column).getColumns();
+            x_columns = static_cast<const ColumnTuple &>(*c0.column).getColumnsCopy();

        if (y_const)
            y_columns = convertConstTupleToConstantElements(*y_const);
        else
-            y_columns = static_cast<const ColumnTuple &>(*c1.column).getColumns();
+            y_columns = static_cast<const ColumnTuple &>(*c1.column).getColumnsCopy();

        for (size_t i = 0; i < tuple_size; ++i)
        {
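The getColumns()/getColumnsCopy() switch in this hunk and the ones that follow tracks an API split in ColumnTuple: getColumns() now hands out a reference to the tuple's internal column vector, while getColumnsCopy() materializes an owned copy. A hedged sketch of the distinction (paraphrased stand-in types, not the real declaration):

#include <memory>
#include <vector>

struct IColumnStub {};                         // stand-in for DB::IColumn
using ColumnPtr = std::shared_ptr<const IColumnStub>;
using Columns = std::vector<ColumnPtr>;

class ColumnTupleSketch
{
public:
    // Borrow: valid only while the tuple is alive; no allocation.
    const Columns & getColumns() const { return columns; }

    // Detach: the caller owns the vector and may keep or mutate it freely.
    Columns getColumnsCopy() const { return columns; }

private:
    Columns columns;
};

Call sites that merely iterate bind the reference with `const auto &`; call sites that store the result into a longer-lived `Columns` variable, as in the comparison and dictionary functions here, take the copy.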

View File

@@ -167,7 +167,7 @@ private:
        if (checkColumn<ColumnTuple>(key_col.get()))
        {
-            const auto & key_columns = static_cast<const ColumnTuple &>(*key_col).getColumns();
+            const auto & key_columns = static_cast<const ColumnTuple &>(*key_col).getColumnsCopy();
            const auto & key_types = static_cast<const DataTypeTuple &>(*key_col_with_type.type).getElements();

            auto out = ColumnUInt8::create(key_col_with_type.column->size());
@@ -353,7 +353,7 @@ private:
        if (checkColumn<ColumnTuple>(key_col.get()))
        {
-            const auto & key_columns = static_cast<const ColumnTuple &>(*key_col).getColumns();
+            const auto & key_columns = static_cast<const ColumnTuple &>(*key_col).getColumnsCopy();
            const auto & key_types = static_cast<const DataTypeTuple &>(*key_col_with_type.type).getElements();

            auto out = ColumnString::create();
@@ -580,7 +580,7 @@ private:
        /// Functions in external dictionaries only support full-value (not constant) columns with keys.
        ColumnPtr key_col = key_col_with_type.column->convertToFullColumnIfConst();
-        const auto & key_columns = typeid_cast<const ColumnTuple &>(*key_col).getColumns();
+        const auto & key_columns = typeid_cast<const ColumnTuple &>(*key_col).getColumnsCopy();
        const auto & key_types = static_cast<const DataTypeTuple &>(*key_col_with_type.type).getElements();

        auto out = ColumnString::create();
@@ -815,7 +815,7 @@ private:
        if (checkColumn<ColumnTuple>(key_col.get()))
        {
-            const auto & key_columns = static_cast<const ColumnTuple &>(*key_col).getColumns();
+            const auto & key_columns = static_cast<const ColumnTuple &>(*key_col).getColumnsCopy();
            const auto & key_types = static_cast<const DataTypeTuple &>(*key_col_with_type.type).getElements();

            auto out = ColumnVector<Type>::create(key_columns.front()->size());
@@ -1077,7 +1077,7 @@ private:
        /// Functions in external dictionaries only support full-value (not constant) columns with keys.
        ColumnPtr key_col = key_col_with_type.column->convertToFullColumnIfConst();
-        const auto & key_columns = typeid_cast<const ColumnTuple &>(*key_col).getColumns();
+        const auto & key_columns = typeid_cast<const ColumnTuple &>(*key_col).getColumnsCopy();
        const auto & key_types = static_cast<const DataTypeTuple &>(*key_col_with_type.type).getElements();

        /// @todo detect when all key columns are constant

View File

@@ -159,7 +159,7 @@ public:
                ErrorCodes::ILLEGAL_COLUMN);
        }

-        const Columns & tuple_columns = tuple_col->getColumns();
+        const auto & tuple_columns = tuple_col->getColumns();
        const DataTypes & tuple_types = typeid_cast<const DataTypeTuple &>(*block.getByPosition(arguments[0]).type).getElements();
        bool use_float64 = WhichDataType(tuple_types[0]).isFloat64() || WhichDataType(tuple_types[1]).isFloat64();

View File

@@ -818,7 +818,7 @@ private:
        /// Flattening of tuples.
        if (const ColumnTuple * tuple = typeid_cast<const ColumnTuple *>(column))
        {
-            const Columns & tuple_columns = tuple->getColumns();
+            const auto & tuple_columns = tuple->getColumns();
            const DataTypes & tuple_types = typeid_cast<const DataTypeTuple &>(*type).getElements();
            size_t tuple_size = tuple_columns.size();
            for (size_t i = 0; i < tuple_size; ++i)
@@ -826,7 +826,7 @@ private:
        }
        else if (const ColumnTuple * tuple_const = checkAndGetColumnConstData<ColumnTuple>(column))
        {
-            const Columns & tuple_columns = tuple_const->getColumns();
+            const auto & tuple_columns = tuple_const->getColumns();
            const DataTypes & tuple_types = typeid_cast<const DataTypeTuple &>(*type).getElements();
            size_t tuple_size = tuple_columns.size();
            for (size_t i = 0; i < tuple_size; ++i)

View File

@@ -33,6 +33,7 @@ namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
    extern const int ILLEGAL_COLUMN;
+    extern const int TOO_MANY_BYTES;
}
@@ -340,6 +341,7 @@ template <typename Impl>
struct MultiSearchImpl
{
    using ResultType = UInt8;
+    static constexpr bool is_using_hyperscan = false;

    static void vector_constant(
        const ColumnString::Chars & haystack_data,
@@ -355,6 +357,7 @@ template <typename Impl>
struct MultiSearchFirstPositionImpl
{
    using ResultType = UInt64;
+    static constexpr bool is_using_hyperscan = false;

    static void vector_constant(
        const ColumnString::Chars & haystack_data,
@@ -374,6 +377,7 @@ template <typename Impl>
struct MultiSearchFirstIndexImpl
{
    using ResultType = UInt64;
+    static constexpr bool is_using_hyperscan = false;

    static void vector_constant(
        const ColumnString::Chars & haystack_data,
@@ -610,6 +614,7 @@ struct MultiMatchAnyImpl
{
    static_assert(static_cast<int>(FindAny) + static_cast<int>(FindAnyIndex) == 1);
    using ResultType = Type;
+    static constexpr bool is_using_hyperscan = true;

    static void vector_constant(
        const ColumnString::Chars & haystack_data,
@@ -642,14 +647,17 @@ struct MultiMatchAnyImpl
            return 0;
        };
        const size_t haystack_offsets_size = haystack_offsets.size();
-        size_t offset = 0;
+        UInt64 offset = 0;
        for (size_t i = 0; i < haystack_offsets_size; ++i)
        {
+            UInt64 length = haystack_offsets[i] - offset - 1;
+            if (length > std::numeric_limits<UInt32>::max())
+                throw Exception("Too long string to search", ErrorCodes::TOO_MANY_BYTES);
            res[i] = 0;
            hs_scan(
                hyperscan_regex->get(),
                reinterpret_cast<const char *>(haystack_data.data()) + offset,
-                haystack_offsets[i] - offset - 1,
+                length,
                0,
                smart_scratch.get(),
                on_match,
@@ -657,7 +665,7 @@ struct MultiMatchAnyImpl
            offset = haystack_offsets[i];
        }
#else
-        /// Fallback if not an intel processor
+        /// Fallback if do not use hyperscan
        PaddedPODArray<UInt8> accum(res.size());
        memset(res.data(), 0, res.size() * sizeof(res.front()));
        memset(accum.data(), 0, accum.size());
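The guard added above exists because hs_scan() takes the haystack length as an `unsigned int`, while ClickHouse string offsets are 64-bit; without the check, an oversized string would be silently truncated. A small self-contained sketch of the same range check (names are illustrative):

#include <cstdint>
#include <limits>
#include <stdexcept>

// Strings are stored back to back with a trailing '\0', hence the "- 1".
static uint32_t checkedHaystackLength(uint64_t current_offset, uint64_t next_offset)
{
    const uint64_t length = next_offset - current_offset - 1;
    if (length > std::numeric_limits<uint32_t>::max())
        throw std::length_error("Too long string to search");
    return static_cast<uint32_t>(length);  // now safe to pass as unsigned int
}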

View File

@@ -11,6 +11,7 @@
#include <Functions/FunctionHelpers.h>
#include <Functions/IFunction.h>
#include <IO/WriteHelpers.h>
+#include <Interpreters/Context.h>
#include <common/StringRef.h>

namespace DB
@@ -67,6 +68,7 @@ namespace ErrorCodes
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int ILLEGAL_COLUMN;
    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
+    extern const int FUNCTION_NOT_ALLOWED;
}

template <typename Impl, typename Name>
@@ -207,15 +209,11 @@ public:
    String getName() const override { return name; }
    size_t getNumberOfArguments() const override { return 2; }
+    bool useDefaultImplementationForConstants() const override { return true; }
+    ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }

    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    {
-        if (arguments.size() + 1 >= std::numeric_limits<UInt8>::max())
-            throw Exception(
-                "Number of arguments for function " + getName() + " doesn't match: passed " + std::to_string(arguments.size())
-                    + ", should be at most 255.",
-                ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
        if (!isString(arguments[0]))
            throw Exception(
                "Illegal type " + arguments[0]->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
@@ -225,7 +223,6 @@ public:
            throw Exception(
                "Illegal type " + arguments[1]->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);

        return std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>());
    }
@@ -247,6 +244,12 @@ public:
        Array src_arr = col_const_arr->getValue<Array>();

+        if (src_arr.size() > std::numeric_limits<UInt8>::max())
+            throw Exception(
+                "Number of arguments for function " + getName() + " doesn't match: passed " + std::to_string(src_arr.size())
+                    + ", should be at most 255",
+                ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
+
        std::vector<StringRef> refs;
        for (const auto & el : src_arr)
            refs.emplace_back(el.get<String>());
@@ -285,20 +288,22 @@ class FunctionsMultiStringSearch : public IFunction
public:
    static constexpr auto name = Name::name;
-    static FunctionPtr create(const Context &) { return std::make_shared<FunctionsMultiStringSearch>(); }
+    static FunctionPtr create(const Context & context)
+    {
+        if (Impl::is_using_hyperscan && !context.getSettingsRef().allow_hyperscan)
+            throw Exception("Hyperscan functions are disabled, because setting 'allow_hyperscan' is set to 0", ErrorCodes::FUNCTION_NOT_ALLOWED);
+
+        return std::make_shared<FunctionsMultiStringSearch>();
+    }

    String getName() const override { return name; }
    size_t getNumberOfArguments() const override { return 2; }
+    bool useDefaultImplementationForConstants() const override { return true; }
+    ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }

    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    {
-        if (arguments.size() + 1 >= LimitArgs)
-            throw Exception(
-                "Number of arguments for function " + getName() + " doesn't match: passed " + std::to_string(arguments.size())
-                    + ", should be at most " + std::to_string(LimitArgs) + ".",
-                ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
        if (!isString(arguments[0]))
            throw Exception(
                "Illegal type " + arguments[0]->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
@@ -330,6 +335,12 @@ public:
        Array src_arr = col_const_arr->getValue<Array>();

+        if (src_arr.size() > LimitArgs)
+            throw Exception(
+                "Number of arguments for function " + getName() + " doesn't match: passed " + std::to_string(src_arr.size())
+                    + ", should be at most " + std::to_string(LimitArgs),
+                ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
+
        std::vector<StringRef> refs;
        refs.reserve(src_arr.size());
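Note where the 255/LimitArgs cap moved: the needles arrive packed into a single constant Array argument, so the old check on `arguments.size()` in getReturnTypeImpl() could never trip; the rewritten code checks the unpacked array's size at execution time instead. A sketch of the relocated check under those assumptions (the helper name is hypothetical, not from the patch):

#include <stdexcept>
#include <string>
#include <vector>

// Enforce the needle cap on the array's elements, not on the argument count.
static std::vector<std::string> checkedNeedles(const std::vector<std::string> & src_arr, size_t limit_args)
{
    if (src_arr.size() > limit_args)
        throw std::invalid_argument(
            "Number of needles doesn't match: passed " + std::to_string(src_arr.size())
            + ", should be at most " + std::to_string(limit_args));
    return src_arr;
}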

View File

@@ -41,8 +41,7 @@ std::unique_ptr<IArraySink> createArraySink(ColumnArray & col, size_t column_siz
    using Creator = ApplyTypeListForClass<ArraySinkCreator, TypeListNumbers>::Type;
    if (auto column_nullable = typeid_cast<ColumnNullable *>(&col.getData()))
    {
-        auto column = ColumnArray::create(column_nullable->getNestedColumnPtr()->assumeMutable(),
-            col.getOffsetsPtr()->assumeMutable());
+        auto column = ColumnArray::create(column_nullable->getNestedColumnPtr()->assumeMutable(), col.getOffsetsPtr()->assumeMutable());
        return Creator::create(*column, &column_nullable->getNullMapData(), column_size);
    }
    return Creator::create(col, nullptr, column_size);

View File

@@ -678,7 +678,7 @@ bool FunctionArrayElement::executeTuple(Block & block, const ColumnNumbers & arg
    if (!col_nested)
        return false;

-    const Columns & tuple_columns = col_nested->getColumns();
+    const auto & tuple_columns = col_nested->getColumns();
    size_t tuple_size = tuple_columns.size();
    const DataTypes & tuple_types = typeid_cast<const DataTypeTuple &>(

View File

@@ -156,7 +156,7 @@ ColumnPtr FunctionArrayIntersect::castRemoveNullable(const ColumnPtr & column, c
            throw Exception{"Cannot cast tuple column to type "
                            + data_type->getName() + " in function " + getName(), ErrorCodes::LOGICAL_ERROR};

-        auto columns_number = column_tuple->getColumns().size();
+        auto columns_number = column_tuple->tupleSize();
        Columns columns(columns_number);

        const auto & types = tuple_type->getElements();

View File

@@ -545,14 +545,14 @@ private:
        Columns col2_contents;

        if (const ColumnTuple * tuple1 = typeid_cast<const ColumnTuple *>(arg1.column.get()))
-            col1_contents = tuple1->getColumns();
+            col1_contents = tuple1->getColumnsCopy();
        else if (const ColumnConst * const_tuple = checkAndGetColumnConst<ColumnTuple>(arg1.column.get()))
            col1_contents = convertConstTupleToConstantElements(*const_tuple);
        else
            return false;

        if (const ColumnTuple * tuple2 = typeid_cast<const ColumnTuple *>(arg2.column.get()))
-            col2_contents = tuple2->getColumns();
+            col2_contents = tuple2->getColumnsCopy();
        else if (const ColumnConst * const_tuple = checkAndGetColumnConst<ColumnTuple>(arg2.column.get()))
            col2_contents = convertConstTupleToConstantElements(*const_tuple);
        else

View File

@@ -106,7 +106,7 @@ public:
        auto set_types = set->getDataTypes();
        if (tuple && (set_types.size() != 1 || !set_types[0]->equals(*type_tuple)))
        {
-            const Columns & tuple_columns = tuple->getColumns();
+            const auto & tuple_columns = tuple->getColumns();
            const DataTypes & tuple_types = type_tuple->getElements();
            size_t tuple_size = tuple_columns.size();
            for (size_t i = 0; i < tuple_size; ++i)

View File

@@ -79,3 +79,6 @@ target_link_libraries (parse_date_time_best_effort PRIVATE clickhouse_common_io)

add_executable (zlib_ng_bug zlib_ng_bug.cpp)
target_link_libraries (zlib_ng_bug PRIVATE ${Poco_Foundation_LIBRARY})
+if(NOT USE_INTERNAL_POCO_LIBRARY)
+    target_include_directories(zlib_ng_bug SYSTEM BEFORE PRIVATE ${Poco_INCLUDE_DIRS})
+endif()

View File

@@ -58,7 +58,7 @@ namespace
BlockInputStreamPtr createLocalStream(const ASTPtr & query_ast, const Context & context, QueryProcessingStage::Enum processed_stage)
{
-    InterpreterSelectQuery interpreter{query_ast, context, Names{}, processed_stage};
+    InterpreterSelectQuery interpreter{query_ast, context, SelectQueryOptions(processed_stage)};
    BlockInputStreamPtr stream = interpreter.execute().in;

    /** Materialization is needed, since from remote servers the constants come materialized.

View File

@@ -76,7 +76,7 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTSubquery & subquery, ASTPtr
    ASTPtr subquery_select = subquery.children.at(0);
    BlockIO res = InterpreterSelectWithUnionQuery(
-        subquery_select, subquery_context, {}, QueryProcessingStage::Complete, data.subquery_depth + 1).execute();
+        subquery_select, subquery_context, SelectQueryOptions(QueryProcessingStage::Complete, data.subquery_depth + 1)).execute();

    Block block;
    try

View File

@@ -51,7 +51,8 @@ BlockInputStreamPtr InterpreterExplainQuery::executeImpl()
    }
    else if (ast.getKind() == ASTExplainQuery::AnalyzedSyntax)
    {
-        InterpreterSelectWithUnionQuery interpreter(ast.children.at(0), context, {}, QueryProcessingStage::FetchColumns, 0, true, true);
+        InterpreterSelectWithUnionQuery interpreter(ast.children.at(0), context,
+            SelectQueryOptions(QueryProcessingStage::FetchColumns).analyze().modify());
        interpreter.getQuery()->format(IAST::FormatSettings(ss, false));
    }
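All of the interpreter call sites in the hunks above and below trade the positional `(required_result_column_names, to_stage, subquery_depth, only_analyze, modify_inplace)` tail for a single SelectQueryOptions value. A minimal sketch of such a builder, inferred from how the call sites chain it (the real Interpreters/SelectQueryOptions.h may differ in detail):

#include <cstddef>

namespace QueryProcessingStage { enum Enum { FetchColumns, WithMergeableState, Complete }; }

struct SelectQueryOptions
{
    QueryProcessingStage::Enum to_stage;
    size_t subquery_depth;
    bool only_analyze = false;
    bool modify_inplace = false;

    SelectQueryOptions(QueryProcessingStage::Enum stage = QueryProcessingStage::Complete, size_t depth = 0)
        : to_stage(stage), subquery_depth(depth) {}

    SelectQueryOptions copy() const { return *this; }  // detach before chaining mutators

    // Mutators return *this so flags chain into one expression at the call site.
    SelectQueryOptions & analyze(bool value = true) { only_analyze = value; return *this; }
    SelectQueryOptions & modify(bool value = true) { modify_inplace = value; return *this; }
    SelectQueryOptions & noModify() { return modify(false); }
    SelectQueryOptions & noSubquery() { subquery_depth = 0; return *this; }

    // One nesting level deeper; used when spawning an interpreter for a subquery.
    SelectQueryOptions subquery() const
    {
        SelectQueryOptions out = *this;
        out.to_stage = QueryProcessingStage::Complete;
        ++out.subquery_depth;
        return out;
    }
};

So `SelectQueryOptions(QueryProcessingStage::Complete, 1)` stands in for the old `{}, QueryProcessingStage::Complete, 1` argument triple, and chains like `options.copy().analyze().noModify()` replace whole rows of boolean flags.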

View File

@@ -84,12 +84,12 @@ std::unique_ptr<IInterpreter> InterpreterFactory::get(ASTPtr & query, Context &
    {
        /// This is internal part of ASTSelectWithUnionQuery.
        /// Even if there is SELECT without union, it is represented by ASTSelectWithUnionQuery with single ASTSelectQuery as a child.
-        return std::make_unique<InterpreterSelectQuery>(query, context, Names{}, stage);
+        return std::make_unique<InterpreterSelectQuery>(query, context, SelectQueryOptions(stage));
    }
    else if (query->as<ASTSelectWithUnionQuery>())
    {
        ProfileEvents::increment(ProfileEvents::SelectQuery);
-        return std::make_unique<InterpreterSelectWithUnionQuery>(query, context, Names{}, stage);
+        return std::make_unique<InterpreterSelectWithUnionQuery>(query, context, SelectQueryOptions(stage));
    }
    else if (query->as<ASTInsertQuery>())
    {

View File

@@ -128,7 +128,7 @@ BlockIO InterpreterInsertQuery::execute()
    if (query.select)
    {
        /// Passing 1 as subquery_depth will disable limiting size of intermediate result.
-        InterpreterSelectWithUnionQuery interpreter_select{query.select, context, {}, QueryProcessingStage::Complete, 1};
+        InterpreterSelectWithUnionQuery interpreter_select{query.select, context, SelectQueryOptions(QueryProcessingStage::Complete, 1)};

        res.in = interpreter_select.execute().in;

View File

@@ -78,13 +78,9 @@ namespace ErrorCodes
InterpreterSelectQuery::InterpreterSelectQuery(
    const ASTPtr & query_ptr_,
    const Context & context_,
-    const Names & required_result_column_names,
-    QueryProcessingStage::Enum to_stage_,
-    size_t subquery_depth_,
-    bool only_analyze_,
-    bool modify_inplace)
-    : InterpreterSelectQuery(
-        query_ptr_, context_, nullptr, nullptr, required_result_column_names, to_stage_, subquery_depth_, only_analyze_, modify_inplace)
+    const SelectQueryOptions & options,
+    const Names & required_result_column_names)
+    : InterpreterSelectQuery(query_ptr_, context_, nullptr, nullptr, options, required_result_column_names)
{
}
@@ -92,23 +88,17 @@ InterpreterSelectQuery::InterpreterSelectQuery(
    const ASTPtr & query_ptr_,
    const Context & context_,
    const BlockInputStreamPtr & input_,
-    QueryProcessingStage::Enum to_stage_,
-    bool only_analyze_,
-    bool modify_inplace)
-    : InterpreterSelectQuery(query_ptr_, context_, input_, nullptr, Names{}, to_stage_, 0, only_analyze_, modify_inplace)
-{
-}
+    const SelectQueryOptions & options)
+    : InterpreterSelectQuery(query_ptr_, context_, input_, nullptr, options.copy().noSubquery())
+{}

InterpreterSelectQuery::InterpreterSelectQuery(
    const ASTPtr & query_ptr_,
    const Context & context_,
    const StoragePtr & storage_,
-    QueryProcessingStage::Enum to_stage_,
-    bool only_analyze_,
-    bool modify_inplace)
-    : InterpreterSelectQuery(query_ptr_, context_, nullptr, storage_, Names{}, to_stage_, 0, only_analyze_, modify_inplace)
-{
-}
+    const SelectQueryOptions & options)
+    : InterpreterSelectQuery(query_ptr_, context_, nullptr, storage_, options.copy().noSubquery())
+{}

InterpreterSelectQuery::~InterpreterSelectQuery() = default;
@@ -133,17 +123,12 @@ InterpreterSelectQuery::InterpreterSelectQuery(
    const Context & context_,
    const BlockInputStreamPtr & input_,
    const StoragePtr & storage_,
-    const Names & required_result_column_names,
-    QueryProcessingStage::Enum to_stage_,
-    size_t subquery_depth_,
-    bool only_analyze_,
-    bool modify_inplace)
+    const SelectQueryOptions & options_,
+    const Names & required_result_column_names)
+    : options(options_)
    /// NOTE: the query almost always should be cloned because it will be modified during analysis.
-    : query_ptr(modify_inplace ? query_ptr_ : query_ptr_->clone())
+    , query_ptr(options.modify_inplace ? query_ptr_ : query_ptr_->clone())
    , context(context_)
-    , to_stage(to_stage_)
-    , subquery_depth(subquery_depth_)
-    , only_analyze(only_analyze_)
    , storage(storage_)
    , input(input_)
    , log(&Logger::get("InterpreterSelectQuery"))
@@ -151,7 +136,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
    initSettings();
    const Settings & settings = context.getSettingsRef();

-    if (settings.max_subquery_depth && subquery_depth > settings.max_subquery_depth)
+    if (settings.max_subquery_depth && options.subquery_depth > settings.max_subquery_depth)
        throw Exception("Too deep subqueries. Maximum: " + settings.max_subquery_depth.toString(),
            ErrorCodes::TOO_DEEP_SUBQUERIES);
@@ -189,7 +174,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
    {
        /// Read from subquery.
        interpreter_subquery = std::make_unique<InterpreterSelectWithUnionQuery>(
-            table_expression, getSubqueryContext(context), required_columns, QueryProcessingStage::Complete, subquery_depth + 1, only_analyze, modify_inplace);
+            table_expression, getSubqueryContext(context), options.subquery(), required_columns);

        source_header = interpreter_subquery->getSampleBlock();
    }
@@ -215,13 +200,14 @@ InterpreterSelectQuery::InterpreterSelectQuery(
    if (storage)
        table_lock = storage->lockStructureForShare(false, context.getCurrentQueryId());

-    syntax_analyzer_result = SyntaxAnalyzer(context, subquery_depth).analyze(
+    syntax_analyzer_result = SyntaxAnalyzer(context, options).analyze(
        query_ptr, source_header.getNamesAndTypesList(), required_result_column_names, storage);
    query_analyzer = std::make_unique<ExpressionAnalyzer>(
        query_ptr, syntax_analyzer_result, context, NamesAndTypesList(),
-        NameSet(required_result_column_names.begin(), required_result_column_names.end()), subquery_depth, !only_analyze);
+        NameSet(required_result_column_names.begin(), required_result_column_names.end()),
+        options.subquery_depth, !options.only_analyze);

-    if (!only_analyze)
+    if (!options.only_analyze)
    {
        if (query.sample_size() && (input || !storage || !storage->supportsSampling()))
            throw Exception("Illegal SAMPLE: table doesn't support sampling", ErrorCodes::SAMPLING_NOT_SUPPORTED);
@@ -238,7 +224,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
            context.addExternalTable(it.first, it.second);
    }

-    if (!only_analyze || modify_inplace)
+    if (!options.only_analyze || options.modify_inplace)
    {
        if (query_analyzer->isRewriteSubqueriesPredicate())
        {
@@ -247,11 +233,8 @@ InterpreterSelectQuery::InterpreterSelectQuery(
                interpreter_subquery = std::make_unique<InterpreterSelectWithUnionQuery>(
                    table_expression,
                    getSubqueryContext(context),
-                    required_columns,
-                    QueryProcessingStage::Complete,
-                    subquery_depth + 1,
-                    only_analyze,
-                    modify_inplace);
+                    options.subquery(),
+                    required_columns);
        }
    }
@@ -304,7 +287,7 @@ Block InterpreterSelectQuery::getSampleBlock()
BlockIO InterpreterSelectQuery::execute()
{
    Pipeline pipeline;
-    executeImpl(pipeline, input, only_analyze);
+    executeImpl(pipeline, input, options.only_analyze);
    executeUnion(pipeline);

    BlockIO res;
@@ -315,7 +298,7 @@ BlockIO InterpreterSelectQuery::execute()
BlockInputStreams InterpreterSelectQuery::executeWithMultipleStreams()
{
    Pipeline pipeline;
-    executeImpl(pipeline, input, only_analyze);
+    executeImpl(pipeline, input, options.only_analyze);
    return pipeline.streams;
}
@@ -325,10 +308,10 @@ InterpreterSelectQuery::AnalysisResult InterpreterSelectQuery::analyzeExpression
    /// Do I need to perform the first part of the pipeline - running on remote servers during distributed processing.
    res.first_stage = from_stage < QueryProcessingStage::WithMergeableState
-        && to_stage >= QueryProcessingStage::WithMergeableState;
+        && options.to_stage >= QueryProcessingStage::WithMergeableState;
    /// Do I need to execute the second part of the pipeline - running on the initiating server during distributed processing.
    res.second_stage = from_stage <= QueryProcessingStage::WithMergeableState
-        && to_stage > QueryProcessingStage::WithMergeableState;
+        && options.to_stage > QueryProcessingStage::WithMergeableState;

    /** First we compose a chain of actions and remember the necessary steps from it.
     * Regardless of from_stage and to_stage, we will compose a complete sequence of actions to perform optimization and
@@ -553,16 +536,16 @@ void InterpreterSelectQuery::executeImpl(Pipeline & pipeline, const BlockInputSt
        expressions = analyzeExpressions(from_stage, false);

        if (from_stage == QueryProcessingStage::WithMergeableState &&
-            to_stage == QueryProcessingStage::WithMergeableState)
+            options.to_stage == QueryProcessingStage::WithMergeableState)
            throw Exception("Distributed on Distributed is not supported", ErrorCodes::NOT_IMPLEMENTED);

        /** Read the data from Storage. from_stage - to what stage the request was completed in Storage. */
        executeFetchColumns(from_stage, pipeline, expressions.prewhere_info, expressions.columns_to_remove_after_prewhere);

-        LOG_TRACE(log, QueryProcessingStage::toString(from_stage) << " -> " << QueryProcessingStage::toString(to_stage));
+        LOG_TRACE(log, QueryProcessingStage::toString(from_stage) << " -> " << QueryProcessingStage::toString(options.to_stage));
    }

-    if (to_stage > QueryProcessingStage::FetchColumns)
+    if (options.to_stage > QueryProcessingStage::FetchColumns)
    {
        /// Do I need to aggregate in a separate row rows that have not passed max_rows_to_group_by.
        bool aggregate_overflow_row =
@@ -575,7 +558,7 @@ void InterpreterSelectQuery::executeImpl(Pipeline & pipeline, const BlockInputSt
        /// Do I need to immediately finalize the aggregate functions after the aggregation?
        bool aggregate_final =
            expressions.need_aggregate &&
-            to_stage > QueryProcessingStage::WithMergeableState &&
+            options.to_stage > QueryProcessingStage::WithMergeableState &&
            !query.group_by_with_totals && !query.group_by_with_rollup && !query.group_by_with_cube;

        if (expressions.first_stage)
@@ -938,7 +921,7 @@ void InterpreterSelectQuery::executeFetchColumns(
    /// Limitation on the number of columns to read.
    /// It's not applied in 'only_analyze' mode, because the query could be analyzed without removal of unnecessary columns.
-    if (!only_analyze && settings.max_columns_to_read && required_columns.size() > settings.max_columns_to_read)
+    if (!options.only_analyze && settings.max_columns_to_read && required_columns.size() > settings.max_columns_to_read)
        throw Exception("Limit for number of columns to read exceeded. "
            "Requested: " + toString(required_columns.size())
            + ", maximum: " + settings.max_columns_to_read.toString(),
@@ -1000,7 +983,8 @@ void InterpreterSelectQuery::executeFetchColumns(
            throw Exception("Subquery expected", ErrorCodes::LOGICAL_ERROR);

        interpreter_subquery = std::make_unique<InterpreterSelectWithUnionQuery>(
-            subquery, getSubqueryContext(context), required_columns, QueryProcessingStage::Complete, subquery_depth + 1, only_analyze);
+            subquery, getSubqueryContext(context),
+            options.copy().subquery().noModify(), required_columns);

        if (query_analyzer->hasAggregation())
            interpreter_subquery->ignoreWithTotals();
@@ -1057,7 +1041,7 @@ void InterpreterSelectQuery::executeFetchColumns(
         * additionally on each remote server, because these limits are checked per block of data processed,
         * and remote servers may process way more blocks of data than are received by initiator.
         */
-        if (to_stage == QueryProcessingStage::Complete)
+        if (options.to_stage == QueryProcessingStage::Complete)
        {
            limits.min_execution_speed = settings.min_execution_speed;
            limits.max_execution_speed = settings.max_execution_speed;
@@ -1072,7 +1056,7 @@ void InterpreterSelectQuery::executeFetchColumns(
        {
            stream->setLimits(limits);

-            if (to_stage == QueryProcessingStage::Complete)
+            if (options.to_stage == QueryProcessingStage::Complete)
                stream->setQuota(quota);
        });
    }
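The first_stage/second_stage predicates in the hunk above split a distributed query into the half that runs on remote servers (up to WithMergeableState) and the half that runs on the initiator (from WithMergeableState to Complete). A tiny self-contained check of the three interesting cases (stage names abbreviated; illustrative, not ClickHouse code):

#include <cassert>

enum Stage { FetchColumns, WithMergeableState, Complete };

static bool firstStage(Stage from, Stage to)  { return from < WithMergeableState && to >= WithMergeableState; }
static bool secondStage(Stage from, Stage to) { return from <= WithMergeableState && to > WithMergeableState; }

int main()
{
    // Plain local query: both halves run here.
    assert(firstStage(FetchColumns, Complete) && secondStage(FetchColumns, Complete));
    // Remote shard: produce mergeable state only.
    assert(firstStage(FetchColumns, WithMergeableState) && !secondStage(FetchColumns, WithMergeableState));
    // Initiator: merge already-mergeable states to the final result.
    assert(!firstStage(WithMergeableState, Complete) && secondStage(WithMergeableState, Complete));
    return 0;
}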

View File

@@ -3,12 +3,13 @@
#include <memory>

#include <Core/QueryProcessingStage.h>
+#include <Parsers/ASTSelectQuery.h>
#include <DataStreams/IBlockInputStream.h>
#include <Interpreters/Context.h>
#include <Interpreters/ExpressionActions.h>
#include <Interpreters/ExpressionAnalyzer.h>
#include <Interpreters/IInterpreter.h>
-#include <Parsers/ASTSelectQuery.h>
+#include <Interpreters/SelectQueryOptions.h>
#include <Storages/SelectQueryInfo.h>
@@ -23,6 +24,7 @@ class InterpreterSelectWithUnionQuery;
struct SyntaxAnalyzerResult;
using SyntaxAnalyzerResultPtr = std::shared_ptr<const SyntaxAnalyzerResult>;

/** Interprets the SELECT query. Returns the stream of blocks with the results of the query before `to_stage` stage.
 */
class InterpreterSelectQuery : public IInterpreter
@@ -32,14 +34,6 @@ public:
     * query_ptr
     * - A query AST to interpret.
     *
-     * to_stage
-     * - the stage to which the query is to be executed. By default - till to the end.
-     *   You can perform till the intermediate aggregation state, which are combined from different servers for distributed query processing.
-     *
-     * subquery_depth
-     * - to control the limit on the depth of nesting of subqueries. For subqueries, a value that is incremented by one is passed;
-     *   for INSERT SELECT, a value 1 is passed instead of 0.
-     *
     * required_result_column_names
     * - don't calculate all columns except the specified ones from the query
     *   - it is used to remove calculation (and reading) of unnecessary columns from subqueries.
@@ -49,29 +43,22 @@ public:
    InterpreterSelectQuery(
        const ASTPtr & query_ptr_,
        const Context & context_,
-        const Names & required_result_column_names = Names{},
-        QueryProcessingStage::Enum to_stage_ = QueryProcessingStage::Complete,
-        size_t subquery_depth_ = 0,
-        bool only_analyze_ = false,
-        bool modify_inplace = false);
+        const SelectQueryOptions &,
+        const Names & required_result_column_names = Names{});

    /// Read data not from the table specified in the query, but from the prepared source `input`.
    InterpreterSelectQuery(
        const ASTPtr & query_ptr_,
        const Context & context_,
        const BlockInputStreamPtr & input_,
-        QueryProcessingStage::Enum to_stage_ = QueryProcessingStage::Complete,
-        bool only_analyze_ = false,
-        bool modify_inplace = false);
+        const SelectQueryOptions & = {});

    /// Read data not from the table specified in the query, but from the specified `storage_`.
    InterpreterSelectQuery(
        const ASTPtr & query_ptr_,
        const Context & context_,
        const StoragePtr & storage_,
-        QueryProcessingStage::Enum to_stage_ = QueryProcessingStage::Complete,
-        bool only_analyze_ = false,
-        bool modify_inplace = false);
+        const SelectQueryOptions & = {});

    ~InterpreterSelectQuery() override;
@@ -93,11 +80,8 @@ private:
        const Context & context_,
        const BlockInputStreamPtr & input_,
        const StoragePtr & storage_,
-        const Names & required_result_column_names,
-        QueryProcessingStage::Enum to_stage_,
-        size_t subquery_depth_,
-        bool only_analyze_,
-        bool modify_inplace);
+        const SelectQueryOptions &,
+        const Names & required_result_column_names = {});

    ASTSelectQuery & getSelectQuery() { return query_ptr->as<ASTSelectQuery &>(); }
@@ -223,10 +207,9 @@ private:
     */
    void initSettings();

+    const SelectQueryOptions options;
    ASTPtr query_ptr;
    Context context;
-    QueryProcessingStage::Enum to_stage;
-    size_t subquery_depth = 0;
    NamesAndTypesList source_columns;
    SyntaxAnalyzerResultPtr syntax_analyzer_result;
    std::unique_ptr<ExpressionAnalyzer> query_analyzer;
@@ -234,9 +217,6 @@ private:
    /// How many streams we ask for storage to produce, and in how many threads we will do further processing.
    size_t max_streams = 1;

-    /// The object was created only for query analysis.
-    bool only_analyze = false;
-
    /// List of columns to read to execute the query.
    Names required_columns;

    /// Structure of query source (table, subquery, etc).

View File

@@ -26,15 +26,11 @@ namespace ErrorCodes
InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
    const ASTPtr & query_ptr_,
    const Context & context_,
-    const Names & required_result_column_names,
-    QueryProcessingStage::Enum to_stage_,
-    size_t subquery_depth_,
-    bool only_analyze,
-    bool modify_inplace)
-    : query_ptr(query_ptr_),
-    context(context_),
-    to_stage(to_stage_),
-    subquery_depth(subquery_depth_)
+    const SelectQueryOptions & options_,
+    const Names & required_result_column_names)
+    : options(options_),
+    query_ptr(query_ptr_),
+    context(context_)
{
    const auto & ast = query_ptr->as<ASTSelectWithUnionQuery &>();
@@ -57,7 +53,7 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
        /// We use it to determine positions of 'required_result_column_names' in SELECT clause.
        Block full_result_header = InterpreterSelectQuery(
-            ast.list_of_selects->children.at(0), context, Names(), to_stage, subquery_depth, true).getSampleBlock();
+            ast.list_of_selects->children.at(0), context, options.copy().analyze().noModify()).getSampleBlock();

        std::vector<size_t> positions_of_required_result_columns(required_result_column_names.size());
        for (size_t required_result_num = 0, size = required_result_column_names.size(); required_result_num < size; ++required_result_num)
@@ -66,7 +62,7 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
        for (size_t query_num = 1; query_num < num_selects; ++query_num)
        {
            Block full_result_header_for_current_select = InterpreterSelectQuery(
-                ast.list_of_selects->children.at(query_num), context, Names(), to_stage, subquery_depth, true).getSampleBlock();
+                ast.list_of_selects->children.at(query_num), context, options.copy().analyze().noModify()).getSampleBlock();

            if (full_result_header_for_current_select.columns() != full_result_header.columns())
                throw Exception("Different number of columns in UNION ALL elements:\n"
@@ -89,11 +85,8 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
        nested_interpreters.emplace_back(std::make_unique<InterpreterSelectQuery>(
            ast.list_of_selects->children.at(query_num),
            context,
-            current_required_result_column_names,
-            to_stage,
-            subquery_depth,
-            only_analyze,
-            modify_inplace));
+            options,
+            current_required_result_column_names));
    }

    /// Determine structure of the result.
@@ -179,7 +172,7 @@ Block InterpreterSelectWithUnionQuery::getSampleBlock(
        return cache[key];
    }

-    return cache[key] = InterpreterSelectWithUnionQuery(query_ptr, context, {}, QueryProcessingStage::Complete, 0, true).getSampleBlock();
+    return cache[key] = InterpreterSelectWithUnionQuery(query_ptr, context, SelectQueryOptions().analyze()).getSampleBlock();
}

View File

@@ -3,6 +3,7 @@
#include <Core/QueryProcessingStage.h>
#include <Interpreters/Context.h>
#include <Interpreters/IInterpreter.h>
+#include <Interpreters/SelectQueryOptions.h>

namespace DB
@@ -19,11 +20,8 @@ public:
    InterpreterSelectWithUnionQuery(
        const ASTPtr & query_ptr_,
        const Context & context_,
-        const Names & required_result_column_names = Names{},
-        QueryProcessingStage::Enum to_stage_ = QueryProcessingStage::Complete,
-        size_t subquery_depth_ = 0,
-        bool only_analyze = false,
-        bool modify_inplace = false);
+        const SelectQueryOptions &,
+        const Names & required_result_column_names = {});

    ~InterpreterSelectWithUnionQuery() override;
@@ -43,10 +41,9 @@ public:
    ASTPtr getQuery() const { return query_ptr; }

private:
+    const SelectQueryOptions options;
    ASTPtr query_ptr;
    Context context;
-    QueryProcessingStage::Enum to_stage;
-    size_t subquery_depth;

    std::vector<std::unique_ptr<InterpreterSelectQuery>> nested_interpreters;

View File

@@ -32,23 +32,54 @@ namespace ErrorCodes
    extern const int ILLEGAL_COLUMN;
}

-static NameSet requiredRightKeys(const Names & key_names, const NamesAndTypesList & columns_added_by_join)
+static std::unordered_map<String, DataTypePtr> requiredRightKeys(const Names & key_names, const NamesAndTypesList & columns_added_by_join)
{
-    NameSet required;
    NameSet right_keys;
    for (const auto & name : key_names)
        right_keys.insert(name);

+    std::unordered_map<String, DataTypePtr> required;
    for (const auto & column : columns_added_by_join)
+    {
        if (right_keys.count(column.name))
-            required.insert(column.name);
+            required.insert({column.name, column.type});
+    }

    return required;
}
+static void convertColumnToNullable(ColumnWithTypeAndName & column)
+{
+    if (column.type->isNullable())
+        return;
+
+    column.type = makeNullable(column.type);
+    if (column.column)
+        column.column = makeNullable(column.column);
+}
+
+/// Converts column to nullable if needed. No backward convertion.
+static ColumnWithTypeAndName correctNullability(ColumnWithTypeAndName && column, bool nullable)
+{
+    if (nullable)
+        convertColumnToNullable(column);
+    return std::move(column);
+}
+
+static ColumnWithTypeAndName correctNullability(ColumnWithTypeAndName && column, bool nullable, const ColumnUInt8 & negative_null_map)
+{
+    if (nullable)
+    {
+        convertColumnToNullable(column);
+        if (negative_null_map.size())
+        {
+            MutableColumnPtr mutable_column = (*std::move(column.column)).mutate();
+            static_cast<ColumnNullable &>(*mutable_column).applyNegatedNullMap(negative_null_map);
+            column.column = std::move(mutable_column);
+        }
+    }
+    return std::move(column);
+}
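What the negated null map buys here, in miniature: for RIGHT and FULL joins the key columns become Nullable, and rows whose keys found no match must read NULL. The join already produces a filter with 1 for matched rows, so the null map to apply is just its negation. A standalone sketch under that assumption (standard containers instead of IColumn; illustrative only):

#include <cstdint>
#include <vector>

// Build a null map (1 = NULL) from a match filter (1 = key found).
static std::vector<uint8_t> nullMapFromMatchFilter(const std::vector<uint8_t> & matched)
{
    std::vector<uint8_t> null_map(matched.size());
    for (size_t i = 0; i < matched.size(); ++i)
        null_map[i] = !matched[i];  // negate: unmatched rows become NULL
    return null_map;
}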
Join::Join(const Names & key_names_right_, bool use_nulls_, const SizeLimits & limits,
    ASTTableJoin::Kind kind_, ASTTableJoin::Strictness strictness_, bool any_take_last_row_)
@@ -120,56 +151,6 @@ Join::Type Join::chooseMethod(const ColumnRawPtrs & key_columns, Sizes & key_siz
}

-template <typename Maps>
-static void initImpl(Maps & maps, Join::Type type)
-{
-    switch (type)
-    {
-        case Join::Type::EMPTY: break;
-        case Join::Type::CROSS: break;
-
-    #define M(TYPE) \
-        case Join::Type::TYPE: maps.TYPE = std::make_unique<typename decltype(maps.TYPE)::element_type>(); break;
-        APPLY_FOR_JOIN_VARIANTS(M)
-    #undef M
-    }
-}
-
-template <typename Maps>
-static size_t getTotalRowCountImpl(const Maps & maps, Join::Type type)
-{
-    switch (type)
-    {
-        case Join::Type::EMPTY: return 0;
-        case Join::Type::CROSS: return 0;
-
-    #define M(NAME) \
-        case Join::Type::NAME: return maps.NAME ? maps.NAME->size() : 0;
-        APPLY_FOR_JOIN_VARIANTS(M)
-    #undef M
-    }
-
-    __builtin_unreachable();
-}
-
-template <typename Maps>
-static size_t getTotalByteCountImpl(const Maps & maps, Join::Type type)
-{
-    switch (type)
-    {
-        case Join::Type::EMPTY: return 0;
-        case Join::Type::CROSS: return 0;
-
-    #define M(NAME) \
-        case Join::Type::NAME: return maps.NAME ? maps.NAME->getBufferSizeInBytes() : 0;
-        APPLY_FOR_JOIN_VARIANTS(M)
-    #undef M
-    }
-
-    __builtin_unreachable();
-}
-
template <Join::Type type, typename Value, typename Mapped>
struct KeyGetterForTypeImpl;
@@ -227,7 +208,7 @@ void Join::init(Type type_)
    if (kind == ASTTableJoin::Kind::Cross)
        return;
    dispatch(MapInitTag());
-    dispatch([&](auto, auto, auto & map) { initImpl(map, type); });
+    dispatch([&](auto, auto, auto & map) { map.create(type); });
}

size_t Join::getTotalRowCount() const
@@ -241,7 +222,7 @@ size_t Join::getTotalRowCount() const
    }
    else
    {
-        dispatch([&](auto, auto, auto & map) { res += getTotalRowCountImpl(map, type); });
+        dispatch([&](auto, auto, auto & map) { res += map.getTotalRowCount(type); });
    }

    return res;
@@ -258,22 +239,13 @@ size_t Join::getTotalByteCount() const
    }
    else
    {
-        dispatch([&](auto, auto, auto & map) { res += getTotalByteCountImpl(map, type); });
+        dispatch([&](auto, auto, auto & map) { res += map.getTotalByteCountImpl(type); });
        res += pool.size();
    }

    return res;
}

-static void convertColumnToNullable(ColumnWithTypeAndName & column)
-{
-    column.type = makeNullable(column.type);
-    if (column.column)
-        column.column = makeNullable(column.column);
-}
-
void Join::setSampleBlock(const Block & block)
{
    std::unique_lock lock(rwlock);
@@ -526,113 +498,112 @@ bool Join::insertFromBlock(const Block & block)
namespace
{

-template <bool fill_left, ASTTableJoin::Strictness STRICTNESS, typename Map>
-struct Adder;
-
-template <typename Map>
-struct Adder<true, ASTTableJoin::Strictness::Any, Map>
-{
-    static void addFound(const typename Map::mapped_type & mapped, size_t num_columns_to_add, MutableColumns & added_columns,
-        size_t i, IColumn::Filter & filter, IColumn::Offset & /*current_offset*/, IColumn::Offsets * /*offsets*/,
-        const std::vector<size_t> & right_indexes)
-    {
-        filter[i] = 1;
-
-        for (size_t j = 0; j < num_columns_to_add; ++j)
-            added_columns[j]->insertFrom(*mapped.block->getByPosition(right_indexes[j]).column, mapped.row_num);
-    }
-
-    static void addNotFound(size_t num_columns_to_add, MutableColumns & added_columns,
-        size_t i, IColumn::Filter & filter, IColumn::Offset & /*current_offset*/, IColumn::Offsets * /*offsets*/)
-    {
-        filter[i] = 0;
-
-        for (size_t j = 0; j < num_columns_to_add; ++j)
-            added_columns[j]->insertDefault();
-    }
-};
+class AddedColumns
+{
+public:
+    using TypeAndNames = std::vector<std::pair<decltype(ColumnWithTypeAndName::type), decltype(ColumnWithTypeAndName::name)>>;
+
+    AddedColumns(const Block & sample_block_with_columns_to_add,
+                 const Block & block_with_columns_to_add,
+                 const Block & block, size_t num_columns_to_skip)
+    {
+        size_t num_columns_to_add = sample_block_with_columns_to_add.columns();
+
+        columns.reserve(num_columns_to_add);
+        type_name.reserve(num_columns_to_add);
+        right_indexes.reserve(num_columns_to_add);
+
+        for (size_t i = 0; i < num_columns_to_add; ++i)
+        {
+            const ColumnWithTypeAndName & src_column = sample_block_with_columns_to_add.safeGetByPosition(i);
+
+            /// Don't insert column if it's in left block or not explicitly required.
+            if (!block.has(src_column.name) && block_with_columns_to_add.has(src_column.name))
+                addColumn(src_column, num_columns_to_skip + i);
+        }
+    }
+
+    size_t size() const { return columns.size(); }
+
+    ColumnWithTypeAndName moveColumn(size_t i)
+    {
+        return ColumnWithTypeAndName(std::move(columns[i]), type_name[i].first, type_name[i].second);
+    }
+
+    void appendFromBlock(const Block & block, size_t row_num)
+    {
+        for (size_t j = 0; j < right_indexes.size(); ++j)
+            columns[j]->insertFrom(*block.getByPosition(right_indexes[j]).column, row_num);
+    }
+
+    void appendDefaultRow()
+    {
+        for (size_t j = 0; j < right_indexes.size(); ++j)
+            columns[j]->insertDefault();
+    }
+
+private:
+    TypeAndNames type_name;
+    MutableColumns columns;
+    std::vector<size_t> right_indexes;
+
+    void addColumn(const ColumnWithTypeAndName & src_column, size_t idx)
+    {
+        columns.push_back(src_column.column->cloneEmpty());
+        columns.back()->reserve(src_column.column->size());
+        type_name.emplace_back(src_column.type, src_column.name);
+        right_indexes.push_back(idx);
+    }
+};
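The AddedColumns class introduced above replaces the three Adder specializations removed in the rest of this hunk: it gathers the right-table columns to emit, appends either a source row or a default row per probe, and finally moves the finished columns out. For illustration only, the same pattern with standard containers standing in for IColumn:

#include <string>
#include <utility>
#include <vector>

struct AddedStrings
{
    std::vector<std::vector<std::string>> columns;  // one output accumulator per right column
    std::vector<size_t> right_indexes;              // source positions in the right block

    explicit AddedStrings(std::vector<size_t> indexes)
        : columns(indexes.size()), right_indexes(std::move(indexes)) {}

    // A key matched: copy the mapped right-table row into every accumulator.
    void appendFromBlock(const std::vector<std::vector<std::string>> & block, size_t row_num)
    {
        for (size_t j = 0; j < right_indexes.size(); ++j)
            columns[j].push_back(block[right_indexes[j]][row_num]);
    }

    // No match: emit defaults so all output columns stay the same height.
    void appendDefaultRow()
    {
        for (auto & col : columns)
            col.emplace_back();
    }

    // Hand a finished column to the result block without copying.
    std::vector<std::string> moveColumn(size_t i) { return std::move(columns[i]); }
};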
-template <typename Map>
-struct Adder<false, ASTTableJoin::Strictness::Any, Map>
-{
-    static void addFound(const typename Map::mapped_type & mapped, size_t num_columns_to_add, MutableColumns & added_columns,
-        size_t i, IColumn::Filter & filter, IColumn::Offset & /*current_offset*/, IColumn::Offsets * /*offsets*/,
-        const std::vector<size_t> & right_indexes)
-    {
-        filter[i] = 1;
-
-        for (size_t j = 0; j < num_columns_to_add; ++j)
-            added_columns[j]->insertFrom(*mapped.block->getByPosition(right_indexes[j]).column, mapped.row_num);
-    }
-
-    static void addNotFound(size_t /*num_columns_to_add*/, MutableColumns & /*added_columns*/,
-        size_t i, IColumn::Filter & filter, IColumn::Offset & /*current_offset*/, IColumn::Offsets * /*offsets*/)
-    {
-        filter[i] = 0;
-    }
-};
-
-template <bool fill_left, typename Map>
-struct Adder<fill_left, ASTTableJoin::Strictness::All, Map>
-{
-    static void addFound(const typename Map::mapped_type & mapped, size_t num_columns_to_add, MutableColumns & added_columns,
-        size_t i, IColumn::Filter & filter, IColumn::Offset & current_offset, IColumn::Offsets * offsets,
-        const std::vector<size_t> & right_indexes)
-    {
-        filter[i] = 1;
-
-        size_t rows_joined = 0;
-        for (auto current = &static_cast<const typename Map::mapped_type::Base_t &>(mapped); current != nullptr; current = current->next)
-        {
-            for (size_t j = 0; j < num_columns_to_add; ++j)
-                added_columns[j]->insertFrom(*current->block->getByPosition(right_indexes[j]).column.get(), current->row_num);
-            ++rows_joined;
-        }
-
-        current_offset += rows_joined;
-        (*offsets)[i] = current_offset;
-    }
-
-    static void addNotFound(size_t num_columns_to_add, MutableColumns & added_columns,
-        size_t i, IColumn::Filter & filter, IColumn::Offset & current_offset, IColumn::Offsets * offsets)
-    {
-        filter[i] = 0;
-
-        if (!fill_left)
-        {
-            (*offsets)[i] = current_offset;
-        }
-        else
-        {
-            ++current_offset;
-            (*offsets)[i] = current_offset;
-
-            for (size_t j = 0; j < num_columns_to_add; ++j)
-                added_columns[j]->insertDefault();
-        }
-    }
-};
+template <ASTTableJoin::Strictness STRICTNESS, typename Map>
+void addFoundRow(const typename Map::mapped_type & mapped, AddedColumns & added, IColumn::Offset & current_offset [[maybe_unused]])
+{
+    if constexpr (STRICTNESS == ASTTableJoin::Strictness::Any)
+    {
+        added.appendFromBlock(*mapped.block, mapped.row_num);
+    }
+
+    if constexpr (STRICTNESS == ASTTableJoin::Strictness::All)
+    {
+        for (auto current = &static_cast<const typename Map::mapped_type::Base_t &>(mapped); current != nullptr; current = current->next)
+        {
+            added.appendFromBlock(*current->block, current->row_num);
+            ++current_offset;
+        }
+    }
+};
template <ASTTableJoin::Kind KIND, ASTTableJoin::Strictness STRICTNESS, typename KeyGetter, typename Map, bool has_null_map> template <bool _add_missing>
void NO_INLINE joinBlockImplTypeCase( void addNotFoundRow(AddedColumns & added [[maybe_unused]], IColumn::Offset & current_offset [[maybe_unused]])
const Map & map, size_t rows, const ColumnRawPtrs & key_columns, const Sizes & key_sizes,
MutableColumns & added_columns, ConstNullMapPtr null_map, IColumn::Filter & filter,
std::unique_ptr<IColumn::Offsets> & offsets_to_replicate,
const std::vector<size_t> & right_indexes)
{ {
IColumn::Offset current_offset = 0; if constexpr (_add_missing)
size_t num_columns_to_add = right_indexes.size(); {
added.appendDefaultRow();
++current_offset;
}
}
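/// A worked trace (illustrative values, not from the diff): with ALL strictness and a
/// mapped chain of 3 right rows, addFoundRow appends 3 rows and advances the offset by 3;
/// with _add_missing == true an unmatched left row contributes exactly one default row:
///
///     IColumn::Offset current_offset = 0;
///     addFoundRow<ASTTableJoin::Strictness::All, Map>(mapped, added, current_offset);  /// current_offset == 3
///     addNotFoundRow<true>(added, current_offset);                                     /// current_offset == 4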
/// Joins right table columns which indexes are present in right_indexes using specified map.
/// Makes filter (1 if row presented in right table) and returns offsets to replicate (for ALL JOINS).
template <bool _add_missing, ASTTableJoin::Strictness STRICTNESS, typename KeyGetter, typename Map, bool _has_null_map>
std::unique_ptr<IColumn::Offsets> NO_INLINE joinRightIndexedColumns(
    const Map & map, size_t rows, KeyGetter & key_getter,
    AddedColumns & added_columns, ConstNullMapPtr null_map, IColumn::Filter & filter)
{
    std::unique_ptr<IColumn::Offsets> offsets_to_replicate;
    if constexpr (STRICTNESS == ASTTableJoin::Strictness::All)
        offsets_to_replicate = std::make_unique<IColumn::Offsets>(rows);

    IColumn::Offset current_offset = 0;
    Arena pool;

    for (size_t i = 0; i < rows; ++i)
    {
        if (_has_null_map && (*null_map)[i])
        {
            addNotFoundRow<_add_missing>(added_columns, current_offset);
        }
        else
        {
@ -640,46 +611,65 @@ namespace
            if (find_result.isFound())
            {
                filter[i] = 1;
                auto & mapped = find_result.getMapped();
                mapped.setUsed();
                addFoundRow<STRICTNESS, Map>(mapped, added_columns, current_offset);
            }
            else
                addNotFoundRow<_add_missing>(added_columns, current_offset);
        }

        if constexpr (STRICTNESS == ASTTableJoin::Strictness::All)
            (*offsets_to_replicate)[i] = current_offset;
    }

    return offsets_to_replicate;
}
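/// A worked example of the returned offsets (illustrative): offsets_to_replicate holds
/// running prefix sums of appended rows, which downstream code uses to replicate the
/// left-side columns. For per-row match counts {2, 0, 1}:
///
///     with _add_missing == true  (LEFT/FULL): offsets = {2, 3, 4}  /// unmatched row still adds 1 default row
///     with _add_missing == false (INNER):     offsets = {2, 2, 3}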
template <ASTTableJoin::Kind KIND, ASTTableJoin::Strictness STRICTNESS, typename KeyGetter, typename Map>
IColumn::Filter joinRightColumns(
    const Map & map, size_t rows, const ColumnRawPtrs & key_columns, const Sizes & key_sizes,
    AddedColumns & added_columns, ConstNullMapPtr null_map, std::unique_ptr<IColumn::Offsets> & offsets_to_replicate)
{
    constexpr bool left_or_full = static_in_v<KIND, ASTTableJoin::Kind::Left, ASTTableJoin::Kind::Full>;

    IColumn::Filter filter(rows, 0);
    KeyGetter key_getter(key_columns, key_sizes, nullptr);

    if (null_map)
        offsets_to_replicate = joinRightIndexedColumns<left_or_full, STRICTNESS, KeyGetter, Map, true>(
            map, rows, key_getter, added_columns, null_map, filter);
    else
        offsets_to_replicate = joinRightIndexedColumns<left_or_full, STRICTNESS, KeyGetter, Map, false>(
            map, rows, key_getter, added_columns, null_map, filter);

    return filter;
}

template <ASTTableJoin::Kind KIND, ASTTableJoin::Strictness STRICTNESS, typename Maps>
IColumn::Filter switchJoinRightColumns(
    Join::Type type,
    const Maps & maps_, size_t rows, const ColumnRawPtrs & key_columns, const Sizes & key_sizes,
    AddedColumns & added_columns, ConstNullMapPtr null_map,
    std::unique_ptr<IColumn::Offsets> & offsets_to_replicate)
{
    switch (type)
    {
    #define M(TYPE) \
        case Join::Type::TYPE: \
            return joinRightColumns<KIND, STRICTNESS, typename KeyGetterForType<Join::Type::TYPE, const std::remove_reference_t<decltype(*maps_.TYPE)>>::Type>(\
                *maps_.TYPE, rows, key_columns, key_sizes, added_columns, null_map, offsets_to_replicate);
        APPLY_FOR_JOIN_VARIANTS(M)
    #undef M

        default:
            throw Exception("Unknown JOIN keys variant.", ErrorCodes::UNKNOWN_SET_DATA_VARIANT);
    }
}
} /// nameless
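/// For orientation (a sketch, not part of the diff): APPLY_FOR_JOIN_VARIANTS(M) stamps
/// out one case per key layout, so for the keys128 variant visible further below the
/// M(TYPE) macro above expands roughly to:
///
///     case Join::Type::keys128:
///         return joinRightColumns<KIND, STRICTNESS,
///             typename KeyGetterForType<Join::Type::keys128, const std::remove_reference_t<decltype(*maps_.keys128)>>::Type>(
///                 *maps_.keys128, rows, key_columns, key_sizes, added_columns, null_map, offsets_to_replicate);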
template <ASTTableJoin::Kind KIND, ASTTableJoin::Strictness STRICTNESS, typename Maps>
void Join::joinBlockImpl(
@ -714,7 +704,8 @@ void Join::joinBlockImpl(
     * Because if they are constants, then in the "not joined" rows, they may have different values
     * - default values, which can differ from the values of these constants.
     */
    constexpr bool right_or_full = static_in_v<KIND, ASTTableJoin::Kind::Right, ASTTableJoin::Kind::Full>;
    if constexpr (right_or_full)
    {
        for (size_t i = 0; i < existing_columns; ++i)
        {
@ -734,68 +725,39 @@ void Join::joinBlockImpl(
     * but they will not be used at this stage of joining (and will be in `AdderNonJoined`), and they need to be skipped.
     */
    size_t num_columns_to_skip = 0;
    if constexpr (right_or_full)
        num_columns_to_skip = keys_size;

    /// Add new columns to the block.
    AddedColumns added(sample_block_with_columns_to_add, block_with_columns_to_add, block, num_columns_to_skip);

    std::unique_ptr<IColumn::Offsets> offsets_to_replicate;

    IColumn::Filter row_filter = switchJoinRightColumns<KIND, STRICTNESS>(
        type, maps_, block.rows(), key_columns, key_sizes, added, null_map, offsets_to_replicate);

    for (size_t i = 0; i < added.size(); ++i)
        block.insert(added.moveColumn(i));

    /// Filter & insert missing rows
    auto right_keys = requiredRightKeys(key_names_right, columns_added_by_join);

    if constexpr (STRICTNESS == ASTTableJoin::Strictness::Any)
    {
        /// Some trash to represent IColumn::Filter as ColumnUInt8 needed for ColumnNullable::applyNullMap()
        auto null_map_filter_ptr = ColumnUInt8::create();
        ColumnUInt8 & null_map_filter = static_cast<ColumnUInt8 &>(*null_map_filter_ptr);
        null_map_filter.getData().swap(row_filter);
        const IColumn::Filter & filter = null_map_filter.getData();

        constexpr bool inner_or_right = static_in_v<KIND, ASTTableJoin::Kind::Inner, ASTTableJoin::Kind::Right>;
        if constexpr (inner_or_right)
        {
            /// If ANY INNER | RIGHT JOIN - filter all the columns except the new ones.
            for (size_t i = 0; i < existing_columns; ++i)
                block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->filter(filter, -1);

            /// Add join key columns from the right block if they have different names.
            for (size_t i = 0; i < key_names_right.size(); ++i)
@ -803,10 +765,12 @@ void Join::joinBlockImpl(
            {
                auto & right_name = key_names_right[i];
                auto & left_name = key_names_left[i];

                auto it = right_keys.find(right_name);
                if (it != right_keys.end() && !block.has(right_name))
                {
                    const auto & col = block.getByName(left_name);
                    bool is_nullable = it->second->isNullable();
                    block.insert(correctNullability({col.column, col.type, right_name}, is_nullable));
                }
            }
        }
@ -818,27 +782,30 @@ void Join::joinBlockImpl(
                auto & right_name = key_names_right[i];
                auto & left_name = key_names_left[i];

                auto it = right_keys.find(right_name);
                if (it != right_keys.end() && !block.has(right_name))
                {
                    const auto & col = block.getByName(left_name);
                    ColumnPtr column = col.column->convertToFullColumnIfConst();
                    MutableColumnPtr mut_column = column->cloneEmpty();

                    for (size_t row = 0; row < filter.size(); ++row)
                    {
                        if (filter[row])
                            mut_column->insertFrom(*column, row);
                        else
                            mut_column->insertDefault();
                    }

                    bool is_nullable = use_nulls || it->second->isNullable();
                    block.insert(correctNullability({std::move(mut_column), col.type, right_name}, is_nullable, null_map_filter));
                }
            }
        }
    }
    else
    {
        constexpr bool left_or_full = static_in_v<KIND, ASTTableJoin::Kind::Left, ASTTableJoin::Kind::Full>;

        if (!offsets_to_replicate)
            throw Exception("No data to filter columns", ErrorCodes::LOGICAL_ERROR);
@ -848,28 +815,31 @@ void Join::joinBlockImpl(
            auto & right_name = key_names_right[i];
            auto & left_name = key_names_left[i];

            auto it = right_keys.find(right_name);
            if (it != right_keys.end() && !block.has(right_name))
            {
                const auto & col = block.getByName(left_name);
                ColumnPtr column = col.column->convertToFullColumnIfConst();
                MutableColumnPtr mut_column = column->cloneEmpty();

                size_t last_offset = 0;
                for (size_t row = 0; row < column->size(); ++row)
                {
                    if (size_t to_insert = (*offsets_to_replicate)[row] - last_offset)
                    {
                        if (!row_filter[row])
                            mut_column->insertDefault();
                        else
                            for (size_t dup = 0; dup < to_insert; ++dup)
                                mut_column->insertFrom(*column, row);
                    }

                    last_offset = (*offsets_to_replicate)[row];
                }

                /// TODO: null_map_filter
                bool is_nullable = (use_nulls && left_or_full) || it->second->isNullable();
                block.insert(correctNullability({std::move(mut_column), col.type, right_name}, is_nullable));
            }
        }
@ -1063,11 +1033,8 @@ struct AdderNonJoined;
template <typename Mapped>
struct AdderNonJoined<ASTTableJoin::Strictness::Any, Mapped>
{
    static void add(const Mapped & mapped, size_t & rows_added, MutableColumns & columns_right)
    {
        for (size_t j = 0; j < columns_right.size(); ++j)
            columns_right[j]->insertFrom(*mapped.block->getByPosition(j).column.get(), mapped.row_num);
@ -1078,13 +1045,10 @@ struct AdderNonJoined<ASTTableJoin::Strictness::Any, Mapped>
template <typename Mapped>
struct AdderNonJoined<ASTTableJoin::Strictness::All, Mapped>
{
    static void add(const Mapped & mapped, size_t & rows_added, MutableColumns & columns_right)
    {
        for (auto current = &static_cast<const typename Mapped::Base_t &>(mapped); current != nullptr; current = current->next)
        {
            for (size_t j = 0; j < columns_right.size(); ++j)
                columns_right[j]->insertFrom(*current->block->getByPosition(j).column.get(), current->row_num);
@ -1106,54 +1070,52 @@ public:
     * result_sample_block - keys, "left" columns, and "right" columns.
     */

    std::vector<bool> is_left_key(left_sample_block.columns(), false);
    std::vector<size_t> key_positions_left;
    key_positions_left.reserve(key_names_left.size());

    for (const std::string & key : key_names_left)
    {
        size_t key_pos = left_sample_block.getPositionByName(key);
        key_positions_left.push_back(key_pos);
        is_left_key[key_pos] = true;
    }

    const Block & right_sample_block = parent.sample_block_with_columns_to_add;

    std::unordered_map<size_t, size_t> left_to_right_key_map;
    makeResultSampleBlock(left_sample_block, right_sample_block, columns_added_by_join,
                          key_positions_left, is_left_key, left_to_right_key_map);

    column_indices_left.reserve(left_sample_block.columns() - key_names_left.size());
    column_indices_keys_and_right.reserve(key_names_left.size() + right_sample_block.columns());

    /// Use right key columns if present. @note left & right key columns could have different nullability.
    for (size_t key_pos : key_positions_left)
    {
        /// Here we establish the mapping between key columns of the left- and right-side tables.
        /// key_pos index is inserted in the position corresponding to key column in parent.blocks
        /// (saved blocks of the right-side table) and points to the same key column
        /// in the left_sample_block and thus in the result_sample_block.
        auto it = left_to_right_key_map.find(key_pos);
        if (it != left_to_right_key_map.end())
        {
            column_indices_keys_and_right.push_back(it->second);
            column_indices_left.push_back(key_pos);
        }
        else
            column_indices_keys_and_right.push_back(key_pos);
    }

    for (size_t i = 0; i < left_sample_block.columns(); ++i)
        if (!is_left_key[i])
            column_indices_left.emplace_back(i);

    size_t num_additional_keys = left_to_right_key_map.size();
    for (size_t i = left_sample_block.columns(); i < result_sample_block.columns() - num_additional_keys; ++i)
        column_indices_keys_and_right.emplace_back(i);
}
String getName() const override { return "NonJoined"; }
@ -1184,18 +1146,25 @@ private:
    /// Indices of key columns in result_sample_block or columns that come from the right-side table.
    /// Order is significant: it is the same as the order of columns in the blocks of the right-side table that are saved in parent.blocks.
    ColumnNumbers column_indices_keys_and_right;

    std::unique_ptr<void, std::function<void(void *)>> position;    /// type erasure

    void makeResultSampleBlock(const Block & left_sample_block, const Block & right_sample_block,
                               const NamesAndTypesList & columns_added_by_join,
                               const std::vector<size_t> & key_positions_left, const std::vector<bool> & is_left_key,
                               std::unordered_map<size_t, size_t> & left_to_right_key_map)
    {
        result_sample_block = materializeBlock(left_sample_block);

        /// Convert left columns to Nullable if allowed
        if (parent.use_nulls)
        {
            for (size_t i = 0; i < result_sample_block.columns(); ++i)
                if (!is_left_key[i])
                    convertColumnToNullable(result_sample_block.getByPosition(i));
        }

        /// Add columns from the right-side table to the block.
        for (size_t i = 0; i < right_sample_block.columns(); ++i)
        {
@ -1205,20 +1174,23 @@ private:
        }

        const auto & key_names_right = parent.key_names_right;
        auto right_keys = requiredRightKeys(key_names_right, columns_added_by_join);

        /// Add join key columns from the right block if they have different names.
        for (size_t i = 0; i < key_names_right.size(); ++i)
        {
            auto & right_name = key_names_right[i];
            size_t left_key_pos = key_positions_left[i];

            auto it = right_keys.find(right_name);
            if (it != right_keys.end() && !result_sample_block.has(right_name))
            {
                const auto & col = result_sample_block.getByPosition(left_key_pos);
                bool is_nullable = (parent.use_nulls && isFull(parent.kind)) || it->second->isNullable();
                result_sample_block.insert(correctNullability({col.column, col.type, right_name}, is_nullable));

                size_t right_key_pos = result_sample_block.getPositionByName(right_name);
                left_to_right_key_map[left_key_pos] = right_key_pos;
            }
        }
    }
@ -1235,7 +1207,7 @@ private:
        {
#define M(TYPE) \
            case Join::Type::TYPE: \
                rows_added = fillColumns<STRICTNESS>(*maps.TYPE, columns_keys_and_right); \
                break;
            APPLY_FOR_JOIN_VARIANTS(M)
#undef M
@ -1249,32 +1221,12 @@ private:
        Block res = result_sample_block.cloneEmpty();

        /// @note it's possible to make ColumnConst here and materialize it later
        for (size_t i = 0; i < columns_left.size(); ++i)
            res.getByPosition(column_indices_left[i]).column = columns_left[i]->cloneResized(rows_added);

        for (size_t i = 0; i < columns_keys_and_right.size(); ++i)
            res.getByPosition(column_indices_keys_and_right[i]).column = std::move(columns_keys_and_right[i]);

        return res;
    }
@ -1296,7 +1248,7 @@ private:
    }

    template <ASTTableJoin::Strictness STRICTNESS, typename Map>
    size_t fillColumns(const Map & map, MutableColumns & columns_keys_and_right)
    {
        size_t rows_added = 0;
@ -1313,7 +1265,7 @@ private:
            if (it->getSecond().getUsed())
                continue;

            AdderNonJoined<STRICTNESS, typename Map::mapped_type>::add(it->getSecond(), rows_added, columns_keys_and_right);

            if (rows_added >= max_block_size)
            {

View File

@ -228,6 +228,52 @@ public:
std::unique_ptr<HashMap<UInt128, Mapped, UInt128HashCRC32>> keys128;
std::unique_ptr<HashMap<UInt256, Mapped, UInt256HashCRC32>> keys256;
std::unique_ptr<HashMap<UInt128, Mapped, UInt128TrivialHash>> hashed;
void create(Type which)
{
switch (which)
{
case Type::EMPTY: break;
case Type::CROSS: break;
#define M(NAME) \
case Type::NAME: NAME = std::make_unique<typename decltype(NAME)::element_type>(); break;
APPLY_FOR_JOIN_VARIANTS(M)
#undef M
}
}
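/// A usage sketch (assumed call site, not shown in this diff): the concrete key layout
/// is picked once per Join from the key columns and then created here, roughly:
///
///     maps.create(chooseMethod(key_columns, key_sizes));  /// chooseMethod is assumed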
size_t getTotalRowCount(Type which) const
{
switch (which)
{
case Type::EMPTY: return 0;
case Type::CROSS: return 0;
#define M(NAME) \
case Type::NAME: return NAME ? NAME->size() : 0;
APPLY_FOR_JOIN_VARIANTS(M)
#undef M
}
__builtin_unreachable();
}
size_t getTotalByteCountImpl(Type which) const
{
switch (which)
{
case Type::EMPTY: return 0;
case Type::CROSS: return 0;
#define M(NAME) \
case Type::NAME: return NAME ? NAME->getBufferSizeInBytes() : 0;
APPLY_FOR_JOIN_VARIANTS(M)
#undef M
}
__builtin_unreachable();
}
};
using MapsAny = MapsTemplate<WithFlags<false, false, RowRef>>;

View File

@ -72,7 +72,7 @@ bool MutationsInterpreter::isStorageTouchedByMutations() const
    context_copy.getSettingsRef().merge_tree_uniform_read_distribution = 0;
    context_copy.getSettingsRef().max_threads = 1;

    BlockInputStreamPtr in = InterpreterSelectQuery(select, context_copy, storage).execute().in;

    Block block = in->read();
    if (!block.rows())
@ -367,7 +367,7 @@ void MutationsInterpreter::prepare(bool dry_run)
        select->children.push_back(where_expression);
    }

    interpreter_select = std::make_unique<InterpreterSelectQuery>(select, context, storage, SelectQueryOptions().analyze(dry_run));

    is_prepared = true;
}

View File

@ -0,0 +1,76 @@
#pragma once
#include <Core/QueryProcessingStage.h>
namespace DB
{
/**
 * to_stage
 * - the stage up to which the query is to be executed. By default - to the end.
 *   Execution can stop at an intermediate aggregation state, which is then combined from different servers for distributed query processing.
 *
 * subquery_depth
 * - controls the limit on the nesting depth of subqueries. For subqueries, a value incremented by one is passed;
 *   for INSERT SELECT, a value of 1 is passed instead of 0.
 *
 * only_analyze
 * - the object was created only for query analysis.
 *
 * is_subquery
 * - subqueries may need special handling. For example, there is no need to return duplicated columns in the result, since subquery results are consumed indirectly.
 */
struct SelectQueryOptions
{
QueryProcessingStage::Enum to_stage;
size_t subquery_depth;
bool only_analyze;
bool modify_inplace;
bool remove_duplicates;
SelectQueryOptions(QueryProcessingStage::Enum stage = QueryProcessingStage::Complete, size_t depth = 0)
: to_stage(stage)
, subquery_depth(depth)
, only_analyze(false)
, modify_inplace(false)
, remove_duplicates(false)
{}
SelectQueryOptions copy() const { return *this; }
SelectQueryOptions subquery() const
{
SelectQueryOptions out = *this;
out.to_stage = QueryProcessingStage::Complete;
++out.subquery_depth;
return out;
}
SelectQueryOptions & analyze(bool value = true)
{
only_analyze = value;
return *this;
}
SelectQueryOptions & modify(bool value = true)
{
modify_inplace = value;
return *this;
}
SelectQueryOptions & noModify() { return modify(false); }
SelectQueryOptions & removeDuplicates(bool value = true)
{
remove_duplicates = value;
return *this;
}
SelectQueryOptions & noSubquery()
{
subquery_depth = 0;
return *this;
}
};
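/// A minimal usage sketch of the fluent interface, mirroring call sites changed in this
/// commit (InterpreterSelectQuery, interpretSubquery); the chained values are illustrative:
///
///     auto options = SelectQueryOptions(QueryProcessingStage::Complete, subquery_depth)
///         .subquery()            /// reset to_stage to Complete, ++subquery_depth
///         .removeDuplicates();   /// rename duplicated result columns (GLOBAL subqueries)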
}

View File

@ -123,24 +123,69 @@ bool hasArrayJoin(const ASTPtr & ast)
    return false;
}
/// Keep number of columns for 'GLOBAL IN (SELECT 1 AS a, a)'
void renameDuplicatedColumns(const ASTSelectQuery * select_query)
{
ASTs & elements = select_query->select_expression_list->children;
std::set<String> all_column_names;
std::set<String> assigned_column_names;
for (auto & expr : elements)
all_column_names.insert(expr->getAliasOrColumnName());
for (auto & expr : elements)
{
auto name = expr->getAliasOrColumnName();
if (!assigned_column_names.insert(name).second)
{
size_t i = 1;
while (all_column_names.end() != all_column_names.find(name + "_" + toString(i)))
++i;
name = name + "_" + toString(i);
expr = expr->clone(); /// Cancels fuse of the same expressions in the tree.
expr->setAlias(name);
all_column_names.insert(name);
assigned_column_names.insert(name);
}
}
}
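/// A worked example (illustrative) of the renaming above: a colliding element is cloned
/// and re-aliased with a numeric suffix, and the suffix counter skips names already taken:
///
///     SELECT 1 AS a, a         -->  columns: a, a_1
///     SELECT x, x, x_1 FROM t  -->  columns: x, x_2, x_1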
/// Sometimes we have to calculate more columns in SELECT clause than will be returned from query.
/// This is the case when we have DISTINCT or arrayJoin: we require more columns in SELECT even if we need less columns in result.
/// Also we have to remove duplicates in case of GLOBAL subqueries. Their results are placed into tables so duplicates are impossible.
void removeUnneededColumnsFromSelectClause(const ASTSelectQuery * select_query, const Names & required_result_columns, bool remove_dups)
{
    ASTs & elements = select_query->select_expression_list->children;

    std::map<String, size_t> required_columns_with_duplicate_count;

    if (!required_result_columns.empty())
    {
        /// Some columns may be queried multiple times, like SELECT x, y, y FROM table.
        for (const auto & name : required_result_columns)
        {
            if (remove_dups)
                required_columns_with_duplicate_count[name] = 1;
            else
                ++required_columns_with_duplicate_count[name];
        }
    }
    else if (remove_dups)
    {
        /// Even if we have no requirements there could be duplicates because of asterisks. SELECT *, t.*
        for (const auto & elem : elements)
            required_columns_with_duplicate_count.emplace(elem->getAliasOrColumnName(), 1);
    }
    else
        return;

    ASTs new_elements;
    new_elements.reserve(elements.size());

    for (const auto & elem : elements)
    {
        String name = elem->getAliasOrColumnName();
@ -645,6 +690,9 @@ SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyze(
    if (select_query)
    {
        if (remove_duplicates)
            renameDuplicatedColumns(select_query);

        if (const ASTTablesInSelectQueryElement * node = select_query->join())
        {
            if (settings.enable_optimize_predicate_expression)
@ -688,7 +736,7 @@ SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyze(
    /// Must be after 'normalizeTree' (after expanding aliases, so that aliases are not lost)
    /// and before 'executeScalarSubqueries', 'analyzeAggregation', etc. to avoid excessive calculations.
    if (select_query)
        removeUnneededColumnsFromSelectClause(select_query, required_result_columns, remove_duplicates);

    /// Executing scalar subqueries - replacing them with constant values.
    executeScalarSubqueries(query, context, subquery_depth);

View File

@ -2,6 +2,7 @@
#include <Interpreters/AnalyzedJoin.h>
#include <Interpreters/Aliases.h>
#include <Interpreters/SelectQueryOptions.h>
namespace DB
{
@ -55,9 +56,10 @@ using SyntaxAnalyzerResultPtr = std::shared_ptr<const SyntaxAnalyzerResult>;
class SyntaxAnalyzer
{
public:
    SyntaxAnalyzer(const Context & context_, const SelectQueryOptions & select_options = {})
        : context(context_)
        , subquery_depth(select_options.subquery_depth)
        , remove_duplicates(select_options.remove_duplicates)
    {}
    SyntaxAnalyzerResultPtr analyze(
@ -69,6 +71,7 @@ public:
private:
    const Context & context;
    size_t subquery_depth;
    bool remove_duplicates;
};

}
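/// Sketch of how the new constructor threads options through (the call shape is assumed;
/// analyze()'s full argument list is not shown in this diff):
///
///     auto syntax_result = SyntaxAnalyzer(context, SelectQueryOptions().removeDuplicates())
///         .analyze(query, source_columns);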

View File

@ -41,6 +41,8 @@ std::shared_ptr<InterpreterSelectWithUnionQuery> interpretSubquery(
    subquery_settings.extremes = 0;
    subquery_context.setSettings(subquery_settings);
auto subquery_options = SelectQueryOptions(QueryProcessingStage::Complete, subquery_depth).subquery();
    ASTPtr query;
    if (table || function)
    {
@ -83,48 +85,10 @@ std::shared_ptr<InterpreterSelectWithUnionQuery> interpretSubquery(
    else
    {
        query = subquery->children.at(0);
        subquery_options.removeDuplicates();
    }

    return std::make_shared<InterpreterSelectWithUnionQuery>(query, subquery_context, subquery_options, required_source_columns);
}

}

View File

@ -39,9 +39,13 @@ ASTPtr ASTColumnDeclaration::clone() const
void ASTColumnDeclaration::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
{
    frame.need_parens = false;

    if (!settings.one_line)
        settings.ostr << settings.nl_or_ws << std::string(4 * frame.indent, ' ');

    /// We have to always backquote column names to avoid ambiguity with INDEX and other declarations in CREATE query.
    settings.ostr << backQuote(name);
    if (type)
    {
        settings.ostr << ' ';

View File

@ -25,7 +25,6 @@ const char * IAST::hilite_alias = "\033[0;32m";
const char * IAST::hilite_none = "\033[0m";

String backQuoteIfNeed(const String & x)
{
    String res(x.size(), '\0');
@ -36,6 +35,16 @@ String backQuoteIfNeed(const String & x)
    return res;
}
String backQuote(const String & x)
{
String res(x.size(), '\0');
{
WriteBufferFromString wb(res);
writeBackQuotedString(x, wb);
}
return res;
}
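/// Behavior contrast (expected values, for illustration): backQuoteIfNeed quotes only
/// when the identifier needs it, while backQuote always quotes:
///
///     backQuoteIfNeed("name")   -> name
///     backQuoteIfNeed("my col") -> `my col`
///     backQuote("name")         -> `name`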
size_t IAST::checkSize(size_t max_size) const
{

View File

@ -208,7 +208,9 @@ private:
};

/// Quote the identifier with backquotes, if required.
String backQuoteIfNeed(const String & x);
/// Quote the identifier with backquotes.
String backQuote(const String & x);

}

View File

@ -205,7 +205,7 @@ void AlterCommand::apply(ColumnsDescription & columns_description, IndicesDescri
    }
    else if (type == MODIFY_ORDER_BY)
    {
        if (!primary_key_ast && order_by_ast)
        {
            /// Primary and sorting key become independent after this ALTER so we have to
            /// save the old ORDER BY expression as the new primary key.

View File

@ -611,7 +611,7 @@ String MergeTreeData::MergingParams::getModeName() const
Int64 MergeTreeData::getMaxBlockNumber()
{
    auto lock = lockParts();

    Int64 max_block_num = 0;
    for (const DataPartPtr & part : data_parts_by_info)
@ -640,7 +640,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
    DataPartsVector broken_parts_to_detach;
    size_t suspicious_broken_parts = 0;

    auto lock = lockParts();
    data_parts_indexes.clear();

    for (const String & file_name : part_file_names)
@ -866,7 +866,7 @@ MergeTreeData::DataPartsVector MergeTreeData::grabOldParts()
    std::vector<DataPartIteratorByStateAndInfo> parts_to_delete;

    {
        auto parts_lock = lockParts();

        auto outdated_parts_range = getDataPartsStateRange(DataPartState::Outdated);
        for (auto it = outdated_parts_range.begin(); it != outdated_parts_range.end(); ++it)
@ -900,7 +900,7 @@ MergeTreeData::DataPartsVector MergeTreeData::grabOldParts()
void MergeTreeData::rollbackDeletingParts(const MergeTreeData::DataPartsVector & parts)
{
    auto lock = lockParts();
    for (auto & part : parts)
    {
        /// We should modify it under data_parts_mutex
@ -912,7 +912,7 @@ void MergeTreeData::rollbackDeletingParts(const MergeTreeData::DataPartsVector &
void MergeTreeData::removePartsFinally(const MergeTreeData::DataPartsVector & parts)
{
    {
        auto lock = lockParts();

        /// TODO: use data_parts iterators instead of pointers
        for (auto & part : parts)
@ -980,7 +980,7 @@ void MergeTreeData::dropAllData()
{
    LOG_TRACE(log, "dropAllData: waiting for locks.");

    auto lock = lockParts();

    LOG_TRACE(log, "dropAllData: removing data from memory.");
@ -1717,7 +1717,7 @@ MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace(
    DataPartsVector covered_parts;
    {
        auto lock = lockParts();
        renameTempPartAndReplace(part, increment, out_transaction, lock, &covered_parts);
    }
    return covered_parts;
@ -1814,7 +1814,7 @@ restore_covered)
    {
        LOG_INFO(log, "Renaming " << part_to_detach->relative_path << " to " << prefix << part_to_detach->name << " and forgiving it.");

        auto lock = lockParts();

        auto it_part = data_parts_by_info.find(part_to_detach->info);
        if (it_part == data_parts_by_info.end())
@ -1931,7 +1931,7 @@ void MergeTreeData::tryRemovePartImmediately(DataPartPtr && part)
{
    DataPartPtr part_to_delete;
    {
        auto lock = lockParts();

        LOG_TRACE(log, "Trying to immediately remove part " << part->getNameWithState());
@ -1967,7 +1967,7 @@ size_t MergeTreeData::getTotalActiveSizeInBytes() const
{
    size_t res = 0;
    {
        auto lock = lockParts();

        for (auto & part : getDataPartsStateRange(DataPartState::Committed))
            res += part->bytes_on_disk;
@ -1979,7 +1979,7 @@ size_t MergeTreeData::getTotalActiveSizeInBytes() const
size_t MergeTreeData::getMaxPartsCountForPartition() const
{
    auto lock = lockParts();

    size_t res = 0;
    size_t cur_count = 0;
@ -2006,7 +2006,7 @@ size_t MergeTreeData::getMaxPartsCountForPartition() const
std::optional<Int64> MergeTreeData::getMinPartDataVersion() const
{
    auto lock = lockParts();

    std::optional<Int64> result;
    for (const DataPartPtr & part : getDataPartsStateRange(DataPartState::Committed))
@ -2088,8 +2088,8 @@ MergeTreeData::DataPartPtr MergeTreeData::getActiveContainingPart(
MergeTreeData::DataPartPtr MergeTreeData::getActiveContainingPart(const MergeTreePartInfo & part_info)
{
    auto lock = lockParts();
    return getActiveContainingPart(part_info, DataPartState::Committed, lock);
}

MergeTreeData::DataPartPtr MergeTreeData::getActiveContainingPart(const String & part_name)
@ -2103,7 +2103,7 @@ MergeTreeData::DataPartsVector MergeTreeData::getDataPartsVectorInPartition(Merg
{
    DataPartStateAndPartitionID state_with_partition{state, partition_id};

    auto lock = lockParts();
    return DataPartsVector(
        data_parts_by_state_and_info.lower_bound(state_with_partition),
        data_parts_by_state_and_info.upper_bound(state_with_partition));
@ -2112,7 +2112,7 @@ MergeTreeData::DataPartsVector MergeTreeData::getDataPartsVectorInPartition(Merg
MergeTreeData::DataPartPtr MergeTreeData::getPartIfExists(const MergeTreePartInfo & part_info, const MergeTreeData::DataPartStates & valid_states)
{
    auto lock = lockParts();

    auto it = data_parts_by_info.find(part_info);
    if (it == data_parts_by_info.end())
@ -2331,7 +2331,7 @@ String MergeTreeData::getPartitionIDFromQuery(const ASTPtr & ast, const Context
    String partition_id = partition.getID(*this);

    {
        auto data_parts_lock = lockParts();
        DataPartPtr existing_part_in_partition = getAnyPartInPartition(partition_id, data_parts_lock);
        if (existing_part_in_partition && existing_part_in_partition->partition.value != partition.value)
        {
@ -2352,7 +2352,7 @@ MergeTreeData::DataPartsVector MergeTreeData::getDataPartsVector(const DataPartS
    DataPartsVector res;
    DataPartsVector buf;
    {
        auto lock = lockParts();

        for (auto state : affordable_states)
        {
@ -2378,7 +2378,7 @@ MergeTreeData::DataPartsVector MergeTreeData::getAllDataPartsVector(MergeTreeDat
{
    DataPartsVector res;
    {
        auto lock = lockParts();
        res.assign(data_parts_by_info.begin(), data_parts_by_info.end());

        if (out_states != nullptr)
@ -2396,7 +2396,7 @@ MergeTreeData::DataParts MergeTreeData::getDataParts(const DataPartStates & affo
{
    DataParts res;
    {
        auto lock = lockParts();
        for (auto state : affordable_states)
        {
            auto range = getDataPartsStateRange(state);

View File

@ -538,8 +538,7 @@ public:
    size_t getColumnCompressedSize(const std::string & name) const
    {
        auto lock = lockParts();
        const auto it = column_sizes.find(name);
        return it == std::end(column_sizes) ? 0 : it->second.data_compressed;
    }
@ -547,14 +546,14 @@ public:
    using ColumnSizeByName = std::unordered_map<std::string, DataPart::ColumnSize>;
    ColumnSizeByName getColumnSizes() const
    {
        auto lock = lockParts();
        return column_sizes;
    }

    /// Calculates column sizes in compressed form for the current state of data_parts.
    void recalculateColumnSizes()
    {
        auto lock = lockParts();
        calculateColumnSizesImpl();
    }
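    /// For orientation (a sketch assumed from the replacements above, not shown in this
    /// diff): lockParts() presumably returns an RAII guard over data_parts_mutex, so call
    /// sites stop naming the mutex directly, roughly:
    ///
    ///     DataPartsLock lockParts() const { return DataPartsLock(data_parts_mutex); }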

View File

@ -25,6 +25,8 @@
#include <Poco/Ext/ThreadNumber.h>
#include <ext/range.h>

#include <DataStreams/FilterBlockInputStream.h>
#include <DataStreams/ExpressionBlockInputStream.h>
namespace ProfileEvents
@ -221,7 +223,21 @@ BlockInputStreams StorageBuffer::read(
     */
    if (processed_stage > QueryProcessingStage::FetchColumns)
        for (auto & stream : streams_from_buffers)
            stream = InterpreterSelectQuery(query_info.query, context, stream, SelectQueryOptions(processed_stage)).execute().in;
if (query_info.prewhere_info)
{
for (auto & stream : streams_from_buffers)
stream = std::make_shared<FilterBlockInputStream>(stream, query_info.prewhere_info->prewhere_actions,
query_info.prewhere_info->prewhere_column_name, query_info.prewhere_info->remove_prewhere_column);
if (query_info.prewhere_info->alias_actions)
{
for (auto & stream : streams_from_buffers)
stream = std::make_shared<ExpressionBlockInputStream>(stream, query_info.prewhere_info->alias_actions);
}
}
    streams_from_dst.insert(streams_from_dst.end(), streams_from_buffers.begin(), streams_from_buffers.end());

    return streams_from_dst;

View File

@ -74,7 +74,15 @@ public:
    void rename(const String & /*new_path_to_db*/, const String & /*new_database_name*/, const String & new_table_name) override { name = new_table_name; }

    bool supportsSampling() const override { return true; }
    bool supportsPrewhere() const override
{
if (no_destination)
return false;
auto dest = global_context.tryGetTable(destination_database, destination_table);
if (dest && dest.get() != this)
return dest->supportsPrewhere();
return false;
}
    bool supportsFinal() const override { return true; }
    bool supportsIndexForIn() const override { return true; }

View File

@ -286,7 +286,8 @@ BlockInputStreams StorageDistributed::read(
    const auto & modified_query_ast = rewriteSelectQuery(
        query_info.query, remote_database, remote_table, remote_table_function_ptr);

    Block header = materializeBlock(
        InterpreterSelectQuery(query_info.query, context, SelectQueryOptions(processed_stage)).getSampleBlock());

    ClusterProxy::SelectStreamFactory select_stream_factory = remote_table_function_ptr
        ? ClusterProxy::SelectStreamFactory(

View File

@ -295,7 +295,7 @@ private:
            if (column_with_null[i])
            {
                if (key_pos == i)
                    res.getByPosition(i).column = makeNullable(std::move(columns[i]));
                else
                {
                    const ColumnNullable & nullable_col = static_cast<const ColumnNullable &>(*columns[i]);

View File

@ -274,7 +274,7 @@ BlockInputStreams StorageMerge::createSourceStreams(const SelectQueryInfo & quer
    if (!storage)
        return BlockInputStreams{
            InterpreterSelectQuery(modified_query_info.query, modified_context, std::make_shared<OneBlockInputStream>(header),
                                   SelectQueryOptions(processed_stage).analyze()).execute().in};

    BlockInputStreams source_streams;
@ -295,7 +295,7 @@ BlockInputStreams StorageMerge::createSourceStreams(const SelectQueryInfo & quer
        modified_context.getSettingsRef().max_threads = UInt64(streams_num);
        modified_context.getSettingsRef().max_streams_to_max_threads_ratio = 1;

        InterpreterSelectQuery interpreter{modified_query_info.query, modified_context, SelectQueryOptions(processed_stage)};
        BlockInputStreamPtr interpreter_stream = interpreter.execute().in;

        /** Materialization is needed, since from distributed storage the constants come materialized.
@ -429,7 +429,7 @@ Block StorageMerge::getQueryHeader(
case QueryProcessingStage::Complete: case QueryProcessingStage::Complete:
return materializeBlock(InterpreterSelectQuery( return materializeBlock(InterpreterSelectQuery(
query_info.query, context, std::make_shared<OneBlockInputStream>(getSampleBlockForColumns(column_names)), query_info.query, context, std::make_shared<OneBlockInputStream>(getSampleBlockForColumns(column_names)),
processed_stage, true).getSampleBlock()); SelectQueryOptions(processed_stage).analyze()).getSampleBlock());
} }
throw Exception("Logical Error: unknown processed stage.", ErrorCodes::LOGICAL_ERROR); throw Exception("Logical Error: unknown processed stage.", ErrorCodes::LOGICAL_ERROR);
} }
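
In all three hunks the opaque trailing boolean (`true` meant "analyze only") becomes the named, chainable modifier analyze(). A usage sketch built from the names in this diff:

    // The call site now documents itself: build the pipeline header without
    // executing the query.
    InterpreterSelectQuery interpreter(
        modified_query_info.query,
        modified_context,
        std::make_shared<OneBlockInputStream>(header),
        SelectQueryOptions(processed_stage).analyze());   // was: processed_stage, true
    Block sample = interpreter.getSampleBlock();          // no execution happens here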

View File

@@ -2655,7 +2655,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Strin
     if (auto part = data.getPartIfExists(part_info, {MergeTreeDataPart::State::Outdated, MergeTreeDataPart::State::Deleting}))
     {
-        LOG_DEBUG(log, "Part " << part->getNameWithState() << " should be deleted after previous attempt before fetch");
+        LOG_DEBUG(log, "Part " << part->name << " should be deleted after previous attempt before fetch");
         /// Force immediate parts cleanup to delete the part that was left from the previous fetch attempt.
         cleanup_thread.wakeup();
         return false;
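
An annotated reading of the guard above (identifiers are the diff's own): if a part left over from a previous failed fetch still sits in a terminal state, the fetch is declined and the cleanup thread is prodded so a later retry can succeed.

    if (auto part = data.getPartIfExists(
            part_info, {MergeTreeDataPart::State::Outdated, MergeTreeDataPart::State::Deleting}))
    {
        LOG_DEBUG(log, "Part " << part->name << " should be deleted after previous attempt before fetch");
        cleanup_thread.wakeup();   // force immediate cleanup of the stale part
        return false;              // caller retries the fetch after cleanup
    }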

View File

@@ -63,7 +63,7 @@ BlockInputStreams StorageView::read(
         current_inner_query = new_inner_query;
     }
-    res = InterpreterSelectWithUnionQuery(current_inner_query, context, column_names).executeWithMultipleStreams();
+    res = InterpreterSelectWithUnionQuery(current_inner_query, context, {}, column_names).executeWithMultipleStreams();
     /// It's expected that the columns read from storage are not constant.
     /// Because method 'getSampleBlockForColumns' is used to obtain a structure of result in InterpreterSelectQuery.

View File

@@ -21,6 +21,7 @@ namespace DB
 namespace ErrorCodes
 {
     extern const int CANNOT_GET_CREATE_TABLE_QUERY;
+    extern const int TABLE_IS_DROPPED;
 }
@@ -174,9 +175,20 @@ protected:
         for (; rows_count < max_block_size && tables_it->isValid(); tables_it->next())
         {
             auto table_name = tables_it->name();
-            const auto table = context.tryGetTable(database_name, table_name);
-            if (!table)
-                continue;
+            const StoragePtr & table = tables_it->table();
+
+            TableStructureReadLockHolder lock;
+            try
+            {
+                lock = table->lockStructureForShare(false, context.getCurrentQueryId());
+            }
+            catch (const Exception & e)
+            {
+                if (e.code() == ErrorCodes::TABLE_IS_DROPPED)
+                    continue;
+                throw;
+            }
 
             ++rows_count;
@@ -190,13 +202,13 @@ protected:
                 res_columns[res_index++]->insert(table_name);
             if (columns_mask[src_index++])
-                res_columns[res_index++]->insert(tables_it->table()->getName());
+                res_columns[res_index++]->insert(table->getName());
             if (columns_mask[src_index++])
                 res_columns[res_index++]->insert(0u); // is_temporary
             if (columns_mask[src_index++])
-                res_columns[res_index++]->insert(tables_it->table()->getDataPath());
+                res_columns[res_index++]->insert(table->getDataPath());
             if (columns_mask[src_index++])
                 res_columns[res_index++]->insert(database->getTableMetadataPath(table_name));
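
The new iteration strategy in one self-contained sketch: take a short shared lock on each table's structure and silently skip tables dropped while the query runs, instead of looking tables up by name a second time. Names follow the diff; the surrounding plumbing is elided.

    for (; tables_it->isValid(); tables_it->next())
    {
        const StoragePtr & table = tables_it->table();   // no second name lookup

        TableStructureReadLockHolder lock;
        try
        {
            // false: we will not modify the table's data, only read its metadata.
            lock = table->lockStructureForShare(false, context.getCurrentQueryId());
        }
        catch (const Exception & e)
        {
            if (e.code() == ErrorCodes::TABLE_IS_DROPPED)
                continue;   // dropped between iteration and locking: not an error
            throw;
        }

        // ... fill result columns from `table` while the lock is held ...
    }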

View File

@@ -31,6 +31,7 @@ MSG_OK = OP_SQUARE_BRACKET + colored(" OK ", "green", attrs=['bold']) + CL_SQUAR
 MSG_SKIPPED = OP_SQUARE_BRACKET + colored(" SKIPPED ", "cyan", attrs=['bold']) + CL_SQUARE_BRACKET
 MESSAGES_TO_RETRY = [
+    "DB::Exception: ZooKeeper session has been expired",
     "Coordination::Exception: Connection loss",
 ]
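
The runner (Python) now also retries tests whose output mentions an expired ZooKeeper session. The same idea as a C++ sketch, kept in C++ for consistency with the other examples in these notes; the two message strings are the diff's own:

    #include <array>
    #include <string>
    #include <string_view>

    // True when a test failure is caused by a transient coordination problem
    // and the test should simply be rerun.
    bool isRetryableTestFailure(const std::string & output)
    {
        static constexpr std::array<std::string_view, 2> messages_to_retry{
            "DB::Exception: ZooKeeper session has been expired",
            "Coordination::Exception: Connection loss",
        };
        for (std::string_view message : messages_to_retry)
            if (output.find(message) != std::string::npos)
                return true;
        return false;
    }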

View File

@@ -1,14 +1,14 @@
 d Date
 k UInt64
 i32 Int32
-CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32) ENGINE = MergeTree(d, k, 8192)
+CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32) ENGINE = MergeTree(d, k, 8192)
 2015-01-01 10 42
 d Date
 k UInt64
 i32 Int32
 n.ui8 Array(UInt8)
 n.s Array(String)
-CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = MergeTree(d, k, 8192)
+CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = MergeTree(d, k, 8192)
 2015-01-01 8 40 [1,2,3] ['12','13','14']
 2015-01-01 10 42 [] []
 d Date
@@ -17,7 +17,7 @@ i32 Int32
 n.ui8 Array(UInt8)
 n.s Array(String)
 n.d Array(Date)
-CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
+CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
 2015-01-01 7 39 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03']
 2015-01-01 8 40 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00']
 2015-01-01 10 42 [] [] []
@@ -28,7 +28,7 @@ n.ui8 Array(UInt8)
 n.s Array(String)
 n.d Array(Date)
 s String DEFAULT \'0\'
-CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), s String DEFAULT \'0\') ENGINE = MergeTree(d, k, 8192)
+CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), `s` String DEFAULT \'0\') ENGINE = MergeTree(d, k, 8192)
 2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500
 2015-01-01 7 39 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 0
 2015-01-01 8 40 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 0
@@ -39,7 +39,7 @@ i32 Int32
 n.ui8 Array(UInt8)
 n.s Array(String)
 s Int64
-CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, `n.ui8` Array(UInt8), `n.s` Array(String), s Int64) ENGINE = MergeTree(d, k, 8192)
+CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `s` Int64) ENGINE = MergeTree(d, k, 8192)
 2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] 100500
 2015-01-01 7 39 [10,20,30] ['120','130','140'] 0
 2015-01-01 8 40 [1,2,3] ['12','13','14'] 0
@@ -51,7 +51,7 @@ n.ui8 Array(UInt8)
 n.s Array(String)
 s UInt32
 n.d Array(Date)
-CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, `n.ui8` Array(UInt8), `n.s` Array(String), s UInt32, `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
+CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `s` UInt32, `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
 2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00']
 2015-01-01 7 39 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00']
 2015-01-01 8 40 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00']
@@ -65,7 +65,7 @@ k UInt64
 i32 Int32
 n.s Array(String)
 s UInt32
-CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, `n.s` Array(String), s UInt32) ENGINE = MergeTree(d, k, 8192)
+CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `n.s` Array(String), `s` UInt32) ENGINE = MergeTree(d, k, 8192)
 2015-01-01 6 38 ['asd','qwe','qwe'] 100500
 2015-01-01 7 39 ['120','130','140'] 0
 2015-01-01 8 40 ['12','13','14'] 0
@@ -74,7 +74,7 @@ d Date
 k UInt64
 i32 Int32
 s UInt32
-CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, s UInt32) ENGINE = MergeTree(d, k, 8192)
+CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `s` UInt32) ENGINE = MergeTree(d, k, 8192)
 2015-01-01 6 38 100500
 2015-01-01 7 39 0
 2015-01-01 8 40 0
@@ -85,7 +85,7 @@ i32 Int32
 s UInt32
 n.s Array(String)
 n.d Array(Date)
-CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, s UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
+CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `s` UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
 2015-01-01 6 38 100500 [] []
 2015-01-01 7 39 0 [] []
 2015-01-01 8 40 0 [] []
@@ -94,7 +94,7 @@ d Date
 k UInt64
 i32 Int32
 s UInt32
-CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, s UInt32) ENGINE = MergeTree(d, k, 8192)
+CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `s` UInt32) ENGINE = MergeTree(d, k, 8192)
 2015-01-01 6 38 100500
 2015-01-01 7 39 0
 2015-01-01 8 40 0
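
All of the .reference churn in this and the following files stems from one formatting change: SHOW CREATE TABLE now backquotes every column identifier, not only the ones (like `n.ui8`) that require it. A hedged sketch of such a quoting helper; ClickHouse has its own writeBackQuotedString-style routine whose exact escaping may differ (doubling is used here as one plausible convention):

    #include <string>

    // Always wrap an identifier in backquotes, escaping embedded backquotes.
    std::string backQuote(const std::string & identifier)
    {
        std::string result;
        result.reserve(identifier.size() + 2);
        result += '`';
        for (char c : identifier)
        {
            if (c == '`')
                result += '`';   // assumed escaping convention: double it
            result += c;
        }
        result += '`';
        return result;
    }
    // backQuote("n.ui8") yields `n.ui8`, so nested columns need no special case.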

View File

@@ -1,22 +1,22 @@
 d Date
 k UInt64
 i32 Int32
-CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
-CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 10 42
 d Date
 k UInt64
 i32 Int32
 dt DateTime
-CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
 dt DateTime
-CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 9 41 1992-01-01 08:00:00
 2015-01-01 10 42 0000-00-00 00:00:00
 d Date
@@ -25,14 +25,14 @@ i32 Int32
 dt DateTime
 n.ui8 Array(UInt8)
 n.s Array(String)
-CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
 dt DateTime
 n.ui8 Array(UInt8)
 n.s Array(String)
-CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14']
 2015-01-01 9 41 1992-01-01 08:00:00 [] []
 2015-01-01 10 42 0000-00-00 00:00:00 [] []
@@ -43,7 +43,7 @@ dt DateTime
 n.ui8 Array(UInt8)
 n.s Array(String)
 n.d Array(Date)
-CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
@@ -51,7 +51,7 @@ dt DateTime
 n.ui8 Array(UInt8)
 n.s Array(String)
 n.d Array(Date)
-CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03']
 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00']
 2015-01-01 9 41 1992-01-01 08:00:00 [] [] []
@@ -64,7 +64,7 @@ n.ui8 Array(UInt8)
 n.s Array(String)
 n.d Array(Date)
 s String DEFAULT \'0\'
-CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), s String DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), `s` String DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
@@ -73,7 +73,7 @@ n.ui8 Array(UInt8)
 n.s Array(String)
 n.d Array(Date)
 s String DEFAULT \'0\'
-CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), s String DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), `s` String DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500
 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 0
 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 0
@@ -86,7 +86,7 @@ dt DateTime
 n.ui8 Array(UInt8)
 n.s Array(String)
 s Int64
-CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), s Int64) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` Int64) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
@@ -94,7 +94,7 @@ dt DateTime
 n.ui8 Array(UInt8)
 n.s Array(String)
 s Int64
-CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), s Int64) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` Int64) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500
 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0
 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0
@@ -108,7 +108,7 @@ n.ui8 Array(UInt8)
 n.s Array(String)
 s UInt32
 n.d Array(Date)
-CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), s UInt32, `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` UInt32, `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
@@ -117,7 +117,7 @@ n.ui8 Array(UInt8)
 n.s Array(String)
 s UInt32
 n.d Array(Date)
-CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), s UInt32, `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` UInt32, `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00']
 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00']
 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00']
@@ -129,14 +129,14 @@ i32 Int32
 dt DateTime
 n.s Array(String)
 s UInt32
-CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.s` Array(String), s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.s` Array(String), `s` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
 dt DateTime
 n.s Array(String)
 s UInt32
-CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.s` Array(String), s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.s` Array(String), `s` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 6 38 2014-07-15 13:26:50 ['asd','qwe','qwe'] 100500
 2015-01-01 7 39 2014-07-14 13:26:50 ['120','130','140'] 0
 2015-01-01 8 40 2012-12-12 12:12:12 ['12','13','14'] 0
@@ -147,13 +147,13 @@ k UInt64
 i32 Int32
 dt DateTime
 s UInt32
-CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
 dt DateTime
 s UInt32
-CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 6 38 2014-07-15 13:26:50 100500
 2015-01-01 7 39 2014-07-14 13:26:50 0
 2015-01-01 8 40 2012-12-12 12:12:12 0
@@ -166,7 +166,7 @@ dt DateTime
 s UInt32
 n.s Array(String)
 n.d Array(Date)
-CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
@@ -174,7 +174,7 @@ dt DateTime
 s UInt32
 n.s Array(String)
 n.d Array(Date)
-CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 6 38 2014-07-15 13:26:50 100500 [] []
 2015-01-01 7 39 2014-07-14 13:26:50 0 [] []
 2015-01-01 8 40 2012-12-12 12:12:12 0 [] []
@@ -185,13 +185,13 @@ k UInt64
 i32 Int32
 dt DateTime
 s UInt32
-CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
 dt DateTime
 s UInt32
-CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 6 38 2014-07-15 13:26:50 100500
 2015-01-01 7 39 2014-07-14 13:26:50 0
 2015-01-01 8 40 2012-12-12 12:12:12 0
@@ -202,13 +202,13 @@ k UInt64
 i32 Int32
 dt Date
 s DateTime
-CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt Date, s DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` Date, `s` DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
 dt Date
 s DateTime
-CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt Date, s DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` Date, `s` DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 6 38 2014-07-15 1970-01-02 06:55:00
 2015-01-01 7 39 2014-07-14 0000-00-00 00:00:00
 2015-01-01 8 40 2012-12-12 0000-00-00 00:00:00

View File

@@ -1,7 +1,7 @@
 A
 B
-A 1 TinyLog CREATE TABLE test_show_tables.A ( A UInt8) ENGINE = TinyLog
-B 1 TinyLog CREATE TABLE test_show_tables.B ( A UInt8) ENGINE = TinyLog
+A 1 TinyLog CREATE TABLE test_show_tables.A (`A` UInt8) ENGINE = TinyLog
+B 1 TinyLog CREATE TABLE test_show_tables.B (`A` UInt8) ENGINE = TinyLog
 test_temporary_table
 ['test_show_tables'] ['test_materialized']
 0

View File

@@ -23432,3 +23432,7 @@
 1
 1
 1
+1
+1
+1
+1

View File

@@ -1,3 +1,4 @@
+SET send_logs_level = 'none';
 select 1 = position('', '');
 select 1 = position('abc', '');
 select 0 = position('', 'abc');
@@ -1462,3 +1463,87 @@ select 0 = multiSearchAny(materialize('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab',
 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab',
 'b']);
+-- 254
+select
+[
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
+] =
+multiSearchAllPositions(materialize('string'),
+['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']);
+select 254 = multiSearchFirstIndex(materialize('string'),
+['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']);
+select
+[
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
+] =
+multiSearchAllPositions(materialize('string'),
+['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']);
+select 255 = multiSearchFirstIndex(materialize('string'),
+['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']);
+select multiSearchAllPositions(materialize('string'),
+['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); -- { serverError 42 }
+select multiSearchFirstIndex(materialize('string'),
+['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o',
+'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); -- { serverError 42 }
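
These added cases pin down the needle limit of the multiSearch* family: 254 and 255 needles succeed, 256 fails with serverError 42 (NUMBER_OF_ARGUMENTS_DOESNT_MATCH in ClickHouse's numbering), presumably because the needle index must fit in a byte. A small guard sketch with an assumed constant name:

    #include <cstddef>
    #include <stdexcept>

    constexpr std::size_t max_multisearch_needles = 255;   // index must fit in UInt8

    void checkNeedleCount(std::size_t needle_count)
    {
        if (needle_count > max_multisearch_needles)
            throw std::invalid_argument("multiSearch functions accept at most 255 needles");
    }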

View File

@@ -1,4 +1,4 @@
 1
-CREATE TEMPORARY TABLE temp_tab ( number UInt64) ENGINE = Memory
+CREATE TEMPORARY TABLE temp_tab (`number` UInt64) ENGINE = Memory
 temp_tab
 0

View File

@@ -1 +1 @@
-CREATE VIEW test.test_view ( id UInt64) AS SELECT * FROM test.test WHERE id = (SELECT 1)
+CREATE VIEW test.test_view (`id` UInt64) AS SELECT * FROM test.test WHERE id = (SELECT 1)

View File

@@ -7,7 +7,7 @@ hello
 hello
 hello
 1970-01-01 00:00:01
-CREATE TABLE test.cast ( x UInt8, e Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')) ENGINE = MergeTree ORDER BY e SETTINGS index_granularity = 8192
+CREATE TABLE test.cast (`x` UInt8, `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')) ENGINE = MergeTree ORDER BY e SETTINGS index_granularity = 8192
 x UInt8
 e Enum8(\'hello\' = 1, \'world\' = 2) DEFAULT CAST(x, \'Enum8(\\\'hello\\\' = 1, \\\'world\\\' = 2)\')
 1 hello

View File

@@ -1,4 +1,4 @@
-CREATE TABLE test.cast1 ( x UInt8, e Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_cast', 'r1') ORDER BY e SETTINGS index_granularity = 8192
+CREATE TABLE test.cast1 (`x` UInt8, `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_cast', 'r1') ORDER BY e SETTINGS index_granularity = 8192
 x UInt8
 e Enum8(\'hello\' = 1, \'world\' = 2) DEFAULT CAST(x, \'Enum8(\\\'hello\\\' = 1, \\\'world\\\' = 2)\')
 1 hello

View File

@@ -1,4 +1,4 @@
-CREATE TABLE test.check_query_comment_column ( first_column UInt8 DEFAULT 1 COMMENT \'comment 1\', second_column UInt8 MATERIALIZED first_column COMMENT \'comment 2\', third_column UInt8 ALIAS second_column COMMENT \'comment 3\', fourth_column UInt8 COMMENT \'comment 4\', fifth_column UInt8) ENGINE = TinyLog
+CREATE TABLE test.check_query_comment_column (`first_column` UInt8 DEFAULT 1 COMMENT \'comment 1\', `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2\', `third_column` UInt8 ALIAS second_column COMMENT \'comment 3\', `fourth_column` UInt8 COMMENT \'comment 4\', `fifth_column` UInt8) ENGINE = TinyLog
 first_column UInt8 DEFAULT 1 comment 1
 second_column UInt8 MATERIALIZED first_column comment 2
 third_column UInt8 ALIAS second_column comment 3
@@ -11,7 +11,7 @@ fifth_column UInt8
 │ check_query_comment_column │ fourth_column │ comment 4 │
 │ check_query_comment_column │ fifth_column │ │
 └────────────────────────────┴───────────────┴───────────┘
-CREATE TABLE test.check_query_comment_column ( first_column UInt8 DEFAULT 1 COMMENT \'comment 1_1\', second_column UInt8 MATERIALIZED first_column COMMENT \'comment 2_1\', third_column UInt8 ALIAS second_column COMMENT \'comment 3_1\', fourth_column UInt8 COMMENT \'comment 4_1\', fifth_column UInt8 COMMENT \'comment 5_1\') ENGINE = TinyLog
+CREATE TABLE test.check_query_comment_column (`first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_1\', `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_1\', `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_1\', `fourth_column` UInt8 COMMENT \'comment 4_1\', `fifth_column` UInt8 COMMENT \'comment 5_1\') ENGINE = TinyLog
 ┌─table──────────────────────┬─name──────────┬─comment─────┐
 │ check_query_comment_column │ first_column │ comment 1_2 │
 │ check_query_comment_column │ second_column │ comment 2_2 │
@@ -19,8 +19,8 @@ CREATE TABLE test.check_query_comment_column ( first_column UInt8 DEFAULT 1 COMM
 │ check_query_comment_column │ fourth_column │ comment 4_2 │
 │ check_query_comment_column │ fifth_column │ comment 5_2 │
 └────────────────────────────┴───────────────┴─────────────┘
-CREATE TABLE test.check_query_comment_column ( first_column UInt8 DEFAULT 1 COMMENT \'comment 1_2\', second_column UInt8 MATERIALIZED first_column COMMENT \'comment 2_2\', third_column UInt8 ALIAS second_column COMMENT \'comment 3_2\', fourth_column UInt8 COMMENT \'comment 4_2\', fifth_column UInt8 COMMENT \'comment 5_2\') ENGINE = TinyLog
+CREATE TABLE test.check_query_comment_column (`first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_2\', `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_2\', `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_2\', `fourth_column` UInt8 COMMENT \'comment 4_2\', `fifth_column` UInt8 COMMENT \'comment 5_2\') ENGINE = TinyLog
-CREATE TABLE test.check_query_comment_column ( first_column UInt8 COMMENT \'comment 1\', second_column UInt8 COMMENT \'comment 2\', third_column UInt8 COMMENT \'comment 3\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
+CREATE TABLE test.check_query_comment_column (`first_column` UInt8 COMMENT \'comment 1\', `second_column` UInt8 COMMENT \'comment 2\', `third_column` UInt8 COMMENT \'comment 3\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
 first_column UInt8 comment 1
 second_column UInt8 comment 2
 third_column UInt8 comment 3
@@ -29,8 +29,8 @@ third_column UInt8 comment 3
 │ check_query_comment_column │ second_column │ comment 2 │
 │ check_query_comment_column │ third_column │ comment 3 │
 └────────────────────────────┴───────────────┴───────────┘
-CREATE TABLE test.check_query_comment_column ( first_column UInt8 COMMENT \'comment 1_2\', second_column UInt8 COMMENT \'comment 2_2\', third_column UInt8 COMMENT \'comment 3_2\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
+CREATE TABLE test.check_query_comment_column (`first_column` UInt8 COMMENT \'comment 1_2\', `second_column` UInt8 COMMENT \'comment 2_2\', `third_column` UInt8 COMMENT \'comment 3_2\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
-CREATE TABLE test.check_query_comment_column ( first_column UInt8 COMMENT \'comment 1_3\', second_column UInt8 COMMENT \'comment 2_3\', third_column UInt8 COMMENT \'comment 3_3\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
+CREATE TABLE test.check_query_comment_column (`first_column` UInt8 COMMENT \'comment 1_3\', `second_column` UInt8 COMMENT \'comment 2_3\', `third_column` UInt8 COMMENT \'comment 3_3\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
 ┌─table──────────────────────┬─name──────────┬─comment─────┐
 │ check_query_comment_column │ first_column │ comment 1_3 │
 │ check_query_comment_column │ second_column │ comment 2_3 │

View File

@@ -1,4 +1,4 @@
-CREATE TABLE test.ipv4_test ( ipv4_ IPv4) ENGINE = Memory
+CREATE TABLE test.ipv4_test (`ipv4_` IPv4) ENGINE = Memory
 0.0.0.0 00
 8.8.8.8 08080808
 127.0.0.1 7F000001
@@ -10,7 +10,7 @@ CREATE TABLE test.ipv4_test ( ipv4_ IPv4) ENGINE = Memory
 > 127.0.0.1 255.255.255.255
 = 127.0.0.1 127.0.0.1
 euqality of IPv4-mapped IPv6 value and IPv4 promoted to IPv6 with function: 1
-CREATE TABLE test.ipv6_test ( ipv6_ IPv6) ENGINE = Memory
+CREATE TABLE test.ipv6_test (`ipv6_` IPv6) ENGINE = Memory
 :: 00000000000000000000000000000000
 :: 00000000000000000000000000000000
 ::ffff:8.8.8.8 00000000000000000000FFFF08080808

View File

@@ -1,4 +1,4 @@
-CREATE MATERIALIZED VIEW test.t_mv ( date Date, platform Enum8('a' = 0, 'b' = 1), app Enum8('a' = 0, 'b' = 1)) ENGINE = MergeTree ORDER BY date SETTINGS index_granularity = 8192 AS SELECT date, platform, app FROM test.t WHERE (app = (SELECT min(app) FROM test.u )) AND (platform = (SELECT (SELECT min(platform) FROM test.v )))
+CREATE MATERIALIZED VIEW test.t_mv (`date` Date, `platform` Enum8('a' = 0, 'b' = 1), `app` Enum8('a' = 0, 'b' = 1)) ENGINE = MergeTree ORDER BY date SETTINGS index_granularity = 8192 AS SELECT date, platform, app FROM test.t WHERE (app = (SELECT min(app) FROM test.u )) AND (platform = (SELECT (SELECT min(platform) FROM test.v )))
 2000-01-01 a a
 2000-01-02 b b
 2000-01-03 a a

View File

@@ -1,6 +1,6 @@
-CREATE TABLE test.check_comments ( column_name1 UInt8 DEFAULT 1 COMMENT \'comment\', column_name2 UInt8 COMMENT \'non default comment\') ENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\') ORDER BY column_name1 SETTINGS index_granularity = 8192
+CREATE TABLE test.check_comments (`column_name1` UInt8 DEFAULT 1 COMMENT \'comment\', `column_name2` UInt8 COMMENT \'non default comment\') ENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\') ORDER BY column_name1 SETTINGS index_granularity = 8192
 column_name1 UInt8 DEFAULT 1 comment
 column_name2 UInt8 non default comment
-CREATE TABLE test.check_comments ( column_name1 UInt8 DEFAULT 1 COMMENT \'another comment\', column_name2 UInt8 COMMENT \'non default comment\') ENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\') ORDER BY column_name1 SETTINGS index_granularity = 8192
+CREATE TABLE test.check_comments (`column_name1` UInt8 DEFAULT 1 COMMENT \'another comment\', `column_name2` UInt8 COMMENT \'non default comment\') ENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\') ORDER BY column_name1 SETTINGS index_granularity = 8192
 column_name1 UInt8 DEFAULT 1 another comment
 column_name2 UInt8 non default comment

View File

@@ -9,4 +9,4 @@
 1 2 1 30
 1 2 4 90
 *** Check SHOW CREATE TABLE ***
-CREATE TABLE test.summing ( x UInt32, y UInt32, z UInt32, val UInt32) ENGINE = SummingMergeTree PRIMARY KEY (x, y) ORDER BY (x, y, -z) SETTINGS index_granularity = 8192
+CREATE TABLE test.summing (`x` UInt32, `y` UInt32, `z` UInt32, `val` UInt32) ENGINE = SummingMergeTree PRIMARY KEY (x, y) ORDER BY (x, y, -z) SETTINGS index_granularity = 8192

View File

@@ -9,6 +9,6 @@
 1 2 1 30
 1 2 4 90
 *** Check SHOW CREATE TABLE ***
-CREATE TABLE test.summing_r2 ( x UInt32, y UInt32, z UInt32, val UInt32) ENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\') PRIMARY KEY (x, y) ORDER BY (x, y, -z) SETTINGS index_granularity = 8192
+CREATE TABLE test.summing_r2 (`x` UInt32, `y` UInt32, `z` UInt32, `val` UInt32) ENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\') PRIMARY KEY (x, y) ORDER BY (x, y, -z) SETTINGS index_granularity = 8192
 *** Check SHOW CREATE TABLE after offline ALTER ***
-CREATE TABLE test.summing_r2 ( x UInt32, y UInt32, z UInt32, t UInt32, val UInt32) ENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\') PRIMARY KEY (x, y) ORDER BY (x, y, t * t) SETTINGS index_granularity = 8192
+CREATE TABLE test.summing_r2 (`x` UInt32, `y` UInt32, `z` UInt32, `t` UInt32, `val` UInt32) ENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\') PRIMARY KEY (x, y) ORDER BY (x, y, t * t) SETTINGS index_granularity = 8192

View File

@@ -9,10 +9,10 @@
 10003
 274972506.6
 9175437371954010821
-CREATE TABLE test.compression_codec_multiple_more_types ( id Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), data FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192
+CREATE TABLE test.compression_codec_multiple_more_types (`id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192
 1.5555555555555 hello world! [77] ['John']
 7.1000000000000 xxxxxxxxxxxx [127] ['Henry']
 !
 222
 !ZSTD
-CREATE TABLE test.test_default_delta ( id UInt64 CODEC(Delta(8)), data String CODEC(Delta(1)), somedate Date CODEC(Delta(2)), somenum Float64 CODEC(Delta(8)), somestr FixedString(3) CODEC(Delta(1)), othernum Int64 CODEC(Delta(8)), yetothernum Float32 CODEC(Delta(4)), `ddd.age` Array(UInt8) CODEC(Delta(1)), `ddd.Name` Array(String) CODEC(Delta(1)), `ddd.OName` Array(String) CODEC(Delta(1)), `ddd.BName` Array(String) CODEC(Delta(1))) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192
+CREATE TABLE test.test_default_delta (`id` UInt64 CODEC(Delta(8)), `data` String CODEC(Delta(1)), `somedate` Date CODEC(Delta(2)), `somenum` Float64 CODEC(Delta(8)), `somestr` FixedString(3) CODEC(Delta(1)), `othernum` Int64 CODEC(Delta(8)), `yetothernum` Float32 CODEC(Delta(4)), `ddd.age` Array(UInt8) CODEC(Delta(1)), `ddd.Name` Array(String) CODEC(Delta(1)), `ddd.OName` Array(String) CODEC(Delta(1)), `ddd.BName` Array(String) CODEC(Delta(1))) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192

View File

@@ -1,9 +1,9 @@
-CREATE TABLE test.compression_codec_log ( id UInt64 CODEC(LZ4), data String CODEC(ZSTD(1)), ddd Date CODEC(NONE), somenum Float64 CODEC(ZSTD(2)), somestr FixedString(3) CODEC(LZ4HC(7)), othernum Int64 CODEC(Delta(8))) ENGINE = Log()
+CREATE TABLE test.compression_codec_log (`id` UInt64 CODEC(LZ4), `data` String CODEC(ZSTD(1)), `ddd` Date CODEC(NONE), `somenum` Float64 CODEC(ZSTD(2)), `somestr` FixedString(3) CODEC(LZ4HC(7)), `othernum` Int64 CODEC(Delta(8))) ENGINE = Log()
 1 hello 2018-12-14 1.1 aaa 5
 2 world 2018-12-15 2.2 bbb 6
 3 ! 2018-12-16 3.3 ccc 7
 2
-CREATE TABLE test.compression_codec_multiple_log ( id UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))) ENGINE = Log()
+CREATE TABLE test.compression_codec_multiple_log (`id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))) ENGINE = Log()
 1 world 2018-10-05 1.1
 2 hello 2018-10-01 2.2
 3 buy 2018-10-11 3.3
@@ -11,12 +11,12 @@ CREATE TABLE test.compression_codec_multiple_log ( id UInt64 CODEC(LZ4, ZSTD(1),
 10003
 274972506.6
 9175437371954010821
-CREATE TABLE test.compression_codec_tiny_log ( id UInt64 CODEC(LZ4), data String CODEC(ZSTD(1)), ddd Date CODEC(NONE), somenum Float64 CODEC(ZSTD(2)), somestr FixedString(3) CODEC(LZ4HC(7)), othernum Int64 CODEC(Delta(8))) ENGINE = TinyLog()
+CREATE TABLE test.compression_codec_tiny_log (`id` UInt64 CODEC(LZ4), `data` String CODEC(ZSTD(1)), `ddd` Date CODEC(NONE), `somenum` Float64 CODEC(ZSTD(2)), `somestr` FixedString(3) CODEC(LZ4HC(7)), `othernum` Int64 CODEC(Delta(8))) ENGINE = TinyLog()
 1 hello 2018-12-14 1.1 aaa 5
 2 world 2018-12-15 2.2 bbb 6
 3 ! 2018-12-16 3.3 ccc 7
 2
-CREATE TABLE test.compression_codec_multiple_tiny_log ( id UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))) ENGINE = TinyLog()
+CREATE TABLE test.compression_codec_multiple_tiny_log (`id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))) ENGINE = TinyLog()
 1 world 2018-10-05 1.1
 2 hello 2018-10-01 2.2
 3 buy 2018-10-11 3.3

View File

@@ -17,7 +17,8 @@ ${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill a single invalid mutat
 ${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE toUInt32(s) = 1"
 sleep 0.1
-${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0', '20010101_2_2_0'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation'"
+${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0', '20010101_2_2_0'), latest_fail_time != 0, substr(replaceRegexpOne(latest_fail_reason, '.version [0-9.]+. ', ''), 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation'"
 ${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation'"
@@ -29,7 +30,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill invalid mutation that
 ${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE toUInt32(s) = 1"
 ${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE x = 1"
-${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0', '20010101_2_2_0'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'"
+${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0', '20010101_2_2_0'), latest_fail_time != 0, substr(replaceRegexpOne(latest_fail_reason, '.version [0-9.]+. ', ''), 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'"
 sleep 0.1
 ${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'"

View File

@@ -20,7 +20,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill a single invalid mutat
 ${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE toUInt32(s) = 1"
 sleep 1
-${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_0_0_0', '20010101_0_0_0'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1'"
+${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_0_0_0', '20010101_0_0_0'), latest_fail_time != 0, substr(replaceRegexpOne(latest_fail_reason, '.version [0-9.]+. ', ''), 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1'"
 ${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation_r1'"
@@ -34,7 +34,7 @@ ${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE toU
 ${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE x = 1"
 sleep 1
-${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_0_0_0_1', '20010101_0_0_0_1'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1' AND mutation_id = '0000000001'"
+${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_0_0_0_1', '20010101_0_0_0_1'), latest_fail_time != 0, substr(replaceRegexpOne(latest_fail_reason, '.version [0-9.]+. ', ''), 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1' AND mutation_id = '0000000001'"
 ${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation_r1' AND mutation_id = '0000000001'"

View File

@@ -1,4 +1,4 @@
-CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
 1 2
 1 2
 1 2
@@ -6,15 +6,15 @@ CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYP
 1 2
 1 2
 1 2
-CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
 1 2
 1 2
 1 2
 1 2
 1 2
 1 2
-CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
 1 2
 1 2
 1 2
@@ -23,6 +23,6 @@ CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYP
 1 2
 1 2
 1 2
-CREATE TABLE test.minmax_idx2 ( u64 UInt64, i32 Int32) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx2 (`u64` UInt64, `i32` Int32) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
 1 2
 1 2
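The shifting index lists in this reference come from ALTER statements that drop and re-add data-skipping indices between the SHOW CREATE calls (at the time, the feature was experimental and typically required SET allow_experimental_data_skipping_indices = 1). A hedged sketch of the statements being exercised:

ALTER TABLE test.minmax_idx DROP INDEX idx1;
ALTER TABLE test.minmax_idx ADD INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10;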

View File

@@ -1,5 +1,5 @@
-CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx_r ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
 1 2
 1 2
 1 2
@@ -14,8 +14,8 @@ CREATE TABLE test.minmax_idx_r ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 T
 3 2
 19 9
 65 75
-CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx_r ( u64 UInt64, i32 Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
 1 2
 1 4
 1 5
@@ -28,10 +28,10 @@ CREATE TABLE test.minmax_idx_r ( u64 UInt64, i32 Int32, INDEX idx3 u64 - i32 T
 3 2
 19 9
 65 75
-CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx_r ( u64 UInt64, i32 Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx_r ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
 1 2
 1 4
 1 5
@@ -44,14 +44,14 @@ CREATE TABLE test.minmax_idx_r ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 T
 3 2
 19 9
 65 75
-CREATE TABLE test.minmax_idx2 ( u64 UInt64, i32 Int32, INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx2 (`u64` UInt64, `i32` Int32, INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx2_r ( u64 UInt64, i32 Int32, INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx2_r (`u64` UInt64, `i32` Int32, INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
 1 2
 1 3
 1 2
 1 3
-CREATE TABLE test.minmax_idx2 ( u64 UInt64, i32 Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx2 (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx2_r ( u64 UInt64, i32 Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx2_r (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
 1 2
 1 3
 1 2
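Since minmax_idx and minmax_idx_r share the ZooKeeper path /clickhouse/tables/test/indices_alter1, an index ALTER issued on one replica is expected to surface in both SHOW CREATE outputs above. A hedged sketch of the replication check:

ALTER TABLE test.minmax_idx ADD INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10;
SHOW CREATE TABLE test.minmax_idx_r; -- the r2 replica should now list idx1 as well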

View File

@@ -0,0 +1,44 @@
on
l \N \N String Nullable(String)
l \N \N String Nullable(String)
r \N String Nullable(String)
\N r \N Nullable(String) Nullable(String)
l \N String Nullable(String)
l \N String Nullable(String)
r \N String Nullable(String)
\N r \N Nullable(String) Nullable(String)
\N \N
0 \N
using
l \N String Nullable(String)
l \N String Nullable(String)
\N String Nullable(String)
\N \N Nullable(String) Nullable(String)
l \N String Nullable(String)
l \N String Nullable(String)
\N String Nullable(String)
\N \N Nullable(String) Nullable(String)
\N \N
0 \N
on + join_use_nulls
l \N \N TODO Nullable(String)
l \N \N TODO Nullable(String)
r \N TODO Nullable(String)
\N r \N Nullable(String) Nullable(String)
l \N TODO Nullable(String)
l \N TODO Nullable(String)
r \N TODO Nullable(String)
\N r \N Nullable(String) Nullable(String)
\N \N
0 \N
using + join_use_nulls
l \N TODO Nullable(String)
l \N TODO Nullable(String)
\N TODO Nullable(String)
\N \N Nullable(String) Nullable(String)
l \N TODO Nullable(String)
l \N TODO Nullable(String)
\N TODO Nullable(String)
\N \N Nullable(String) Nullable(String)
\N \N
0 \N

View File

@@ -0,0 +1,69 @@
USE test;
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
DROP TABLE IF EXISTS t3;
CREATE TABLE t1 ( id String ) ENGINE = Memory;
CREATE TABLE t2 ( id Nullable(String) ) ENGINE = Memory;
CREATE TABLE t3 ( id Nullable(String), not_id Nullable(String) ) ENGINE = Memory;
insert into t1 values ('l');
insert into t3 (id) values ('r');
SELECT 'on';
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 ANY LEFT JOIN t3 ON t1.id = t3.id;
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 ANY FULL JOIN t3 ON t1.id = t3.id;
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 ANY FULL JOIN t3 ON t2.id = t3.id;
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 LEFT JOIN t3 ON t1.id = t3.id;
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 FULL JOIN t3 ON t1.id = t3.id;
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 FULL JOIN t3 ON t2.id = t3.id;
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 ANY LEFT JOIN t3 ON t1.id = t3.id;
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 LEFT JOIN t3 ON t1.id = t3.id;
SELECT 'using';
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 ANY LEFT JOIN t3 USING(id);
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 ANY FULL JOIN t3 USING(id);
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 ANY FULL JOIN t3 USING(id);
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 LEFT JOIN t3 USING(id);
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 FULL JOIN t3 USING(id);
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 FULL JOIN t3 USING(id);
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 ANY LEFT JOIN t3 USING(id);
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 LEFT JOIN t3 USING(id);
SET join_use_nulls = 1;
-- TODO: toTypeName(t1.id) String -> Nullable(String)
SELECT 'on + join_use_nulls';
SELECT *, 'TODO', toTypeName(t3.id) FROM t1 ANY LEFT JOIN t3 ON t1.id = t3.id;
SELECT *, 'TODO', toTypeName(t3.id) FROM t1 ANY FULL JOIN t3 ON t1.id = t3.id;
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 ANY FULL JOIN t3 ON t2.id = t3.id;
SELECT *, 'TODO', toTypeName(t3.id) FROM t1 LEFT JOIN t3 ON t1.id = t3.id;
SELECT *, 'TODO', toTypeName(t3.id) FROM t1 FULL JOIN t3 ON t1.id = t3.id;
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 FULL JOIN t3 ON t2.id = t3.id;
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 ANY LEFT JOIN t3 ON t1.id = t3.id;
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 LEFT JOIN t3 ON t1.id = t3.id;
SELECT 'using + join_use_nulls';
SELECT *, 'TODO', toTypeName(t3.id) FROM t1 ANY LEFT JOIN t3 USING(id);
SELECT *, 'TODO', toTypeName(t3.id) FROM t1 ANY FULL JOIN t3 USING(id);
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 ANY FULL JOIN t3 USING(id);
SELECT *, 'TODO', toTypeName(t3.id) FROM t1 LEFT JOIN t3 USING(id);
SELECT *, 'TODO', toTypeName(t3.id) FROM t1 FULL JOIN t3 USING(id);
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 FULL JOIN t3 USING(id);
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 ANY LEFT JOIN t3 USING(id);
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 LEFT JOIN t3 USING(id);
DROP TABLE t1;
DROP TABLE t2;
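The test above pins down how join_use_nulls interacts with ON and USING: with the setting enabled, columns coming from the non-matched side of an outer join are expected to become NULL (hence the Nullable types in the reference file), while the TODO markers record that t1.id is not yet promoted to Nullable(String). A minimal sketch of the setting's effect, reusing the tables from this test:

SET join_use_nulls = 1;
SELECT t3.id FROM t1 FULL JOIN t3 ON t1.id = t3.id; -- unmatched rows yield NULL rather than an empty-string default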

View File

@@ -1,2 +1,7 @@
 1
 0
+0
+0 0
+0
+0 0
+0 0

View File

@@ -35,49 +35,49 @@ GLOBAL INNER JOIN
 ) USING dummy;
--- SET asterisk_left_columns_only = 0;
+SET asterisk_left_columns_only = 0;
---
+
--- SELECT * FROM remote('127.0.0.2', system.one)
+SELECT * FROM remote('127.0.0.2', system.one)
--- GLOBAL INNER JOIN
+GLOBAL INNER JOIN
--- (
+(
--- SELECT *, dummy
+SELECT *, dummy
--- FROM ( SELECT dummy FROM remote('127.0.0.2', system.one) ) t1
+FROM ( SELECT dummy FROM remote('127.0.0.2', system.one) ) t1
--- GLOBAL INNER JOIN ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t2
+GLOBAL INNER JOIN ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t2
--- USING dummy
+USING dummy
--- ) USING dummy;
+) USING dummy;
---
+
--- SELECT * FROM remote('127.0.0.2', system.one)
+SELECT * FROM remote('127.0.0.2', system.one)
--- GLOBAL INNER JOIN
+GLOBAL INNER JOIN
--- (
+(
--- SELECT *, t1.*, t2.*
+SELECT *, t1.*, t2.*
--- FROM ( SELECT toUInt8(1) AS dummy ) t1
+FROM ( SELECT toUInt8(0) AS dummy ) t1
--- INNER JOIN ( SELECT toUInt8(1) AS dummy ) t2
+INNER JOIN ( SELECT toUInt8(0) AS dummy ) t2
--- USING dummy
+USING dummy
--- ) USING dummy;
+) USING dummy;
---
+
--- SELECT * FROM remote('127.0.0.2', system.one)
+SELECT * FROM remote('127.0.0.2', system.one)
--- GLOBAL INNER JOIN
+GLOBAL INNER JOIN
--- (
+(
--- SELECT *, dummy
+SELECT *, dummy
--- FROM ( SELECT toUInt8(1) AS dummy ) t1
+FROM ( SELECT toUInt8(0) AS dummy ) t1
--- INNER JOIN ( SELECT toUInt8(1) AS dummy ) t2
+INNER JOIN ( SELECT toUInt8(0) AS dummy ) t2
--- USING dummy
+USING dummy
--- ) USING dummy;
+) USING dummy;
---
+
--- SELECT * FROM remote('127.0.0.2', system.one)
+SELECT * FROM remote('127.0.0.2', system.one)
--- GLOBAL INNER JOIN
+GLOBAL INNER JOIN
--- (
+(
--- SELECT *
+SELECT *, dummy as other
--- FROM ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t1
+FROM ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t1
--- GLOBAL INNER JOIN ( SELECT toUInt8(1) AS dummy ) t2
+GLOBAL INNER JOIN ( SELECT toUInt8(0) AS dummy ) t2
--- USING dummy
+USING dummy
--- ) USING dummy;
+) USING dummy;
---
+
--- SELECT * FROM remote('127.0.0.2', system.one)
+SELECT * FROM remote('127.0.0.2', system.one)
--- GLOBAL INNER JOIN
+GLOBAL INNER JOIN
--- (
+(
--- SELECT *
+SELECT *, dummy, dummy as other
--- FROM ( SELECT toUInt8(1) AS dummy ) t1
+FROM ( SELECT toUInt8(0) AS dummy ) t1
--- GLOBAL INNER JOIN ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t2
+GLOBAL INNER JOIN ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t2
--- USING dummy
+USING dummy
--- ) USING dummy;
+) USING dummy;
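The block above was previously commented out; it is re-enabled together with SET asterisk_left_columns_only = 0, under which * in the joined subqueries expands to columns from both sides rather than only the left table, and the constant dummy values are switched from toUInt8(1) to toUInt8(0) so they actually match rows from remote system.one, whose single dummy value is 0. A hedged sketch of the setting's effect:

SET asterisk_left_columns_only = 0;
SELECT * FROM ( SELECT 1 AS a ) t1 INNER JOIN ( SELECT 1 AS a, 2 AS b ) t2 USING a; -- with the setting disabled, the output is expected to include t2's b as well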

Some files were not shown because too many files have changed in this diff