Mirror of https://github.com/ClickHouse/ClickHouse.git

Commit 4417705f32: Merge branch 'master' into cow-ptr-compositions
.gitmodules (vendored, 3 additions)

@@ -76,3 +76,6 @@
 [submodule "contrib/brotli"]
 	path = contrib/brotli
 	url = https://github.com/google/brotli.git
+[submodule "contrib/hyperscan"]
+	path = contrib/hyperscan
+	url = https://github.com/ClickHouse-Extras/hyperscan.git
@@ -26,7 +26,7 @@
 * Fixed undefined behaviour in the `dictIsIn` function for dictionaries of the `cache` type. [#4515](https://github.com/yandex/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
 * Fixed a deadlock that occurred when a SELECT query locked the same table several times (for example, from different threads or while executing different subqueries) while a DDL query was running concurrently. [#4535](https://github.com/yandex/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
 * The `compile_expressions` setting is disabled by default until we pin the sources of the `LLVM` library we use and start checking it under `ASan` (currently the LLVM library is taken from the system). [#4579](https://github.com/yandex/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin))
-* Fixed a crash via `std::terminate` when `invalidate_query` for external dictionaries with a `clickhouse` sourse returned an incorrect result (empty; more than one row; more than one column). Also fixed a bug that made the `invalidate_query` run every five seconds regardless of the specified `lifetime`. [#4583](https://github.com/yandex/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fixed a crash via `std::terminate` when `invalidate_query` for external dictionaries with a `clickhouse` source returned an incorrect result (empty; more than one row; more than one column). Also fixed a bug that made the `invalidate_query` run every five seconds regardless of the specified `lifetime`. [#4583](https://github.com/yandex/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
 * Fixed a deadlock when an `invalidate_query` for an external dictionary with a `clickhouse` source used the `system.dictionaries` table or a database of the `Dictionary` type (a rare case). [#4599](https://github.com/yandex/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov))
 * Fixed CROSS JOIN with an empty WHERE. [#4598](https://github.com/yandex/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2))
 * Fixed a segfault in the `replicate` function with a constant argument. [#4603](https://github.com/yandex/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov))
@@ -1,8 +1,11 @@
 project(ClickHouse)
 cmake_minimum_required(VERSION 3.3)
 cmake_policy(SET CMP0023 NEW)

 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/")
+set(CMAKE_EXPORT_COMPILE_COMMANDS 1) # Write compile_commands.json
 set(CMAKE_LINK_DEPENDS_NO_SHARED 1) # Do not relink all depended targets on .so
+set(CMAKE_CONFIGURATION_TYPES "RelWithDebInfo;Debug;Release;MinSizeRel" CACHE STRING "" FORCE)
+set(CMAKE_DEBUG_POSTFIX "d" CACHE STRING "Generate debug library name with a postfix.") # To be consistent with CMakeLists from contrib libs.

 option(ENABLE_IPO "Enable inter-procedural optimization (aka LTO)" OFF) # need cmake 3.9+
 if(ENABLE_IPO)

@@ -38,9 +41,6 @@ if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git" AND NOT EXISTS "${ClickHouse_SOURC
     message (FATAL_ERROR "Submodules are not initialized. Run\n\tgit submodule update --init --recursive")
 endif ()

-# Write compile_commands.json
-set(CMAKE_EXPORT_COMPILE_COMMANDS 1)
-
 include (cmake/find_ccache.cmake)

 if (NOT CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "None")

@@ -50,8 +50,6 @@ endif ()
 string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC)
 message (STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")

-set (CMAKE_CONFIGURATION_TYPES "RelWithDebInfo;Debug;Release;MinSizeRel" CACHE STRING "" FORCE)
-set (CMAKE_DEBUG_POSTFIX "d" CACHE STRING "Generate debug library name with a postfix.") # To be consistent with CMakeLists from contrib libs.
-
 option (USE_STATIC_LIBRARIES "Set to FALSE to use shared libraries" ON)
 option (MAKE_STATIC_LIBRARIES "Set to FALSE to make shared libraries" ${USE_STATIC_LIBRARIES})

@@ -318,6 +316,7 @@ include (cmake/find_pdqsort.cmake)
 include (cmake/find_hdfs3.cmake) # uses protobuf
 include (cmake/find_consistent-hashing.cmake)
 include (cmake/find_base64.cmake)
+include (cmake/find_hyperscan.cmake)
 find_contrib_lib(cityhash)
 find_contrib_lib(farmhash)
 find_contrib_lib(metrohash)
cmake/find_hyperscan.cmake (new file, 33 lines)

@@ -0,0 +1,33 @@
+if (HAVE_SSSE3)
+    option (ENABLE_HYPERSCAN "Enable hyperscan" ON)
+endif ()
+
+if (ENABLE_HYPERSCAN)
+
+option (USE_INTERNAL_HYPERSCAN_LIBRARY "Set to FALSE to use system hyperscan instead of the bundled" ${NOT_UNBUNDLED})
+
+if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/hyperscan/CMakeLists.txt")
+    if (USE_INTERNAL_HYPERSCAN_LIBRARY)
+        message (WARNING "submodule contrib/hyperscan is missing. to fix try run: \n git submodule update --init --recursive")
+    endif ()
+    set (MISSING_INTERNAL_HYPERSCAN_LIBRARY 1)
+    set (USE_INTERNAL_HYPERSCAN_LIBRARY 0)
+endif ()
+
+if (NOT USE_INTERNAL_HYPERSCAN_LIBRARY)
+    find_library (HYPERSCAN_LIBRARY hs)
+    find_path (HYPERSCAN_INCLUDE_DIR NAMES hs/hs.h hs.h PATHS ${HYPERSCAN_INCLUDE_PATHS})
+endif ()
+
+if (HYPERSCAN_LIBRARY AND HYPERSCAN_INCLUDE_DIR)
+    set (USE_HYPERSCAN 1)
+elseif (NOT MISSING_INTERNAL_HYPERSCAN_LIBRARY)
+    set (HYPERSCAN_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/hyperscan/src)
+    set (HYPERSCAN_LIBRARY hs)
+    set (USE_HYPERSCAN 1)
+    set (USE_INTERNAL_HYPERSCAN_LIBRARY 1)
+endif()
+
+message (STATUS "Using hyperscan=${USE_HYPERSCAN}: ${HYPERSCAN_INCLUDE_DIR} : ${HYPERSCAN_LIBRARY}")
+
+endif ()
contrib/CMakeLists.txt (vendored, 4 additions)

@@ -304,3 +304,7 @@ endif ()
 if (USE_BASE64)
     add_subdirectory (base64-cmake)
 endif()
+
+if (USE_INTERNAL_HYPERSCAN_LIBRARY)
+    add_subdirectory (hyperscan)
+endif()
contrib/boost (vendored)

@@ -1 +1 @@
-Subproject commit 6a96e8b59f76148eb8ad54a9d15259f8ce84c606
+Subproject commit 471ea208abb92a5cba7d3a08a819bb728f27e95f
contrib/hyperscan (new vendored submodule)

@@ -0,0 +1 @@
+Subproject commit 05dab0efee80be405aad5f74721b692b6889b75e
contrib/librdkafka (vendored)

@@ -1 +1 @@
-Subproject commit 73295a702cd1c85c11749ade500d713db7099cca
+Subproject commit 8695b9d63ac0fe1b891b511d5b36302ffc84d4e2
@@ -93,6 +93,7 @@ if (CLICKHOUSE_ONE_SHARED)
 target_link_libraries(clickhouse-lib ${CLICKHOUSE_SERVER_LINK} ${CLICKHOUSE_CLIENT_LINK} ${CLICKHOUSE_LOCAL_LINK} ${CLICKHOUSE_BENCHMARK_LINK} ${CLICKHOUSE_PERFORMANCE_TEST_LINK} ${CLICKHOUSE_COPIER_LINK} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_LINK} ${CLICKHOUSE_COMPRESSOR_LINK} ${CLICKHOUSE_FORMAT_LINK} ${CLICKHOUSE_OBFUSCATOR_LINK} ${CLICKHOUSE_COMPILER_LINK} ${CLICKHOUSE_ODBC_BRIDGE_LINK})
 target_include_directories(clickhouse-lib ${CLICKHOUSE_SERVER_INCLUDE} ${CLICKHOUSE_CLIENT_INCLUDE} ${CLICKHOUSE_LOCAL_INCLUDE} ${CLICKHOUSE_BENCHMARK_INCLUDE} ${CLICKHOUSE_PERFORMANCE_TEST_INCLUDE} ${CLICKHOUSE_COPIER_INCLUDE} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_INCLUDE} ${CLICKHOUSE_COMPRESSOR_INCLUDE} ${CLICKHOUSE_FORMAT_INCLUDE} ${CLICKHOUSE_OBFUSCATOR_INCLUDE} ${CLICKHOUSE_COMPILER_INCLUDE} ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE})
 set_target_properties(clickhouse-lib PROPERTIES SOVERSION ${VERSION_MAJOR}.${VERSION_MINOR} VERSION ${VERSION_SO} OUTPUT_NAME clickhouse DEBUG_POSTFIX "")
 install (TARGETS clickhouse-lib LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT clickhouse)
 endif()

 if (CLICKHOUSE_SPLIT_BINARY)
@@ -1,5 +1,11 @@
 #!/bin/sh

+# Helper for split build mode.
+# Allows to run commands like
+#   clickhouse client
+#   clickhouse server
+#   ...
+
 set -e
 CMD=$1
 shift
@@ -42,6 +42,7 @@
 #include <IO/ReadBufferFromString.h>
 #include <IO/ReadHelpers.h>
 #include <IO/WriteHelpers.h>
+#include <IO/Operators.h>
 #include <IO/UseSSL.h>
 #include <DataStreams/AsynchronousBlockInputStream.h>
 #include <DataStreams/AddingDefaultsBlockInputStream.h>

@@ -1314,6 +1315,9 @@ private:
         /// Received data block is immediately displayed to the user.
         block_out_stream->flush();
+
+        /// Restore progress bar after data block.
+        writeProgress();
     }

@@ -1353,8 +1357,8 @@ private:
     void clearProgress()
     {
+        std::cerr << RESTORE_CURSOR_POSITION CLEAR_TO_END_OF_LINE;
         written_progress_chars = 0;
-        std::cerr << RESTORE_CURSOR_POSITION CLEAR_TO_END_OF_LINE;
     }

@@ -1363,6 +1367,9 @@ private:
         if (!need_render_progress)
            return;

+        /// Output all progress bar commands to stderr at once to avoid flicker.
+        WriteBufferFromFileDescriptor message(STDERR_FILENO, 1024);
+
         static size_t increment = 0;
         static const char * indicators[8] =
         {

@@ -1377,13 +1384,15 @@ private:
         };

         if (written_progress_chars)
-            clearProgress();
+            message << RESTORE_CURSOR_POSITION CLEAR_TO_END_OF_LINE;
         else
-            std::cerr << SAVE_CURSOR_POSITION;
+            message << SAVE_CURSOR_POSITION;
+
+        message << DISABLE_LINE_WRAPPING;
+
+        size_t prefix_size = message.count();

-        std::stringstream message;
         message << indicators[increment % 8]
             << std::fixed << std::setprecision(3)
             << " Progress: ";

         message

@@ -1398,8 +1407,7 @@ private:
         else
             message << ". ";

-        written_progress_chars = message.str().size() - (increment % 8 == 7 ? 10 : 13);
-        std::cerr << DISABLE_LINE_WRAPPING << message.rdbuf();
+        written_progress_chars = message.count() - prefix_size - (increment % 8 == 7 ? 10 : 13); /// Don't count invisible output (escape sequences).

         /// If the approximate number of rows to process is known, we can display a progress bar and percentage.
         if (progress.total_rows > 0)

@@ -1421,19 +1429,21 @@ private:
                 if (width_of_progress_bar > 0)
                 {
                     std::string bar = UnicodeBar::render(UnicodeBar::getWidth(progress.rows, 0, total_rows_corrected, width_of_progress_bar));
-                    std::cerr << "\033[0;32m" << bar << "\033[0m";
+                    message << "\033[0;32m" << bar << "\033[0m";
                     if (width_of_progress_bar > static_cast<ssize_t>(bar.size() / UNICODE_BAR_CHAR_SIZE))
-                        std::cerr << std::string(width_of_progress_bar - bar.size() / UNICODE_BAR_CHAR_SIZE, ' ');
+                        message << std::string(width_of_progress_bar - bar.size() / UNICODE_BAR_CHAR_SIZE, ' ');
                 }
             }
         }

         /// Underestimate percentage a bit to avoid displaying 100%.
-        std::cerr << ' ' << (99 * progress.rows / total_rows_corrected) << '%';
+        message << ' ' << (99 * progress.rows / total_rows_corrected) << '%';
     }

-    std::cerr << ENABLE_LINE_WRAPPING;
+    message << ENABLE_LINE_WRAPPING;
     ++increment;
+
+    message.next();
 }
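The reworked writeProgress() above renders every escape sequence and the progress text into one WriteBufferFromFileDescriptor and flushes it with a single message.next() call, so the terminal never repaints a half-drawn line. A minimal standalone sketch of the same idea, assuming only POSIX write() and common ANSI codes rather than the ClickHouse WriteBuffer API:

```cpp
// Sketch: build the whole progress line in memory, then emit it in one write()
// so the terminal cannot show a partially drawn (flickering) line.
#include <cstddef>
#include <string>
#include <unistd.h>

void writeProgressLine(size_t rows, size_t total_rows)
{
    std::string message;
    message += "\r\033[K";  // return to column 0 and clear the old line (ANSI)
    message += "Progress: " + std::to_string(rows) + " rows";
    if (total_rows > 0)
        message += " (" + std::to_string(99 * rows / total_rows) + "%)";  // underestimate to avoid 100%
    (void)::write(STDERR_FILENO, message.data(), message.size());  // one syscall, one repaint
}
```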
@@ -420,6 +420,7 @@ namespace ErrorCodes
     extern const int NO_COMMON_COLUMNS_WITH_PROTOBUF_SCHEMA = 443;
     extern const int UNKNOWN_PROTOBUF_FORMAT = 444;
     extern const int CANNOT_MPROTECT = 445;
+    extern const int FUNCTION_NOT_ALLOWED = 446;

     extern const int KEEPER_EXCEPTION = 999;
     extern const int POCO_EXCEPTION = 1000;
@@ -1,17 +1,14 @@
 #include "Exception.h"

 #include <string.h>
 #include <cxxabi.h>
 #include <Poco/String.h>
 #include <common/logger_useful.h>
 #include <IO/WriteHelpers.h>
 #include <IO/Operators.h>
 #include <IO/ReadBufferFromString.h>
 #include <Common/Exception.h>
 #include <common/demangle.h>
+#include <Common/config_version.h>

 namespace DB
 {

@@ -24,6 +21,10 @@ namespace ErrorCodes
     extern const int CANNOT_TRUNCATE_FILE;
 }

+const char * getVersion()
+{
+    return VERSION_STRING;
+}
+
 std::string errnoToString(int code, int e)
 {

@@ -81,13 +82,13 @@ std::string getCurrentExceptionMessage(bool with_stacktrace, bool check_embedded
     }
     catch (const Exception & e)
     {
-        stream << getExceptionMessage(e, with_stacktrace, check_embedded_stacktrace);
+        stream << "(version " << getVersion() << ") " << getExceptionMessage(e, with_stacktrace, check_embedded_stacktrace);
     }
     catch (const Poco::Exception & e)
     {
         try
         {
-            stream << "Poco::Exception. Code: " << ErrorCodes::POCO_EXCEPTION << ", e.code() = " << e.code()
+            stream << "(version " << getVersion() << ") " << "Poco::Exception. Code: " << ErrorCodes::POCO_EXCEPTION << ", e.code() = " << e.code()
                 << ", e.displayText() = " << e.displayText();
         }
         catch (...) {}

@@ -102,7 +103,7 @@ std::string getCurrentExceptionMessage(bool with_stacktrace, bool check_embedded
             if (status)
                 name += " (demangling status: " + toString(status) + ")";

-            stream << "std::exception. Code: " << ErrorCodes::STD_EXCEPTION << ", type: " << name << ", e.what() = " << e.what();
+            stream << "(version " << getVersion() << ") " << "std::exception. Code: " << ErrorCodes::STD_EXCEPTION << ", type: " << name << ", e.what() = " << e.what();
         }
         catch (...) {}
     }

@@ -116,7 +117,7 @@ std::string getCurrentExceptionMessage(bool with_stacktrace, bool check_embedded
             if (status)
                 name += " (demangling status: " + toString(status) + ")";

-            stream << "Unknown exception. Code: " << ErrorCodes::UNKNOWN_EXCEPTION << ", type: " << name;
+            stream << "(version " << getVersion() << ") " << "Unknown exception. Code: " << ErrorCodes::UNKNOWN_EXCEPTION << ", type: " << name;
         }
         catch (...) {}
     }
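The change above tags every rendered exception message with the server version via getVersion(), which makes logs from mixed-version clusters self-identifying. A tiny self-contained illustration of the pattern, with VERSION_STRING standing in for the generated constant from config_version.h:

```cpp
#include <sstream>
#include <string>

static const char * VERSION_STRING = "19.4.0";  // placeholder for the generated build constant

const char * getVersion() { return VERSION_STRING; }

// Prefix any exception text the way getCurrentExceptionMessage() now does.
std::string renderException(const std::string & what)
{
    std::ostringstream stream;
    stream << "(version " << getVersion() << ") " << what;
    return stream.str();
}
```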
@@ -437,10 +437,10 @@ public:
     }

     template <typename ResultType, typename AnsCallback>
-    void searchAll(
+    void searchAllPositions(
         const ColumnString::Chars & haystack_data,
         const ColumnString::Offsets & haystack_offsets,
-        const AnsCallback & ansCallback,
+        const AnsCallback & ans_callback,
         ResultType & ans)
     {
         const size_t haystack_string_size = haystack_offsets.size();

@@ -461,7 +461,7 @@ public:
         {
             const UInt8 * ptr = fallback_searchers[fallback_needles[i]].search(haystack, haystack_end);
             if (ptr != haystack_end)
-                ans[from + fallback_needles[i]] = ansCallback(haystack, ptr);
+                ans[from + fallback_needles[i]] = ans_callback(haystack, ptr);
         }

         /// check if we have one non-empty volnitsky searcher

@@ -481,7 +481,7 @@ public:
                 {
                     if (fallback_searchers[ind].compare(res))
                     {
-                        ans[from + ind] = ansCallback(haystack, res);
+                        ans[from + ind] = ans_callback(haystack, res);
                     }
                 }
             }

@@ -513,6 +513,16 @@ public:
         searchInternal(haystack_data, haystack_offsets, callback, ans);
     }

+    template <typename ResultType, typename CountCharsCallback>
+    void searchFirstPosition(const ColumnString::Chars & haystack_data, const ColumnString::Offsets & haystack_offsets, const CountCharsCallback & count_chars_callback, ResultType & ans)
+    {
+        auto callback = [this, &count_chars_callback](const UInt8 * haystack, const UInt8 * haystack_end) -> UInt64
+        {
+            return this->searchOneFirstPosition(haystack, haystack_end, count_chars_callback);
+        };
+        searchInternal(haystack_data, haystack_offsets, callback, ans);
+    }
+
 private:
     /**
      * This function is needed to initialize hash table

@@ -582,7 +592,7 @@ private:
     inline void searchInternal(
         const ColumnString::Chars & haystack_data,
         const ColumnString::Offsets & haystack_offsets,
-        const OneSearcher & searchFallback,
+        const OneSearcher & search_fallback,
         ResultType & ans)
     {
         const size_t haystack_string_size = haystack_offsets.size();

@@ -593,7 +603,7 @@ private:
         {
             const auto * haystack = &haystack_data[prev_offset];
             const auto * haystack_end = haystack + haystack_offsets[j] - prev_offset - 1;
-            ans[j] = searchFallback(haystack, haystack_end);
+            ans[j] = search_fallback(haystack, haystack_end);
             prev_offset = haystack_offsets[j];
         }
     }

@@ -665,6 +675,41 @@ private:
         return ans + 1;
     }

+    template <typename CountCharsCallback>
+    inline UInt64 searchOneFirstPosition(const UInt8 * haystack, const UInt8 * haystack_end, const CountCharsCallback & callback) const
+    {
+        const size_t fallback_size = fallback_needles.size();
+
+        UInt64 ans = std::numeric_limits<UInt64>::max();
+
+        for (size_t i = 0; i < fallback_size; ++i)
+            if (auto pos = fallback_searchers[fallback_needles[i]].search(haystack, haystack_end); pos != haystack_end)
+                ans = std::min(ans, callback(haystack, pos));
+
+        /// check if we have one non-empty volnitsky searcher
+        if (step != std::numeric_limits<size_t>::max())
+        {
+            const auto * pos = haystack + step - sizeof(VolnitskyTraits::Ngram);
+            for (; pos <= haystack_end - sizeof(VolnitskyTraits::Ngram); pos += step)
+            {
+                for (size_t cell_num = VolnitskyTraits::toNGram(pos) % VolnitskyTraits::hash_size; hash[cell_num].off;
+                     cell_num = (cell_num + 1) % VolnitskyTraits::hash_size)
+                {
+                    if (pos >= haystack + hash[cell_num].off - 1)
+                    {
+                        const auto res = pos - (hash[cell_num].off - 1);
+                        const size_t ind = hash[cell_num].id;
+                        if (res + needles[ind].size <= haystack_end && fallback_searchers[ind].compare(res))
+                            ans = std::min(ans, callback(haystack, res));
+                    }
+                }
+            }
+        }
+        if (ans == std::numeric_limits<UInt64>::max())
+            return 0;
+        return ans;
+    }
+
     void putNGramBase(const VolnitskyTraits::Ngram ngram, const int offset, const size_t num)
     {
         size_t cell_num = ngram % VolnitskyTraits::hash_size;
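The new searchOneFirstPosition() above combines the Volnitsky hash scan with the fallback searchers and returns the smallest callback value over all needles, or 0 when nothing matches. A naive reference implementation of that same contract, using plain std::string_view search instead of the Volnitsky machinery:

```cpp
// Reference semantics of searchOneFirstPosition: the minimum 1-based position
// of any needle in the haystack, or 0 if no needle occurs at all.
#include <algorithm>
#include <cstdint>
#include <string_view>
#include <vector>

uint64_t firstPositionOfAny(std::string_view haystack, const std::vector<std::string_view> & needles)
{
    uint64_t ans = UINT64_MAX;
    for (const auto needle : needles)
        if (const auto pos = haystack.find(needle); pos != std::string_view::npos)
            ans = std::min<uint64_t>(ans, pos + 1);  // +1: positions are 1-based, like the callback result
    return ans == UINT64_MAX ? 0 : ans;
}
```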
@@ -24,6 +24,7 @@
 #cmakedefine01 USE_CPUINFO
 #cmakedefine01 USE_BROTLI
 #cmakedefine01 USE_SSL
+#cmakedefine01 USE_HYPERSCAN

 #cmakedefine01 CLICKHOUSE_SPLIT_BINARY
 #cmakedefine01 LLVM_HAS_RTTI
@@ -306,6 +306,8 @@ struct Settings
     M(SettingBool, cancel_http_readonly_queries_on_client_close, false, "Cancel HTTP readonly queries when a client closes the connection without waiting for response.") \
     M(SettingBool, external_table_functions_use_nulls, true, "If it is set to true, external table functions will implicitly use Nullable type if needed. Otherwise NULLs will be substituted with default values. Currently supported only for 'mysql' table function.") \
     M(SettingBool, allow_experimental_data_skipping_indices, false, "If it is set to true, data skipping indices can be used in CREATE TABLE/ALTER TABLE queries.") \
+    \
+    M(SettingBool, allow_hyperscan, true, "Allow functions that use Hyperscan library. Disable to avoid potentially long compilation times and excessive resource usage.") \

 #define DECLARE(TYPE, NAME, DEFAULT, DESCRIPTION) \
     TYPE NAME {DEFAULT};
@@ -206,28 +206,42 @@ CapnProtoRowInputStream::CapnProtoRowInputStream(ReadBuffer & istr_, const Block
     createActions(list, root);
 }

+kj::Array<capnp::word> CapnProtoRowInputStream::readMessage()
+{
+    uint32_t segment_count;
+    istr.readStrict(reinterpret_cast<char*>(&segment_count), sizeof(uint32_t));
+
+    // one for segmentCount and one because segmentCount starts from 0
+    const auto prefix_size = (2 + segment_count) * sizeof(uint32_t);
+    const auto words_prefix_size = (segment_count + 1) / 2 + 1;
+    auto prefix = kj::heapArray<capnp::word>(words_prefix_size);
+    auto prefix_chars = prefix.asChars();
+    ::memcpy(prefix_chars.begin(), &segment_count, sizeof(uint32_t));
+
+    // read size of each segment
+    for (size_t i = 0; i <= segment_count; ++i)
+        istr.readStrict(prefix_chars.begin() + ((i + 1) * sizeof(uint32_t)), sizeof(uint32_t));
+
+    // calculate size of message
+    const auto expected_words = capnp::expectedSizeInWordsFromPrefix(prefix);
+    const auto expected_bytes = expected_words * sizeof(capnp::word);
+    const auto data_size = expected_bytes - prefix_size;
+    auto msg = kj::heapArray<capnp::word>(expected_words);
+    auto msg_chars = msg.asChars();
+
+    // read full message
+    ::memcpy(msg_chars.begin(), prefix_chars.begin(), prefix_size);
+    istr.readStrict(msg_chars.begin() + prefix_size, data_size);
+
+    return msg;
+}
+
 bool CapnProtoRowInputStream::read(MutableColumns & columns, RowReadExtension &)
 {
     if (istr.eof())
         return false;

-    // Read from underlying buffer directly
-    auto buf = istr.buffer();
-    auto base = reinterpret_cast<const capnp::word *>(istr.position());
-
-    // Check if there's enough bytes in the buffer to read the full message
-    kj::Array<capnp::word> heap_array;
-    auto array = kj::arrayPtr(base, buf.size() - istr.offset());
-    auto expected_words = capnp::expectedSizeInWordsFromPrefix(array);
-    if (expected_words * sizeof(capnp::word) > array.size())
-    {
-        // We'll need to reassemble the message in a contiguous buffer
-        heap_array = kj::heapArray<capnp::word>(expected_words);
-        istr.readStrict(heap_array.asChars().begin(), heap_array.asChars().size());
-        array = heap_array.asPtr();
-    }
+    auto array = readMessage();

 #if CAPNP_VERSION >= 8000
     capnp::UnalignedFlatArrayMessageReader msg(array);

@@ -281,13 +295,6 @@ bool CapnProtoRowInputStream::read(MutableColumns & columns, RowReadExtension &)
         }
     }

-    // Advance buffer position if used directly
-    if (heap_array.size() == 0)
-    {
-        auto parsed = (msg.getEnd() - base) * sizeof(capnp::word);
-        istr.position() += parsed;
-    }
-
     return true;
 }

@@ -38,6 +38,8 @@ public:
     bool read(MutableColumns & columns, RowReadExtension &) override;

 private:
+    kj::Array<capnp::word> readMessage();
+
     // Build a traversal plan from a sorted list of fields
     void createActions(const NestedFieldList & sortedFields, capnp::StructSchema reader);
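readMessage() sizes its prefix from the Cap'n Proto stream framing: one uint32 segment-count field (stored as count minus one, hence "starts from 0" in the comment), followed by segment_count + 1 size entries, all rounded up to whole 8-byte words. A small self-check of that arithmetic, assuming sizeof(capnp::word) == 8:

```cpp
// Verifies that the word-rounded prefix allocation always covers the
// (2 + segment_count) uint32 values readMessage() actually reads,
// and that the rounding wastes less than one word.
#include <cassert>
#include <cstddef>
#include <cstdint>

int main()
{
    for (uint32_t segment_count = 0; segment_count < 100; ++segment_count)
    {
        const size_t prefix_size = (2 + segment_count) * sizeof(uint32_t);  // bytes read
        const size_t words_prefix_size = (segment_count + 1) / 2 + 1;       // words allocated
        assert(words_prefix_size * 8 >= prefix_size);
        assert(words_prefix_size * 8 - prefix_size < 8);
    }
}
```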
@@ -64,3 +64,8 @@ if (USE_XXHASH)
     target_link_libraries(clickhouse_functions PRIVATE ${XXHASH_LIBRARY})
     target_include_directories(clickhouse_functions SYSTEM PRIVATE ${XXHASH_INCLUDE_DIR})
 endif()
+
+if (USE_HYPERSCAN)
+    target_link_libraries (clickhouse_functions PRIVATE ${HYPERSCAN_LIBRARY})
+    target_include_directories (clickhouse_functions SYSTEM PRIVATE ${HYPERSCAN_INCLUDE_DIR})
+endif ()
@@ -1,8 +1,6 @@
-#include <Functions/FunctionsStringSearch.h>
+#include "FunctionsStringSearch.h"

 #include <Columns/ColumnFixedString.h>
-#include <Common/config.h>

 #include <DataTypes/DataTypeFixedString.h>
 #include <Functions/FunctionFactory.h>
 #include <Functions/Regexps.h>

@@ -11,10 +9,18 @@
 #include <re2/stringpiece.h>
 #include <Poco/UTF8String.h>
 #include <Common/Volnitsky.h>

+#include <algorithm>
+#include <memory>
+
+#include <Common/config.h>
+#if USE_HYPERSCAN
+#    if __has_include(<hs/hs.h>)
+#        include <hs/hs.h>
+#    else
+#        include <hs.h>
+#    endif
+#endif

 #if USE_RE2_ST
 #    include <re2_st/re2.h> // Y_IGNORE
 #else

@@ -312,7 +318,7 @@ struct PositionImpl
 };

 template <typename Impl>
-struct MultiPositionImpl
+struct MultiSearchAllPositionsImpl
 {
     using ResultType = UInt64;

@@ -322,19 +328,35 @@ struct MultiPositionImpl
         const std::vector<StringRef> & needles,
         PaddedPODArray<UInt64> & res)
     {
-        auto resCallback = [](const UInt8 * start, const UInt8 * end) -> UInt64
+        auto res_callback = [](const UInt8 * start, const UInt8 * end) -> UInt64
         {
             return 1 + Impl::countChars(reinterpret_cast<const char *>(start), reinterpret_cast<const char *>(end));
         };

-        Impl::createMultiSearcherInBigHaystack(needles).searchAll(haystack_data, haystack_offsets, resCallback, res);
+        Impl::createMultiSearcherInBigHaystack(needles).searchAllPositions(haystack_data, haystack_offsets, res_callback, res);
     }
 };

 template <typename Impl>
 struct MultiSearchImpl
 {
     using ResultType = UInt8;
+    static constexpr bool is_using_hyperscan = false;

     static void vector_constant(
         const ColumnString::Chars & haystack_data,
         const ColumnString::Offsets & haystack_offsets,
         const std::vector<StringRef> & needles,
         PaddedPODArray<UInt8> & res)
     {
         Impl::createMultiSearcherInBigHaystack(needles).search(haystack_data, haystack_offsets, res);
     }
 };

+template <typename Impl>
+struct MultiSearchFirstPositionImpl
+{
+    using ResultType = UInt64;
+    static constexpr bool is_using_hyperscan = false;
+
+    static void vector_constant(
+        const ColumnString::Chars & haystack_data,

@@ -342,14 +364,19 @@ struct MultiSearchImpl
         const std::vector<StringRef> & needles,
         PaddedPODArray<UInt64> & res)
     {
-        Impl::createMultiSearcherInBigHaystack(needles).search(haystack_data, haystack_offsets, res);
+        auto res_callback = [](const UInt8 * start, const UInt8 * end) -> UInt64
+        {
+            return 1 + Impl::countChars(reinterpret_cast<const char *>(start), reinterpret_cast<const char *>(end));
+        };
+        Impl::createMultiSearcherInBigHaystack(needles).searchFirstPosition(haystack_data, haystack_offsets, res_callback, res);
     }
 };

 template <typename Impl>
-struct FirstMatchImpl
+struct MultiSearchFirstIndexImpl
 {
     using ResultType = UInt64;
+    static constexpr bool is_using_hyperscan = false;

     static void vector_constant(
         const ColumnString::Chars & haystack_data,

@@ -524,8 +551,8 @@ struct MatchImpl
                 res[i] = !revert;
             else
             {
-                const char * str_data = reinterpret_cast<const char *>(&data[i != 0 ? offsets[i - 1] : 0]);
-                size_t str_size = (i != 0 ? offsets[i] - offsets[i - 1] : offsets[0]) - 1;
+                const char * str_data = reinterpret_cast<const char *>(&data[offsets[i - 1]]);
+                size_t str_size = offsets[i] - offsets[i - 1] - 1;

                 /** Even in the case of `required_substring_is_prefix` use UNANCHORED check for regexp,
                  *  so that it can match when `required_substring` occurs into the string several times,

@@ -581,6 +608,79 @@ struct MatchImpl
 };

+template <typename Type, bool FindAny, bool FindAnyIndex>
+struct MultiMatchAnyImpl
+{
+    static_assert(static_cast<int>(FindAny) + static_cast<int>(FindAnyIndex) == 1);
+    using ResultType = Type;
+    static constexpr bool is_using_hyperscan = true;
+
+    static void vector_constant(
+        const ColumnString::Chars & haystack_data,
+        const ColumnString::Offsets & haystack_offsets,
+        const std::vector<StringRef> & needles,
+        PaddedPODArray<Type> & res)
+    {
+        (void)FindAny;
+        (void)FindAnyIndex;
+#if USE_HYPERSCAN
+        using ScratchPtr = std::unique_ptr<hs_scratch_t, DB::MultiRegexps::HyperscanDeleter<decltype(&hs_free_scratch), &hs_free_scratch>>;
+
+        const auto & hyperscan_regex = MultiRegexps::get<FindAnyIndex>(needles);
+        hs_scratch_t * scratch = nullptr;
+        hs_error_t err = hs_alloc_scratch(hyperscan_regex->get(), &scratch);
+        if (err != HS_SUCCESS)
+            throw Exception("Could not allocate scratch space for hyperscan.", ErrorCodes::CANNOT_ALLOCATE_MEMORY);
+        ScratchPtr smart_scratch(scratch);
+
+        auto on_match = []([[maybe_unused]] unsigned int id,
+                           unsigned long long /* from */,
+                           unsigned long long /* to */,
+                           unsigned int /* flags */,
+                           void * context) -> int
+        {
+            if constexpr (FindAnyIndex)
+                *reinterpret_cast<Type *>(context) = id;
+            else if constexpr (FindAny)
+                *reinterpret_cast<Type *>(context) = 1;
+            return 0;
+        };
+        const size_t haystack_offsets_size = haystack_offsets.size();
+        size_t offset = 0;
+        for (size_t i = 0; i < haystack_offsets_size; ++i)
+        {
+            res[i] = 0;
+            hs_scan(
+                hyperscan_regex->get(),
+                reinterpret_cast<const char *>(haystack_data.data()) + offset,
+                haystack_offsets[i] - offset - 1,
+                0,
+                smart_scratch.get(),
+                on_match,
+                &res[i]);
+            offset = haystack_offsets[i];
+        }
+#else
+        /// Fallback if not an intel processor
+        PaddedPODArray<UInt8> accum(res.size());
+        memset(res.data(), 0, res.size() * sizeof(res.front()));
+        memset(accum.data(), 0, accum.size());
+        for (size_t j = 0; j < needles.size(); ++j)
+        {
+            MatchImpl<false, false>::vector_constant(haystack_data, haystack_offsets, needles[j].toString(), accum);
+            for (size_t i = 0; i < res.size(); ++i)
+            {
+                if constexpr (FindAny)
+                    res[i] |= accum[i];
+                else if (accum[i])
+                    res[i] = j + 1;
+            }
+        }
+#endif // USE_HYPERSCAN
+    }
+};
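MultiMatchAnyImpl drives Hyperscan through exactly three calls: hs_compile_multi (done once per pattern set in MultiRegexps::get, shown later in this commit), hs_alloc_scratch, and hs_scan with a match callback. A standalone sketch of that call sequence against a system hs.h, with error handling trimmed to the essentials; the pattern strings and ids below are illustrative only:

```cpp
#include <hs.h>
#include <cstdio>

int main()
{
    const char * patterns[] = {"foo", "ba+r"};
    unsigned flags[] = {HS_FLAG_DOTALL | HS_FLAG_ALLOWEMPTY | HS_FLAG_SINGLEMATCH,
                        HS_FLAG_DOTALL | HS_FLAG_ALLOWEMPTY | HS_FLAG_SINGLEMATCH};
    unsigned ids[] = {1, 2};  // reported back through the callback, like multiMatchAnyIndex

    hs_database_t * db = nullptr;
    hs_compile_error_t * compile_error = nullptr;
    if (hs_compile_multi(patterns, flags, ids, 2, HS_MODE_BLOCK, nullptr, &db, &compile_error) != HS_SUCCESS)
        return 1;

    hs_scratch_t * scratch = nullptr;
    if (hs_alloc_scratch(db, &scratch) != HS_SUCCESS)
        return 1;

    unsigned matched = 0;
    auto on_match = [](unsigned id, unsigned long long, unsigned long long, unsigned, void * ctx) -> int
    {
        *static_cast<unsigned *>(ctx) = id;  // remember which pattern fired
        return 0;                            // 0 = keep scanning
    };
    const char haystack[] = "foobar";
    hs_scan(db, haystack, sizeof(haystack) - 1, 0, scratch, on_match, &matched);
    std::printf("matched pattern id: %u\n", matched);

    hs_free_scratch(scratch);
    hs_free_database(db);
}
```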

 struct ExtractImpl
 {
     static void vector(

@@ -1090,53 +1190,69 @@ struct NamePositionCaseInsensitiveUTF8
 {
     static constexpr auto name = "positionCaseInsensitiveUTF8";
 };
-struct NameMultiPosition
+struct NameMultiSearchAllPositions
 {
-    static constexpr auto name = "multiPosition";
+    static constexpr auto name = "multiSearchAllPositions";
 };
-struct NameMultiPositionUTF8
+struct NameMultiSearchAllPositionsUTF8
 {
-    static constexpr auto name = "multiPositionUTF8";
+    static constexpr auto name = "multiSearchAllPositionsUTF8";
 };
-struct NameMultiPositionCaseInsensitive
+struct NameMultiSearchAllPositionsCaseInsensitive
 {
-    static constexpr auto name = "multiPositionCaseInsensitive";
+    static constexpr auto name = "multiSearchAllPositionsCaseInsensitive";
 };
-struct NameMultiPositionCaseInsensitiveUTF8
+struct NameMultiSearchAllPositionsCaseInsensitiveUTF8
 {
-    static constexpr auto name = "multiPositionCaseInsensitiveUTF8";
+    static constexpr auto name = "multiSearchAllPositionsCaseInsensitiveUTF8";
 };
-struct NameMultiSearch
+struct NameMultiSearchAny
 {
-    static constexpr auto name = "multiSearch";
+    static constexpr auto name = "multiSearchAny";
 };
-struct NameMultiSearchUTF8
+struct NameMultiSearchAnyUTF8
 {
-    static constexpr auto name = "multiSearchUTF8";
+    static constexpr auto name = "multiSearchAnyUTF8";
 };
-struct NameMultiSearchCaseInsensitive
+struct NameMultiSearchAnyCaseInsensitive
 {
-    static constexpr auto name = "multiSearchCaseInsensitive";
+    static constexpr auto name = "multiSearchAnyCaseInsensitive";
 };
-struct NameMultiSearchCaseInsensitiveUTF8
+struct NameMultiSearchAnyCaseInsensitiveUTF8
 {
-    static constexpr auto name = "multiSearchCaseInsensitiveUTF8";
+    static constexpr auto name = "multiSearchAnyCaseInsensitiveUTF8";
 };
-struct NameFirstMatch
+struct NameMultiSearchFirstIndex
 {
-    static constexpr auto name = "firstMatch";
+    static constexpr auto name = "multiSearchFirstIndex";
 };
-struct NameFirstMatchUTF8
+struct NameMultiSearchFirstIndexUTF8
 {
-    static constexpr auto name = "firstMatchUTF8";
+    static constexpr auto name = "multiSearchFirstIndexUTF8";
 };
-struct NameFirstMatchCaseInsensitive
+struct NameMultiSearchFirstIndexCaseInsensitive
 {
-    static constexpr auto name = "firstMatchCaseInsensitive";
+    static constexpr auto name = "multiSearchFirstIndexCaseInsensitive";
 };
-struct NameFirstMatchCaseInsensitiveUTF8
+struct NameMultiSearchFirstIndexCaseInsensitiveUTF8
 {
-    static constexpr auto name = "firstMatchCaseInsensitiveUTF8";
+    static constexpr auto name = "multiSearchFirstIndexCaseInsensitiveUTF8";
 };
+struct NameMultiSearchFirstPosition
+{
+    static constexpr auto name = "multiSearchFirstPosition";
+};
+struct NameMultiSearchFirstPositionUTF8
+{
+    static constexpr auto name = "multiSearchFirstPositionUTF8";
+};
+struct NameMultiSearchFirstPositionCaseInsensitive
+{
+    static constexpr auto name = "multiSearchFirstPositionCaseInsensitive";
+};
+struct NameMultiSearchFirstPositionCaseInsensitiveUTF8
+{
+    static constexpr auto name = "multiSearchFirstPositionCaseInsensitiveUTF8";
+};
 struct NameMatch
 {

@@ -1150,6 +1266,14 @@ struct NameNotLike
 {
     static constexpr auto name = "notLike";
 };
+struct NameMultiMatchAny
+{
+    static constexpr auto name = "multiMatchAny";
+};
+struct NameMultiMatchAnyIndex
+{
+    static constexpr auto name = "multiMatchAnyIndex";
+};
 struct NameExtract
 {
     static constexpr auto name = "extract";

@@ -1177,28 +1301,37 @@ using FunctionPositionCaseInsensitive = FunctionsStringSearch<PositionImpl<Posit
 using FunctionPositionCaseInsensitiveUTF8
     = FunctionsStringSearch<PositionImpl<PositionCaseInsensitiveUTF8>, NamePositionCaseInsensitiveUTF8>;

-using FunctionMultiPosition = FunctionsMultiStringPosition<MultiPositionImpl<PositionCaseSensitiveASCII>, NameMultiPosition>;
-using FunctionMultiPositionUTF8 = FunctionsMultiStringPosition<MultiPositionImpl<PositionCaseSensitiveUTF8>, NameMultiPositionUTF8>;
-using FunctionMultiPositionCaseInsensitive
-    = FunctionsMultiStringPosition<MultiPositionImpl<PositionCaseInsensitiveASCII>, NameMultiPositionCaseInsensitive>;
-using FunctionMultiPositionCaseInsensitiveUTF8
-    = FunctionsMultiStringPosition<MultiPositionImpl<PositionCaseInsensitiveUTF8>, NameMultiPositionCaseInsensitiveUTF8>;
+using FunctionMultiSearchAllPositions = FunctionsMultiStringPosition<MultiSearchAllPositionsImpl<PositionCaseSensitiveASCII>, NameMultiSearchAllPositions>;
+using FunctionMultiSearchAllPositionsUTF8 = FunctionsMultiStringPosition<MultiSearchAllPositionsImpl<PositionCaseSensitiveUTF8>, NameMultiSearchAllPositionsUTF8>;
+using FunctionMultiSearchAllPositionsCaseInsensitive
+    = FunctionsMultiStringPosition<MultiSearchAllPositionsImpl<PositionCaseInsensitiveASCII>, NameMultiSearchAllPositionsCaseInsensitive>;
+using FunctionMultiSearchAllPositionsCaseInsensitiveUTF8
+    = FunctionsMultiStringPosition<MultiSearchAllPositionsImpl<PositionCaseInsensitiveUTF8>, NameMultiSearchAllPositionsCaseInsensitiveUTF8>;

-using FunctionMultiSearch = FunctionsMultiStringSearch<MultiSearchImpl<PositionCaseSensitiveASCII>, NameMultiSearch>;
-using FunctionMultiSearchUTF8 = FunctionsMultiStringSearch<MultiSearchImpl<PositionCaseSensitiveUTF8>, NameMultiSearchUTF8>;
+using FunctionMultiSearch = FunctionsMultiStringSearch<MultiSearchImpl<PositionCaseSensitiveASCII>, NameMultiSearchAny>;
+using FunctionMultiSearchUTF8 = FunctionsMultiStringSearch<MultiSearchImpl<PositionCaseSensitiveUTF8>, NameMultiSearchAnyUTF8>;
 using FunctionMultiSearchCaseInsensitive
-    = FunctionsMultiStringSearch<MultiSearchImpl<PositionCaseInsensitiveASCII>, NameMultiSearchCaseInsensitive>;
+    = FunctionsMultiStringSearch<MultiSearchImpl<PositionCaseInsensitiveASCII>, NameMultiSearchAnyCaseInsensitive>;
 using FunctionMultiSearchCaseInsensitiveUTF8
-    = FunctionsMultiStringSearch<MultiSearchImpl<PositionCaseInsensitiveUTF8>, NameMultiSearchCaseInsensitiveUTF8>;
+    = FunctionsMultiStringSearch<MultiSearchImpl<PositionCaseInsensitiveUTF8>, NameMultiSearchAnyCaseInsensitiveUTF8>;

-using FunctionFirstMatch = FunctionsMultiStringSearch<FirstMatchImpl<PositionCaseSensitiveASCII>, NameFirstMatch>;
-using FunctionFirstMatchUTF8 = FunctionsMultiStringSearch<FirstMatchImpl<PositionCaseSensitiveUTF8>, NameFirstMatchUTF8>;
-using FunctionFirstMatchCaseInsensitive
-    = FunctionsMultiStringSearch<FirstMatchImpl<PositionCaseInsensitiveASCII>, NameFirstMatchCaseInsensitive>;
-using FunctionFirstMatchCaseInsensitiveUTF8
-    = FunctionsMultiStringSearch<FirstMatchImpl<PositionCaseInsensitiveUTF8>, NameFirstMatchCaseInsensitiveUTF8>;
+using FunctionMultiSearchFirstIndex = FunctionsMultiStringSearch<MultiSearchFirstIndexImpl<PositionCaseSensitiveASCII>, NameMultiSearchFirstIndex>;
+using FunctionMultiSearchFirstIndexUTF8 = FunctionsMultiStringSearch<MultiSearchFirstIndexImpl<PositionCaseSensitiveUTF8>, NameMultiSearchFirstIndexUTF8>;
+using FunctionMultiSearchFirstIndexCaseInsensitive
+    = FunctionsMultiStringSearch<MultiSearchFirstIndexImpl<PositionCaseInsensitiveASCII>, NameMultiSearchFirstIndexCaseInsensitive>;
+using FunctionMultiSearchFirstIndexCaseInsensitiveUTF8
+    = FunctionsMultiStringSearch<MultiSearchFirstIndexImpl<PositionCaseInsensitiveUTF8>, NameMultiSearchFirstIndexCaseInsensitiveUTF8>;
+
+using FunctionMultiSearchFirstPosition = FunctionsMultiStringSearch<MultiSearchFirstPositionImpl<PositionCaseSensitiveASCII>, NameMultiSearchFirstPosition>;
+using FunctionMultiSearchFirstPositionUTF8 = FunctionsMultiStringSearch<MultiSearchFirstPositionImpl<PositionCaseSensitiveUTF8>, NameMultiSearchFirstPositionUTF8>;
+using FunctionMultiSearchFirstPositionCaseInsensitive
+    = FunctionsMultiStringSearch<MultiSearchFirstPositionImpl<PositionCaseInsensitiveASCII>, NameMultiSearchFirstPositionCaseInsensitive>;
+using FunctionMultiSearchFirstPositionCaseInsensitiveUTF8
+    = FunctionsMultiStringSearch<MultiSearchFirstPositionImpl<PositionCaseInsensitiveUTF8>, NameMultiSearchFirstPositionCaseInsensitiveUTF8>;

 using FunctionMatch = FunctionsStringSearch<MatchImpl<false>, NameMatch>;
+using FunctionMultiMatchAny = FunctionsMultiStringSearch<MultiMatchAnyImpl<UInt8, true, false>, NameMultiMatchAny, std::numeric_limits<UInt32>::max()>;
+using FunctionMultiMatchAnyIndex = FunctionsMultiStringSearch<MultiMatchAnyImpl<UInt64, false, true>, NameMultiMatchAnyIndex, std::numeric_limits<UInt32>::max()>;
 using FunctionLike = FunctionsStringSearch<MatchImpl<true>, NameLike>;
 using FunctionNotLike = FunctionsStringSearch<MatchImpl<true, true>, NameNotLike>;
 using FunctionExtract = FunctionsStringSearchToString<ExtractImpl, NameExtract>;

@@ -1220,26 +1353,34 @@ void registerFunctionsStringSearch(FunctionFactory & factory)
     factory.registerFunction<FunctionPositionCaseInsensitive>();
     factory.registerFunction<FunctionPositionCaseInsensitiveUTF8>();

-    factory.registerFunction<FunctionMultiPosition>();
-    factory.registerFunction<FunctionMultiPositionUTF8>();
-    factory.registerFunction<FunctionMultiPositionCaseInsensitive>();
-    factory.registerFunction<FunctionMultiPositionCaseInsensitiveUTF8>();
+    factory.registerFunction<FunctionMultiSearchAllPositions>();
+    factory.registerFunction<FunctionMultiSearchAllPositionsUTF8>();
+    factory.registerFunction<FunctionMultiSearchAllPositionsCaseInsensitive>();
+    factory.registerFunction<FunctionMultiSearchAllPositionsCaseInsensitiveUTF8>();

     factory.registerFunction<FunctionMultiSearch>();
     factory.registerFunction<FunctionMultiSearchUTF8>();
     factory.registerFunction<FunctionMultiSearchCaseInsensitive>();
     factory.registerFunction<FunctionMultiSearchCaseInsensitiveUTF8>();

-    factory.registerFunction<FunctionFirstMatch>();
-    factory.registerFunction<FunctionFirstMatchUTF8>();
-    factory.registerFunction<FunctionFirstMatchCaseInsensitive>();
-    factory.registerFunction<FunctionFirstMatchCaseInsensitiveUTF8>();
+    factory.registerFunction<FunctionMultiSearchFirstIndex>();
+    factory.registerFunction<FunctionMultiSearchFirstIndexUTF8>();
+    factory.registerFunction<FunctionMultiSearchFirstIndexCaseInsensitive>();
+    factory.registerFunction<FunctionMultiSearchFirstIndexCaseInsensitiveUTF8>();
+
+    factory.registerFunction<FunctionMultiSearchFirstPosition>();
+    factory.registerFunction<FunctionMultiSearchFirstPositionUTF8>();
+    factory.registerFunction<FunctionMultiSearchFirstPositionCaseInsensitive>();
+    factory.registerFunction<FunctionMultiSearchFirstPositionCaseInsensitiveUTF8>();

     factory.registerFunction<FunctionMatch>();
     factory.registerFunction<FunctionLike>();
     factory.registerFunction<FunctionNotLike>();
     factory.registerFunction<FunctionExtract>();

+    factory.registerFunction<FunctionMultiMatchAny>();
+    factory.registerFunction<FunctionMultiMatchAnyIndex>();
+
     factory.registerAlias("locate", NamePosition::name, FunctionFactory::CaseInsensitive);
     factory.registerAlias("replace", NameReplaceAll::name, FunctionFactory::CaseInsensitive);
 }
@@ -11,8 +11,10 @@
 #include <Functions/FunctionHelpers.h>
 #include <Functions/IFunction.h>
 #include <IO/WriteHelpers.h>
+#include <Interpreters/Context.h>
+#include <common/StringRef.h>

 namespace DB
 {
 /** Search and replace functions in strings:

@@ -26,6 +28,8 @@ namespace DB
  * notLike(haystack, pattern)
  *
  * match(haystack, pattern) - search by regular expression re2; Returns 0 or 1.
+ * multiMatchAny(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- search by re2 regular expressions pattern_i; Returns 0 or 1 if any pattern_i matches.
+ * multiMatchAnyIndex(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- search by re2 regular expressions pattern_i; Returns index of any match or zero if none;
  *
  * Applies regexp re2 and pulls:
  * - the first subpattern, if the regexp has a subpattern;

@@ -39,20 +43,25 @@ namespace DB
  * replaceRegexpOne(haystack, pattern, replacement) - replaces the pattern with the specified regexp, only the first occurrence.
  * replaceRegexpAll(haystack, pattern, replacement) - replaces the pattern with the specified type, all occurrences.
  *
- * multiPosition(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- find first occurrences (positions) of all the const patterns inside haystack
- * multiPositionUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
- * multiPositionCaseInsensitive(haystack, [pattern_1, pattern_2, ..., pattern_n])
- * multiPositionCaseInsensitiveUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
- *
- * multiSearch(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- find any of the const patterns inside haystack and return 0 or 1
- * multiSearchUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
- * multiSearchCaseInsensitive(haystack, [pattern_1, pattern_2, ..., pattern_n])
- * multiSearchCaseInsensitiveUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
+ * multiSearchAllPositions(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- find first occurrences (positions) of all the const patterns inside haystack
+ * multiSearchAllPositionsUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
+ * multiSearchAllPositionsCaseInsensitive(haystack, [pattern_1, pattern_2, ..., pattern_n])
+ * multiSearchAllPositionsCaseInsensitiveUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
 *
- * firstMatch(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- returns the first index of the matched string or zero if nothing was found
- * firstMatchUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
- * firstMatchCaseInsensitive(haystack, [pattern_1, pattern_2, ..., pattern_n])
- * firstMatchCaseInsensitiveUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
+ * multiSearchFirstPosition(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- returns the first position of the haystack matched by strings or zero if nothing was found
+ * multiSearchFirstPositionUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
+ * multiSearchFirstPositionCaseInsensitive(haystack, [pattern_1, pattern_2, ..., pattern_n])
+ * multiSearchFirstPositionCaseInsensitiveUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
+ *
+ * multiSearchAny(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- find any of the const patterns inside haystack and return 0 or 1
+ * multiSearchAnyUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
+ * multiSearchAnyCaseInsensitive(haystack, [pattern_1, pattern_2, ..., pattern_n])
+ * multiSearchAnyCaseInsensitiveUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
+ *
+ * multiSearchFirstIndex(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- returns the first index of the matched string or zero if nothing was found
+ * multiSearchFirstIndexUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
+ * multiSearchFirstIndexCaseInsensitive(haystack, [pattern_1, pattern_2, ..., pattern_n])
+ * multiSearchFirstIndexCaseInsensitiveUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n])
 */

@@ -60,6 +69,7 @@ namespace ErrorCodes
     extern const int ILLEGAL_TYPE_OF_ARGUMENT;
     extern const int ILLEGAL_COLUMN;
     extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
+    extern const int FUNCTION_NOT_ALLOWED;
 }

 template <typename Impl, typename Name>

@@ -200,6 +210,8 @@ public:
     String getName() const override { return name; }

     size_t getNumberOfArguments() const override { return 2; }
+    bool useDefaultImplementationForConstants() const override { return true; }
+    ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }

     DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
     {

@@ -269,23 +281,35 @@ public:
     }
 };

-template <typename Impl, typename Name>
+/// The argument limit arises from the Volnitsky searcher: it is performance-critical to store the pattern number in only one byte.
+/// Other searchers also use this class, for example multiMatchAny: hyperscan does not have such a restriction.
+template <typename Impl, typename Name, size_t LimitArgs = std::numeric_limits<UInt8>::max()>
 class FunctionsMultiStringSearch : public IFunction
 {
+    static_assert(LimitArgs > 0);
+
 public:
     static constexpr auto name = Name::name;
-    static FunctionPtr create(const Context &) { return std::make_shared<FunctionsMultiStringSearch>(); }
+    static FunctionPtr create(const Context & context)
+    {
+        if (Impl::is_using_hyperscan && !context.getSettingsRef().allow_hyperscan)
+            throw Exception("Hyperscan functions are disabled, because setting 'allow_hyperscan' is set to 0", ErrorCodes::FUNCTION_NOT_ALLOWED);
+
+        return std::make_shared<FunctionsMultiStringSearch>();
+    }

     String getName() const override { return name; }

     size_t getNumberOfArguments() const override { return 2; }
+    bool useDefaultImplementationForConstants() const override { return true; }
+    ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }

     DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
     {
-        if (arguments.size() + 1 >= std::numeric_limits<UInt8>::max())
+        if (arguments.size() + 1 >= LimitArgs)
             throw Exception(
                 "Number of arguments for function " + getName() + " doesn't match: passed " + std::to_string(arguments.size())
-                    + ", should be at most 255.",
+                    + ", should be at most " + std::to_string(LimitArgs) + ".",
                 ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

         if (!isString(arguments[0]))

@@ -333,6 +357,7 @@ public:

         vec_res.resize(column_haystack_size);

+        /// TODO support constant_constant version
         if (col_haystack_vector)
             Impl::vector_constant(col_haystack_vector->getChars(), col_haystack_vector->getOffsets(), refs, vec_res);
         else
@@ -1,10 +1,22 @@
 #pragma once

-#include <Common/OptimizedRegularExpression.h>
-#include <Common/ObjectPool.h>
-#include <Common/ProfileEvents.h>
 #include <Functions/likePatternToRegexp.h>
+#include <Common/ObjectPool.h>
+#include <Common/OptimizedRegularExpression.h>
+#include <Common/ProfileEvents.h>
+#include <common/StringRef.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <Common/config.h>
+#if USE_HYPERSCAN
+#    if __has_include(<hs/hs.h>)
+#        include <hs/hs.h>
+#    else
+#        include <hs.h>
+#    endif
+#endif

 namespace ProfileEvents
 {

@@ -14,6 +26,11 @@ namespace ProfileEvents

 namespace DB
 {
+namespace ErrorCodes
+{
+    extern const int CANNOT_ALLOCATE_MEMORY;
+    extern const int LOGICAL_ERROR;
+}

 namespace Regexps
 {

@@ -21,10 +38,16 @@ namespace Regexps
     using Pool = ObjectPoolMap<Regexp, String>;

     template <bool like>
-    inline Regexp createRegexp(const std::string & pattern, int flags) { return {pattern, flags}; }
+    inline Regexp createRegexp(const std::string & pattern, int flags)
+    {
+        return {pattern, flags};
+    }

     template <>
-    inline Regexp createRegexp<true>(const std::string & pattern, int flags) { return {likePatternToRegexp(pattern), flags}; }
+    inline Regexp createRegexp<true>(const std::string & pattern, int flags)
+    {
+        return {likePatternToRegexp(pattern), flags};
+    }

     template <bool like, bool no_capture>
     inline Pool::Pointer get(const std::string & pattern)

@@ -44,4 +67,82 @@ namespace Regexps
     }
 }

+#if USE_HYPERSCAN
+
+namespace MultiRegexps
+{
+    template <typename Deleter, Deleter deleter>
+    struct HyperscanDeleter
+    {
+        template <typename T>
+        void operator()(T * ptr) const
+        {
+            deleter(ptr);
+        }
+    };
+
+    using Regexps = std::unique_ptr<hs_database_t, HyperscanDeleter<decltype(&hs_free_database), &hs_free_database>>;
+
+    using Pool = ObjectPoolMap<Regexps, std::vector<String>>;
+
+    template <bool FindAnyIndex>
+    inline Pool::Pointer get(const std::vector<StringRef> & patterns)
+    {
+        /// C++11 has thread-safe function-local statics on most modern compilers.
+        static Pool known_regexps; /// Different variables for different pattern parameters.
+
+        std::vector<String> str_patterns;
+        str_patterns.reserve(patterns.size());
+        for (const StringRef & ref : patterns)
+            str_patterns.push_back(ref.toString());
+
+        return known_regexps.get(str_patterns, [&str_patterns]
+        {
+            std::vector<const char *> ptrns;
+            std::vector<unsigned int> flags;
+            ptrns.reserve(str_patterns.size());
+            flags.reserve(str_patterns.size());
+            for (const StringRef ref : str_patterns)
+            {
+                ptrns.push_back(ref.data);
+                flags.push_back(HS_FLAG_DOTALL | HS_FLAG_ALLOWEMPTY | HS_FLAG_SINGLEMATCH);
+            }
+            hs_database_t * db = nullptr;
+            hs_compile_error_t * compile_error;
+
+            std::unique_ptr<unsigned int[]> ids;
+
+            if constexpr (FindAnyIndex)
+            {
+                ids.reset(new unsigned int[ptrns.size()]);
+                for (size_t i = 0; i < ptrns.size(); ++i)
+                    ids[i] = i + 1;
+            }
+
+            hs_error_t err
+                = hs_compile_multi(ptrns.data(), flags.data(), ids.get(), ptrns.size(), HS_MODE_BLOCK, nullptr, &db, &compile_error);
+            if (err != HS_SUCCESS)
+            {
+                std::unique_ptr<
+                    hs_compile_error_t,
+                    HyperscanDeleter<decltype(&hs_free_compile_error), &hs_free_compile_error>> error(compile_error);
+
+                if (error->expression < 0)
+                    throw Exception(String(error->message), ErrorCodes::LOGICAL_ERROR);
+                else
+                    throw Exception(
+                        "Pattern '" + str_patterns[error->expression] + "' failed with error '" + String(error->message),
+                        ErrorCodes::LOGICAL_ERROR);
+            }
+
+            ProfileEvents::increment(ProfileEvents::RegexpCreated);
+
+            return new Regexps{db};
+        });
+    }
+}
+
+#endif // USE_HYPERSCAN
+
 }
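MultiRegexps::get above caches one compiled Hyperscan database per distinct pattern vector in a function-local static pool, relying on C++11's thread-safe static initialization. ObjectPoolMap itself is not shown in this diff; the sketch below is a minimal stand-in for the caching idea, assuming a mutex-guarded std::map keyed by the pattern vector:

```cpp
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

// Compile once per distinct pattern set; all later calls return the cached object.
template <typename Compiled, typename Compile>
std::shared_ptr<Compiled> getCached(const std::vector<std::string> & patterns, Compile compile)
{
    static std::map<std::vector<std::string>, std::shared_ptr<Compiled>> cache;  // thread-safe init (C++11)
    static std::mutex mutex;

    std::lock_guard<std::mutex> lock(mutex);
    auto & entry = cache[patterns];
    if (!entry)
        entry = std::shared_ptr<Compiled>(compile(patterns));  // compile only on first use
    return entry;
}
```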
@@ -79,3 +79,6 @@ target_link_libraries (parse_date_time_best_effort PRIVATE clickhouse_common_io)

 add_executable (zlib_ng_bug zlib_ng_bug.cpp)
 target_link_libraries (zlib_ng_bug PRIVATE ${Poco_Foundation_LIBRARY})
+if(NOT USE_INTERNAL_POCO_LIBRARY)
+    target_include_directories(zlib_ng_bug SYSTEM BEFORE PRIVATE ${Poco_INCLUDE_DIRS})
+endif()
@@ -58,7 +58,7 @@ namespace

 BlockInputStreamPtr createLocalStream(const ASTPtr & query_ast, const Context & context, QueryProcessingStage::Enum processed_stage)
 {
-    InterpreterSelectQuery interpreter{query_ast, context, Names{}, processed_stage};
+    InterpreterSelectQuery interpreter{query_ast, context, SelectQueryOptions(processed_stage)};
     BlockInputStreamPtr stream = interpreter.execute().in;

     /** Materialization is needed, since from remote servers the constants come materialized.
@@ -76,7 +76,7 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTSubquery & subquery, ASTPtr

     ASTPtr subquery_select = subquery.children.at(0);
     BlockIO res = InterpreterSelectWithUnionQuery(
-        subquery_select, subquery_context, {}, QueryProcessingStage::Complete, data.subquery_depth + 1).execute();
+        subquery_select, subquery_context, SelectQueryOptions(QueryProcessingStage::Complete, data.subquery_depth + 1)).execute();

     Block block;
     try
@ -51,7 +51,8 @@ BlockInputStreamPtr InterpreterExplainQuery::executeImpl()
|
||||
}
|
||||
else if (ast.getKind() == ASTExplainQuery::AnalyzedSyntax)
|
||||
{
|
||||
InterpreterSelectWithUnionQuery interpreter(ast.children.at(0), context, {}, QueryProcessingStage::FetchColumns, 0, true, true);
|
||||
InterpreterSelectWithUnionQuery interpreter(ast.children.at(0), context,
|
||||
SelectQueryOptions(QueryProcessingStage::FetchColumns).analyze().modify());
|
||||
interpreter.getQuery()->format(IAST::FormatSettings(ss, false));
|
||||
}
|
||||
|
||||
|
@ -84,12 +84,12 @@ std::unique_ptr<IInterpreter> InterpreterFactory::get(ASTPtr & query, Context &
|
||||
{
|
||||
/// This is internal part of ASTSelectWithUnionQuery.
|
||||
/// Even if there is SELECT without union, it is represented by ASTSelectWithUnionQuery with single ASTSelectQuery as a child.
|
||||
return std::make_unique<InterpreterSelectQuery>(query, context, Names{}, stage);
|
||||
return std::make_unique<InterpreterSelectQuery>(query, context, SelectQueryOptions(stage));
|
||||
}
|
||||
else if (query->as<ASTSelectWithUnionQuery>())
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::SelectQuery);
|
||||
return std::make_unique<InterpreterSelectWithUnionQuery>(query, context, Names{}, stage);
|
||||
return std::make_unique<InterpreterSelectWithUnionQuery>(query, context, SelectQueryOptions(stage));
|
||||
}
|
||||
else if (query->as<ASTInsertQuery>())
|
||||
{
|
||||
|
@ -128,7 +128,7 @@ BlockIO InterpreterInsertQuery::execute()
|
||||
if (query.select)
|
||||
{
|
||||
/// Passing 1 as subquery_depth will disable limiting size of intermediate result.
|
||||
InterpreterSelectWithUnionQuery interpreter_select{query.select, context, {}, QueryProcessingStage::Complete, 1};
|
||||
InterpreterSelectWithUnionQuery interpreter_select{query.select, context, SelectQueryOptions(QueryProcessingStage::Complete, 1)};
|
||||
|
||||
res.in = interpreter_select.execute().in;
|
||||
|
||||
|
@ -78,13 +78,9 @@ namespace ErrorCodes
InterpreterSelectQuery::InterpreterSelectQuery(
    const ASTPtr & query_ptr_,
    const Context & context_,
    const Names & required_result_column_names,
    QueryProcessingStage::Enum to_stage_,
    size_t subquery_depth_,
    bool only_analyze_,
    bool modify_inplace)
    : InterpreterSelectQuery(
        query_ptr_, context_, nullptr, nullptr, required_result_column_names, to_stage_, subquery_depth_, only_analyze_, modify_inplace)
    const SelectQueryOptions & options,
    const Names & required_result_column_names)
    : InterpreterSelectQuery(query_ptr_, context_, nullptr, nullptr, options, required_result_column_names)
{
}

@ -92,23 +88,17 @@ InterpreterSelectQuery::InterpreterSelectQuery(
    const ASTPtr & query_ptr_,
    const Context & context_,
    const BlockInputStreamPtr & input_,
    QueryProcessingStage::Enum to_stage_,
    bool only_analyze_,
    bool modify_inplace)
    : InterpreterSelectQuery(query_ptr_, context_, input_, nullptr, Names{}, to_stage_, 0, only_analyze_, modify_inplace)
{
}
    const SelectQueryOptions & options)
    : InterpreterSelectQuery(query_ptr_, context_, input_, nullptr, options.copy().noSubquery())
{}

InterpreterSelectQuery::InterpreterSelectQuery(
    const ASTPtr & query_ptr_,
    const Context & context_,
    const StoragePtr & storage_,
    QueryProcessingStage::Enum to_stage_,
    bool only_analyze_,
    bool modify_inplace)
    : InterpreterSelectQuery(query_ptr_, context_, nullptr, storage_, Names{}, to_stage_, 0, only_analyze_, modify_inplace)
{
}
    const SelectQueryOptions & options)
    : InterpreterSelectQuery(query_ptr_, context_, nullptr, storage_, options.copy().noSubquery())
{}

InterpreterSelectQuery::~InterpreterSelectQuery() = default;

@ -133,17 +123,12 @@ InterpreterSelectQuery::InterpreterSelectQuery(
    const Context & context_,
    const BlockInputStreamPtr & input_,
    const StoragePtr & storage_,
    const Names & required_result_column_names,
    QueryProcessingStage::Enum to_stage_,
    size_t subquery_depth_,
    bool only_analyze_,
    bool modify_inplace)
    const SelectQueryOptions & options_,
    const Names & required_result_column_names)
    : options(options_)
    /// NOTE: the query almost always should be cloned because it will be modified during analysis.
    : query_ptr(modify_inplace ? query_ptr_ : query_ptr_->clone())
    , query_ptr(options.modify_inplace ? query_ptr_ : query_ptr_->clone())
    , context(context_)
    , to_stage(to_stage_)
    , subquery_depth(subquery_depth_)
    , only_analyze(only_analyze_)
    , storage(storage_)
    , input(input_)
    , log(&Logger::get("InterpreterSelectQuery"))
@ -151,7 +136,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
    initSettings();
    const Settings & settings = context.getSettingsRef();

    if (settings.max_subquery_depth && subquery_depth > settings.max_subquery_depth)
    if (settings.max_subquery_depth && options.subquery_depth > settings.max_subquery_depth)
        throw Exception("Too deep subqueries. Maximum: " + settings.max_subquery_depth.toString(),
            ErrorCodes::TOO_DEEP_SUBQUERIES);

@ -189,7 +174,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
    {
        /// Read from subquery.
        interpreter_subquery = std::make_unique<InterpreterSelectWithUnionQuery>(
            table_expression, getSubqueryContext(context), required_columns, QueryProcessingStage::Complete, subquery_depth + 1, only_analyze, modify_inplace);
            table_expression, getSubqueryContext(context), options.subquery(), required_columns);

        source_header = interpreter_subquery->getSampleBlock();
    }
@ -215,13 +200,14 @@ InterpreterSelectQuery::InterpreterSelectQuery(
    if (storage)
        table_lock = storage->lockStructureForShare(false, context.getCurrentQueryId());

    syntax_analyzer_result = SyntaxAnalyzer(context, subquery_depth).analyze(
    syntax_analyzer_result = SyntaxAnalyzer(context, options).analyze(
        query_ptr, source_header.getNamesAndTypesList(), required_result_column_names, storage);
    query_analyzer = std::make_unique<ExpressionAnalyzer>(
        query_ptr, syntax_analyzer_result, context, NamesAndTypesList(),
        NameSet(required_result_column_names.begin(), required_result_column_names.end()), subquery_depth, !only_analyze);
        NameSet(required_result_column_names.begin(), required_result_column_names.end()),
        options.subquery_depth, !options.only_analyze);

    if (!only_analyze)
    if (!options.only_analyze)
    {
        if (query.sample_size() && (input || !storage || !storage->supportsSampling()))
            throw Exception("Illegal SAMPLE: table doesn't support sampling", ErrorCodes::SAMPLING_NOT_SUPPORTED);
@ -238,7 +224,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
        context.addExternalTable(it.first, it.second);
    }

    if (!only_analyze || modify_inplace)
    if (!options.only_analyze || options.modify_inplace)
    {
        if (query_analyzer->isRewriteSubqueriesPredicate())
        {
@ -247,11 +233,8 @@ InterpreterSelectQuery::InterpreterSelectQuery(
            interpreter_subquery = std::make_unique<InterpreterSelectWithUnionQuery>(
                table_expression,
                getSubqueryContext(context),
                required_columns,
                QueryProcessingStage::Complete,
                subquery_depth + 1,
                only_analyze,
                modify_inplace);
                options.subquery(),
                required_columns);
        }
    }

@ -304,7 +287,7 @@ Block InterpreterSelectQuery::getSampleBlock()
BlockIO InterpreterSelectQuery::execute()
{
    Pipeline pipeline;
    executeImpl(pipeline, input, only_analyze);
    executeImpl(pipeline, input, options.only_analyze);
    executeUnion(pipeline);

    BlockIO res;
@ -315,7 +298,7 @@ BlockIO InterpreterSelectQuery::execute()
BlockInputStreams InterpreterSelectQuery::executeWithMultipleStreams()
{
    Pipeline pipeline;
    executeImpl(pipeline, input, only_analyze);
    executeImpl(pipeline, input, options.only_analyze);
    return pipeline.streams;
}

@ -325,10 +308,10 @@ InterpreterSelectQuery::AnalysisResult InterpreterSelectQuery::analyzeExpression

    /// Do I need to perform the first part of the pipeline - running on remote servers during distributed processing.
    res.first_stage = from_stage < QueryProcessingStage::WithMergeableState
        && to_stage >= QueryProcessingStage::WithMergeableState;
        && options.to_stage >= QueryProcessingStage::WithMergeableState;
    /// Do I need to execute the second part of the pipeline - running on the initiating server during distributed processing.
    res.second_stage = from_stage <= QueryProcessingStage::WithMergeableState
        && to_stage > QueryProcessingStage::WithMergeableState;
        && options.to_stage > QueryProcessingStage::WithMergeableState;

    /** First we compose a chain of actions and remember the necessary steps from it.
     * Regardless of from_stage and to_stage, we will compose a complete sequence of actions to perform optimization and
@ -553,16 +536,16 @@ void InterpreterSelectQuery::executeImpl(Pipeline & pipeline, const BlockInputSt
        expressions = analyzeExpressions(from_stage, false);

        if (from_stage == QueryProcessingStage::WithMergeableState &&
            to_stage == QueryProcessingStage::WithMergeableState)
            options.to_stage == QueryProcessingStage::WithMergeableState)
            throw Exception("Distributed on Distributed is not supported", ErrorCodes::NOT_IMPLEMENTED);

        /** Read the data from Storage. from_stage - to what stage the request was completed in Storage. */
        executeFetchColumns(from_stage, pipeline, expressions.prewhere_info, expressions.columns_to_remove_after_prewhere);

        LOG_TRACE(log, QueryProcessingStage::toString(from_stage) << " -> " << QueryProcessingStage::toString(to_stage));
        LOG_TRACE(log, QueryProcessingStage::toString(from_stage) << " -> " << QueryProcessingStage::toString(options.to_stage));
    }

    if (to_stage > QueryProcessingStage::FetchColumns)
    if (options.to_stage > QueryProcessingStage::FetchColumns)
    {
        /// Do I need to aggregate in a separate row rows that have not passed max_rows_to_group_by.
        bool aggregate_overflow_row =
@ -575,7 +558,7 @@ void InterpreterSelectQuery::executeImpl(Pipeline & pipeline, const BlockInputSt
        /// Do I need to immediately finalize the aggregate functions after the aggregation?
        bool aggregate_final =
            expressions.need_aggregate &&
            to_stage > QueryProcessingStage::WithMergeableState &&
            options.to_stage > QueryProcessingStage::WithMergeableState &&
            !query.group_by_with_totals && !query.group_by_with_rollup && !query.group_by_with_cube;

        if (expressions.first_stage)
@ -938,7 +921,7 @@ void InterpreterSelectQuery::executeFetchColumns(

    /// Limitation on the number of columns to read.
    /// It's not applied in 'only_analyze' mode, because the query could be analyzed without removal of unnecessary columns.
    if (!only_analyze && settings.max_columns_to_read && required_columns.size() > settings.max_columns_to_read)
    if (!options.only_analyze && settings.max_columns_to_read && required_columns.size() > settings.max_columns_to_read)
        throw Exception("Limit for number of columns to read exceeded. "
            "Requested: " + toString(required_columns.size())
            + ", maximum: " + settings.max_columns_to_read.toString(),
@ -1000,7 +983,8 @@ void InterpreterSelectQuery::executeFetchColumns(
        throw Exception("Subquery expected", ErrorCodes::LOGICAL_ERROR);

    interpreter_subquery = std::make_unique<InterpreterSelectWithUnionQuery>(
        subquery, getSubqueryContext(context), required_columns, QueryProcessingStage::Complete, subquery_depth + 1, only_analyze);
        subquery, getSubqueryContext(context),
        options.copy().subquery().noModify(), required_columns);

    if (query_analyzer->hasAggregation())
        interpreter_subquery->ignoreWithTotals();
@ -1057,7 +1041,7 @@ void InterpreterSelectQuery::executeFetchColumns(
     * additionally on each remote server, because these limits are checked per block of data processed,
     * and remote servers may process way more blocks of data than are received by initiator.
     */
    if (to_stage == QueryProcessingStage::Complete)
    if (options.to_stage == QueryProcessingStage::Complete)
    {
        limits.min_execution_speed = settings.min_execution_speed;
        limits.max_execution_speed = settings.max_execution_speed;
@ -1072,7 +1056,7 @@ void InterpreterSelectQuery::executeFetchColumns(
    {
        stream->setLimits(limits);

        if (to_stage == QueryProcessingStage::Complete)
        if (options.to_stage == QueryProcessingStage::Complete)
            stream->setQuota(quota);
    });
}
@ -3,12 +3,13 @@
#include <memory>

#include <Core/QueryProcessingStage.h>
#include <Parsers/ASTSelectQuery.h>
#include <DataStreams/IBlockInputStream.h>
#include <Interpreters/Context.h>
#include <Interpreters/ExpressionActions.h>
#include <Interpreters/ExpressionAnalyzer.h>
#include <Interpreters/IInterpreter.h>
#include <Parsers/ASTSelectQuery.h>
#include <Interpreters/SelectQueryOptions.h>
#include <Storages/SelectQueryInfo.h>


@ -23,6 +24,7 @@ class InterpreterSelectWithUnionQuery;
struct SyntaxAnalyzerResult;
using SyntaxAnalyzerResultPtr = std::shared_ptr<const SyntaxAnalyzerResult>;


/** Interprets the SELECT query. Returns the stream of blocks with the results of the query before `to_stage` stage.
 */
class InterpreterSelectQuery : public IInterpreter
@ -32,14 +34,6 @@ public:
 * query_ptr
 * - A query AST to interpret.
 *
 * to_stage
 * - the stage to which the query is to be executed. By default - till the end.
 *   You can execute up to the intermediate aggregation state, which is combined from different servers for distributed query processing.
 *
 * subquery_depth
 * - to control the limit on the depth of nesting of subqueries. For subqueries, a value that is incremented by one is passed;
 *   for INSERT SELECT, a value 1 is passed instead of 0.
 *
 * required_result_column_names
 * - don't calculate all columns except the specified ones from the query
 *   - it is used to remove calculation (and reading) of unnecessary columns from subqueries.
@ -49,29 +43,22 @@ public:
    InterpreterSelectQuery(
        const ASTPtr & query_ptr_,
        const Context & context_,
        const Names & required_result_column_names = Names{},
        QueryProcessingStage::Enum to_stage_ = QueryProcessingStage::Complete,
        size_t subquery_depth_ = 0,
        bool only_analyze_ = false,
        bool modify_inplace = false);
        const SelectQueryOptions &,
        const Names & required_result_column_names = Names{});

    /// Read data not from the table specified in the query, but from the prepared source `input`.
    InterpreterSelectQuery(
        const ASTPtr & query_ptr_,
        const Context & context_,
        const BlockInputStreamPtr & input_,
        QueryProcessingStage::Enum to_stage_ = QueryProcessingStage::Complete,
        bool only_analyze_ = false,
        bool modify_inplace = false);
        const SelectQueryOptions & = {});

    /// Read data not from the table specified in the query, but from the specified `storage_`.
    InterpreterSelectQuery(
        const ASTPtr & query_ptr_,
        const Context & context_,
        const StoragePtr & storage_,
        QueryProcessingStage::Enum to_stage_ = QueryProcessingStage::Complete,
        bool only_analyze_ = false,
        bool modify_inplace = false);
        const SelectQueryOptions & = {});

    ~InterpreterSelectQuery() override;

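A hedged before/after comparison of what a call site looks like with the declarations above (query_ast and context are placeholders, not values from this diff):

/// Before: a tail of positional defaults that is hard to read at the call site.
/// InterpreterSelectQuery interpreter(query_ast, context, Names{}, QueryProcessingStage::Complete, 0, true);

/// After: one self-describing options object.
InterpreterSelectQuery interpreter(query_ast, context, SelectQueryOptions(QueryProcessingStage::Complete).analyze());
Block header = interpreter.getSampleBlock();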
@ -93,11 +80,8 @@ private:
    const Context & context_,
    const BlockInputStreamPtr & input_,
    const StoragePtr & storage_,
    const Names & required_result_column_names,
    QueryProcessingStage::Enum to_stage_,
    size_t subquery_depth_,
    bool only_analyze_,
    bool modify_inplace);
    const SelectQueryOptions &,
    const Names & required_result_column_names = {});

    ASTSelectQuery & getSelectQuery() { return query_ptr->as<ASTSelectQuery &>(); }

@ -223,10 +207,9 @@ private:
     */
    void initSettings();

    const SelectQueryOptions options;
    ASTPtr query_ptr;
    Context context;
    QueryProcessingStage::Enum to_stage;
    size_t subquery_depth = 0;
    NamesAndTypesList source_columns;
    SyntaxAnalyzerResultPtr syntax_analyzer_result;
    std::unique_ptr<ExpressionAnalyzer> query_analyzer;
@ -234,9 +217,6 @@ private:
    /// How many streams we ask for storage to produce, and in how many threads we will do further processing.
    size_t max_streams = 1;

    /// The object was created only for query analysis.
    bool only_analyze = false;

    /// List of columns to read to execute the query.
    Names required_columns;
    /// Structure of query source (table, subquery, etc).
@ -26,15 +26,11 @@ namespace ErrorCodes
InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
    const ASTPtr & query_ptr_,
    const Context & context_,
    const Names & required_result_column_names,
    QueryProcessingStage::Enum to_stage_,
    size_t subquery_depth_,
    bool only_analyze,
    bool modify_inplace)
    : query_ptr(query_ptr_),
    context(context_),
    to_stage(to_stage_),
    subquery_depth(subquery_depth_)
    const SelectQueryOptions & options_,
    const Names & required_result_column_names)
    : options(options_),
    query_ptr(query_ptr_),
    context(context_)
{
    const auto & ast = query_ptr->as<ASTSelectWithUnionQuery &>();

@ -57,7 +53,7 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
    /// We use it to determine positions of 'required_result_column_names' in SELECT clause.

    Block full_result_header = InterpreterSelectQuery(
        ast.list_of_selects->children.at(0), context, Names(), to_stage, subquery_depth, true).getSampleBlock();
        ast.list_of_selects->children.at(0), context, options.copy().analyze().noModify()).getSampleBlock();

    std::vector<size_t> positions_of_required_result_columns(required_result_column_names.size());
    for (size_t required_result_num = 0, size = required_result_column_names.size(); required_result_num < size; ++required_result_num)
@ -66,7 +62,7 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
    for (size_t query_num = 1; query_num < num_selects; ++query_num)
    {
        Block full_result_header_for_current_select = InterpreterSelectQuery(
            ast.list_of_selects->children.at(query_num), context, Names(), to_stage, subquery_depth, true).getSampleBlock();
            ast.list_of_selects->children.at(query_num), context, options.copy().analyze().noModify()).getSampleBlock();

        if (full_result_header_for_current_select.columns() != full_result_header.columns())
            throw Exception("Different number of columns in UNION ALL elements:\n"
@ -89,11 +85,8 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
        nested_interpreters.emplace_back(std::make_unique<InterpreterSelectQuery>(
            ast.list_of_selects->children.at(query_num),
            context,
            current_required_result_column_names,
            to_stage,
            subquery_depth,
            only_analyze,
            modify_inplace));
            options,
            current_required_result_column_names));
    }

    /// Determine structure of the result.
@ -179,7 +172,7 @@ Block InterpreterSelectWithUnionQuery::getSampleBlock(
        return cache[key];
    }

    return cache[key] = InterpreterSelectWithUnionQuery(query_ptr, context, {}, QueryProcessingStage::Complete, 0, true).getSampleBlock();
    return cache[key] = InterpreterSelectWithUnionQuery(query_ptr, context, SelectQueryOptions().analyze()).getSampleBlock();
}


@ -3,6 +3,7 @@
#include <Core/QueryProcessingStage.h>
#include <Interpreters/Context.h>
#include <Interpreters/IInterpreter.h>
#include <Interpreters/SelectQueryOptions.h>


namespace DB
@ -19,11 +20,8 @@ public:
    InterpreterSelectWithUnionQuery(
        const ASTPtr & query_ptr_,
        const Context & context_,
        const Names & required_result_column_names = Names{},
        QueryProcessingStage::Enum to_stage_ = QueryProcessingStage::Complete,
        size_t subquery_depth_ = 0,
        bool only_analyze = false,
        bool modify_inplace = false);
        const SelectQueryOptions &,
        const Names & required_result_column_names = {});

    ~InterpreterSelectWithUnionQuery() override;

@ -43,10 +41,9 @@ public:
    ASTPtr getQuery() const { return query_ptr; }

private:
    const SelectQueryOptions options;
    ASTPtr query_ptr;
    Context context;
    QueryProcessingStage::Enum to_stage;
    size_t subquery_depth;

    std::vector<std::unique_ptr<InterpreterSelectQuery>> nested_interpreters;

@ -32,23 +32,54 @@ namespace ErrorCodes
    extern const int ILLEGAL_COLUMN;
}

static NameSet requiredRightKeys(const Names & key_names, const NamesAndTypesList & columns_added_by_join)
{
    NameSet required;

static std::unordered_map<String, DataTypePtr> requiredRightKeys(const Names & key_names, const NamesAndTypesList & columns_added_by_join)
{
    NameSet right_keys;
    for (const auto & name : key_names)
        right_keys.insert(name);

    std::unordered_map<String, DataTypePtr> required;
    for (const auto & column : columns_added_by_join)
    {
        if (right_keys.count(column.name))
            required.insert(column.name);
    }
            required.insert({column.name, column.type});

    return required;
}

static void convertColumnToNullable(ColumnWithTypeAndName & column)
{
    if (column.type->isNullable())
        return;

    column.type = makeNullable(column.type);
    if (column.column)
        column.column = makeNullable(column.column);
}

/// Converts column to nullable if needed. No backward conversion.
static ColumnWithTypeAndName correctNullability(ColumnWithTypeAndName && column, bool nullable)
{
    if (nullable)
        convertColumnToNullable(column);
    return std::move(column);
}

static ColumnWithTypeAndName correctNullability(ColumnWithTypeAndName && column, bool nullable, const ColumnUInt8 & negative_null_map)
{
    if (nullable)
    {
        convertColumnToNullable(column);
        if (negative_null_map.size())
        {
            MutableColumnPtr mutable_column = (*std::move(column.column)).mutate();
            static_cast<ColumnNullable &>(*mutable_column).applyNegatedNullMap(negative_null_map);
            column.column = std::move(mutable_column);
        }
    }
    return std::move(column);
}

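The reworked requiredRightKeys above now records the type of every required right key, not just its name; that type is what the later hunks feed into correctNullability. A self-contained sketch of the same map-building logic over plain standard types (illustrative only; (name, type) string pairs stand in for NamesAndTypesList):

#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

using Column = std::pair<std::string, std::string>; /// (name, type name)

std::unordered_map<std::string, std::string>
requiredRightKeys(const std::vector<std::string> & key_names, const std::vector<Column> & columns_added_by_join)
{
    std::unordered_set<std::string> right_keys(key_names.begin(), key_names.end());

    std::unordered_map<std::string, std::string> required;
    for (const auto & [name, type] : columns_added_by_join)
        if (right_keys.count(name))
            required.emplace(name, type); /// keep the type alongside the name
    return required;
}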
Join::Join(const Names & key_names_right_, bool use_nulls_, const SizeLimits & limits,
    ASTTableJoin::Kind kind_, ASTTableJoin::Strictness strictness_, bool any_take_last_row_)
@ -120,56 +151,6 @@ Join::Type Join::chooseMethod(const ColumnRawPtrs & key_columns, Sizes & key_siz
}


template <typename Maps>
static void initImpl(Maps & maps, Join::Type type)
{
    switch (type)
    {
        case Join::Type::EMPTY: break;
        case Join::Type::CROSS: break;

    #define M(TYPE) \
        case Join::Type::TYPE: maps.TYPE = std::make_unique<typename decltype(maps.TYPE)::element_type>(); break;
        APPLY_FOR_JOIN_VARIANTS(M)
    #undef M
    }
}

template <typename Maps>
static size_t getTotalRowCountImpl(const Maps & maps, Join::Type type)
{
    switch (type)
    {
        case Join::Type::EMPTY: return 0;
        case Join::Type::CROSS: return 0;

    #define M(NAME) \
        case Join::Type::NAME: return maps.NAME ? maps.NAME->size() : 0;
        APPLY_FOR_JOIN_VARIANTS(M)
    #undef M
    }

    __builtin_unreachable();
}

template <typename Maps>
static size_t getTotalByteCountImpl(const Maps & maps, Join::Type type)
{
    switch (type)
    {
        case Join::Type::EMPTY: return 0;
        case Join::Type::CROSS: return 0;

    #define M(NAME) \
        case Join::Type::NAME: return maps.NAME ? maps.NAME->getBufferSizeInBytes() : 0;
        APPLY_FOR_JOIN_VARIANTS(M)
    #undef M
    }

    __builtin_unreachable();
}


template <Join::Type type, typename Value, typename Mapped>
struct KeyGetterForTypeImpl;

@ -227,7 +208,7 @@ void Join::init(Type type_)
    if (kind == ASTTableJoin::Kind::Cross)
        return;
    dispatch(MapInitTag());
    dispatch([&](auto, auto, auto & map) { initImpl(map, type); });
    dispatch([&](auto, auto, auto & map) { map.create(type); });
}

size_t Join::getTotalRowCount() const
@ -241,7 +222,7 @@ size_t Join::getTotalRowCount() const
    }
    else
    {
        dispatch([&](auto, auto, auto & map) { res += getTotalRowCountImpl(map, type); });
        dispatch([&](auto, auto, auto & map) { res += map.getTotalRowCount(type); });
    }

    return res;
@ -258,22 +239,13 @@ size_t Join::getTotalByteCount() const
    }
    else
    {
        dispatch([&](auto, auto, auto & map) { res += getTotalByteCountImpl(map, type); });
        dispatch([&](auto, auto, auto & map) { res += map.getTotalByteCountImpl(type); });
        res += pool.size();
    }

    return res;
}


static void convertColumnToNullable(ColumnWithTypeAndName & column)
{
    column.type = makeNullable(column.type);
    if (column.column)
        column.column = makeNullable(column.column);
}


void Join::setSampleBlock(const Block & block)
{
    std::unique_lock lock(rwlock);
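The dispatch changes above replace free function templates (initImpl and friends) with member functions on the maps object, invoked through a generic lambda. A toy model of that dispatch shape, with invented names, to make the `[&](auto, auto, auto & map)` pattern concrete:

#include <cstdio>

struct Dispatcher
{
    int maps_any = 0;

    /// Stand-in for Join::dispatch(): the real one selects kind/strictness tags
    /// and the matching maps member; a single call here shows the lambda's shape.
    template <typename Func>
    void dispatch(Func && f)
    {
        f(/*kind tag*/ 0, /*strictness tag*/ 0, maps_any);
    }
};

int main()
{
    Dispatcher d;
    /// Same shape as: dispatch([&](auto, auto, auto & map) { map.create(type); });
    d.dispatch([](auto, auto, auto & map) { map += 1; });
    std::printf("%d\n", d.maps_any); /// prints 1
}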
@ -526,113 +498,112 @@ bool Join::insertFromBlock(const Block & block)

namespace
{

template <bool fill_left, ASTTableJoin::Strictness STRICTNESS, typename Map>
struct Adder;

template <typename Map>
struct Adder<true, ASTTableJoin::Strictness::Any, Map>
class AddedColumns
{
    static void addFound(const typename Map::mapped_type & mapped, size_t num_columns_to_add, MutableColumns & added_columns,
        size_t i, IColumn::Filter & filter, IColumn::Offset & /*current_offset*/, IColumn::Offsets * /*offsets*/,
        const std::vector<size_t> & right_indexes)
    {
        filter[i] = 1;
public:
    using TypeAndNames = std::vector<std::pair<decltype(ColumnWithTypeAndName::type), decltype(ColumnWithTypeAndName::name)>>;

        for (size_t j = 0; j < num_columns_to_add; ++j)
            added_columns[j]->insertFrom(*mapped.block->getByPosition(right_indexes[j]).column, mapped.row_num);
    AddedColumns(const Block & sample_block_with_columns_to_add,
        const Block & block_with_columns_to_add,
        const Block & block, size_t num_columns_to_skip)
    {
        size_t num_columns_to_add = sample_block_with_columns_to_add.columns();

        columns.reserve(num_columns_to_add);
        type_name.reserve(num_columns_to_add);
        right_indexes.reserve(num_columns_to_add);

        for (size_t i = 0; i < num_columns_to_add; ++i)
        {
            const ColumnWithTypeAndName & src_column = sample_block_with_columns_to_add.safeGetByPosition(i);

            /// Don't insert column if it's in left block or not explicitly required.
            if (!block.has(src_column.name) && block_with_columns_to_add.has(src_column.name))
                addColumn(src_column, num_columns_to_skip + i);
        }
    }

    static void addNotFound(size_t num_columns_to_add, MutableColumns & added_columns,
        size_t i, IColumn::Filter & filter, IColumn::Offset & /*current_offset*/, IColumn::Offsets * /*offsets*/)
    {
        filter[i] = 0;
    size_t size() const { return columns.size(); }

        for (size_t j = 0; j < num_columns_to_add; ++j)
            added_columns[j]->insertDefault();
    ColumnWithTypeAndName moveColumn(size_t i)
    {
        return ColumnWithTypeAndName(std::move(columns[i]), type_name[i].first, type_name[i].second);
    }

    void appendFromBlock(const Block & block, size_t row_num)
    {
        for (size_t j = 0; j < right_indexes.size(); ++j)
            columns[j]->insertFrom(*block.getByPosition(right_indexes[j]).column, row_num);
    }

    void appendDefaultRow()
    {
        for (size_t j = 0; j < right_indexes.size(); ++j)
            columns[j]->insertDefault();
    }

private:
    TypeAndNames type_name;
    MutableColumns columns;
    std::vector<size_t> right_indexes;

    void addColumn(const ColumnWithTypeAndName & src_column, size_t idx)
    {
        columns.push_back(src_column.column->cloneEmpty());
        columns.back()->reserve(src_column.column->size());
        type_name.emplace_back(src_column.type, src_column.name);
        right_indexes.push_back(idx);
    }
};

template <typename Map>
struct Adder<false, ASTTableJoin::Strictness::Any, Map>
template <ASTTableJoin::Strictness STRICTNESS, typename Map>
void addFoundRow(const typename Map::mapped_type & mapped, AddedColumns & added, IColumn::Offset & current_offset [[maybe_unused]])
{
    static void addFound(const typename Map::mapped_type & mapped, size_t num_columns_to_add, MutableColumns & added_columns,
        size_t i, IColumn::Filter & filter, IColumn::Offset & /*current_offset*/, IColumn::Offsets * /*offsets*/,
        const std::vector<size_t> & right_indexes)
    if constexpr (STRICTNESS == ASTTableJoin::Strictness::Any)
    {
        filter[i] = 1;

        for (size_t j = 0; j < num_columns_to_add; ++j)
            added_columns[j]->insertFrom(*mapped.block->getByPosition(right_indexes[j]).column, mapped.row_num);
        added.appendFromBlock(*mapped.block, mapped.row_num);
    }

    static void addNotFound(size_t /*num_columns_to_add*/, MutableColumns & /*added_columns*/,
        size_t i, IColumn::Filter & filter, IColumn::Offset & /*current_offset*/, IColumn::Offsets * /*offsets*/)
    if constexpr (STRICTNESS == ASTTableJoin::Strictness::All)
    {
        filter[i] = 0;
    }
};

template <bool fill_left, typename Map>
struct Adder<fill_left, ASTTableJoin::Strictness::All, Map>
{
    static void addFound(const typename Map::mapped_type & mapped, size_t num_columns_to_add, MutableColumns & added_columns,
        size_t i, IColumn::Filter & filter, IColumn::Offset & current_offset, IColumn::Offsets * offsets,
        const std::vector<size_t> & right_indexes)
    {
        filter[i] = 1;

        size_t rows_joined = 0;
        for (auto current = &static_cast<const typename Map::mapped_type::Base_t &>(mapped); current != nullptr; current = current->next)
        {
            for (size_t j = 0; j < num_columns_to_add; ++j)
                added_columns[j]->insertFrom(*current->block->getByPosition(right_indexes[j]).column.get(), current->row_num);

            ++rows_joined;
        }

        current_offset += rows_joined;
        (*offsets)[i] = current_offset;
    }

    static void addNotFound(size_t num_columns_to_add, MutableColumns & added_columns,
        size_t i, IColumn::Filter & filter, IColumn::Offset & current_offset, IColumn::Offsets * offsets)
    {
        filter[i] = 0;

        if (!fill_left)
        {
            (*offsets)[i] = current_offset;
        }
        else
        {
        added.appendFromBlock(*current->block, current->row_num);
            ++current_offset;
            (*offsets)[i] = current_offset;

            for (size_t j = 0; j < num_columns_to_add; ++j)
                added_columns[j]->insertDefault();
        }
    }
};

template <ASTTableJoin::Kind KIND, ASTTableJoin::Strictness STRICTNESS, typename KeyGetter, typename Map, bool has_null_map>
void NO_INLINE joinBlockImplTypeCase(
    const Map & map, size_t rows, const ColumnRawPtrs & key_columns, const Sizes & key_sizes,
    MutableColumns & added_columns, ConstNullMapPtr null_map, IColumn::Filter & filter,
    std::unique_ptr<IColumn::Offsets> & offsets_to_replicate,
    const std::vector<size_t> & right_indexes)
template <bool _add_missing>
void addNotFoundRow(AddedColumns & added [[maybe_unused]], IColumn::Offset & current_offset [[maybe_unused]])
{
    IColumn::Offset current_offset = 0;
    size_t num_columns_to_add = right_indexes.size();
    if constexpr (_add_missing)
    {
        added.appendDefaultRow();
        ++current_offset;
    }
}

/// Joins right table columns whose indexes are present in right_indexes using the specified map.
/// Makes the filter (1 if the row is present in the right table) and returns offsets to replicate (for ALL JOINs).
template <bool _add_missing, ASTTableJoin::Strictness STRICTNESS, typename KeyGetter, typename Map, bool _has_null_map>
std::unique_ptr<IColumn::Offsets> NO_INLINE joinRightIndexedColumns(
    const Map & map, size_t rows, KeyGetter & key_getter,
    AddedColumns & added_columns, ConstNullMapPtr null_map, IColumn::Filter & filter)
{
    std::unique_ptr<IColumn::Offsets> offsets_to_replicate;
    if constexpr (STRICTNESS == ASTTableJoin::Strictness::All)
        offsets_to_replicate = std::make_unique<IColumn::Offsets>(rows);

    IColumn::Offset current_offset = 0;
    Arena pool;
    KeyGetter key_getter(key_columns, key_sizes, nullptr);

    for (size_t i = 0; i < rows; ++i)
    {
        if (has_null_map && (*null_map)[i])
        if (_has_null_map && (*null_map)[i])
        {
            Adder<Join::KindTrait<KIND>::fill_left, STRICTNESS, Map>::addNotFound(
                num_columns_to_add, added_columns, i, filter, current_offset, offsets_to_replicate.get());
            addNotFoundRow<_add_missing>(added_columns, current_offset);
        }
        else
        {
@ -640,46 +611,65 @@ namespace

            if (find_result.isFound())
            {
                filter[i] = 1;
                auto & mapped = find_result.getMapped();
                mapped.setUsed();
                Adder<Join::KindTrait<KIND>::fill_left, STRICTNESS, Map>::addFound(
                    mapped, num_columns_to_add, added_columns, i, filter, current_offset, offsets_to_replicate.get(), right_indexes);
                addFoundRow<STRICTNESS, Map>(mapped, added_columns, current_offset);
            }
            else
                Adder<Join::KindTrait<KIND>::fill_left, STRICTNESS, Map>::addNotFound(
                    num_columns_to_add, added_columns, i, filter, current_offset, offsets_to_replicate.get());
        }
    }
                addNotFoundRow<_add_missing>(added_columns, current_offset);
        }

using BlockFilterData = std::pair<
    std::unique_ptr<IColumn::Filter>,
    std::unique_ptr<IColumn::Offsets>>;
        if constexpr (STRICTNESS == ASTTableJoin::Strictness::All)
            (*offsets_to_replicate)[i] = current_offset;
    }

    return offsets_to_replicate;
}

template <ASTTableJoin::Kind KIND, ASTTableJoin::Strictness STRICTNESS, typename KeyGetter, typename Map>
BlockFilterData joinBlockImplType(
IColumn::Filter joinRightColumns(
    const Map & map, size_t rows, const ColumnRawPtrs & key_columns, const Sizes & key_sizes,
    MutableColumns & added_columns, ConstNullMapPtr null_map, const std::vector<size_t> & right_indexes)
    AddedColumns & added_columns, ConstNullMapPtr null_map, std::unique_ptr<IColumn::Offsets> & offsets_to_replicate)
{
    std::unique_ptr<IColumn::Filter> filter = std::make_unique<IColumn::Filter>(rows);
    std::unique_ptr<IColumn::Offsets> offsets_to_replicate;
    constexpr bool left_or_full = static_in_v<KIND, ASTTableJoin::Kind::Left, ASTTableJoin::Kind::Full>;

    if (STRICTNESS == ASTTableJoin::Strictness::All)
        offsets_to_replicate = std::make_unique<IColumn::Offsets>(rows);
    IColumn::Filter filter(rows, 0);
    KeyGetter key_getter(key_columns, key_sizes, nullptr);

    if (null_map)
        joinBlockImplTypeCase<KIND, STRICTNESS, KeyGetter, Map, true>(
            map, rows, key_columns, key_sizes, added_columns, null_map, *filter,
            offsets_to_replicate, right_indexes);
        offsets_to_replicate = joinRightIndexedColumns<left_or_full, STRICTNESS, KeyGetter, Map, true>(
            map, rows, key_getter, added_columns, null_map, filter);
    else
        joinBlockImplTypeCase<KIND, STRICTNESS, KeyGetter, Map, false>(
            map, rows, key_columns, key_sizes, added_columns, null_map, *filter,
            offsets_to_replicate, right_indexes);
        offsets_to_replicate = joinRightIndexedColumns<left_or_full, STRICTNESS, KeyGetter, Map, false>(
            map, rows, key_getter, added_columns, null_map, filter);

    return {std::move(filter), std::move(offsets_to_replicate)};
    return filter;
}

template <ASTTableJoin::Kind KIND, ASTTableJoin::Strictness STRICTNESS, typename Maps>
IColumn::Filter switchJoinRightColumns(
    Join::Type type,
    const Maps & maps_, size_t rows, const ColumnRawPtrs & key_columns, const Sizes & key_sizes,
    AddedColumns & added_columns, ConstNullMapPtr null_map,
    std::unique_ptr<IColumn::Offsets> & offsets_to_replicate)
{
    switch (type)
    {
    #define M(TYPE) \
        case Join::Type::TYPE: \
            return joinRightColumns<KIND, STRICTNESS, typename KeyGetterForType<Join::Type::TYPE, const std::remove_reference_t<decltype(*maps_.TYPE)>>::Type>(\
                *maps_.TYPE, rows, key_columns, key_sizes, added_columns, null_map, offsets_to_replicate);
        APPLY_FOR_JOIN_VARIANTS(M)
    #undef M

        default:
            throw Exception("Unknown JOIN keys variant.", ErrorCodes::UNKNOWN_SET_DATA_VARIANT);
    }
}

} /// nameless


template <ASTTableJoin::Kind KIND, ASTTableJoin::Strictness STRICTNESS, typename Maps>
void Join::joinBlockImpl(
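The filter/offsets contract implemented by joinRightIndexedColumns above is the subtle part of this refactoring: filter marks which left rows found a match, and for ALL strictness offsets_to_replicate records, per left row, the running total of output rows that row expands to. A self-contained toy model of that bookkeeping (plain STL, names invented):

#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

int main()
{
    /// Right side: key -> list of payloads (a multimap, like the ALL-join hash map).
    std::unordered_map<int, std::vector<std::string>> right{{1, {"a", "b"}}, {3, {"c"}}};
    std::vector<int> left_keys{1, 2, 3};

    std::vector<unsigned char> filter(left_keys.size(), 0); /// 1 if the left row found a match
    std::vector<size_t> offsets(left_keys.size(), 0);       /// running count of output rows
    size_t current_offset = 0;

    constexpr bool add_missing = true; /// LEFT/FULL join: unmatched left rows still produce one row

    for (size_t i = 0; i < left_keys.size(); ++i)
    {
        auto it = right.find(left_keys[i]);
        if (it != right.end())
        {
            filter[i] = 1;
            current_offset += it->second.size(); /// replicate the left row once per right match
        }
        else if (add_missing)
            ++current_offset; /// one row with default right-side values
        offsets[i] = current_offset;
    }

    for (size_t i = 0; i < left_keys.size(); ++i)
        std::printf("left key %d: filter=%d, offset=%zu\n", left_keys[i], filter[i], offsets[i]);
    /// Prints offsets 2, 3, 4: row 0 expands to 2 rows, rows 1 and 2 to 1 row each.
}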
@ -714,7 +704,8 @@ void Join::joinBlockImpl(
     * Because if they are constants, then in the "not joined" rows, they may have different values
     * - default values, which can differ from the values of these constants.
     */
    if (isRightOrFull(kind))
    constexpr bool right_or_full = static_in_v<KIND, ASTTableJoin::Kind::Right, ASTTableJoin::Kind::Full>;
    if constexpr (right_or_full)
    {
        for (size_t i = 0; i < existing_columns; ++i)
        {

@ -734,68 +725,39 @@ void Join::joinBlockImpl(
     * but they will not be used at this stage of joining (and will be in `AdderNonJoined`), and they need to be skipped.
     */
    size_t num_columns_to_skip = 0;
    if (isRightOrFull(kind))
    if constexpr (right_or_full)
        num_columns_to_skip = keys_size;

    /// Add new columns to the block.
    size_t num_columns_to_add = sample_block_with_columns_to_add.columns();
    MutableColumns added_columns;
    added_columns.reserve(num_columns_to_add);

    std::vector<std::pair<decltype(ColumnWithTypeAndName::type), decltype(ColumnWithTypeAndName::name)>> added_type_name;
    added_type_name.reserve(num_columns_to_add);
    AddedColumns added(sample_block_with_columns_to_add, block_with_columns_to_add, block, num_columns_to_skip);

    std::vector<size_t> right_indexes;
    right_indexes.reserve(num_columns_to_add);

    for (size_t i = 0; i < num_columns_to_add; ++i)
    {
        const ColumnWithTypeAndName & src_column = sample_block_with_columns_to_add.safeGetByPosition(i);

        /// Don't insert column if it's in left block or not explicitly required.
        if (!block.has(src_column.name) && block_with_columns_to_add.has(src_column.name))
        {
            added_columns.push_back(src_column.column->cloneEmpty());
            added_columns.back()->reserve(src_column.column->size());
            added_type_name.emplace_back(src_column.type, src_column.name);
            right_indexes.push_back(num_columns_to_skip + i);
        }
    }

    std::unique_ptr<IColumn::Filter> filter;
    std::unique_ptr<IColumn::Offsets> offsets_to_replicate;

    switch (type)
    IColumn::Filter row_filter = switchJoinRightColumns<KIND, STRICTNESS>(
        type, maps_, block.rows(), key_columns, key_sizes, added, null_map, offsets_to_replicate);

    for (size_t i = 0; i < added.size(); ++i)
        block.insert(added.moveColumn(i));

    /// Filter & insert missing rows

    auto right_keys = requiredRightKeys(key_names_right, columns_added_by_join);

    if constexpr (STRICTNESS == ASTTableJoin::Strictness::Any)
    {
    #define M(TYPE) \
        case Join::Type::TYPE: \
            std::tie(filter, offsets_to_replicate) = \
                joinBlockImplType<KIND, STRICTNESS, typename KeyGetterForType<Join::Type::TYPE, const std::remove_reference_t<decltype(*maps_.TYPE)>>::Type>(\
                    *maps_.TYPE, block.rows(), key_columns, key_sizes, added_columns, null_map, right_indexes); \
            break;
        APPLY_FOR_JOIN_VARIANTS(M)
    #undef M
        /// Some trash to represent IColumn::Filter as ColumnUInt8 needed for ColumnNullable::applyNullMap()
        auto null_map_filter_ptr = ColumnUInt8::create();
        ColumnUInt8 & null_map_filter = static_cast<ColumnUInt8 &>(*null_map_filter_ptr);
        null_map_filter.getData().swap(row_filter);
        const IColumn::Filter & filter = null_map_filter.getData();

        default:
            throw Exception("Unknown JOIN keys variant.", ErrorCodes::UNKNOWN_SET_DATA_VARIANT);
    }

    const auto added_columns_size = added_columns.size();
    for (size_t i = 0; i < added_columns_size; ++i)
        block.insert(ColumnWithTypeAndName(std::move(added_columns[i]), added_type_name[i].first, added_type_name[i].second));

    if (!filter)
        throw Exception("No data to filter columns", ErrorCodes::LOGICAL_ERROR);

    NameSet needed_key_names_right = requiredRightKeys(key_names_right, columns_added_by_join);

    if (strictness == ASTTableJoin::Strictness::Any)
    {
        if (isInnerOrRight(kind))
        constexpr bool inner_or_right = static_in_v<KIND, ASTTableJoin::Kind::Inner, ASTTableJoin::Kind::Right>;
        if constexpr (inner_or_right)
        {
            /// If ANY INNER | RIGHT JOIN - filter all the columns except the new ones.
            for (size_t i = 0; i < existing_columns; ++i)
                block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->filter(*filter, -1);
                block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->filter(filter, -1);

            /// Add join key columns from the right block if they have different names.
            for (size_t i = 0; i < key_names_right.size(); ++i)
@ -803,10 +765,12 @@ void Join::joinBlockImpl(
                auto & right_name = key_names_right[i];
                auto & left_name = key_names_left[i];

                if (needed_key_names_right.count(right_name) && !block.has(right_name))
                auto it = right_keys.find(right_name);
                if (it != right_keys.end() && !block.has(right_name))
                {
                    const auto & col = block.getByName(left_name);
                    block.insert({col.column, col.type, right_name});
                    bool is_nullable = it->second->isNullable();
                    block.insert(correctNullability({col.column, col.type, right_name}, is_nullable));
                }
            }
        }
@ -818,27 +782,30 @@ void Join::joinBlockImpl(
                auto & right_name = key_names_right[i];
                auto & left_name = key_names_left[i];

                if (needed_key_names_right.count(right_name) && !block.has(right_name))
                auto it = right_keys.find(right_name);
                if (it != right_keys.end() && !block.has(right_name))
                {
                    const auto & col = block.getByName(left_name);
                    auto & column = col.column;
                    ColumnPtr column = col.column->convertToFullColumnIfConst();
                    MutableColumnPtr mut_column = column->cloneEmpty();

                    for (size_t col_no = 0; col_no < filter->size(); ++col_no)
                    for (size_t row = 0; row < filter.size(); ++row)
                    {
                        if ((*filter)[col_no])
                            mut_column->insertFrom(*column, col_no);
                        if (filter[row])
                            mut_column->insertFrom(*column, row);
                        else
                            mut_column->insertDefault();
                    }

                    block.insert({std::move(mut_column), col.type, right_name});
                    bool is_nullable = use_nulls || it->second->isNullable();
                    block.insert(correctNullability({std::move(mut_column), col.type, right_name}, is_nullable, null_map_filter));
                }
            }
        }
    }
    else
    {
        constexpr bool left_or_full = static_in_v<KIND, ASTTableJoin::Kind::Left, ASTTableJoin::Kind::Full>;
        if (!offsets_to_replicate)
            throw Exception("No data to filter columns", ErrorCodes::LOGICAL_ERROR);

@ -848,28 +815,31 @@ void Join::joinBlockImpl(
            auto & right_name = key_names_right[i];
            auto & left_name = key_names_left[i];

            if (needed_key_names_right.count(right_name) && !block.has(right_name))
            auto it = right_keys.find(right_name);
            if (it != right_keys.end() && !block.has(right_name))
            {
                const auto & col = block.getByName(left_name);
                auto & column = col.column;
                ColumnPtr column = col.column->convertToFullColumnIfConst();
                MutableColumnPtr mut_column = column->cloneEmpty();

                size_t last_offset = 0;
                for (size_t col_no = 0; col_no < column->size(); ++col_no)
                for (size_t row = 0; row < column->size(); ++row)
                {
                    if (size_t to_insert = (*offsets_to_replicate)[col_no] - last_offset)
                    if (size_t to_insert = (*offsets_to_replicate)[row] - last_offset)
                    {
                        if (!(*filter)[col_no])
                        if (!row_filter[row])
                            mut_column->insertDefault();
                        else
                            for (size_t dup = 0; dup < to_insert; ++dup)
                                mut_column->insertFrom(*column, col_no);
                                mut_column->insertFrom(*column, row);
                    }

                    last_offset = (*offsets_to_replicate)[col_no];
                    last_offset = (*offsets_to_replicate)[row];
                }

                block.insert({std::move(mut_column), col.type, right_name});
                /// TODO: null_map_filter
                bool is_nullable = (use_nulls && left_or_full) || it->second->isNullable();
                block.insert(correctNullability({std::move(mut_column), col.type, right_name}, is_nullable));
            }
        }
@ -1063,11 +1033,8 @@ struct AdderNonJoined;
template <typename Mapped>
struct AdderNonJoined<ASTTableJoin::Strictness::Any, Mapped>
{
    static void add(const Mapped & mapped, size_t & rows_added, MutableColumns & columns_left, MutableColumns & columns_right)
    static void add(const Mapped & mapped, size_t & rows_added, MutableColumns & columns_right)
    {
        for (size_t j = 0; j < columns_left.size(); ++j)
            columns_left[j]->insertDefault();

        for (size_t j = 0; j < columns_right.size(); ++j)
            columns_right[j]->insertFrom(*mapped.block->getByPosition(j).column.get(), mapped.row_num);

@ -1078,13 +1045,10 @@ struct AdderNonJoined<ASTTableJoin::Strictness::Any, Mapped>
template <typename Mapped>
struct AdderNonJoined<ASTTableJoin::Strictness::All, Mapped>
{
    static void add(const Mapped & mapped, size_t & rows_added, MutableColumns & columns_left, MutableColumns & columns_right)
    static void add(const Mapped & mapped, size_t & rows_added, MutableColumns & columns_right)
    {
        for (auto current = &static_cast<const typename Mapped::Base_t &>(mapped); current != nullptr; current = current->next)
        {
            for (size_t j = 0; j < columns_left.size(); ++j)
                columns_left[j]->insertDefault();

            for (size_t j = 0; j < columns_right.size(); ++j)
                columns_right[j]->insertFrom(*current->block->getByPosition(j).column.get(), current->row_num);

@ -1106,54 +1070,52 @@ public:
     * result_sample_block - keys, "left" columns, and "right" columns.
     */

    std::unordered_map<String, String> key_renames;
    makeResultSampleBlock(left_sample_block, key_names_left, columns_added_by_join, key_renames);

    const Block & right_sample_block = parent.sample_block_with_columns_to_add;

    size_t num_keys = key_names_left.size();
    size_t num_columns_left = left_sample_block.columns() - num_keys;
    size_t num_columns_right = right_sample_block.columns();

    column_indices_left.reserve(num_columns_left);
    column_indices_keys_and_right.reserve(num_keys + num_columns_right);

    std::vector<bool> is_left_key(left_sample_block.columns(), false);
    std::vector<size_t> key_positions_left;
    key_positions_left.reserve(key_names_left.size());

    for (const std::string & key : key_names_left)
    {
        size_t key_pos = left_sample_block.getPositionByName(key);
        key_positions_left.push_back(key_pos);
        is_left_key[key_pos] = true;
    }

    const Block & right_sample_block = parent.sample_block_with_columns_to_add;

    std::unordered_map<size_t, size_t> left_to_right_key_map;
    makeResultSampleBlock(left_sample_block, right_sample_block, columns_added_by_join,
        key_positions_left, is_left_key, left_to_right_key_map);

    column_indices_left.reserve(left_sample_block.columns() - key_names_left.size());
    column_indices_keys_and_right.reserve(key_names_left.size() + right_sample_block.columns());

    /// Use right key columns if present. @note left & right key columns could have different nullability.
    for (size_t key_pos : key_positions_left)
    {
        /// Here we establish the mapping between key columns of the left- and right-side tables.
        /// key_pos index is inserted in the position corresponding to key column in parent.blocks
        /// (saved blocks of the right-side table) and points to the same key column
        /// in the left_sample_block and thus in the result_sample_block.
        column_indices_keys_and_right.push_back(key_pos);

        auto it = key_renames.find(key);
        if (it != key_renames.end())
            key_renames_indices[key_pos] = result_sample_block.getPositionByName(it->second);
        auto it = left_to_right_key_map.find(key_pos);
        if (it != left_to_right_key_map.end())
        {
            column_indices_keys_and_right.push_back(it->second);
            column_indices_left.push_back(key_pos);
        }
        else
            column_indices_keys_and_right.push_back(key_pos);
    }

    size_t num_src_columns = left_sample_block.columns() + right_sample_block.columns();

    for (size_t i = 0; i < result_sample_block.columns(); ++i)
    {
        if (i < left_sample_block.columns())
        {
    for (size_t i = 0; i < left_sample_block.columns(); ++i)
        if (!is_left_key[i])
        {
            column_indices_left.emplace_back(i);

            /// If use_nulls, convert left columns to Nullable.
            if (parent.use_nulls)
                convertColumnToNullable(result_sample_block.getByPosition(i));
        }
    }
        else if (i < num_src_columns)
    size_t num_additional_keys = left_to_right_key_map.size();
    for (size_t i = left_sample_block.columns(); i < result_sample_block.columns() - num_additional_keys; ++i)
        column_indices_keys_and_right.emplace_back(i);
}
}

String getName() const override { return "NonJoined"; }

@ -1184,18 +1146,25 @@ private:
|
||||
/// Indices of key columns in result_sample_block or columns that come from the right-side table.
|
||||
/// Order is significant: it is the same as the order of columns in the blocks of the right-side table that are saved in parent.blocks.
|
||||
ColumnNumbers column_indices_keys_and_right;
|
||||
std::unordered_map<size_t, size_t> key_renames_indices;
|
||||
|
||||
std::unique_ptr<void, std::function<void(void *)>> position; /// type erasure
|
||||
|
||||
|
||||
void makeResultSampleBlock(const Block & left_sample_block, const Names & key_names_left,
|
||||
const NamesAndTypesList & columns_added_by_join, std::unordered_map<String, String> & key_renames)
|
||||
void makeResultSampleBlock(const Block & left_sample_block, const Block & right_sample_block,
|
||||
const NamesAndTypesList & columns_added_by_join,
|
||||
const std::vector<size_t> & key_positions_left, const std::vector<bool> & is_left_key,
|
||||
std::unordered_map<size_t, size_t> & left_to_right_key_map)
|
||||
{
|
||||
const Block & right_sample_block = parent.sample_block_with_columns_to_add;
|
||||
|
||||
result_sample_block = materializeBlock(left_sample_block);
|
||||
|
||||
/// Convert left columns to Nullable if allowed
|
||||
if (parent.use_nulls)
|
||||
{
|
||||
for (size_t i = 0; i < result_sample_block.columns(); ++i)
|
||||
if (!is_left_key[i])
|
||||
convertColumnToNullable(result_sample_block.getByPosition(i));
|
||||
}
|
||||
|
||||
/// Add columns from the right-side table to the block.
|
||||
for (size_t i = 0; i < right_sample_block.columns(); ++i)
|
||||
{
|
||||
@ -1205,20 +1174,23 @@ private:
|
||||
}
|
||||
|
||||
const auto & key_names_right = parent.key_names_right;
|
||||
NameSet needed_key_names_right = requiredRightKeys(key_names_right, columns_added_by_join);
|
||||
auto right_keys = requiredRightKeys(key_names_right, columns_added_by_join);
|
||||
|
||||
/// Add join key columns from right block if they has different name.
|
||||
for (size_t i = 0; i < key_names_right.size(); ++i)
|
||||
{
|
||||
auto & right_name = key_names_right[i];
|
||||
auto & left_name = key_names_left[i];
|
||||
size_t left_key_pos = key_positions_left[i];
|
||||
|
||||
if (needed_key_names_right.count(right_name) && !result_sample_block.has(right_name))
|
||||
auto it = right_keys.find(right_name);
|
||||
if (it != right_keys.end() && !result_sample_block.has(right_name))
|
||||
{
|
||||
const auto & col = result_sample_block.getByName(left_name);
|
||||
result_sample_block.insert({col.column, col.type, right_name});
|
||||
const auto & col = result_sample_block.getByPosition(left_key_pos);
|
||||
bool is_nullable = (parent.use_nulls && isFull(parent.kind)) || it->second->isNullable();
|
||||
result_sample_block.insert(correctNullability({col.column, col.type, right_name}, is_nullable));
|
||||
|
||||
key_renames[left_name] = right_name;
|
||||
size_t right_key_pos = result_sample_block.getPositionByName(right_name);
|
||||
left_to_right_key_map[left_key_pos] = right_key_pos;
|
||||
}
|
||||
}
|
||||
}

@ -1235,7 +1207,7 @@ private:
        {
    #define M(TYPE) \
            case Join::Type::TYPE: \
                rows_added = fillColumns<STRICTNESS>(*maps.TYPE, columns_left, columns_keys_and_right); \
                rows_added = fillColumns<STRICTNESS>(*maps.TYPE, columns_keys_and_right); \
                break;
            APPLY_FOR_JOIN_VARIANTS(M)
    #undef M
@ -1249,32 +1221,12 @@ private:

        Block res = result_sample_block.cloneEmpty();

        /// @note it's possible to make ColumnConst here and materialize it later
        for (size_t i = 0; i < columns_left.size(); ++i)
            res.getByPosition(column_indices_left[i]).column = std::move(columns_left[i]);
            res.getByPosition(column_indices_left[i]).column = columns_left[i]->cloneResized(rows_added);

        if (key_renames_indices.empty())
        {
            for (size_t i = 0; i < columns_keys_and_right.size(); ++i)
                res.getByPosition(column_indices_keys_and_right[i]).column = std::move(columns_keys_and_right[i]);
        }
        else
        {
            for (size_t i = 0; i < columns_keys_and_right.size(); ++i)
            {
                size_t key_idx = column_indices_keys_and_right[i];

                auto it = key_renames_indices.find(key_idx);
                if (it != key_renames_indices.end())
                {
                    auto & key_column = res.getByPosition(key_idx).column;
                    if (key_column->empty())
                        key_column = key_column->cloneResized(columns_keys_and_right[i]->size());
                    res.getByPosition(it->second).column = std::move(columns_keys_and_right[i]);
                }
                else
                    res.getByPosition(key_idx).column = std::move(columns_keys_and_right[i]);
            }
        }

        return res;
    }
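
The left-side columns of a non-joined block are now produced by columns_left[i]->cloneResized(rows_added) instead of being moved, so they come out filled with rows_added default values. A minimal standalone sketch of that padding idea, with std::vector standing in for IColumn (nothing below is ClickHouse API):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Standalone sketch of the cloneResized padding used above: resizing an
    // empty "column" to rows_added rows fills it with default values.
    std::vector<int> cloneResized(const std::vector<int> & col, size_t new_size)
    {
        std::vector<int> res = col;
        res.resize(new_size); // extra rows are value-initialized: 0 here, NULL for Nullable columns in the real code
        return res;
    }

    int main()
    {
        auto padded = cloneResized({}, 3);
        std::cout << padded.size() << ' ' << padded[0] << '\n'; // prints: 3 0
    }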

@ -1296,7 +1248,7 @@ private:
    }

    template <ASTTableJoin::Strictness STRICTNESS, typename Map>
    size_t fillColumns(const Map & map, MutableColumns & columns_left, MutableColumns & columns_keys_and_right)
    size_t fillColumns(const Map & map, MutableColumns & columns_keys_and_right)
    {
        size_t rows_added = 0;

@ -1313,7 +1265,7 @@ private:
            if (it->getSecond().getUsed())
                continue;

            AdderNonJoined<STRICTNESS, typename Map::mapped_type>::add(it->getSecond(), rows_added, columns_left, columns_keys_and_right);
            AdderNonJoined<STRICTNESS, typename Map::mapped_type>::add(it->getSecond(), rows_added, columns_keys_and_right);

            if (rows_added >= max_block_size)
            {

@ -228,6 +228,52 @@ public:
        std::unique_ptr<HashMap<UInt128, Mapped, UInt128HashCRC32>> keys128;
        std::unique_ptr<HashMap<UInt256, Mapped, UInt256HashCRC32>> keys256;
        std::unique_ptr<HashMap<UInt128, Mapped, UInt128TrivialHash>> hashed;

        void create(Type which)
        {
            switch (which)
            {
                case Type::EMPTY: break;
                case Type::CROSS: break;

            #define M(NAME) \
                case Type::NAME: NAME = std::make_unique<typename decltype(NAME)::element_type>(); break;
                APPLY_FOR_JOIN_VARIANTS(M)
            #undef M
            }
        }

        size_t getTotalRowCount(Type which) const
        {
            switch (which)
            {
                case Type::EMPTY: return 0;
                case Type::CROSS: return 0;

            #define M(NAME) \
                case Type::NAME: return NAME ? NAME->size() : 0;
                APPLY_FOR_JOIN_VARIANTS(M)
            #undef M
            }

            __builtin_unreachable();
        }

        size_t getTotalByteCountImpl(Type which) const
        {
            switch (which)
            {
                case Type::EMPTY: return 0;
                case Type::CROSS: return 0;

            #define M(NAME) \
                case Type::NAME: return NAME ? NAME->getBufferSizeInBytes() : 0;
                APPLY_FOR_JOIN_VARIANTS(M)
            #undef M
            }

            __builtin_unreachable();
        }
    };

    using MapsAny = MapsTemplate<WithFlags<false, false, RowRef>>;
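
The three methods above share one dispatch idiom: an X-macro (APPLY_FOR_JOIN_VARIANTS) expands one switch case per map variant. A minimal standalone sketch of the same idiom, with an invented APPLY_FOR_VARIANTS and ordinary std::unordered_map in place of the HashMap variants:

    #include <cstdint>
    #include <iostream>
    #include <memory>
    #include <string>
    #include <unordered_map>

    // Standalone sketch of the X-macro dispatch used by the maps struct above.
    #define APPLY_FOR_VARIANTS(M) \
        M(key32) \
        M(key_string)

    struct Maps
    {
        enum class Type { EMPTY, key32, key_string };

        std::unique_ptr<std::unordered_map<uint32_t, int>> key32;
        std::unique_ptr<std::unordered_map<std::string, int>> key_string;

        void create(Type which)
        {
            switch (which)
            {
                case Type::EMPTY: break;
            #define M(NAME) \
                case Type::NAME: NAME = std::make_unique<decltype(NAME)::element_type>(); break;
                APPLY_FOR_VARIANTS(M)
            #undef M
            }
        }

        size_t getTotalRowCount(Type which) const
        {
            switch (which)
            {
                case Type::EMPTY: return 0;
            #define M(NAME) \
                case Type::NAME: return NAME ? NAME->size() : 0;
                APPLY_FOR_VARIANTS(M)
            #undef M
            }
            return 0;
        }
    };

    int main()
    {
        Maps maps;
        maps.create(Maps::Type::key_string);
        (*maps.key_string)["joined"] = 1;
        std::cout << maps.getTotalRowCount(Maps::Type::key_string) << '\n'; // prints: 1
    }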

@ -72,7 +72,7 @@ bool MutationsInterpreter::isStorageTouchedByMutations() const
    context_copy.getSettingsRef().merge_tree_uniform_read_distribution = 0;
    context_copy.getSettingsRef().max_threads = 1;

    BlockInputStreamPtr in = InterpreterSelectQuery(select, context_copy, storage, QueryProcessingStage::Complete).execute().in;
    BlockInputStreamPtr in = InterpreterSelectQuery(select, context_copy, storage).execute().in;

    Block block = in->read();
    if (!block.rows())
@ -367,7 +367,7 @@ void MutationsInterpreter::prepare(bool dry_run)
        select->children.push_back(where_expression);
    }

    interpreter_select = std::make_unique<InterpreterSelectQuery>(select, context, storage, QueryProcessingStage::Complete, dry_run);
    interpreter_select = std::make_unique<InterpreterSelectQuery>(select, context, storage, SelectQueryOptions().analyze(dry_run));

    is_prepared = true;
}

76 dbms/src/Interpreters/SelectQueryOptions.h Normal file
@ -0,0 +1,76 @@
#pragma once

#include <Core/QueryProcessingStage.h>

namespace DB
{

/**
 * to_stage
 * - the stage up to which the query is to be executed. By default, it is executed to the end.
 *   Execution can stop at the intermediate aggregation state, whose partial results are combined from different servers for distributed query processing.
 *
 * subquery_depth
 * - controls the limit on the nesting depth of subqueries. For a subquery, a value incremented by one is passed;
 *   for INSERT SELECT, a value of 1 is passed instead of 0.
 *
 * only_analyze
 * - the object was created only for query analysis.
 *
 * is_subquery
 * - enables subquery-specific behaviour, e.g. duplicated columns need not be passed in the result, since the result is consumed indirectly.
 */
struct SelectQueryOptions
{
    QueryProcessingStage::Enum to_stage;
    size_t subquery_depth;
    bool only_analyze;
    bool modify_inplace;
    bool remove_duplicates;

    SelectQueryOptions(QueryProcessingStage::Enum stage = QueryProcessingStage::Complete, size_t depth = 0)
        : to_stage(stage)
        , subquery_depth(depth)
        , only_analyze(false)
        , modify_inplace(false)
        , remove_duplicates(false)
    {}

    SelectQueryOptions copy() const { return *this; }

    SelectQueryOptions subquery() const
    {
        SelectQueryOptions out = *this;
        out.to_stage = QueryProcessingStage::Complete;
        ++out.subquery_depth;
        return out;
    }

    SelectQueryOptions & analyze(bool value = true)
    {
        only_analyze = value;
        return *this;
    }

    SelectQueryOptions & modify(bool value = true)
    {
        modify_inplace = value;
        return *this;
    }

    SelectQueryOptions & noModify() { return modify(false); }

    SelectQueryOptions & removeDuplicates(bool value = true)
    {
        remove_duplicates = value;
        return *this;
    }

    SelectQueryOptions & noSubquery()
    {
        subquery_depth = 0;
        return *this;
    }
};

}
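
The header above replaces a tail of positional parameters (stage, depth, only_analyze, ...) with one chainable options object. A minimal standalone sketch of the same fluent-setter pattern (all names invented; QueryProcessingStage is reduced to a plain int):

    #include <cstddef>
    #include <iostream>

    // Standalone sketch of the fluent-setter pattern SelectQueryOptions uses.
    struct Options
    {
        int to_stage = 2;              // stands in for QueryProcessingStage::Complete
        size_t subquery_depth = 0;
        bool only_analyze = false;
        bool remove_duplicates = false;

        Options subquery() const       // returns a copy, like SelectQueryOptions::subquery()
        {
            Options out = *this;
            ++out.subquery_depth;
            return out;
        }

        Options & analyze(bool value = true) { only_analyze = value; return *this; }
        Options & removeDuplicates(bool value = true) { remove_duplicates = value; return *this; }
    };

    int main()
    {
        // Mirrors SelectQueryOptions(QueryProcessingStage::Complete, depth).subquery()
        // followed by subquery_options.removeDuplicates() in interpretSubquery below.
        auto opts = Options{}.subquery().removeDuplicates();
        std::cout << opts.subquery_depth << ' ' << opts.remove_duplicates << '\n'; // prints: 1 1
    }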

@ -123,24 +123,69 @@ bool hasArrayJoin(const ASTPtr & ast)
    return false;
}

/// Keep the number of columns for 'GLOBAL IN (SELECT 1 AS a, a)'
void renameDuplicatedColumns(const ASTSelectQuery * select_query)
{
    ASTs & elements = select_query->select_expression_list->children;

    std::set<String> all_column_names;
    std::set<String> assigned_column_names;

    for (auto & expr : elements)
        all_column_names.insert(expr->getAliasOrColumnName());

    for (auto & expr : elements)
    {
        auto name = expr->getAliasOrColumnName();

        if (!assigned_column_names.insert(name).second)
        {
            size_t i = 1;
            while (all_column_names.end() != all_column_names.find(name + "_" + toString(i)))
                ++i;

            name = name + "_" + toString(i);
            expr = expr->clone(); /// Cancels fusing of identical expressions in the tree.
            expr->setAlias(name);

            all_column_names.insert(name);
            assigned_column_names.insert(name);
        }
    }
}
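
renameDuplicatedColumns keeps the column count stable for queries like GLOBAL IN (SELECT 1 AS a, a) by giving each duplicated name the first free name_N suffix. A standalone sketch of just that renaming rule, with plain strings in place of the AST expressions:

    #include <cstddef>
    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    // Standalone sketch of the renaming scheme in renameDuplicatedColumns above.
    std::vector<std::string> renameDuplicates(std::vector<std::string> names)
    {
        std::set<std::string> all_column_names(names.begin(), names.end());
        std::set<std::string> assigned_column_names;

        for (auto & name : names)
        {
            if (!assigned_column_names.insert(name).second)
            {
                size_t i = 1;
                while (all_column_names.count(name + "_" + std::to_string(i)))
                    ++i;

                name += "_" + std::to_string(i);
                all_column_names.insert(name);
                assigned_column_names.insert(name);
            }
        }
        return names;
    }

    int main()
    {
        // The second `a` skips the taken a_1 and becomes a_2.
        for (const auto & n : renameDuplicates({"a", "a", "a_1"}))
            std::cout << n << ' ';                  // prints: a a_2 a_1
        std::cout << '\n';
    }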

/// Sometimes we have to calculate more columns in the SELECT clause than will be returned from the query.
/// This is the case when we have DISTINCT or arrayJoin: we require more columns in SELECT even if we need fewer columns in the result.
void removeUnneededColumnsFromSelectClause(const ASTSelectQuery * select_query, const Names & required_result_columns)
/// We also have to remove duplicates in case of GLOBAL subqueries. Their results are placed into tables, so duplicates are impossible.
void removeUnneededColumnsFromSelectClause(const ASTSelectQuery * select_query, const Names & required_result_columns, bool remove_dups)
{
    if (required_result_columns.empty())
        return;

    ASTs & elements = select_query->select_expression_list->children;

    std::map<String, size_t> required_columns_with_duplicate_count;

    if (!required_result_columns.empty())
    {
        /// Some columns may be queried multiple times, like SELECT x, y, y FROM table.
        for (const auto & name : required_result_columns)
        {
            if (remove_dups)
                required_columns_with_duplicate_count[name] = 1;
            else
                ++required_columns_with_duplicate_count[name];
        }
    }
    else if (remove_dups)
    {
        /// Even if we have no requirements, there could be duplicates because of asterisks: SELECT *, t.*
        for (const auto & elem : elements)
            required_columns_with_duplicate_count.emplace(elem->getAliasOrColumnName(), 1);
    }
    else
        return;

    ASTs new_elements;
    new_elements.reserve(elements.size());

    /// Some columns may be queried multiple times, like SELECT x, y, y FROM table.
    /// In that case we keep them exactly the same number of times.
    std::map<String, size_t> required_columns_with_duplicate_count;
    for (const auto & name : required_result_columns)
        ++required_columns_with_duplicate_count[name];

    for (const auto & elem : elements)
    {
        String name = elem->getAliasOrColumnName();
@ -645,6 +690,9 @@ SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyze(

    if (select_query)
    {
        if (remove_duplicates)
            renameDuplicatedColumns(select_query);

        if (const ASTTablesInSelectQueryElement * node = select_query->join())
        {
            if (settings.enable_optimize_predicate_expression)
@ -688,7 +736,7 @@ SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyze(
    /// Must be after 'normalizeTree' (after expanding aliases, so that aliases do not get lost)
    /// and before 'executeScalarSubqueries', 'analyzeAggregation', etc., to avoid excessive calculations.
    if (select_query)
        removeUnneededColumnsFromSelectClause(select_query, required_result_columns);
        removeUnneededColumnsFromSelectClause(select_query, required_result_columns, remove_duplicates);

    /// Executing scalar subqueries - replacing them with constant values.
    executeScalarSubqueries(query, context, subquery_depth);

@ -2,6 +2,7 @@

#include <Interpreters/AnalyzedJoin.h>
#include <Interpreters/Aliases.h>
#include <Interpreters/SelectQueryOptions.h>

namespace DB
{
@ -55,9 +56,10 @@ using SyntaxAnalyzerResultPtr = std::shared_ptr<const SyntaxAnalyzerResult>;
class SyntaxAnalyzer
{
public:
    SyntaxAnalyzer(const Context & context_, size_t subquery_depth_ = 0)
    SyntaxAnalyzer(const Context & context_, const SelectQueryOptions & select_options = {})
        : context(context_)
        , subquery_depth(subquery_depth_)
        , subquery_depth(select_options.subquery_depth)
        , remove_duplicates(select_options.remove_duplicates)
    {}

    SyntaxAnalyzerResultPtr analyze(
@ -69,6 +71,7 @@ public:
private:
    const Context & context;
    size_t subquery_depth;
    bool remove_duplicates;
};

}

@ -41,6 +41,8 @@ std::shared_ptr<InterpreterSelectWithUnionQuery> interpretSubquery(
    subquery_settings.extremes = 0;
    subquery_context.setSettings(subquery_settings);

    auto subquery_options = SelectQueryOptions(QueryProcessingStage::Complete, subquery_depth).subquery();

    ASTPtr query;
    if (table || function)
    {
@ -83,48 +85,10 @@ std::shared_ptr<InterpreterSelectWithUnionQuery> interpretSubquery(
    else
    {
        query = subquery->children.at(0);

        /** Columns with the same name can be specified in a subquery. For example, SELECT x, x FROM t
          * This is bad, because the result of such a query cannot be saved to a table, because a table cannot have columns with the same name.
          * Saving to a table is required for GLOBAL subqueries.
          *
          * To avoid this situation, we rename duplicated columns.
          */

        std::set<std::string> all_column_names;
        std::set<std::string> assigned_column_names;

        if (const auto * select_with_union = query->as<ASTSelectWithUnionQuery>())
        {
            if (const auto * select = select_with_union->list_of_selects->children.at(0)->as<ASTSelectQuery>())
            {
                for (auto & expr : select->select_expression_list->children)
                    all_column_names.insert(expr->getAliasOrColumnName());

                for (auto & expr : select->select_expression_list->children)
                {
                    auto name = expr->getAliasOrColumnName();

                    if (!assigned_column_names.insert(name).second)
                    {
                        size_t i = 1;
                        while (all_column_names.end() != all_column_names.find(name + "_" + toString(i)))
                            ++i;

                        name = name + "_" + toString(i);
                        expr = expr->clone(); /// Cancels fusing of identical expressions in the tree.
                        expr->setAlias(name);

                        all_column_names.insert(name);
                        assigned_column_names.insert(name);
                    }
                }
            }
        }
        subquery_options.removeDuplicates();
    }

    return std::make_shared<InterpreterSelectWithUnionQuery>(
        query, subquery_context, required_source_columns, QueryProcessingStage::Complete, subquery_depth + 1);
    return std::make_shared<InterpreterSelectWithUnionQuery>(query, subquery_context, subquery_options, required_source_columns);
}

}

@ -39,9 +39,13 @@ ASTPtr ASTColumnDeclaration::clone() const
void ASTColumnDeclaration::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
{
    frame.need_parens = false;
    std::string indent_str = settings.one_line ? "" : std::string(4 * frame.indent, ' ');

    settings.ostr << settings.nl_or_ws << indent_str << backQuoteIfNeed(name);
    if (!settings.one_line)
        settings.ostr << settings.nl_or_ws << std::string(4 * frame.indent, ' ');

    /// We always have to backquote column names to avoid ambiguity with INDEX and other declarations in the CREATE query.
    settings.ostr << backQuote(name);

    if (type)
    {
        settings.ostr << ' ';

@ -25,7 +25,6 @@ const char * IAST::hilite_alias = "\033[0;32m";
const char * IAST::hilite_none = "\033[0m";


/// Quote the identifier with backquotes, if required.
String backQuoteIfNeed(const String & x)
{
    String res(x.size(), '\0');
@ -36,6 +35,16 @@ String backQuoteIfNeed(const String & x)
    return res;
}

String backQuote(const String & x)
{
    String res(x.size(), '\0');
    {
        WriteBufferFromString wb(res);
        writeBackQuotedString(x, wb);
    }
    return res;
}


size_t IAST::checkSize(size_t max_size) const
{

@ -208,7 +208,9 @@ private:
};


/// Surrounds an identifier with backquotes if necessary.
/// Quote the identifier with backquotes, if required.
String backQuoteIfNeed(const String & x);
/// Quote the identifier with backquotes.
String backQuote(const String & x);

}
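
The distinction above is the crux of the formatting fix in ASTColumnDeclaration: backQuoteIfNeed leaves plain-looking identifiers bare, so a column named INDEX would be printed unquoted and clash with the INDEX declaration syntax. A standalone sketch of the two behaviours; the needs-quoting rule below is an assumption approximating the real parser's identifier rules, and in-quote escaping is elided:

    #include <cctype>
    #include <iostream>
    #include <string>

    // Standalone sketch of the two quoting helpers declared above.
    bool looksLikePlainIdentifier(const std::string & x)
    {
        if (x.empty() || (!std::isalpha(static_cast<unsigned char>(x[0])) && x[0] != '_'))
            return false;
        for (char c : x)
            if (!std::isalnum(static_cast<unsigned char>(c)) && c != '_')
                return false;
        return true;
    }

    std::string backQuote(const std::string & x)        // always quotes
    {
        return "`" + x + "`";
    }

    std::string backQuoteIfNeed(const std::string & x)  // quotes only when required
    {
        return looksLikePlainIdentifier(x) ? x : backQuote(x);
    }

    int main()
    {
        std::cout << backQuoteIfNeed("INDEX") << ' ' << backQuote("INDEX") << '\n';
        // prints: INDEX `INDEX` (the bare form is what made CREATE queries ambiguous)
    }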

@ -205,7 +205,9 @@ void AlterCommand::apply(ColumnsDescription & columns_description, IndicesDescri
    }
    else if (type == MODIFY_ORDER_BY)
    {
        if (!primary_key_ast)
        if (!primary_key_ast && order_by_ast)
        {
            /// The primary and sorting keys become independent after this ALTER, so we have to
            /// save the old ORDER BY expression as the new primary key.

@ -25,6 +25,8 @@
#include <Poco/Ext/ThreadNumber.h>

#include <ext/range.h>
#include <DataStreams/FilterBlockInputStream.h>
#include <DataStreams/ExpressionBlockInputStream.h>


namespace ProfileEvents
@ -221,7 +223,21 @@ BlockInputStreams StorageBuffer::read(
        */
    if (processed_stage > QueryProcessingStage::FetchColumns)
        for (auto & stream : streams_from_buffers)
            stream = InterpreterSelectQuery(query_info.query, context, stream, processed_stage).execute().in;
            stream = InterpreterSelectQuery(query_info.query, context, stream, SelectQueryOptions(processed_stage)).execute().in;

    if (query_info.prewhere_info)
    {
        for (auto & stream : streams_from_buffers)
            stream = std::make_shared<FilterBlockInputStream>(stream, query_info.prewhere_info->prewhere_actions,
                query_info.prewhere_info->prewhere_column_name, query_info.prewhere_info->remove_prewhere_column);

        if (query_info.prewhere_info->alias_actions)
        {
            for (auto & stream : streams_from_buffers)
                stream = std::make_shared<ExpressionBlockInputStream>(stream, query_info.prewhere_info->alias_actions);
        }
    }

    streams_from_dst.insert(streams_from_dst.end(), streams_from_buffers.begin(), streams_from_buffers.end());
    return streams_from_dst;

@ -74,7 +74,15 @@ public:
    void rename(const String & /*new_path_to_db*/, const String & /*new_database_name*/, const String & new_table_name) override { name = new_table_name; }

    bool supportsSampling() const override { return true; }
    bool supportsPrewhere() const override { return false; }
    bool supportsPrewhere() const override
    {
        if (no_destination)
            return false;
        auto dest = global_context.tryGetTable(destination_database, destination_table);
        if (dest && dest.get() != this)
            return dest->supportsPrewhere();
        return false;
    }
    bool supportsFinal() const override { return true; }
    bool supportsIndexForIn() const override { return true; }
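
StorageBuffer can now answer supportsPrewhere by asking its destination table, guarding against a missing destination and against pointing at itself. A standalone sketch of that delegation (all types below are invented stand-ins for the IStorage interface):

    #include <iostream>
    #include <memory>

    // Standalone sketch of the delegation in StorageBuffer::supportsPrewhere above.
    struct Storage
    {
        virtual ~Storage() = default;
        virtual bool supportsPrewhere() const { return false; }
    };

    struct MergeTreeLike : Storage
    {
        bool supportsPrewhere() const override { return true; }
    };

    struct BufferLike : Storage
    {
        std::shared_ptr<Storage> destination;   // may be empty, like no_destination

        bool supportsPrewhere() const override
        {
            if (destination && destination.get() != this)
                return destination->supportsPrewhere();
            return false;
        }
    };

    int main()
    {
        BufferLike buf;
        std::cout << buf.supportsPrewhere() << ' ';     // prints: 0 (no destination)
        buf.destination = std::make_shared<MergeTreeLike>();
        std::cout << buf.supportsPrewhere() << '\n';    // prints: 1 (delegated)
    }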

@ -286,7 +286,8 @@ BlockInputStreams StorageDistributed::read(
    const auto & modified_query_ast = rewriteSelectQuery(
        query_info.query, remote_database, remote_table, remote_table_function_ptr);

    Block header = materializeBlock(InterpreterSelectQuery(query_info.query, context, Names{}, processed_stage).getSampleBlock());
    Block header = materializeBlock(
        InterpreterSelectQuery(query_info.query, context, SelectQueryOptions(processed_stage)).getSampleBlock());

    ClusterProxy::SelectStreamFactory select_stream_factory = remote_table_function_ptr
        ? ClusterProxy::SelectStreamFactory(

@ -274,7 +274,7 @@ BlockInputStreams StorageMerge::createSourceStreams(const SelectQueryInfo & quer
    if (!storage)
        return BlockInputStreams{
            InterpreterSelectQuery(modified_query_info.query, modified_context, std::make_shared<OneBlockInputStream>(header),
                processed_stage, true).execute().in};
                SelectQueryOptions(processed_stage).analyze()).execute().in};

    BlockInputStreams source_streams;

@ -295,7 +295,7 @@ BlockInputStreams StorageMerge::createSourceStreams(const SelectQueryInfo & quer
    modified_context.getSettingsRef().max_threads = UInt64(streams_num);
    modified_context.getSettingsRef().max_streams_to_max_threads_ratio = 1;

    InterpreterSelectQuery interpreter{modified_query_info.query, modified_context, Names{}, processed_stage};
    InterpreterSelectQuery interpreter{modified_query_info.query, modified_context, SelectQueryOptions(processed_stage)};
    BlockInputStreamPtr interpreter_stream = interpreter.execute().in;

    /** Materialization is needed, since constants come materialized from distributed storage.
@ -429,7 +429,7 @@ Block StorageMerge::getQueryHeader(
        case QueryProcessingStage::Complete:
            return materializeBlock(InterpreterSelectQuery(
                query_info.query, context, std::make_shared<OneBlockInputStream>(getSampleBlockForColumns(column_names)),
                processed_stage, true).getSampleBlock());
                SelectQueryOptions(processed_stage).analyze()).getSampleBlock());
    }
    throw Exception("Logical Error: unknown processed stage.", ErrorCodes::LOGICAL_ERROR);
}

@ -63,7 +63,7 @@ BlockInputStreams StorageView::read(
        current_inner_query = new_inner_query;
    }

    res = InterpreterSelectWithUnionQuery(current_inner_query, context, column_names).executeWithMultipleStreams();
    res = InterpreterSelectWithUnionQuery(current_inner_query, context, {}, column_names).executeWithMultipleStreams();

    /// It's expected that the columns read from storage are not constant.
    /// That is because the method 'getSampleBlockForColumns' is used to obtain the structure of the result in InterpreterSelectQuery.

@ -56,6 +56,7 @@ const char * auto_config_build[]
    "USE_PROTOBUF", "@USE_PROTOBUF@",
    "USE_BROTLI", "@USE_BROTLI@",
    "USE_SSL", "@USE_SSL@",
    "USE_HYPERSCAN", "@USE_HYPERSCAN@",

    nullptr, nullptr
};

@ -404,6 +404,8 @@ def main(args):


def find_binary(name):
    if os.path.exists(name) and os.access(name, os.X_OK):
        return True
    paths = os.environ.get("PATH").split(':')
    for path in paths:
        if os.access(os.path.join(path, name), os.X_OK):
@ -416,7 +418,7 @@ if __name__ == '__main__':
    parser = ArgumentParser(description='ClickHouse functional tests')
    parser.add_argument('-q', '--queries', help='Path to queries dir')
    parser.add_argument('--tmp', help='Path to tmp dir')
    parser.add_argument('-b', '--binary', default='clickhouse', help='Main clickhouse binary')
    parser.add_argument('-b', '--binary', default='clickhouse', help='Path to clickhouse binary or name of binary in PATH')
    parser.add_argument('-c', '--client', help='Client program')
    parser.add_argument('--extract_from_config', help='extract-from-config program')
    parser.add_argument('--configclient', help='Client config (if you use non-default ports)')

@ -9,18 +9,18 @@ ROOT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && cd ../.. && pwd)
DATA_DIR=${DATA_DIR:=`mktemp -d /tmp/clickhouse.test..XXXXX`}
DATA_DIR_PATTERN=${DATA_DIR_PATTERN:=/tmp/clickhouse} # path from the config file; will be replaced with a temporary one
LOG_DIR=${LOG_DIR:=$DATA_DIR/log}
export CLICKHOUSE_BINARY=${CLICKHOUSE_BINARY:="clickhouse"}
( [ -x "$ROOT_DIR/dbms/programs/${CLICKHOUSE_BINARY}-server" ] || [ -x "$ROOT_DIR/dbms/programs/${CLICKHOUSE_BINARY}" ] ) && BUILD_DIR=${BUILD_DIR:=$ROOT_DIR} # Build without separate build dir
export CLICKHOUSE_BINARY_NAME=${CLICKHOUSE_BINARY_NAME:="clickhouse"}
( [ -x "$ROOT_DIR/dbms/programs/${CLICKHOUSE_BINARY_NAME}-server" ] || [ -x "$ROOT_DIR/dbms/programs/${CLICKHOUSE_BINARY_NAME}" ] ) && BUILD_DIR=${BUILD_DIR:=$ROOT_DIR} # Build without separate build dir
[ -d "$ROOT_DIR/build${BUILD_TYPE}" ] && BUILD_DIR=${BUILD_DIR:=$ROOT_DIR/build${BUILD_TYPE}}
BUILD_DIR=${BUILD_DIR:=$ROOT_DIR}
[ -x "${CLICKHOUSE_BINARY}-server" ] && [ -x "${CLICKHOUSE_BINARY}-client" ] && BIN_DIR= # Allow run in /usr/bin
( [ -x "$BUILD_DIR/dbms/programs/${CLICKHOUSE_BINARY}" ] || [ -x "$BUILD_DIR/dbms/programs/${CLICKHOUSE_BINARY}-server" ] ) && BIN_DIR=${BIN_DIR:=$BUILD_DIR/dbms/programs/}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY}-server" ] && CLICKHOUSE_SERVER=${CLICKHOUSE_SERVER:=$BIN_DIR/${CLICKHOUSE_BINARY}-server}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY}" ] && CLICKHOUSE_SERVER=${CLICKHOUSE_SERVER:=$BIN_DIR/${CLICKHOUSE_BINARY} server}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY}-client" ] && CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT:=$BIN_DIR/${CLICKHOUSE_BINARY}-client}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY}" ] && CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT:=$BIN_DIR/${CLICKHOUSE_BINARY} client}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY}-extract-from-config" ] && CLICKHOUSE_EXTRACT=${CLICKHOUSE_EXTRACT:=$BIN_DIR/${CLICKHOUSE_BINARY}-extract-from-config}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY}" ] && CLICKHOUSE_EXTRACT=${CLICKHOUSE_EXTRACT:=$BIN_DIR/${CLICKHOUSE_BINARY} extract-from-config}
[ -x "${CLICKHOUSE_BINARY_NAME}-server" ] && [ -x "${CLICKHOUSE_BINARY_NAME}-client" ] && BIN_DIR= # Allow run in /usr/bin
( [ -x "$BUILD_DIR/dbms/programs/${CLICKHOUSE_BINARY_NAME}" ] || [ -x "$BUILD_DIR/dbms/programs/${CLICKHOUSE_BINARY_NAME}-server" ] ) && BIN_DIR=${BIN_DIR:=$BUILD_DIR/dbms/programs/}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY_NAME}-server" ] && CLICKHOUSE_SERVER=${CLICKHOUSE_SERVER:=$BIN_DIR/${CLICKHOUSE_BINARY_NAME}-server}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY_NAME}" ] && CLICKHOUSE_SERVER=${CLICKHOUSE_SERVER:=$BIN_DIR/${CLICKHOUSE_BINARY_NAME} server}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY_NAME}-client" ] && CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT:=$BIN_DIR/${CLICKHOUSE_BINARY_NAME}-client}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY_NAME}" ] && CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT:=$BIN_DIR/${CLICKHOUSE_BINARY_NAME} client}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY_NAME}-extract-from-config" ] && CLICKHOUSE_EXTRACT=${CLICKHOUSE_EXTRACT:=$BIN_DIR/${CLICKHOUSE_BINARY_NAME}-extract-from-config}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY_NAME}" ] && CLICKHOUSE_EXTRACT=${CLICKHOUSE_EXTRACT:=$BIN_DIR/${CLICKHOUSE_BINARY_NAME} extract-from-config}

[ -f "$CUR_DIR/server-test.xml" ] && CONFIG_DIR=${CONFIG_DIR=$CUR_DIR}/
CONFIG_CLIENT_DIR=${CONFIG_CLIENT_DIR=$CONFIG_DIR}
@ -131,7 +131,7 @@ else
    TEST_DICT=${TEST_DICT=1}
    CLICKHOUSE_CLIENT_QUERY="${CLICKHOUSE_CLIENT} --config ${CLICKHOUSE_CONFIG_CLIENT} --port $CLICKHOUSE_PORT_TCP -m -n -q"
    $CLICKHOUSE_CLIENT_QUERY 'SELECT * from system.build_options; SELECT * FROM system.clusters;'
    CLICKHOUSE_TEST="env ${TEST_DIR}clickhouse-test --binary ${BIN_DIR}${CLICKHOUSE_BINARY} --configclient $CLICKHOUSE_CONFIG_CLIENT --configserver $CLICKHOUSE_CONFIG --tmp $DATA_DIR/tmp --queries $QUERIES_DIR $TEST_OPT0 $TEST_OPT"
    CLICKHOUSE_TEST="env ${TEST_DIR}clickhouse-test --binary ${BIN_DIR}${CLICKHOUSE_BINARY_NAME} --configclient $CLICKHOUSE_CONFIG_CLIENT --configserver $CLICKHOUSE_CONFIG --tmp $DATA_DIR/tmp --queries $QUERIES_DIR $TEST_OPT0 $TEST_OPT"
    CLICKHOUSE_PERFORMANCE_TEST="${BIN_DIR}clickhouse-performance-test --port $CLICKHOUSE_PORT_TCP --recursive $CUR_DIR/performance --skip-tags=long"
    if [ "${TEST_RUN_STRESS}" ]; then
        # Running tests in parallel will fail some results (tests can create/fill/drop the same tables)

@ -23,22 +23,36 @@
    </stop_conditions>

    <query><![CDATA[select count(position(URL, 'yandex')), count(position(URL, 'google')) FROM hits_100m_single]]></query>
    <query><![CDATA[select count(multiPosition(URL, ['yandex', 'google'])) FROM hits_100m_single]]></query>
    <query><![CDATA[select count(multiSearchAllPositions(URL, ['yandex', 'google'])) FROM hits_100m_single]]></query>
    <query><![CDATA[select count(match(URL, 'yandex|google')) FROM hits_100m_single]]></query>
    <query><![CDATA[select count(multiMatchAny(URL, ['yandex', 'google'])) FROM hits_100m_single]]></query>

    <query><![CDATA[select sum(match(URL, 'yandex')), sum(match(URL, 'google')), sum(match(URL, 'yahoo')), sum(match(URL, 'pikabu')) FROM hits_100m_single]]></query>
    <query><![CDATA[select sum(multiSearch(URL, ['yandex', 'google', 'yahoo', 'pikabu'])) from hits_100m_single]]></query>
    <query><![CDATA[select sum(multiSearchAny(URL, ['yandex', 'google', 'yahoo', 'pikabu'])) from hits_100m_single]]></query>
    <query><![CDATA[select sum(multiMatchAny(URL, ['yandex', 'google', 'yahoo', 'pikabu'])) from hits_100m_single]]></query>
    <query><![CDATA[select sum(match(URL, 'yandex|google|yahoo|pikabu')) FROM hits_100m_single]]></query>

    <query><![CDATA[select sum(match(URL, 'yandex')), sum(match(URL, 'google')), sum(match(URL, 'http')) FROM hits_100m_single]]></query>
    <query><![CDATA[select sum(multiSearch(URL, ['yandex', 'google', 'http'])) from hits_100m_single]]></query>
    <query><![CDATA[select sum(multiSearchAny(URL, ['yandex', 'google', 'http'])) from hits_100m_single]]></query>
    <query><![CDATA[select sum(multiMatchAny(URL, ['yandex', 'google', 'http'])) from hits_100m_single]]></query>
    <query><![CDATA[select sum(match(URL, 'yandex|google|http')) FROM hits_100m_single]]></query>

    <query><![CDATA[select sum(match(URL, 'yandex')), sum(match(URL, 'google')), sum(match(URL, 'facebook')), sum(match(URL, 'wikipedia')), sum(match(URL, 'reddit')) FROM hits_100m_single]]></query>
    <query><![CDATA[select sum(multiSearch(URL, ['yandex', 'google', 'facebook', 'wikipedia', 'reddit'])) from hits_100m_single]]></query>
    <query><![CDATA[select sum(multiSearchAny(URL, ['yandex', 'google', 'facebook', 'wikipedia', 'reddit'])) from hits_100m_single]]></query>
    <query><![CDATA[select sum(multiMatchAny(URL, ['yandex', 'google', 'facebook', 'wikipedia', 'reddit'])) from hits_100m_single]]></query>
    <query><![CDATA[select sum(match(URL, 'yandex|google|facebook|wikipedia|reddit')) FROM hits_100m_single]]></query>

    <query><![CDATA[select sum(firstMatch(URL, ['yandex', 'google', 'http', 'facebook', 'google'])) from hits_100m_single]]></query>
    <query><![CDATA[select sum(multiSearchFirstIndex(URL, ['yandex', 'google', 'http', 'facebook', 'google'])) from hits_100m_single]]></query>

    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE multiMatchAny(URL, ['about/address', 'for_woman', '^https?://lm-company.ruy/$', 'ultimateguitar.com'])]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE match(URL, 'about/address|for_woman|^https?://lm-company.ruy/$|ultimateguitar.com')]]></query>

    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE match(URL, 'chelyabinsk.74.ru|doctor.74.ru|transport.74.ru|m.74.ru|//74.ru/|chel.74.ru|afisha.74.ru|diplom.74.ru|chelfin.ru|//chel.ru|chelyabinsk.ru|cheldoctor.ru|//mychel.ru|cheldiplom.ru|74.ru/video|market|poll|mail|conference|consult|contest|tags|feedback|pages|text')]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE multiMatchAny(URL, ['chelyabinsk.74.ru', 'doctor.74.ru', 'transport.74.ru', 'm.74.ru', '//74.ru/', 'chel.74.ru', 'afisha.74.ru', 'diplom.74.ru', 'chelfin.ru', '//chel.ru', 'chelyabinsk.ru', 'cheldoctor.ru', '//mychel.ru', 'cheldiplom.ru', '74.ru/video', 'market', 'poll', 'mail', 'conference', 'consult', 'contest', 'tags', 'feedback', 'pages', 'text'])]]></query>

    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE multiMatchAny(URL, ['chelyabinsk\\.74\\.ru', 'doctor\\.74\\.ru', 'transport\\.74\\.ru', 'm\\.74\\.ru', '//74\\.ru/', 'chel\\.74\\.ru', 'afisha\\.74\\.ru', 'diplom\\.74\\.ru', 'chelfin\\.ru', '//chel\\.ru', 'chelyabinsk\\.ru', 'cheldoctor\\.ru', '//mychel\\.ru', 'cheldiplom\\.ru', '74\\.ru/video', 'market', 'poll', 'mail', 'conference', 'consult', 'contest', 'tags', 'feedback', 'pages', 'text'])]]></query>
    <query><![CDATA[SELECT count() FROM hits_100m_single WHERE multiSearchAny(URL, ['chelyabinsk.74.ru', 'doctor.74.ru', 'transport.74.ru', 'm.74.ru', '//74.ru/', 'chel.74.ru', 'afisha.74.ru', 'diplom.74.ru', 'chelfin.ru', '//chel.ru', 'chelyabinsk.ru', 'cheldoctor.ru', '//mychel.ru', 'cheldiplom.ru', '74.ru/video', 'market', 'poll', 'mail', 'conference', 'consult', 'contest', 'tags', 'feedback', 'pages', 'text'])]]></query>


    <main_metric>
        <min_time/>

@ -1,14 +1,14 @@
d Date
k UInt64
i32 Int32
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32) ENGINE = MergeTree(d, k, 8192)
CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32) ENGINE = MergeTree(d, k, 8192)
2015-01-01 10 42
d Date
k UInt64
i32 Int32
n.ui8 Array(UInt8)
n.s Array(String)
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = MergeTree(d, k, 8192)
CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = MergeTree(d, k, 8192)
2015-01-01 8 40 [1,2,3] ['12','13','14']
2015-01-01 10 42 [] []
d Date
@ -17,7 +17,7 @@ i32 Int32
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
2015-01-01 7 39 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03']
2015-01-01 8 40 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00']
2015-01-01 10 42 [] [] []
@ -28,7 +28,7 @@ n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
s String DEFAULT \'0\'
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), s String DEFAULT \'0\') ENGINE = MergeTree(d, k, 8192)
CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), `s` String DEFAULT \'0\') ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500
2015-01-01 7 39 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 0
2015-01-01 8 40 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 0
@ -39,7 +39,7 @@ i32 Int32
n.ui8 Array(UInt8)
n.s Array(String)
s Int64
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, `n.ui8` Array(UInt8), `n.s` Array(String), s Int64) ENGINE = MergeTree(d, k, 8192)
CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `s` Int64) ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] 100500
2015-01-01 7 39 [10,20,30] ['120','130','140'] 0
2015-01-01 8 40 [1,2,3] ['12','13','14'] 0
@ -51,7 +51,7 @@ n.ui8 Array(UInt8)
n.s Array(String)
s UInt32
n.d Array(Date)
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, `n.ui8` Array(UInt8), `n.s` Array(String), s UInt32, `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `s` UInt32, `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00']
2015-01-01 7 39 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00']
2015-01-01 8 40 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00']
@ -65,7 +65,7 @@ k UInt64
i32 Int32
n.s Array(String)
s UInt32
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, `n.s` Array(String), s UInt32) ENGINE = MergeTree(d, k, 8192)
CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `n.s` Array(String), `s` UInt32) ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 ['asd','qwe','qwe'] 100500
2015-01-01 7 39 ['120','130','140'] 0
2015-01-01 8 40 ['12','13','14'] 0
@ -74,7 +74,7 @@ d Date
k UInt64
i32 Int32
s UInt32
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, s UInt32) ENGINE = MergeTree(d, k, 8192)
CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `s` UInt32) ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 100500
2015-01-01 7 39 0
2015-01-01 8 40 0
@ -85,7 +85,7 @@ i32 Int32
s UInt32
n.s Array(String)
n.d Array(Date)
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, s UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `s` UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 100500 [] []
2015-01-01 7 39 0 [] []
2015-01-01 8 40 0 [] []
@ -94,7 +94,7 @@ d Date
k UInt64
i32 Int32
s UInt32
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, s UInt32) ENGINE = MergeTree(d, k, 8192)
CREATE TABLE test.alter (`d` Date, `k` UInt64, `i32` Int32, `s` UInt32) ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 100500
2015-01-01 7 39 0
2015-01-01 8 40 0

@ -1,22 +1,22 @@
d Date
k UInt64
i32 Int32
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 10 42
d Date
k UInt64
i32 Int32
dt DateTime
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 9 41 1992-01-01 08:00:00
2015-01-01 10 42 0000-00-00 00:00:00
d Date
@ -25,14 +25,14 @@ i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14']
2015-01-01 9 41 1992-01-01 08:00:00 [] []
2015-01-01 10 42 0000-00-00 00:00:00 [] []
@ -43,7 +43,7 @@ dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
@ -51,7 +51,7 @@ dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03']
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00']
2015-01-01 9 41 1992-01-01 08:00:00 [] [] []
@ -64,7 +64,7 @@ n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
s String DEFAULT \'0\'
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), s String DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), `s` String DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
@ -73,7 +73,7 @@ n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
s String DEFAULT \'0\'
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), s String DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), `s` String DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500
2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 0
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 0
@ -86,7 +86,7 @@ dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
s Int64
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), s Int64) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` Int64) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
@ -94,7 +94,7 @@ dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
s Int64
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), s Int64) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` Int64) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500
2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0
@ -108,7 +108,7 @@ n.ui8 Array(UInt8)
n.s Array(String)
s UInt32
n.d Array(Date)
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), s UInt32, `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` UInt32, `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
@ -117,7 +117,7 @@ n.ui8 Array(UInt8)
n.s Array(String)
s UInt32
n.d Array(Date)
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), s UInt32, `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` UInt32, `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00']
2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00']
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00']
@ -129,14 +129,14 @@ i32 Int32
dt DateTime
n.s Array(String)
s UInt32
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.s` Array(String), s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.s` Array(String), `s` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
n.s Array(String)
s UInt32
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.s` Array(String), s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.s` Array(String), `s` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 ['asd','qwe','qwe'] 100500
2015-01-01 7 39 2014-07-14 13:26:50 ['120','130','140'] 0
2015-01-01 8 40 2012-12-12 12:12:12 ['12','13','14'] 0
@ -147,13 +147,13 @@ k UInt64
i32 Int32
dt DateTime
s UInt32
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 100500
2015-01-01 7 39 2014-07-14 13:26:50 0
2015-01-01 8 40 2012-12-12 12:12:12 0
@ -166,7 +166,7 @@ dt DateTime
s UInt32
n.s Array(String)
n.d Array(Date)
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
@ -174,7 +174,7 @@ dt DateTime
s UInt32
n.s Array(String)
n.d Array(Date)
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 100500 [] []
2015-01-01 7 39 2014-07-14 13:26:50 0 [] []
2015-01-01 8 40 2012-12-12 12:12:12 0 [] []
@ -185,13 +185,13 @@ k UInt64
i32 Int32
dt DateTime
s UInt32
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 100500
2015-01-01 7 39 2014-07-14 13:26:50 0
2015-01-01 8 40 2012-12-12 12:12:12 0
@ -202,13 +202,13 @@ k UInt64
i32 Int32
dt Date
s DateTime
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt Date, s DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` Date, `s` DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt Date
s DateTime
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt Date, s DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` Date, `s` DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 1970-01-02 06:55:00
2015-01-01 7 39 2014-07-14 0000-00-00 00:00:00
2015-01-01 8 40 2012-12-12 0000-00-00 00:00:00

@ -1,7 +1,7 @@
A
B
A 1 TinyLog CREATE TABLE test_show_tables.A ( A UInt8) ENGINE = TinyLog
B 1 TinyLog CREATE TABLE test_show_tables.B ( A UInt8) ENGINE = TinyLog
A 1 TinyLog CREATE TABLE test_show_tables.A (`A` UInt8) ENGINE = TinyLog
B 1 TinyLog CREATE TABLE test_show_tables.B (`A` UInt8) ENGINE = TinyLog
test_temporary_table
['test_show_tables'] ['test_materialized']
0

@ -23192,3 +23192,243 @@
1
1
1
… (the remaining added lines of this hunk are all the single value `1`; the hunk grows the reference file from 3 to 243 such rows, elided here)

(File diff suppressed because it is too large)
@ -1,4 +1,4 @@
|
||||
1
|
||||
CREATE TEMPORARY TABLE temp_tab ( number UInt64) ENGINE = Memory
|
||||
CREATE TEMPORARY TABLE temp_tab (`number` UInt64) ENGINE = Memory
|
||||
temp_tab
|
||||
0
|
||||
|
@ -1 +1 @@
|
||||
CREATE VIEW test.test_view ( id UInt64) AS SELECT * FROM test.test WHERE id = (SELECT 1)
|
||||
CREATE VIEW test.test_view (`id` UInt64) AS SELECT * FROM test.test WHERE id = (SELECT 1)
|
||||
|
@ -7,7 +7,7 @@ hello
|
||||
hello
|
||||
hello
|
||||
1970-01-01 00:00:01
|
||||
CREATE TABLE test.cast ( x UInt8, e Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')) ENGINE = MergeTree ORDER BY e SETTINGS index_granularity = 8192
|
||||
CREATE TABLE test.cast (`x` UInt8, `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')) ENGINE = MergeTree ORDER BY e SETTINGS index_granularity = 8192
|
||||
x UInt8
|
||||
e Enum8(\'hello\' = 1, \'world\' = 2) DEFAULT CAST(x, \'Enum8(\\\'hello\\\' = 1, \\\'world\\\' = 2)\')
|
||||
1 hello
|
||||
|
@ -1,4 +1,4 @@
|
||||
CREATE TABLE test.cast1 ( x UInt8, e Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_cast', 'r1') ORDER BY e SETTINGS index_granularity = 8192
|
||||
CREATE TABLE test.cast1 (`x` UInt8, `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_cast', 'r1') ORDER BY e SETTINGS index_granularity = 8192
|
||||
x UInt8
|
||||
e Enum8(\'hello\' = 1, \'world\' = 2) DEFAULT CAST(x, \'Enum8(\\\'hello\\\' = 1, \\\'world\\\' = 2)\')
|
||||
1 hello
|
||||
|
@ -1,4 +1,4 @@
CREATE TABLE test.check_query_comment_column ( first_column UInt8 DEFAULT 1 COMMENT \'comment 1\', second_column UInt8 MATERIALIZED first_column COMMENT \'comment 2\', third_column UInt8 ALIAS second_column COMMENT \'comment 3\', fourth_column UInt8 COMMENT \'comment 4\', fifth_column UInt8) ENGINE = TinyLog
CREATE TABLE test.check_query_comment_column (`first_column` UInt8 DEFAULT 1 COMMENT \'comment 1\', `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2\', `third_column` UInt8 ALIAS second_column COMMENT \'comment 3\', `fourth_column` UInt8 COMMENT \'comment 4\', `fifth_column` UInt8) ENGINE = TinyLog
first_column UInt8 DEFAULT 1 comment 1
second_column UInt8 MATERIALIZED first_column comment 2
third_column UInt8 ALIAS second_column comment 3
@ -11,7 +11,7 @@ fifth_column UInt8
│ check_query_comment_column │ fourth_column │ comment 4 │
│ check_query_comment_column │ fifth_column │ │
└────────────────────────────┴───────────────┴───────────┘
CREATE TABLE test.check_query_comment_column ( first_column UInt8 DEFAULT 1 COMMENT \'comment 1_1\', second_column UInt8 MATERIALIZED first_column COMMENT \'comment 2_1\', third_column UInt8 ALIAS second_column COMMENT \'comment 3_1\', fourth_column UInt8 COMMENT \'comment 4_1\', fifth_column UInt8 COMMENT \'comment 5_1\') ENGINE = TinyLog
CREATE TABLE test.check_query_comment_column (`first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_1\', `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_1\', `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_1\', `fourth_column` UInt8 COMMENT \'comment 4_1\', `fifth_column` UInt8 COMMENT \'comment 5_1\') ENGINE = TinyLog
┌─table──────────────────────┬─name──────────┬─comment─────┐
│ check_query_comment_column │ first_column │ comment 1_2 │
│ check_query_comment_column │ second_column │ comment 2_2 │
@ -19,8 +19,8 @@ CREATE TABLE test.check_query_comment_column ( first_column UInt8 DEFAULT 1 COMM
│ check_query_comment_column │ fourth_column │ comment 4_2 │
│ check_query_comment_column │ fifth_column │ comment 5_2 │
└────────────────────────────┴───────────────┴─────────────┘
CREATE TABLE test.check_query_comment_column ( first_column UInt8 DEFAULT 1 COMMENT \'comment 1_2\', second_column UInt8 MATERIALIZED first_column COMMENT \'comment 2_2\', third_column UInt8 ALIAS second_column COMMENT \'comment 3_2\', fourth_column UInt8 COMMENT \'comment 4_2\', fifth_column UInt8 COMMENT \'comment 5_2\') ENGINE = TinyLog
CREATE TABLE test.check_query_comment_column ( first_column UInt8 COMMENT \'comment 1\', second_column UInt8 COMMENT \'comment 2\', third_column UInt8 COMMENT \'comment 3\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
CREATE TABLE test.check_query_comment_column (`first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_2\', `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_2\', `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_2\', `fourth_column` UInt8 COMMENT \'comment 4_2\', `fifth_column` UInt8 COMMENT \'comment 5_2\') ENGINE = TinyLog
CREATE TABLE test.check_query_comment_column (`first_column` UInt8 COMMENT \'comment 1\', `second_column` UInt8 COMMENT \'comment 2\', `third_column` UInt8 COMMENT \'comment 3\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
first_column UInt8 comment 1
second_column UInt8 comment 2
third_column UInt8 comment 3
@ -29,8 +29,8 @@ third_column UInt8 comment 3
│ check_query_comment_column │ second_column │ comment 2 │
│ check_query_comment_column │ third_column │ comment 3 │
└────────────────────────────┴───────────────┴───────────┘
CREATE TABLE test.check_query_comment_column ( first_column UInt8 COMMENT \'comment 1_2\', second_column UInt8 COMMENT \'comment 2_2\', third_column UInt8 COMMENT \'comment 3_2\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
CREATE TABLE test.check_query_comment_column ( first_column UInt8 COMMENT \'comment 1_3\', second_column UInt8 COMMENT \'comment 2_3\', third_column UInt8 COMMENT \'comment 3_3\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
CREATE TABLE test.check_query_comment_column (`first_column` UInt8 COMMENT \'comment 1_2\', `second_column` UInt8 COMMENT \'comment 2_2\', `third_column` UInt8 COMMENT \'comment 3_2\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
CREATE TABLE test.check_query_comment_column (`first_column` UInt8 COMMENT \'comment 1_3\', `second_column` UInt8 COMMENT \'comment 2_3\', `third_column` UInt8 COMMENT \'comment 3_3\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
┌─table──────────────────────┬─name──────────┬─comment─────┐
│ check_query_comment_column │ first_column │ comment 1_3 │
│ check_query_comment_column │ second_column │ comment 2_3 │
@ -1,4 +1,4 @@
CREATE TABLE test.ipv4_test ( ipv4_ IPv4) ENGINE = Memory
CREATE TABLE test.ipv4_test (`ipv4_` IPv4) ENGINE = Memory
0.0.0.0 00
8.8.8.8 08080808
127.0.0.1 7F000001
@ -10,7 +10,7 @@ CREATE TABLE test.ipv4_test ( ipv4_ IPv4) ENGINE = Memory
> 127.0.0.1 255.255.255.255
= 127.0.0.1 127.0.0.1
euqality of IPv4-mapped IPv6 value and IPv4 promoted to IPv6 with function: 1
CREATE TABLE test.ipv6_test ( ipv6_ IPv6) ENGINE = Memory
CREATE TABLE test.ipv6_test (`ipv6_` IPv6) ENGINE = Memory
:: 00000000000000000000000000000000
:: 00000000000000000000000000000000
::ffff:8.8.8.8 00000000000000000000FFFF08080808
@ -1,4 +1,4 @@
CREATE MATERIALIZED VIEW test.t_mv ( date Date, platform Enum8('a' = 0, 'b' = 1), app Enum8('a' = 0, 'b' = 1)) ENGINE = MergeTree ORDER BY date SETTINGS index_granularity = 8192 AS SELECT date, platform, app FROM test.t WHERE (app = (SELECT min(app) FROM test.u )) AND (platform = (SELECT (SELECT min(platform) FROM test.v )))
CREATE MATERIALIZED VIEW test.t_mv (`date` Date, `platform` Enum8('a' = 0, 'b' = 1), `app` Enum8('a' = 0, 'b' = 1)) ENGINE = MergeTree ORDER BY date SETTINGS index_granularity = 8192 AS SELECT date, platform, app FROM test.t WHERE (app = (SELECT min(app) FROM test.u )) AND (platform = (SELECT (SELECT min(platform) FROM test.v )))
2000-01-01 a a
2000-01-02 b b
2000-01-03 a a
@ -1,6 +1,6 @@
CREATE TABLE test.check_comments ( column_name1 UInt8 DEFAULT 1 COMMENT \'comment\', column_name2 UInt8 COMMENT \'non default comment\') ENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\') ORDER BY column_name1 SETTINGS index_granularity = 8192
CREATE TABLE test.check_comments (`column_name1` UInt8 DEFAULT 1 COMMENT \'comment\', `column_name2` UInt8 COMMENT \'non default comment\') ENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\') ORDER BY column_name1 SETTINGS index_granularity = 8192
column_name1 UInt8 DEFAULT 1 comment
column_name2 UInt8 non default comment
CREATE TABLE test.check_comments ( column_name1 UInt8 DEFAULT 1 COMMENT \'another comment\', column_name2 UInt8 COMMENT \'non default comment\') ENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\') ORDER BY column_name1 SETTINGS index_granularity = 8192
CREATE TABLE test.check_comments (`column_name1` UInt8 DEFAULT 1 COMMENT \'another comment\', `column_name2` UInt8 COMMENT \'non default comment\') ENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\') ORDER BY column_name1 SETTINGS index_granularity = 8192
column_name1 UInt8 DEFAULT 1 another comment
column_name2 UInt8 non default comment
@ -9,4 +9,4 @@
1 2 1 30
1 2 4 90
*** Check SHOW CREATE TABLE ***
CREATE TABLE test.summing ( x UInt32, y UInt32, z UInt32, val UInt32) ENGINE = SummingMergeTree PRIMARY KEY (x, y) ORDER BY (x, y, -z) SETTINGS index_granularity = 8192
CREATE TABLE test.summing (`x` UInt32, `y` UInt32, `z` UInt32, `val` UInt32) ENGINE = SummingMergeTree PRIMARY KEY (x, y) ORDER BY (x, y, -z) SETTINGS index_granularity = 8192
@ -9,6 +9,6 @@
1 2 1 30
1 2 4 90
*** Check SHOW CREATE TABLE ***
CREATE TABLE test.summing_r2 ( x UInt32, y UInt32, z UInt32, val UInt32) ENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\') PRIMARY KEY (x, y) ORDER BY (x, y, -z) SETTINGS index_granularity = 8192
CREATE TABLE test.summing_r2 (`x` UInt32, `y` UInt32, `z` UInt32, `val` UInt32) ENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\') PRIMARY KEY (x, y) ORDER BY (x, y, -z) SETTINGS index_granularity = 8192
*** Check SHOW CREATE TABLE after offline ALTER ***
CREATE TABLE test.summing_r2 ( x UInt32, y UInt32, z UInt32, t UInt32, val UInt32) ENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\') PRIMARY KEY (x, y) ORDER BY (x, y, t * t) SETTINGS index_granularity = 8192
CREATE TABLE test.summing_r2 (`x` UInt32, `y` UInt32, `z` UInt32, `t` UInt32, `val` UInt32) ENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\') PRIMARY KEY (x, y) ORDER BY (x, y, t * t) SETTINGS index_granularity = 8192
@ -9,10 +9,10 @@
10003
274972506.6
9175437371954010821
CREATE TABLE test.compression_codec_multiple_more_types ( id Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), data FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192
CREATE TABLE test.compression_codec_multiple_more_types (`id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192
1.5555555555555 hello world! [77] ['John']
7.1000000000000 xxxxxxxxxxxx [127] ['Henry']
!
222
!ZSTD
CREATE TABLE test.test_default_delta ( id UInt64 CODEC(Delta(8)), data String CODEC(Delta(1)), somedate Date CODEC(Delta(2)), somenum Float64 CODEC(Delta(8)), somestr FixedString(3) CODEC(Delta(1)), othernum Int64 CODEC(Delta(8)), yetothernum Float32 CODEC(Delta(4)), `ddd.age` Array(UInt8) CODEC(Delta(1)), `ddd.Name` Array(String) CODEC(Delta(1)), `ddd.OName` Array(String) CODEC(Delta(1)), `ddd.BName` Array(String) CODEC(Delta(1))) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192
CREATE TABLE test.test_default_delta (`id` UInt64 CODEC(Delta(8)), `data` String CODEC(Delta(1)), `somedate` Date CODEC(Delta(2)), `somenum` Float64 CODEC(Delta(8)), `somestr` FixedString(3) CODEC(Delta(1)), `othernum` Int64 CODEC(Delta(8)), `yetothernum` Float32 CODEC(Delta(4)), `ddd.age` Array(UInt8) CODEC(Delta(1)), `ddd.Name` Array(String) CODEC(Delta(1)), `ddd.OName` Array(String) CODEC(Delta(1)), `ddd.BName` Array(String) CODEC(Delta(1))) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192
@ -1,9 +1,9 @@
CREATE TABLE test.compression_codec_log ( id UInt64 CODEC(LZ4), data String CODEC(ZSTD(1)), ddd Date CODEC(NONE), somenum Float64 CODEC(ZSTD(2)), somestr FixedString(3) CODEC(LZ4HC(7)), othernum Int64 CODEC(Delta(8))) ENGINE = Log()
CREATE TABLE test.compression_codec_log (`id` UInt64 CODEC(LZ4), `data` String CODEC(ZSTD(1)), `ddd` Date CODEC(NONE), `somenum` Float64 CODEC(ZSTD(2)), `somestr` FixedString(3) CODEC(LZ4HC(7)), `othernum` Int64 CODEC(Delta(8))) ENGINE = Log()
1 hello 2018-12-14 1.1 aaa 5
2 world 2018-12-15 2.2 bbb 6
3 ! 2018-12-16 3.3 ccc 7
2
CREATE TABLE test.compression_codec_multiple_log ( id UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))) ENGINE = Log()
CREATE TABLE test.compression_codec_multiple_log (`id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))) ENGINE = Log()
1 world 2018-10-05 1.1
2 hello 2018-10-01 2.2
3 buy 2018-10-11 3.3
@ -11,12 +11,12 @@ CREATE TABLE test.compression_codec_multiple_log ( id UInt64 CODEC(LZ4, ZSTD(1),
10003
274972506.6
9175437371954010821
CREATE TABLE test.compression_codec_tiny_log ( id UInt64 CODEC(LZ4), data String CODEC(ZSTD(1)), ddd Date CODEC(NONE), somenum Float64 CODEC(ZSTD(2)), somestr FixedString(3) CODEC(LZ4HC(7)), othernum Int64 CODEC(Delta(8))) ENGINE = TinyLog()
CREATE TABLE test.compression_codec_tiny_log (`id` UInt64 CODEC(LZ4), `data` String CODEC(ZSTD(1)), `ddd` Date CODEC(NONE), `somenum` Float64 CODEC(ZSTD(2)), `somestr` FixedString(3) CODEC(LZ4HC(7)), `othernum` Int64 CODEC(Delta(8))) ENGINE = TinyLog()
1 hello 2018-12-14 1.1 aaa 5
2 world 2018-12-15 2.2 bbb 6
3 ! 2018-12-16 3.3 ccc 7
2
CREATE TABLE test.compression_codec_multiple_tiny_log ( id UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))) ENGINE = TinyLog()
CREATE TABLE test.compression_codec_multiple_tiny_log (`id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))) ENGINE = TinyLog()
1 world 2018-10-05 1.1
2 hello 2018-10-01 2.2
3 buy 2018-10-11 3.3
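For context, the CODEC clause exercised by these tests attaches a per-column compression pipeline, applied left to right. A minimal sketch (table and column names are illustrative, assuming a server new enough to accept the CODEC syntax):

CREATE TABLE codec_sketch (ts UInt64 CODEC(Delta(8), LZ4), payload String CODEC(ZSTD(1))) ENGINE = TinyLog();
INSERT INTO codec_sketch VALUES (1, 'hello'), (2, 'world');
SELECT * FROM codec_sketch;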
@ -16,7 +16,7 @@ $CLICKHOUSE_CLIENT -q "select getColumnStructure('abc');" 2>&1 | grep "Maybe you
$CLICKHOUSE_CLIENT -q "select gutColumnStructure('abc');" 2>&1 | grep "Maybe you meant: \['dumpColumnStructure'\]" &>/dev/null;
$CLICKHOUSE_CLIENT -q "select gupColumnStructure('abc');" 2>&1 | grep "Maybe you meant: \['dumpColumnStructure'\]" &>/dev/null;
$CLICKHOUSE_CLIENT -q "select provideColumnStructure('abc');" 2>&1 | grep "Maybe you meant: \['dumpColumnStructure'\]" &>/dev/null;
$CLICKHOUSE_CLIENT -q "select multiposicionutf7('abc');" 2>&1 | grep "Maybe you meant: \['multiPositionUTF8','multiPosition'\]" &>/dev/null;
$CLICKHOUSE_CLIENT -q "select multiposicionutf7casesensitive('abc');" 2>&1 | grep "Maybe you meant: \['multiPositionCaseInsensitive'\]" &>/dev/null;
$CLICKHOUSE_CLIENT -q "select multiposicionutf7sensitive('abc');" 2>&1 | grep "Maybe you meant: \['multiPositionCaseInsensitive'\]" &>/dev/null;
$CLICKHOUSE_CLIENT -q "select multiPosicionSensitiveUTF8('abc');" 2>&1 | grep "Maybe you meant: \['multiPositionCaseInsensitiveUTF8'\]" &>/dev/null;
$CLICKHOUSE_CLIENT -q "select multisearchallposicionutf7('abc');" 2>&1 | grep "Maybe you meant: \['multiSearchAllPositionsUTF8','multiSearchAllPositions'\]" &>/dev/null;
$CLICKHOUSE_CLIENT -q "select multisearchallposicionutf7casesensitive('abc');" 2>&1 | grep "Maybe you meant: \['multiSearchAllPositionsCaseInsensitive','multiSearchAllPositionsCaseInsensitiveUTF8'\]." &>/dev/null;
$CLICKHOUSE_CLIENT -q "select multiSearchAllposicionutf7sensitive('abc');" 2>&1 | grep "Maybe you meant: \['multiSearchAllPositionsCaseInsensitive','multiSearchAnyCaseInsensitive'\]." &>/dev/null;
$CLICKHOUSE_CLIENT -q "select multiSearchAllPosicionSensitiveUTF8('abc');" 2>&1 | grep "Maybe you meant: \['multiSearchAnyCaseInsensitiveUTF8','multiSearchAllPositionsCaseInsensitiveUTF8'\]." &>/dev/null;
@ -17,7 +17,8 @@ ${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill a single invalid mutat
${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE toUInt32(s) = 1"

sleep 0.1
${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0', '20010101_2_2_0'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation'"

${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0', '20010101_2_2_0'), latest_fail_time != 0, substr(replaceRegexpOne(latest_fail_reason, '.version [0-9.]+. ', ''), 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation'"

${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation'"

@ -29,7 +30,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill invalid mutation that
${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE toUInt32(s) = 1"
${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE x = 1"

${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0', '20010101_2_2_0'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'"
${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_1_1_0', '20010101_2_2_0'), latest_fail_time != 0, substr(replaceRegexpOne(latest_fail_reason, '.version [0-9.]+. ', ''), 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'"

sleep 0.1
${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'"
@ -20,7 +20,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill a single invalid mutat
${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE toUInt32(s) = 1"

sleep 1
${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_0_0_0', '20010101_0_0_0'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1'"
${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_0_0_0', '20010101_0_0_0'), latest_fail_time != 0, substr(replaceRegexpOne(latest_fail_reason, '.version [0-9.]+. ', ''), 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1'"

${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation_r1'"

@ -34,7 +34,7 @@ ${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE toU
${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE x = 1"

sleep 1
${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_0_0_0_1', '20010101_0_0_0_1'), latest_fail_time != 0, substr(latest_fail_reason, 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1' AND mutation_id = '0000000001'"
${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, latest_failed_part IN ('20000101_0_0_0_1', '20010101_0_0_0_1'), latest_fail_time != 0, substr(replaceRegexpOne(latest_fail_reason, '.version [0-9.]+. ', ''), 1, 8) FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1' AND mutation_id = '0000000001'"

${CLICKHOUSE_CLIENT} --query="KILL MUTATION WHERE database = 'test' AND table = 'kill_mutation_r1' AND mutation_id = '0000000001'"
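Both scripts exercise the same flow: fire a mutation that is guaranteed to fail, wait for it to register, read its failure state from system.mutations, then cancel it. A condensed sketch of that flow (the table name is illustrative):

ALTER TABLE test.some_table DELETE WHERE toUInt32(s) = 1;   -- the cast fails at execution time, so the mutation sticks
SELECT mutation_id, latest_fail_reason FROM system.mutations WHERE database = 'test' AND table = 'some_table';
KILL MUTATION WHERE database = 'test' AND table = 'some_table';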
@ -1,4 +1,4 @@
CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
1 2
1 2
1 2
@ -6,15 +6,15 @@ CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYP
1 2
1 2
1 2
CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
1 2
1 2
1 2
1 2
1 2
1 2
CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
1 2
1 2
1 2
@ -23,6 +23,6 @@ CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYP
1 2
1 2
1 2
CREATE TABLE test.minmax_idx2 ( u64 UInt64, i32 Int32) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx2 (`u64` UInt64, `i32` Int32) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
1 2
1 2
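The SHOW CREATE lines above track data-skipping indexes as they are added and dropped. A minimal sketch of the DDL involved (names are illustrative; at the time of this merge the feature sat behind an experimental setting):

SET allow_experimental_data_skipping_indices = 1;
CREATE TABLE idx_sketch (u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64;
ALTER TABLE idx_sketch DROP INDEX idx1;
SHOW CREATE TABLE idx_sketch;   -- the INDEX clause is gone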
@ -1,5 +1,5 @@
CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx_r ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
1 2
1 2
1 2
@ -14,8 +14,8 @@ CREATE TABLE test.minmax_idx_r ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 T
3 2
19 9
65 75
CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx_r ( u64 UInt64, i32 Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
1 2
1 4
1 5
@ -28,10 +28,10 @@ CREATE TABLE test.minmax_idx_r ( u64 UInt64, i32 Int32, INDEX idx3 u64 - i32 T
3 2
19 9
65 75
CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx_r ( u64 UInt64, i32 Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx_r ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
1 2
1 4
1 5
@ -44,14 +44,14 @@ CREATE TABLE test.minmax_idx_r ( u64 UInt64, i32 Int32, INDEX idx1 u64 * i32 T
3 2
19 9
65 75
CREATE TABLE test.minmax_idx2 ( u64 UInt64, i32 Int32, INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx2_r ( u64 UInt64, i32 Int32, INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx2 (`u64` UInt64, `i32` Int32, INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx2_r (`u64` UInt64, `i32` Int32, INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
1 2
1 3
1 2
1 3
CREATE TABLE test.minmax_idx2 ( u64 UInt64, i32 Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx2_r ( u64 UInt64, i32 Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx2 (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
CREATE TABLE test.minmax_idx2_r (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
1 2
1 3
1 2
@ -0,0 +1,44 @@
on
l \N \N String Nullable(String)
l \N \N String Nullable(String)
r \N String Nullable(String)
\N r \N Nullable(String) Nullable(String)
l \N String Nullable(String)
l \N String Nullable(String)
r \N String Nullable(String)
\N r \N Nullable(String) Nullable(String)
\N \N
0 \N
using
l \N String Nullable(String)
l \N String Nullable(String)
\N String Nullable(String)
\N \N Nullable(String) Nullable(String)
l \N String Nullable(String)
l \N String Nullable(String)
\N String Nullable(String)
\N \N Nullable(String) Nullable(String)
\N \N
0 \N
on + join_use_nulls
l \N \N TODO Nullable(String)
l \N \N TODO Nullable(String)
r \N TODO Nullable(String)
\N r \N Nullable(String) Nullable(String)
l \N TODO Nullable(String)
l \N TODO Nullable(String)
r \N TODO Nullable(String)
\N r \N Nullable(String) Nullable(String)
\N \N
0 \N
using + join_use_nulls
l \N TODO Nullable(String)
l \N TODO Nullable(String)
\N TODO Nullable(String)
\N \N Nullable(String) Nullable(String)
l \N TODO Nullable(String)
l \N TODO Nullable(String)
\N TODO Nullable(String)
\N \N Nullable(String) Nullable(String)
\N \N
0 \N
@ -0,0 +1,69 @@
USE test;

DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
DROP TABLE IF EXISTS t3;
CREATE TABLE t1 ( id String ) ENGINE = Memory;
CREATE TABLE t2 ( id Nullable(String) ) ENGINE = Memory;
CREATE TABLE t3 ( id Nullable(String), not_id Nullable(String) ) ENGINE = Memory;

insert into t1 values ('l');
insert into t3 (id) values ('r');

SELECT 'on';

SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 ANY LEFT JOIN t3 ON t1.id = t3.id;
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 ANY FULL JOIN t3 ON t1.id = t3.id;
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 ANY FULL JOIN t3 ON t2.id = t3.id;

SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 LEFT JOIN t3 ON t1.id = t3.id;
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 FULL JOIN t3 ON t1.id = t3.id;
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 FULL JOIN t3 ON t2.id = t3.id;

SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 ANY LEFT JOIN t3 ON t1.id = t3.id;
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 LEFT JOIN t3 ON t1.id = t3.id;

SELECT 'using';

SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 ANY LEFT JOIN t3 USING(id);
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 ANY FULL JOIN t3 USING(id);
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 ANY FULL JOIN t3 USING(id);

SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 LEFT JOIN t3 USING(id);
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1 FULL JOIN t3 USING(id);
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 FULL JOIN t3 USING(id);

SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 ANY LEFT JOIN t3 USING(id);
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 LEFT JOIN t3 USING(id);

SET join_use_nulls = 1;
-- TODO: toTypeName(t1.id) String -> Nullable(String)

SELECT 'on + join_use_nulls';

SELECT *, 'TODO', toTypeName(t3.id) FROM t1 ANY LEFT JOIN t3 ON t1.id = t3.id;
SELECT *, 'TODO', toTypeName(t3.id) FROM t1 ANY FULL JOIN t3 ON t1.id = t3.id;
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 ANY FULL JOIN t3 ON t2.id = t3.id;

SELECT *, 'TODO', toTypeName(t3.id) FROM t1 LEFT JOIN t3 ON t1.id = t3.id;
SELECT *, 'TODO', toTypeName(t3.id) FROM t1 FULL JOIN t3 ON t1.id = t3.id;
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 FULL JOIN t3 ON t2.id = t3.id;

SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 ANY LEFT JOIN t3 ON t1.id = t3.id;
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 LEFT JOIN t3 ON t1.id = t3.id;

SELECT 'using + join_use_nulls';

SELECT *, 'TODO', toTypeName(t3.id) FROM t1 ANY LEFT JOIN t3 USING(id);
SELECT *, 'TODO', toTypeName(t3.id) FROM t1 ANY FULL JOIN t3 USING(id);
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 ANY FULL JOIN t3 USING(id);

SELECT *, 'TODO', toTypeName(t3.id) FROM t1 LEFT JOIN t3 USING(id);
SELECT *, 'TODO', toTypeName(t3.id) FROM t1 FULL JOIN t3 USING(id);
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2 FULL JOIN t3 USING(id);

SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 ANY LEFT JOIN t3 USING(id);
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1 LEFT JOIN t3 USING(id);

DROP TABLE t1;
DROP TABLE t2;
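The literal 'TODO' columns in the reference above correspond to these SELECTs: with join_use_nulls = 1 the left-side key of an outer join should eventually report Nullable(String) as well, but for now only the right-side type is pinned. The deferred check, sketched on the test's own tables:

SET join_use_nulls = 1;
SELECT toTypeName(t1.id), toTypeName(t3.id) FROM t1 ANY FULL JOIN t3 ON t1.id = t3.id;   -- currently String, Nullable(String)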
@ -1,2 +1,7 @@
1
0
0
0 0
0
0 0
0 0
@ -35,49 +35,49 @@ GLOBAL INNER JOIN
) USING dummy;


-- SET asterisk_left_columns_only = 0;
--
-- SELECT * FROM remote('127.0.0.2', system.one)
-- GLOBAL INNER JOIN
-- (
-- SELECT *, dummy
-- FROM ( SELECT dummy FROM remote('127.0.0.2', system.one) ) t1
-- GLOBAL INNER JOIN ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t2
-- USING dummy
-- ) USING dummy;
--
-- SELECT * FROM remote('127.0.0.2', system.one)
-- GLOBAL INNER JOIN
-- (
-- SELECT *, t1.*, t2.*
-- FROM ( SELECT toUInt8(1) AS dummy ) t1
-- INNER JOIN ( SELECT toUInt8(1) AS dummy ) t2
-- USING dummy
-- ) USING dummy;
--
-- SELECT * FROM remote('127.0.0.2', system.one)
-- GLOBAL INNER JOIN
-- (
-- SELECT *, dummy
-- FROM ( SELECT toUInt8(1) AS dummy ) t1
-- INNER JOIN ( SELECT toUInt8(1) AS dummy ) t2
-- USING dummy
-- ) USING dummy;
--
-- SELECT * FROM remote('127.0.0.2', system.one)
-- GLOBAL INNER JOIN
-- (
-- SELECT *
-- FROM ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t1
-- GLOBAL INNER JOIN ( SELECT toUInt8(1) AS dummy ) t2
-- USING dummy
-- ) USING dummy;
--
-- SELECT * FROM remote('127.0.0.2', system.one)
-- GLOBAL INNER JOIN
-- (
-- SELECT *
-- FROM ( SELECT toUInt8(1) AS dummy ) t1
-- GLOBAL INNER JOIN ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t2
-- USING dummy
-- ) USING dummy;
SET asterisk_left_columns_only = 0;

SELECT * FROM remote('127.0.0.2', system.one)
GLOBAL INNER JOIN
(
SELECT *, dummy
FROM ( SELECT dummy FROM remote('127.0.0.2', system.one) ) t1
GLOBAL INNER JOIN ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t2
USING dummy
) USING dummy;

SELECT * FROM remote('127.0.0.2', system.one)
GLOBAL INNER JOIN
(
SELECT *, t1.*, t2.*
FROM ( SELECT toUInt8(0) AS dummy ) t1
INNER JOIN ( SELECT toUInt8(0) AS dummy ) t2
USING dummy
) USING dummy;

SELECT * FROM remote('127.0.0.2', system.one)
GLOBAL INNER JOIN
(
SELECT *, dummy
FROM ( SELECT toUInt8(0) AS dummy ) t1
INNER JOIN ( SELECT toUInt8(0) AS dummy ) t2
USING dummy
) USING dummy;

SELECT * FROM remote('127.0.0.2', system.one)
GLOBAL INNER JOIN
(
SELECT *, dummy as other
FROM ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t1
GLOBAL INNER JOIN ( SELECT toUInt8(0) AS dummy ) t2
USING dummy
) USING dummy;

SELECT * FROM remote('127.0.0.2', system.one)
GLOBAL INNER JOIN
(
SELECT *, dummy, dummy as other
FROM ( SELECT toUInt8(0) AS dummy ) t1
GLOBAL INNER JOIN ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t2
USING dummy
) USING dummy;
@ -0,0 +1,2 @@
1 0
\N 1
19 dbms/tests/queries/0_stateless/00852_any_join_nulls.sql Normal file
@ -0,0 +1,19 @@
USE test;

DROP TABLE IF EXISTS table1;
DROP TABLE IF EXISTS table2;
CREATE TABLE table1 ( id String ) ENGINE = Log;
CREATE TABLE table2 ( parent_id String ) ENGINE = Log;

insert into table1 values ('1');

SELECT table2.parent_id = '', isNull(table2.parent_id)
FROM table1 ANY LEFT JOIN table2 ON table1.id = table2.parent_id;

SET join_use_nulls = 1;

SELECT table2.parent_id = '', isNull(table2.parent_id)
FROM table1 ANY LEFT JOIN table2 ON table1.id = table2.parent_id;

DROP TABLE test.table1;
DROP TABLE test.table2;
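The two rows in the matching reference file capture the switch this test pins down: with the default join_use_nulls = 0 the unmatched right-side String key materialises as the empty string (hence 1 0), while with join_use_nulls = 1 the same cell is NULL, and comparing NULL with '' yields NULL (hence \N 1). The same effect in isolation:

SELECT '' = '', isNull('');        -- 1  0
SELECT NULL = '', isNull(NULL);    -- \N  1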
@ -1,4 +1,3 @@
CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS test.defaults;
CREATE TABLE IF NOT EXISTS test.defaults
(
@ -9,7 +8,6 @@ insert into test.defaults values ('ba'), ('aa'), ('ba'), ('b'), ('ba'), ('aa');
select val < 1.5 and val > 1.459 from (select entropy(vals) as val from test.defaults);


CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS test.defaults;
CREATE TABLE IF NOT EXISTS test.defaults
(
@ -19,7 +17,6 @@ insert into test.defaults values (0), (0), (1), (0), (0), (0), (1), (2), (3), (5
select val < 2.4 and val > 2.3393 from (select entropy(vals) as val from test.defaults);


CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS test.defaults;
CREATE TABLE IF NOT EXISTS test.defaults
(
@ -29,7 +26,6 @@ insert into test.defaults values (0), (0), (1), (0), (0), (0), (1), (2), (3), (5
select val < 2.4 and val > 2.3393 from (select entropy(vals) as val from test.defaults);


CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS test.defaults;
CREATE TABLE IF NOT EXISTS test.defaults
(
@ -39,7 +35,6 @@ insert into test.defaults values (0), (0), (-1), (0), (0), (0), (-1), (2), (3),
select val < 2.4 and val > 2.3393 from (select entropy(vals) as val from test.defaults);


CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS test.defaults;
CREATE TABLE IF NOT EXISTS test.defaults
(
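The bounds in the first assertion are just the Shannon entropy of the inserted sample: the values 'ba', 'aa', 'b' occur with frequencies 3/6, 2/6 and 1/6, so entropy(vals) should be -(1/2)*log2(1/2) - (1/3)*log2(1/3) - (1/6)*log2(1/6) ≈ 0.5 + 0.5283 + 0.4308 ≈ 1.4591 bits, which the test brackets as 1.459 < val < 1.5. The value can be reproduced directly:

SELECT entropy(x) FROM (SELECT arrayJoin(['ba', 'aa', 'ba', 'b', 'ba', 'aa']) AS x);   -- ≈ 1.4591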
@ -0,0 +1 @@
1
7 dbms/tests/queries/0_stateless/00910_buffer_prewhere.sql Normal file
@ -0,0 +1,7 @@
DROP DATABASE IF EXISTS test_buffer;
CREATE DATABASE test_buffer;
CREATE TABLE test_buffer.mt (uid UInt64, ts DateTime, val Float64) ENGINE = MergeTree PARTITION BY toDate(ts) ORDER BY (uid, ts);
CREATE TABLE test_buffer.buf as test_buffer.mt ENGINE = Buffer(test_buffer, mt, 2, 10, 60, 10000, 100000, 1000000, 10000000);
INSERT INTO test_buffer.buf VALUES (1, '2019-03-01 10:00:00', 0.5), (2, '2019-03-02 10:00:00', 0.15), (1, '2019-03-03 10:00:00', 0.25);
SELECT count() from test_buffer.buf prewhere ts > toDateTime('2019-03-01 12:00:00') and ts < toDateTime('2019-03-02 12:00:00');
DROP DATABASE test_buffer;
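For reference, the Buffer engine arguments here read Buffer(database, table, num_layers, min_time, max_time, min_rows, max_rows, min_bytes, max_bytes): writes sit in memory and are flushed to test_buffer.mt once all minimum thresholds or any maximum threshold is reached. The point of the test is that PREWHERE must also filter rows still held in the buffer, not only those already flushed; the shape of the check:

SELECT count() FROM test_buffer.buf PREWHERE ts > toDateTime('2019-03-01 12:00:00');   -- buffered rows count too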
@ -0,0 +1,7 @@
DROP TABLE IF EXISTS test.union1;
DROP TABLE IF EXISTS test.union2;
CREATE TABLE test.union1 ( date Date, a Int32, b Int32, c Int32, d Int32) ENGINE = MergeTree(date, (a, date), 8192);
CREATE TABLE test.union2 ( date Date, a Int32, b Int32, c Int32, d Int32) ENGINE = Distributed(test_shard_localhost, 'test', 'union1');
ALTER TABLE test.union2 MODIFY ORDER BY a; -- { serverError 48 }
DROP TABLE test.union1;
DROP TABLE test.union2;
@ -20,7 +20,7 @@
274972506.6
9175437371954010821
9175437371954010821
CREATE TABLE test.compression_codec_multiple_more_types_replicated ( id Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), data FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/compression_codec_multiple_more_types_replicated\', \'1\') ORDER BY tuple() SETTINGS index_granularity = 8192
CREATE TABLE test.compression_codec_multiple_more_types_replicated (`id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/compression_codec_multiple_more_types_replicated\', \'1\') ORDER BY tuple() SETTINGS index_granularity = 8192
1.5555555555555 hello world! [77] ['John']
7.1000000000000 xxxxxxxxxxxx [127] ['Henry']
!
@ -1,6 +1,7 @@
SET send_logs_level = 'none';

DROP TABLE IF EXISTS test.compression_codec_replicated;
DROP TABLE IF EXISTS test.compression_codec_replicated1;
DROP TABLE IF EXISTS test.compression_codec_replicated2;

CREATE TABLE test.compression_codec_replicated1(
id UInt64 CODEC(LZ4),
@ -1,2 +1,2 @@
CREATE VIEW test.t ( number UInt64) AS SELECT number FROM system.numbers
CREATE VIEW test.t ( next_number UInt64) AS SELECT number + 1 AS next_number FROM system.numbers
CREATE VIEW test.t (`number` UInt64) AS SELECT number FROM system.numbers
CREATE VIEW test.t (`next_number` UInt64) AS SELECT number + 1 AS next_number FROM system.numbers
@ -3,3 +3,18 @@
1
1
1 1
1 0
1 0
1 0
1 1
1 0
1 0
1 0
1 1
0 2
0 2
0 2
1 1
0 2
0 2
0 2
@ -5,4 +5,25 @@ SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 1 AS x) AS t2 USING x;
SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 2 AS x) AS t2 USING x;

SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 1 AS x) AS t2 ON t1.x = t2.x;
-- (bug) SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x;
SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x;
SELECT * FROM (SELECT materialize(1) AS x) AS t1 ALL LEFT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x;
SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT materialize(2) AS x) AS t2 ON t1.x = t2.x;

SELECT * FROM (SELECT 1 AS x) AS t1 ANY LEFT JOIN (SELECT 1 AS x) AS t2 ON t1.x = t2.x;
SELECT * FROM (SELECT 1 AS x) AS t1 ANY LEFT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x;
SELECT * FROM (SELECT materialize(1) AS x) AS t1 ANY LEFT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x;
SELECT * FROM (SELECT 1 AS x) AS t1 ANY LEFT JOIN (SELECT materialize(2) AS x) AS t2 ON t1.x = t2.x;

SELECT * FROM (SELECT 1 AS x) AS t1 ALL RIGHT JOIN (SELECT 1 AS x) AS t2 ON t1.x = t2.x;
SELECT * FROM (SELECT 1 AS x) AS t1 ALL RIGHT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x;
SELECT * FROM (SELECT materialize(1) AS x) AS t1 ALL RIGHT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x;
SELECT * FROM (SELECT 1 AS x) AS t1 ALL RIGHT JOIN (SELECT materialize(2) AS x) AS t2 ON t1.x = t2.x;

SELECT * FROM (SELECT 1 AS x) AS t1 ANY RIGHT JOIN (SELECT 1 AS x) AS t2 ON t1.x = t2.x;
SELECT * FROM (SELECT 1 AS x) AS t1 ANY RIGHT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x;
SELECT * FROM (SELECT materialize(1) AS x) AS t1 ANY RIGHT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x;
SELECT * FROM (SELECT 1 AS x) AS t1 ANY RIGHT JOIN (SELECT materialize(2) AS x) AS t2 ON t1.x = t2.x;

-- SET join_use_nulls = 1;
-- SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x;
-- SELECT * FROM (SELECT 1 AS x) AS t1 ALL RIGHT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x;
601 dbms/tests/queries/0_stateless/00926_multimatch.reference Normal file
@ -0,0 +1,601 @@
(601 lines, each containing `1`)
81 dbms/tests/queries/0_stateless/00926_multimatch.sql Normal file
@ -0,0 +1,81 @@
|
||||
select 0 = multiMatchAny(materialize('mpnsguhwsitzvuleiwebwjfitmsg'), ['wbirxqoabpblrnvvmjizj', 'cfcxhuvrexyzyjsh', 'oldhtubemyuqlqbwvwwkwin', 'bumoozxdkjglzu', 'intxlfohlxmajjomw', 'dxkeghohv', 'arsvmwwkjeopnlwnan', 'ouugllgowpqtaxslcopkytbfhifaxbgt', 'hkedmjlbcrzvryaopjqdjjc', 'tbqkljywstuahzh', 'o', 'wowoclosyfcuwotmvjygzuzhrery', 'vpefjiffkhlggntcu', 'ytdixvasrorhripzfhjdmlhqksmctyycwp']) from system.numbers limit 10;
|
||||
select 0 = multiMatchAny(materialize('qjjzqexjpgkglgxpzrbqbnskq'), ['vaiatcjacmlffdzsejpdareqzy', 'xspcfzdufkmecud', 'bcvtbuqtctq', 'nkcopwbfytgemkqcfnnno', 'dylxnzuyhq', 'tno', 'scukuhufly', 'cdyquzuqlptv', 'ohluyfeksyxepezdhqmtfmgkvzsyph', 'ualzwtahvqvtijwp', 'jg', 'gwbawqlngzcknzgtmlj', 'qimvjcgbkkp', 'eaedbcgyrdvv', 'qcwrncjoewwedyyewcdkh', 'uqcvhngoqngmitjfxpznqomertqnqcveoqk', 'ydrgjiankgygpm', 'axepgap']) from system.numbers limit 10;
|
||||
select 0 = multiMatchAny(materialize('fdkmtqmxnegwvnjhghjq'), ['vynkybvdmhgeezybbdqfrukibisj', 'knazzamgjjpavwhvdkwigykh', 'peumnifrmdhhmrqqnemw', 'lmsnyvqoisinlaqobxojlwfbi', 'oqwfzs', 'dymudxxeodwjpgbibnkvr', 'vomtfsnizkplgzktqyoiw', 'yoyfuhlpgrzds', 'cefao', 'gi', 'srpgxfjwl', 'etsjusdeiwbfe', 'ikvtzdopxo', 'ljfkavrau', 'soqdhxtenfrkmeic', 'ktprjwfcelzbup', 'pcvuoddqwsaurcqdtjfnczekwni', 'agkqkqxkfbkfgyqliahsljim']) from system.numbers limit 10;
|
||||
select 1 = multiMatchAny(materialize('khljxzxlpcrxpkrfybbfk'), ['', 'lpc', 'rxpkrfybb', 'crxp', '', 'pkr', 'jxzxlpcrxpkrf', '', 'xzxlpcr', 'xpk', 'fyb', 'xzxlpcrxpkrfybbfk', 'k', 'lpcrxp', 'ljxzxlpcr', 'r', 'pkr', 'fk']) from system.numbers limit 10;
|
||||
select 0 = multiMatchAny(materialize('rbrizgjbigvzfnpgmpkqxoqxvdj'), ['ee', 'cohqnb', 'msol', 'yhlujcvhklnhuomy', 'ietn', 'vgmnlkcsybtokrepzrm', 'wspiryefojxysgrzsxyrluykxfnnbzdstcel', 'mxisnsivndbefqxwznimwgazuulupbaihavg', 'vpzdjvqqeizascxmzdhuq', 'pgvncohlxcqjhfkm', 'mbaypcnfapltsegquurahlsruqvipfhrhq', 'ioxjbcyyqujfveujfhnfdfokfcrlsincjbdt', 'cnvlujyowompdrqjwjx', 'wobwed', 'kdfhaoxiuifotmptcmdbk', 'leoamsnorcvtlmokdomkzuo', 'jjw', 'ogugysetxuqmvggneosbsfbonszepsatq']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('uymwxzyjbfegbhgswiqhinf'), ['lizxzbzlwljkr', 'ukxygktlpzuyijcqeqktxenlaqi', 'onperabgbdiafsxwbvpjtyt', 'xfqgoqvhqph', 'aflmcwabtwgmajmmqelxwkaolyyhmdlc', 'yfz', 'meffuiaicvwed', 'hhzvgmifzamgftkifaeowayjrnnzw', 'nwewybtajv', 'ectiye', 'epjeiljegmqqjncubj', 'zsjgftqjrn', 'pssng', 'raqoarfhdoeujulvqmdo']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('omgghgnzjmecpzqmtcvw'), ['fjhlzbszodmzavzg', 'gfofrnwrxprkfiokv', 'jmjiiqpgznlmyrxwewzqzbe', 'pkyrsqkltlmxr', 'crqgkgqkkyujcyoc', 'endagbcxwqhueczuasykmajfsvtcmh', 'xytmxtrnkdysuwltqomehddp', 'etmdxyyfotfyifwvbykghijvwv', 'mwqtgrncyhkfhjdg', 'iuvymofrqpp', 'pgllsdanlhzqhkstwsmzzftp', 'disjylcceufxtjdvhy']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('mznihnmshftvnmmhnrulizzpslq'), ['nrul', 'mshftvnmmhnr', 'z', 'mhnrulizzps', 'hftvnmmhnrul', 'ihnmshftvnmmhnrulizzp', 'izz', '', 'uli', 'nihnmshftvnmmhnru', 'hnrulizzp', 'nrulizz']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('ruqmqrsxrbftvruvahonradau'), ['uqmqrsxrbft', 'ftv', 'tvruvahonrad', 'mqrsxrbftvruvahon', 'rbftvruvah', 'qrsxrbftvru', 'o', 'ahonradau', 'a', 'ft', '', 'u', 'rsxrbftvruvahonradau', 'ruvahon', 'bftvruvahonradau', 'qrsxrbftvru', 't', 'vahonrada', 'vruvahonradau', 'onra']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('gpsevxtcoeexrltyzduyidmtzxf'), ['exrltyzduyid', 'vxtcoeexrltyz', 'xr', 'ltyzduyidmt', 'yzduy', 'exr', 'coeexrltyzduy', 'coeexrltyzduy', 'rlty', 'rltyzduyidm', 'exrltyz', 'xtcoeexrlty', 'vxtcoeexrltyzduyidm', '', 'coeexrl', 'sevxtcoeexrltyzdu', 'dmt', '']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('dyhycfhzyewaikgursyxfkuv'), ['sktnofpugrmyxmbizzrivmhn', 'fhlgadpoqcvktbfzncxbllvwutdawmw', 'eewzjpcgzrqmltbgmhafwlwqb', 'tpogbkyj', 'rtllntxjgkzs', 'mirbvsqexscnzglogigbujgdwjvcv', 'iktwpgjsakemewmahgqza', 'xgfvzkvqgiuoihjjnxwwpznxhz', 'nxaumpaknreklbwynvxdsmatjekdlxvklh', 'zadzwqhgfxqllihuudozxeixyokhny', 'tdqpgfpzexlkslodps', 'slztannufxaabqfcjyfquafgfhfb', 'xvjldhfuwurvkb', 'aecv', 'uycfsughpikqsbcmwvqygdyexkcykhbnau', 'jr']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('vbcsettndwuntnruiyclvvwoo'), ['dwuntnru', '', 'ttndwuntnruiyclvv', 'ntnr', 'nruiyclvvw', 'wo', '', 'bcsettndwuntnruiycl', 'yc', 'untnruiyclvvw', 'csettndwuntnr', 'ntnruiyclvvwo']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('pqqnugshlczcuxhpjxjbcnro'), ['dpeedqy', 'rtsc', 'jdgla', 'qkgudqjiyzvlvsj', 'xmfxawhijgxxtydbd', 'ebgzazqthb', 'wyrjhvhwzhmpybnylirrn', 'iviqbyuclayqketooztwegtkgwnsezfl', 'bhvidy', 'hijctxxweboq', 't', 'osnzfbziidteiaifgaanm']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('loqchlxspwuvvccucskuytr'), ['', 'k', 'qchlxspwu', 'u', 'hlxspwuvv', 'wuvvccucsku', 'vcc', 'uyt', 'uvv', 'spwu', 'ytr', 'wuvvccucs', 'xspwuv', 'lxspwuvvccuc', 'spwuvvccu', 'oqchlxspwuvvccucskuy']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('pjjyzupzwllshlnatiujmwvaofr'), ['lnatiujmwvao', '', 'zupzwllsh', 'nati', 'wllshl', 'hlnatiujmwv', 'mwvao', 'shlnat', 'ati', 'wllshlnatiujmwvao', 'wllshlnatiujmwvaofr', 'nat']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('iketunkleyaqaxdlocci'), ['nkleyaqaxd', 'etunkleyaq', 'yaqaxdlocci', 'tunkleyaq', 'eyaqaxdlocc', 'leyaq', 'nkleyaqaxdl', 'tunkleya', 'kleyaqa', 'etunkleya', 'leyaqa', 'dlo', 'yaqa', 'leyaqaxd', 'etunkleyaq', '']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('drqianqtangmgbdwruvblkqd'), ['wusajejyucamkyl', 'wsgibljugzrpkniliy', 'lhwqqiuafwffyersqjgjvvvfurx', 'jfokpzzxfdonelorqu', 'ccwkpcgac', 'jmyulqpndkmzbfztobwtm', 'rwrgfkccgxht', 'ggldjecrgbngkonphtcxrkcviujihidjx', 'spwweavbiokizv', 'lv', 'krb', 'vstnhvkbwlqbconaxgbfobqky', 'pvxwdc', 'thrl', 'ahsblffdveamceonqwrbeyxzccmux', 'yozji', 'oejtaxwmeovtqtz', 'zsnzznvqpxdvdxhznxrjn', 'hse', 'kcmkrccxmljzizracxwmpoaggywhdfpxkq']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('yasnpckniistxcejowfijjsvkdajz'), ['slkpxhtsmrtvtm', 'crsbq', 'rdeshtxbfrlfwpsqojassxmvlfbzefldavmgme', 'ipetilcbpsfroefkjirquciwtxhrimbmwnlyv', 'knjpwkmdwbvdbapuyqbtsw', 'horueidziztxovqhsicnklmharuxhtgrsr', 'ofohrgpz', 'oneqnwyevbaqsonrcpmxcynflojmsnix', 'shg', 'nglqzczevgevwawdfperpeytuodjlf']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('ueptpscfgxhplwsueckkxs'), ['ohhygchclbpcdwmftperprn', 'dvpjdqmqckekndvcerqrpkxen', 'lohhvarnmyi', 'zppd', 'qmqxgfewitsunbuhffozcpjtc', 'hsjbioisycsrawktqssjovkmltxodjgv', 'dbzuunwbkrtosyvctdujqtvaawfnvuq', 'gupbvpqthqxae', 'abjdmijaaiasnccgxttmqdsz', 'uccyumqoyqe', 'kxxliepyzlc', 'wbqcqtbyyjbqcgdbpkmzugksmcxhvr', 'piedxm', 'uncpphzoif', 'exkdankwck', 'qeitzozdrqopsergzr', 'hesgrhaftgesnzflrrtjdobxhbepjoas', 'wfpexx']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('ldrzgttlqaphekkkdukgngl'), ['gttlqaphekkkdukgn', 'ekkkd', 'gttlqaphe', 'qaphek', 'h', 'kdu', 'he', 'phek', '', 'drzgttlqaphekkkd']) from system.numbers limit 10;
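
-- Degenerate inputs: duplicated patterns and a single one-character pattern.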
select 1 = multiMatchAny(materialize('ololo'), ['ololo', 'ololo', 'ololo']);
SELECT 1 = multiMatchAny(materialize('khljxzxlpcrxpkrfybbfk'), ['k']);

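-- Empty haystacks and empty needles: an empty needle matches any haystack, while a non-empty needle never matches an empty haystack.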
select 1 = multiMatchAny(materialize(''), ['']);
select 0 = multiMatchAny(materialize(''), ['some string']);
select 1 = multiMatchAny(materialize('abc'), ['']);
select 1 = multiMatchAny(materialize('abc'), ['']) from system.numbers limit 10;

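-- Needles that do not occur in 'abc' at all.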
select 0 = multiMatchAny(materialize('abc'), ['defgh']);
select 0 = multiMatchAny(materialize('abc'), ['defg']);
select 0 = multiMatchAny(materialize('abc'), ['def']);
select 0 = multiMatchAny(materialize('abc'), ['de']);
select 0 = multiMatchAny(materialize('abc'), ['d']);

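-- Needles starting at the second character of 'abc'; anything running past the end must not match.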
select 1 = multiMatchAny(materialize('abc'), ['b']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('abc'), ['bc']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abc'), ['bcde']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abc'), ['bcdef']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abc'), ['bcdefg']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abc'), ['bcdefgh']) from system.numbers limit 10;

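-- Prefix needles: only those fully contained in 'abc' match.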
select 0 = multiMatchAny(materialize('abc'), ['abcdefg']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abc'), ['abcdef']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abc'), ['abcde']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abc'), ['abcd']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('abc'), ['abc']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('abc'), ['ab']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('abc'), ['a']) from system.numbers limit 10;

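-- Needles anchored in the middle of 'abcd'.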
select 1 = multiMatchAny(materialize('abcd'), ['c']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('abcd'), ['cd']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abcd'), ['cde']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abcd'), ['cdef']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abcd'), ['cdefg']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abcd'), ['cdefgh']) from system.numbers limit 10;

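-- The same absent needles, this time evaluated per row over system.numbers.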
select 0 = multiMatchAny(materialize('abc'), ['defgh']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abc'), ['defg']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abc'), ['def']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abc'), ['de']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abc'), ['d']) from system.numbers limit 10;

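-- Regular expression needles: dots, optional characters, NUL bytes and escaped parentheses.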
select 1 = multiMatchAny(materialize('abc'), ['...']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('a\nbc'), ['a?bc']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('a\nbc'), ['a.bc']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('a\0bc'), ['a?bc']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('a\0bc'), ['a.bc']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('abcdef'), ['a.....']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('abcdef'), ['a......']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('abcdef'), ['a......', 'a.....']) from system.numbers limit 10;
select 0 = multiMatchAny(materialize('aaaa'), ['.*aa.*aaa.*', 'aaaaaa{2}', '\(aa\){3}']) from system.numbers limit 10;
select 1 = multiMatchAny(materialize('abc'), ['a\0d']) from system.numbers limit 10;

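-- multiMatchAnyIndex returns the 1-based index of a pattern that matched, or 0 when nothing matches.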
select 1 = multiMatchAnyIndex(materialize('gogleuedeyandexgoogle'), ['google', 'yandex1']) from system.numbers limit 10;
select 2 = multiMatchAnyIndex(materialize('gogleuedeyandexgoogle'), ['google1', 'yandex']) from system.numbers limit 10;
select 0 != multiMatchAnyIndex(materialize('gogleuedeyandexgoogle'), ['.*goo.*', '.*yan.*']) from system.numbers limit 10;
select 5 = multiMatchAnyIndex(materialize('vladizlvav dabe don\'t heart me no more'), ['what', 'is', 'love', 'baby', 'no mo??', 'dont', 'h.rt me']) from system.numbers limit 10;

SELECT multiMatchAny(materialize('/odezhda-dlya-bega/'), ['/odezhda-dlya-bega/', 'kurtki-i-vetrovki-dlya-bega', 'futbolki-i-mayki-dlya-bega']);
@ -0,0 +1,9 @@
1
1
1
1
0
1
1
1
0
@ -0,0 +1,6 @@
SET allow_hyperscan = 1;
SELECT multiMatchAny(arrayJoin(['hello', 'world', 'hellllllllo', 'wororld', 'abc']), ['hel+o', 'w(or)*ld']);
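-- With allow_hyperscan = 0, the same multi-regex query must fail with error 446.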
SET allow_hyperscan = 0;
SELECT multiMatchAny(arrayJoin(['hello', 'world', 'hellllllllo', 'wororld', 'abc']), ['hel+o', 'w(or)*ld']); -- { serverError 446 }
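-- multiSearchAny does plain substring search, so it is expected to keep working with hyperscan disallowed.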

SELECT multiSearchAny(arrayJoin(['hello', 'world', 'hello, world', 'abc']), ['hello', 'world']);
@ -0,0 +1,3 @@
1
2
[1,8]
@ -0,0 +1,3 @@
SELECT multiMatchAny('goodbye', ['^hello[, ]+world$', 'go+d *bye', 'w(or)+ld']);
SELECT multiMatchAnyIndex('goodbye', ['^hello[, ]+world$', 'go+d *bye', 'w(or)+ld']);
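-- multiSearchAllPositions returns the 1-based starting position of each needle.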
SELECT multiSearchAllPositions('hello, world', ['hello', 'world']);
@ -1,24 +0,0 @@
drop table if exists test.persons;
drop table if exists test.children;

create table test.persons (
    id String,
    name String
) engine MergeTree order by id;

create table test.children (
    id String,
    childName String
) engine MergeTree order by id;

insert into test.persons (id, name) values ('1', 'John'), ('2', 'Jack'), ('3', 'Daniel'), ('4', 'James'), ('5', 'Amanda');
insert into test.children (id, childName) values ('1', 'Robert'), ('1', 'Susan'), ('3', 'Sarah'), ('4', 'David'), ('4', 'Joseph'), ('5', 'Robert');


select * from test.persons all inner join test.children using id;

select * from test.persons all inner join (select * from test.children) as j using id;

select * from (select * from test.persons) as s all inner join (select * from test.children) as j using id;

@ -0,0 +1 @@
SELECT a FROM (SELECT 1 AS a, (SELECT count() FROM system.numbers) AS b);
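-- Note: b is never read; presumably the test checks that the unused scalar subquery over the infinite system.numbers table is not evaluated.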
4
debian/control
vendored
@ -28,8 +28,8 @@ Package: clickhouse-common-static
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}, tzdata
Suggests: clickhouse-common-static-dbg
Replaces: clickhouse-server-base
Provides: clickhouse-server-base
Replaces: clickhouse-common, clickhouse-server-base
Provides: clickhouse-common, clickhouse-server-base
Description: Common files for ClickHouse
 Yandex ClickHouse is a column-oriented database management system
 that allows generating analytical data reports in real time.
11
debian/dupload.conf.in
vendored
@ -1,11 +0,0 @@
package config;

$default_host = "metrika";

$cfg{'metrika'} = {
  fqdn => "",
  method => "scpb",
  incoming => "/repo/metrika/mini-dinstall/incoming/",
  dinstall_runs => 0,
  login => "@AUTHOR@"
};