Merge with master

alesapin 2020-01-20 12:21:05 +03:00
commit 3716d4f7e3
302 changed files with 4771 additions and 2299 deletions

.gitignore

@@ -244,6 +244,9 @@ website/package-lock.json
# ccls cache
/.ccls-cache
# clangd cache
/.clangd
/compile_commands.json
# Toolchains

.gitmodules

@@ -134,6 +134,9 @@
[submodule "contrib/libc-headers"]
path = contrib/libc-headers
url = https://github.com/ClickHouse-Extras/libc-headers.git
[submodule "contrib/replxx"]
path = contrib/replxx
url = https://github.com/AmokHuginnsson/replxx.git
[submodule "contrib/ryu"]
path = contrib/ryu
url = https://github.com/ClickHouse-Extras/ryu.git


@@ -328,7 +328,6 @@ include (cmake/find/xxhash.cmake)
include (cmake/find/sparsehash.cmake)
include (cmake/find/rt.cmake)
include (cmake/find/execinfo.cmake)
include (cmake/find/readline_edit.cmake)
include (cmake/find/re2.cmake)
include (cmake/find/libgsasl.cmake)
include (cmake/find/rdkafka.cmake)
@@ -353,6 +352,7 @@ include (cmake/find/simdjson.cmake)
include (cmake/find/rapidjson.cmake)
include (cmake/find/fastops.cmake)
include (cmake/find/orc.cmake)
include (cmake/find/replxx.cmake)
find_contrib_lib(cityhash)
find_contrib_lib(farmhash)


@@ -11,7 +11,6 @@ if (ENABLE_BASE64)
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/base64")
message (WARNING "submodule contrib/base64 is missing. to fix try run: \n git submodule update --init --recursive")
else()
set (BASE64_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/base64/include)
set (BASE64_LIBRARY base64)
set (USE_BASE64 1)
endif()


@@ -1,60 +0,0 @@
include (CMakePushCheckState)
cmake_push_check_state ()
option (ENABLE_READLINE "Enable readline" ${ENABLE_LIBRARIES})
if (ENABLE_READLINE)
set (READLINE_PATHS "/usr/local/opt/readline/lib")
# First try find custom lib for macos users (default lib without history support)
find_library (READLINE_LIB NAMES readline PATHS ${READLINE_PATHS} NO_DEFAULT_PATH)
if (NOT READLINE_LIB)
find_library (READLINE_LIB NAMES readline PATHS ${READLINE_PATHS})
endif ()
list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES .so.2)
find_library (EDIT_LIB NAMES edit)
set(READLINE_INCLUDE_PATHS "/usr/local/opt/readline/include")
if (READLINE_LIB AND TERMCAP_LIBRARY)
find_path (READLINE_INCLUDE_DIR NAMES readline/readline.h PATHS ${READLINE_INCLUDE_PATHS} NO_DEFAULT_PATH)
if (NOT READLINE_INCLUDE_DIR)
find_path (READLINE_INCLUDE_DIR NAMES readline/readline.h PATHS ${READLINE_INCLUDE_PATHS})
endif ()
if (READLINE_INCLUDE_DIR AND READLINE_LIB)
set (USE_READLINE 1)
set (LINE_EDITING_LIBS ${READLINE_LIB} ${TERMCAP_LIBRARY})
message (STATUS "Using line editing libraries (readline): ${READLINE_INCLUDE_DIR} : ${LINE_EDITING_LIBS}")
endif ()
elseif (EDIT_LIB AND TERMCAP_LIBRARY)
find_library (CURSES_LIB NAMES curses)
find_path (READLINE_INCLUDE_DIR NAMES editline/readline.h PATHS ${READLINE_INCLUDE_PATHS})
if (CURSES_LIB AND READLINE_INCLUDE_DIR)
set (USE_LIBEDIT 1)
set (LINE_EDITING_LIBS ${EDIT_LIB} ${CURSES_LIB} ${TERMCAP_LIBRARY})
message (STATUS "Using line editing libraries (edit): ${READLINE_INCLUDE_DIR} : ${LINE_EDITING_LIBS}")
endif ()
endif ()
endif ()
if (LINE_EDITING_LIBS AND READLINE_INCLUDE_DIR)
include (CheckCXXSourceRuns)
set (CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES} ${LINE_EDITING_LIBS})
set (CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES} ${READLINE_INCLUDE_DIR})
check_cxx_source_runs ("
#include <stdio.h>
#include <readline/readline.h>
#include <readline/history.h>
int main() {
add_history(NULL);
append_history(1,NULL);
return 0;
}
" HAVE_READLINE_HISTORY)
else ()
message (STATUS "Not using any library for line editing.")
endif ()
cmake_pop_check_state ()

cmake/find/replxx.cmake

@@ -0,0 +1,40 @@
option (ENABLE_REPLXX "Enable replxx support" ${NOT_UNBUNDLED})
if (ENABLE_REPLXX)
option (USE_INTERNAL_REPLXX "Use internal replxx library" ${NOT_UNBUNDLED})
if (USE_INTERNAL_REPLXX AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/replxx/README.md")
message (WARNING "submodule contrib/replxx is missing. to fix try run: \n git submodule update --init --recursive")
set (USE_INTERNAL_REPLXX 0)
endif ()
if (NOT USE_INTERNAL_REPLXX)
find_library(LIBRARY_REPLXX NAMES replxx replxx-static)
find_path(INCLUDE_REPLXX replxx.hxx)
add_library(replxx UNKNOWN IMPORTED)
set_property(TARGET replxx PROPERTY IMPORTED_LOCATION ${LIBRARY_REPLXX})
target_include_directories(replxx PUBLIC ${INCLUDE_REPLXX})
set(CMAKE_REQUIRED_LIBRARIES replxx)
check_cxx_source_compiles(
"
#include <replxx.hxx>
int main() {
replxx::Replxx rx;
}
"
EXTERNAL_REPLXX_WORKS
)
if (NOT EXTERNAL_REPLXX_WORKS)
message (FATAL_ERROR "replxx is unusable: ${LIBRARY_REPLXX} ${INCLUDE_REPLXX}")
endif ()
endif ()
set(USE_REPLXX 1)
message (STATUS "Using replxx")
else ()
set(USE_REPLXX 0)
endif ()
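Note (illustrative, not part of the diff): replxx is a readline replacement with a small C++ API. A minimal sketch of its use, based on the replxx headers at the pinned revision; names and the history file name are assumptions:

#include <replxx.hxx>

int main()
{
    replxx::Replxx rx;
    rx.history_load("history.txt");              /// hypothetical history file name
    /// input() returns nullptr on EOF (Ctrl+D), so this loops until the user quits
    while (char const * line = rx.input(":) "))
    {
        if (*line != '\0')
        {
            rx.history_add(line);                /// remember non-empty lines
            /// ... hand the line to the application here ...
        }
    }
    rx.history_save("history.txt");
    return 0;
}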


@@ -48,7 +48,6 @@ if (SANITIZE)
set (ENABLE_EMBEDDED_COMPILER 0 CACHE BOOL "")
set (USE_INTERNAL_CAPNP_LIBRARY 0 CACHE BOOL "")
set (USE_SIMDJSON 0 CACHE BOOL "")
set (ENABLE_READLINE 0 CACHE BOOL "")
set (ENABLE_ORC 0 CACHE BOOL "")
set (ENABLE_PARQUET 0 CACHE BOOL "")
set (USE_CAPNP 0 CACHE BOOL "")


@@ -15,7 +15,6 @@ if (CMAKE_CROSSCOMPILING)
set (USE_SNAPPY OFF CACHE INTERNAL "")
set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")
set (ENABLE_PARQUET OFF CACHE INTERNAL "")
set (ENABLE_READLINE OFF CACHE INTERNAL "")
set (ENABLE_ICU OFF CACHE INTERNAL "")
set (ENABLE_FASTOPS OFF CACHE INTERNAL "")
elseif (OS_LINUX)


@@ -331,3 +331,7 @@ endif()
if (USE_FASTOPS)
add_subdirectory (fastops-cmake)
endif()
if (USE_INTERNAL_REPLXX)
add_subdirectory (replxx-cmake)
endif()


@@ -74,7 +74,6 @@ file(GLOB S3_UNIFIED_SRC
)
set(S3_INCLUDES
"${CMAKE_CURRENT_SOURCE_DIR}/include/"
"${AWS_COMMON_LIBRARY_DIR}/include/"
"${AWS_EVENT_STREAM_LIBRARY_DIR}/include/"
"${AWS_S3_LIBRARY_DIR}/include/"
@@ -96,7 +95,7 @@ target_compile_definitions(aws_s3 PUBLIC -DENABLE_CURL_CLIENT)
target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_MAJOR=1")
target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_MINOR=7")
target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_PATCH=231")
target_include_directories(aws_s3 PUBLIC ${S3_INCLUDES} "${CMAKE_BINARY_DIR}/install")
target_include_directories(aws_s3 PUBLIC ${S3_INCLUDES})
if (OPENSSL_FOUND)
target_compile_definitions(aws_s3 PUBLIC -DENABLE_OPENSSL_ENCRYPTION)

contrib/replxx (submodule)

@@ -0,0 +1 @@
Subproject commit 37582f0bb8c52513c6c6b76797c02d852d701dad


@@ -0,0 +1,18 @@
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/replxx")
set(SRCS
${LIBRARY_DIR}/src/conversion.cxx
${LIBRARY_DIR}/src/escape.cxx
${LIBRARY_DIR}/src/history.cxx
${LIBRARY_DIR}/src/io.cxx
${LIBRARY_DIR}/src/prompt.cxx
${LIBRARY_DIR}/src/replxx.cxx
${LIBRARY_DIR}/src/replxx_impl.cxx
${LIBRARY_DIR}/src/util.cxx
${LIBRARY_DIR}/src/wcwidth.cpp
${LIBRARY_DIR}/src/ConvertUTF.cpp
)
add_library(replxx ${SRCS})
target_include_directories(replxx PUBLIC ${LIBRARY_DIR}/include)
target_compile_options(replxx PUBLIC -Wno-documentation)


@@ -142,10 +142,10 @@ elseif (COMPILER_GCC)
add_cxx_compile_options(-Wmaybe-uninitialized)
# Warn when the indentation of the code does not reflect the block structure
add_cxx_compile_options(-Wmisleading-indentation)
# Warn if a global function is defined without a previous declaration
# Warn if a global function is defined without a previous declaration - disabled because of build times
# add_cxx_compile_options(-Wmissing-declarations)
# Warn if a user-supplied include directory does not exist
# add_cxx_compile_options(-Wmissing-include-dirs)
add_cxx_compile_options(-Wmissing-include-dirs)
# Obvious
add_cxx_compile_options(-Wnon-virtual-dtor)
# Obvious
@@ -563,7 +563,7 @@ if (USE_JEMALLOC)
endif()
endif ()
dbms_target_include_directories (PUBLIC ${DBMS_INCLUDE_DIR} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/src/Formats/include)
dbms_target_include_directories (PUBLIC ${DBMS_INCLUDE_DIR})
target_include_directories (clickhouse_common_io PUBLIC ${DBMS_INCLUDE_DIR})
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${DOUBLE_CONVERSION_INCLUDE_DIR})


@@ -418,7 +418,7 @@ private:
std::cerr << percent << "%\t\t";
for (const auto & info : infos)
{
std::cerr << info->sampler.quantileInterpolated(percent / 100.0) << " sec." << "\t";
std::cerr << info->sampler.quantileNearest(percent / 100.0) << " sec." << "\t";
}
std::cerr << "\n";
};
@@ -453,7 +453,7 @@ private:
auto print_percentile = [&json_out](Stats & info, auto percent, bool with_comma = true)
{
json_out << "\"" << percent << "\"" << ": " << info.sampler.quantileInterpolated(percent / 100.0) << (with_comma ? ",\n" : "\n");
json_out << "\"" << percent << "\"" << ": " << info.sampler.quantileNearest(percent / 100.0) << (with_comma ? ",\n" : "\n");
};
json_out << "{\n";
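Note (illustrative, not part of the diff): quantileNearest presumably reports the nearest-rank order statistic of the reservoir, i.e. the sorted sample at index ceil(p * n), instead of interpolating between two adjacent samples as quantileInterpolated does, so the benchmark always prints a latency that was actually observed.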


@@ -1,14 +1,10 @@
set(CLICKHOUSE_CLIENT_SOURCES
${CMAKE_CURRENT_SOURCE_DIR}/Client.cpp
${CMAKE_CURRENT_SOURCE_DIR}/ConnectionParameters.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Suggest.cpp
)
set(CLICKHOUSE_CLIENT_LINK PRIVATE clickhouse_common_config clickhouse_functions clickhouse_aggregate_functions clickhouse_common_io clickhouse_parsers string_utils ${LINE_EDITING_LIBS} ${Boost_PROGRAM_OPTIONS_LIBRARY})
set(CLICKHOUSE_CLIENT_INCLUDE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/include)
if (READLINE_INCLUDE_DIR)
set(CLICKHOUSE_CLIENT_INCLUDE ${CLICKHOUSE_CLIENT_INCLUDE} SYSTEM PRIVATE ${READLINE_INCLUDE_DIR})
endif ()
include(CheckSymbolExists)
check_symbol_exists(readpassphrase readpassphrase.h HAVE_READPASSPHRASE)


@@ -1,7 +1,7 @@
#include "TestHint.h"
#include "ConnectionParameters.h"
#include "Suggest.h"
#include <port/unistd.h>
#include <stdlib.h>
#include <fcntl.h>
#include <signal.h>
@@ -18,8 +18,9 @@
#include <Poco/String.h>
#include <Poco/File.h>
#include <Poco/Util/Application.h>
#include <common/readline_use.h>
#include <common/find_symbols.h>
#include <common/config_common.h>
#include <common/LineReader.h>
#include <Common/ClickHouseRevision.h>
#include <Common/Stopwatch.h>
#include <Common/Exception.h>
@@ -69,10 +70,6 @@
#include <common/argsToConfig.h>
#include <Common/TerminalSize.h>
#if USE_READLINE
#include "Suggest.h"
#endif
#ifndef __clang__
#pragma GCC optimize("-fno-var-tracking-assignments")
#endif
@@ -89,39 +86,6 @@
#define DISABLE_LINE_WRAPPING "\033[?7l"
#define ENABLE_LINE_WRAPPING "\033[?7h"
#if USE_READLINE && RL_VERSION_MAJOR >= 7
#define BRACK_PASTE_PREF "\033[200~"
#define BRACK_PASTE_SUFF "\033[201~"
#define BRACK_PASTE_LAST '~'
#define BRACK_PASTE_SLEN 6
/// This handler bypasses some unused macro/event checkings.
static int clickhouse_rl_bracketed_paste_begin(int /* count */, int /* key */)
{
std::string buf;
buf.reserve(128);
RL_SETSTATE(RL_STATE_MOREINPUT);
SCOPE_EXIT(RL_UNSETSTATE(RL_STATE_MOREINPUT));
int c;
while ((c = rl_read_key()) >= 0)
{
if (c == '\r')
c = '\n';
buf.push_back(c);
if (buf.size() >= BRACK_PASTE_SLEN && c == BRACK_PASTE_LAST && buf.substr(buf.size() - BRACK_PASTE_SLEN) == BRACK_PASTE_SUFF)
{
buf.resize(buf.size() - BRACK_PASTE_SLEN);
break;
}
}
return static_cast<size_t>(rl_insert_text(buf.c_str())) == buf.size() ? 0 : 1;
}
#endif
namespace DB
{
@@ -136,7 +100,6 @@ namespace ErrorCodes
extern const int UNEXPECTED_PACKET_FROM_SERVER;
extern const int CLIENT_OUTPUT_FORMAT_SPECIFIED;
extern const int CANNOT_SET_SIGNAL_HANDLER;
extern const int CANNOT_READLINE;
extern const int SYSTEM_ERROR;
extern const int INVALID_USAGE_OF_INPUT;
}
@@ -157,7 +120,7 @@ private:
"учшеж", "йгшеж", "дщпщгеж",
"q", "й", "\\q", "\\Q", "\\й", "\\Й", ":q", "Жй"
};
bool is_interactive = true; /// Use either readline interface or batch mode.
bool is_interactive = true; /// Use either the interactive line-editing interface or batch mode.
bool need_render_progress = true; /// Render query execution progress.
bool echo_queries = false; /// Print queries before execution in batch mode.
bool ignore_error = false; /// In case of errors, don't print error message, continue to next query. Only applicable for non-interactive mode.
@@ -514,26 +477,10 @@ private:
if (print_time_to_stderr)
throw Exception("time option could be specified only in non-interactive mode", ErrorCodes::BAD_ARGUMENTS);
#if USE_READLINE
SCOPE_EXIT({ Suggest::instance().finalize(); });
if (server_revision >= Suggest::MIN_SERVER_REVISION
&& !config().getBool("disable_suggestion", false))
{
if (server_revision >= Suggest::MIN_SERVER_REVISION && !config().getBool("disable_suggestion", false))
/// Load suggestion data from the server.
Suggest::instance().load(connection_parameters, config().getInt("suggestion_limit"));
/// Added '.' to the default list. Because it is used to separate database and table.
rl_basic_word_break_characters = " \t\n\r\"\\'`@$><=;|&{(.";
/// Not append whitespace after single suggestion. Because whitespace after function name is meaningless.
rl_completion_append_character = '\0';
rl_completion_entry_function = Suggest::generator;
}
else
/// Turn tab completion off.
rl_bind_key('\t', rl_insert);
#endif
/// Load command history if present.
if (config().has("history_file"))
history_file = config().getString("history_file");
@@ -546,70 +493,45 @@ private:
history_file = home_path + "/.clickhouse-client-history";
}
if (!history_file.empty())
if (!history_file.empty() && !Poco::File(history_file).exists())
Poco::File(history_file).createFile();
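/// Note (inferred from the removed loop() further down, not stated in the diff): '\\' at the
/// end of a line continues the current query, and ';' ends a query only when --multiline is
/// set; passing 0 disables the delimiter.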
LineReader lr(&Suggest::instance(), history_file, '\\', config().has("multiline") ? ';' : 0);
do
{
if (Poco::File(history_file).exists())
auto input = lr.readLine(prompt(), ":-] ");
if (input.empty())
break;
try
{
#if USE_READLINE
int res = read_history(history_file.c_str());
if (res)
std::cerr << "Cannot read history from file " + history_file + ": "+ errnoToString(ErrorCodes::CANNOT_READ_HISTORY);
#endif
if (!process(input))
break;
}
catch (const Exception & e)
{
actual_client_error = e.code();
if (!actual_client_error || actual_client_error != expected_client_error)
{
std::cerr << std::endl
<< "Exception on client:" << std::endl
<< "Code: " << e.code() << ". " << e.displayText() << std::endl;
if (config().getBool("stacktrace", false))
std::cerr << "Stack trace:" << std::endl << e.getStackTraceString() << std::endl;
std::cerr << std::endl;
}
/// Client-side exception during query execution can result in the loss of
/// sync in the connection protocol.
/// So we reconnect and allow to enter the next query.
connect();
}
else /// Create history file.
Poco::File(history_file).createFile();
}
#if USE_READLINE
/// Install Ctrl+C signal handler that will be used in interactive mode.
if (rl_initialize())
throw Exception("Cannot initialize readline", ErrorCodes::CANNOT_READLINE);
#if RL_VERSION_MAJOR >= 7
/// Enable bracketed-paste-mode only when multiquery is enabled and multiline is
/// disabled, so that we are able to paste and execute multiline queries in a whole
/// instead of erroring out, while be less intrusive.
if (config().has("multiquery") && !config().has("multiline"))
{
/// When bracketed paste mode is set, pasted text is bracketed with control sequences so
/// that the program can differentiate pasted text from typed-in text. This helps
/// clickhouse-client so that without -m flag, one can still paste multiline queries, and
/// possibly get better pasting performance. See https://cirw.in/blog/bracketed-paste for
/// more details.
rl_variable_bind("enable-bracketed-paste", "on");
/// Use our bracketed paste handler to get better user experience. See comments above.
rl_bind_keyseq(BRACK_PASTE_PREF, clickhouse_rl_bracketed_paste_begin);
}
#endif
auto clear_prompt_or_exit = [](int)
{
/// This is signal safe.
ssize_t res = write(STDOUT_FILENO, "\n", 1);
/// Allow to quit client while query is in progress by pressing Ctrl+C twice.
/// (First press to Ctrl+C will try to cancel query by InterruptListener).
if (res == 1 && rl_line_buffer[0] && !RL_ISSTATE(RL_STATE_DONE))
{
rl_replace_line("", 0);
if (rl_forced_update_display())
_exit(0);
}
else
{
/// A little dirty, but we struggle to find better way to correctly
/// force readline to exit after returning from the signal handler.
_exit(0);
}
};
if (signal(SIGINT, clear_prompt_or_exit) == SIG_ERR)
throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
#endif
loop();
while (true);
if (isNewYearMode())
std::cout << "Happy new year." << std::endl;
@@ -621,17 +543,6 @@ private:
}
else
{
/// This is intended for testing purposes.
if (config().getBool("always_load_suggestion_data", false))
{
#if USE_READLINE
SCOPE_EXIT({ Suggest::instance().finalize(); });
Suggest::instance().load(connection_parameters, config().getInt("suggestion_limit"));
#else
throw Exception("Command line suggestions cannot work without readline", ErrorCodes::BAD_ARGUMENTS);
#endif
}
query_id = config().getString("query_id", "");
nonInteractive();
@@ -706,111 +617,11 @@ private:
}
/// Check if multi-line query is inserted from the paste buffer.
/// Allows delaying the start of query execution until the entirety of query is inserted.
static bool hasDataInSTDIN()
{
timeval timeout = { 0, 0 };
fd_set fds;
FD_ZERO(&fds);
FD_SET(STDIN_FILENO, &fds);
return select(1, &fds, nullptr, nullptr, &timeout) == 1;
}
inline const String prompt() const
{
return boost::replace_all_copy(prompt_by_server_display_name, "{database}", config().getString("database", "default"));
}
void loop()
{
String input;
String prev_input;
while (char * line_ = readline(input.empty() ? prompt().c_str() : ":-] "))
{
String line = line_;
free(line_);
size_t ws = line.size();
while (ws > 0 && isWhitespaceASCII(line[ws - 1]))
--ws;
if (ws == 0 || line.empty())
continue;
bool ends_with_semicolon = line[ws - 1] == ';';
bool ends_with_backslash = line[ws - 1] == '\\';
has_vertical_output_suffix = (ws >= 2) && (line[ws - 2] == '\\') && (line[ws - 1] == 'G');
if (ends_with_backslash)
line = line.substr(0, ws - 1);
input += line;
if (!ends_with_backslash && (ends_with_semicolon || has_vertical_output_suffix || (!config().has("multiline") && !hasDataInSTDIN())))
{
// TODO: should we do sensitive data masking on client too? History file can be source of secret leaks.
if (input != prev_input)
{
/// Replace line breaks with spaces to prevent the following problem.
/// Every line of multi-line query is saved to history file as a separate line.
/// If the user restarts the client then after pressing the "up" button
/// every line of the query will be displayed separately.
std::string logged_query = input;
if (config().has("multiline"))
std::replace(logged_query.begin(), logged_query.end(), '\n', ' ');
add_history(logged_query.c_str());
#if USE_READLINE && HAVE_READLINE_HISTORY
if (!history_file.empty() && append_history(1, history_file.c_str()))
std::cerr << "Cannot append history to file " + history_file + ": " + errnoToString(ErrorCodes::CANNOT_APPEND_HISTORY);
#endif
prev_input = input;
}
if (has_vertical_output_suffix)
input = input.substr(0, input.length() - 2);
try
{
if (!process(input))
break;
}
catch (const Exception & e)
{
actual_client_error = e.code();
if (!actual_client_error || actual_client_error != expected_client_error)
{
std::cerr << std::endl
<< "Exception on client:" << std::endl
<< "Code: " << e.code() << ". " << e.displayText() << std::endl;
if (config().getBool("stacktrace", false))
std::cerr << "Stack trace:" << std::endl
<< e.getStackTraceString() << std::endl;
std::cerr << std::endl;
}
/// Client-side exception during query execution can result in the loss of
/// sync in the connection protocol.
/// So we reconnect and allow to enter the next query.
connect();
}
input = "";
}
else
{
input += '\n';
}
}
}
void nonInteractive()
{
@@ -2001,13 +1812,6 @@ public:
server_logs_file = options["server_logs_file"].as<std::string>();
if (options.count("disable_suggestion"))
config().setBool("disable_suggestion", true);
if (options.count("always_load_suggestion_data"))
{
if (options.count("disable_suggestion"))
throw Exception("Command line parameters disable_suggestion (-A) and always_load_suggestion_data cannot be specified simultaneously",
ErrorCodes::BAD_ARGUMENTS);
config().setBool("always_load_suggestion_data", true);
}
if (options.count("suggestion_limit"))
config().setInt("suggestion_limit", options["suggestion_limit"].as<int>());


@@ -0,0 +1,144 @@
#include "Suggest.h"
#include <Columns/ColumnString.h>
#include <Common/typeid_cast.h>
namespace DB
{
void Suggest::load(const ConnectionParameters & connection_parameters, size_t suggestion_limit)
{
loading_thread = std::thread([connection_parameters, suggestion_limit, this]
{
try
{
Connection connection(
connection_parameters.host,
connection_parameters.port,
connection_parameters.default_database,
connection_parameters.user,
connection_parameters.password,
"client",
connection_parameters.compression,
connection_parameters.security);
loadImpl(connection, connection_parameters.timeouts, suggestion_limit);
}
catch (...)
{
std::cerr << "Cannot load data for command line suggestions: " << getCurrentExceptionMessage(false, true) << "\n";
}
/// Note that keyword suggestions are available even if we cannot load data from server.
std::sort(words.begin(), words.end());
ready = true;
});
}
Suggest::Suggest()
{
/// Keywords may not be up to date with the ClickHouse parser.
words = {"CREATE", "DATABASE", "IF", "NOT", "EXISTS", "TEMPORARY", "TABLE", "ON", "CLUSTER", "DEFAULT",
"MATERIALIZED", "ALIAS", "ENGINE", "AS", "VIEW", "POPULATE", "SETTINGS", "ATTACH", "DETACH", "DROP",
"RENAME", "TO", "ALTER", "ADD", "MODIFY", "CLEAR", "COLUMN", "AFTER", "COPY", "PROJECT",
"PRIMARY", "KEY", "CHECK", "PARTITION", "PART", "FREEZE", "FETCH", "FROM", "SHOW", "INTO",
"OUTFILE", "FORMAT", "TABLES", "DATABASES", "LIKE", "PROCESSLIST", "CASE", "WHEN", "THEN", "ELSE",
"END", "DESCRIBE", "DESC", "USE", "SET", "OPTIMIZE", "FINAL", "DEDUPLICATE", "INSERT", "VALUES",
"SELECT", "DISTINCT", "SAMPLE", "ARRAY", "JOIN", "GLOBAL", "LOCAL", "ANY", "ALL", "INNER",
"LEFT", "RIGHT", "FULL", "OUTER", "CROSS", "USING", "PREWHERE", "WHERE", "GROUP", "BY",
"WITH", "TOTALS", "HAVING", "ORDER", "COLLATE", "LIMIT", "UNION", "AND", "OR", "ASC",
"IN", "KILL", "QUERY", "SYNC", "ASYNC", "TEST", "BETWEEN", "TRUNCATE"};
}
void Suggest::loadImpl(Connection & connection, const ConnectionTimeouts & timeouts, size_t suggestion_limit)
{
std::stringstream query;
query << "SELECT DISTINCT arrayJoin(extractAll(name, '[\\\\w_]{2,}')) AS res FROM ("
"SELECT name FROM system.functions"
" UNION ALL "
"SELECT name FROM system.table_engines"
" UNION ALL "
"SELECT name FROM system.formats"
" UNION ALL "
"SELECT name FROM system.table_functions"
" UNION ALL "
"SELECT name FROM system.data_type_families"
" UNION ALL "
"SELECT name FROM system.settings"
" UNION ALL "
"SELECT cluster FROM system.clusters"
" UNION ALL "
"SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate";
/// The user may disable loading of databases, tables, columns by setting suggestion_limit to zero.
if (suggestion_limit > 0)
{
String limit_str = toString(suggestion_limit);
query <<
" UNION ALL "
"SELECT name FROM system.databases LIMIT " << limit_str
<< " UNION ALL "
"SELECT DISTINCT name FROM system.tables LIMIT " << limit_str
<< " UNION ALL "
"SELECT DISTINCT name FROM system.columns LIMIT " << limit_str;
}
query << ") WHERE notEmpty(res)";
fetch(connection, timeouts, query.str());
}
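/// Illustration (not part of the diff): with suggestion_limit = 2 the tail appended above is
///     " UNION ALL SELECT name FROM system.databases LIMIT 2"
///     " UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 2"
///     " UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 2"
/// before the subquery is closed with ") WHERE notEmpty(res)".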
void Suggest::fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query)
{
connection.sendQuery(timeouts, query);
while (true)
{
Packet packet = connection.receivePacket();
switch (packet.type)
{
case Protocol::Server::Data:
fillWordsFromBlock(packet.block);
continue;
case Protocol::Server::Progress:
continue;
case Protocol::Server::ProfileInfo:
continue;
case Protocol::Server::Totals:
continue;
case Protocol::Server::Extremes:
continue;
case Protocol::Server::Log:
continue;
case Protocol::Server::Exception:
packet.exception->rethrow();
return;
case Protocol::Server::EndOfStream:
return;
default:
throw Exception("Unknown packet from server", ErrorCodes::UNKNOWN_PACKET_FROM_SERVER);
}
}
}
void Suggest::fillWordsFromBlock(const Block & block)
{
if (!block)
return;
if (block.columns() != 1)
throw Exception("Wrong number of columns received for query to read words for suggestion", ErrorCodes::LOGICAL_ERROR);
const ColumnString & column = typeid_cast<const ColumnString &>(*block.getByPosition(0).column);
size_t rows = block.rows();
for (size_t i = 0; i < rows; ++i)
words.emplace_back(column.getDataAt(i).toString());
}
}


@@ -2,18 +2,9 @@
#include "ConnectionParameters.h"
#include <string>
#include <sstream>
#include <string.h>
#include <vector>
#include <algorithm>
#include <common/readline_use.h>
#include <Common/typeid_cast.h>
#include <Columns/ColumnString.h>
#include <Client/Connection.h>
#include <IO/ConnectionTimeouts.h>
#include <common/LineReader.h>
namespace DB
@@ -24,141 +15,8 @@ namespace ErrorCodes
extern const int UNKNOWN_PACKET_FROM_SERVER;
}
class Suggest : private boost::noncopyable
class Suggest : public LineReader::Suggest, boost::noncopyable
{
private:
/// The vector will be filled with completion words from the server and sorted.
using Words = std::vector<std::string>;
/// Keywords may be not up to date with ClickHouse parser.
Words words
{
"CREATE", "DATABASE", "IF", "NOT", "EXISTS", "TEMPORARY", "TABLE", "ON", "CLUSTER", "DEFAULT", "MATERIALIZED", "ALIAS", "ENGINE",
"AS", "VIEW", "POPULATE", "SETTINGS", "ATTACH", "DETACH", "DROP", "RENAME", "TO", "ALTER", "ADD", "MODIFY", "CLEAR", "COLUMN", "AFTER",
"COPY", "PROJECT", "PRIMARY", "KEY", "CHECK", "PARTITION", "PART", "FREEZE", "FETCH", "FROM", "SHOW", "INTO", "OUTFILE", "FORMAT", "TABLES",
"DATABASES", "LIKE", "PROCESSLIST", "CASE", "WHEN", "THEN", "ELSE", "END", "DESCRIBE", "DESC", "USE", "SET", "OPTIMIZE", "FINAL", "DEDUPLICATE",
"INSERT", "VALUES", "SELECT", "DISTINCT", "SAMPLE", "ARRAY", "JOIN", "GLOBAL", "LOCAL", "ANY", "ALL", "INNER", "LEFT", "RIGHT", "FULL", "OUTER",
"CROSS", "USING", "PREWHERE", "WHERE", "GROUP", "BY", "WITH", "TOTALS", "HAVING", "ORDER", "COLLATE", "LIMIT", "UNION", "AND", "OR", "ASC", "IN",
"KILL", "QUERY", "SYNC", "ASYNC", "TEST", "BETWEEN", "TRUNCATE"
};
/// Words are fetched asynchronously.
std::thread loading_thread;
std::atomic<bool> ready{false};
/// Points to current word to suggest.
Words::const_iterator pos;
/// Points after the last possible match.
Words::const_iterator end;
/// Set iterators to the matched range of words if any.
void findRange(const char * prefix, size_t prefix_length)
{
std::string prefix_str(prefix);
std::tie(pos, end) = std::equal_range(words.begin(), words.end(), prefix_str,
[prefix_length](const std::string & s, const std::string & prefix_searched) { return strncmp(s.c_str(), prefix_searched.c_str(), prefix_length) < 0; });
}
/// Iterates through matched range.
char * nextMatch()
{
if (pos >= end)
return nullptr;
/// readline will free memory by itself.
char * word = strdup(pos->c_str());
++pos;
return word;
}
void loadImpl(Connection & connection, const ConnectionTimeouts & timeouts, size_t suggestion_limit)
{
std::stringstream query;
query << "SELECT DISTINCT arrayJoin(extractAll(name, '[\\\\w_]{2,}')) AS res FROM ("
"SELECT name FROM system.functions"
" UNION ALL "
"SELECT name FROM system.table_engines"
" UNION ALL "
"SELECT name FROM system.formats"
" UNION ALL "
"SELECT name FROM system.table_functions"
" UNION ALL "
"SELECT name FROM system.data_type_families"
" UNION ALL "
"SELECT name FROM system.settings"
" UNION ALL "
"SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate";
/// The user may disable loading of databases, tables, columns by setting suggestion_limit to zero.
if (suggestion_limit > 0)
{
String limit_str = toString(suggestion_limit);
query <<
" UNION ALL "
"SELECT name FROM system.databases LIMIT " << limit_str
<< " UNION ALL "
"SELECT DISTINCT name FROM system.tables LIMIT " << limit_str
<< " UNION ALL "
"SELECT DISTINCT name FROM system.columns LIMIT " << limit_str;
}
query << ") WHERE notEmpty(res)";
fetch(connection, timeouts, query.str());
}
void fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query)
{
connection.sendQuery(timeouts, query);
while (true)
{
Packet packet = connection.receivePacket();
switch (packet.type)
{
case Protocol::Server::Data:
fillWordsFromBlock(packet.block);
continue;
case Protocol::Server::Progress:
continue;
case Protocol::Server::ProfileInfo:
continue;
case Protocol::Server::Totals:
continue;
case Protocol::Server::Extremes:
continue;
case Protocol::Server::Log:
continue;
case Protocol::Server::Exception:
packet.exception->rethrow();
return;
case Protocol::Server::EndOfStream:
return;
default:
throw Exception("Unknown packet from server", ErrorCodes::UNKNOWN_PACKET_FROM_SERVER);
}
}
}
void fillWordsFromBlock(const Block & block)
{
if (!block)
return;
if (block.columns() != 1)
throw Exception("Wrong number of columns received for query to read words for suggestion", ErrorCodes::LOGICAL_ERROR);
const ColumnString & column = typeid_cast<const ColumnString &>(*block.getByPosition(0).column);
size_t rows = block.rows();
for (size_t i = 0; i < rows; ++i)
words.emplace_back(column.getDataAt(i).toString());
}
public:
static Suggest & instance()
{
@@ -166,64 +24,25 @@ public:
return instance;
}
/// More old server versions cannot execute the query above.
void load(const ConnectionParameters & connection_parameters, size_t suggestion_limit);
/// Older server versions cannot execute the query above.
static constexpr int MIN_SERVER_REVISION = 54406;
void load(const ConnectionParameters & connection_parameters, size_t suggestion_limit)
{
loading_thread = std::thread([connection_parameters, suggestion_limit, this]
{
try
{
Connection connection(
connection_parameters.host,
connection_parameters.port,
connection_parameters.default_database,
connection_parameters.user,
connection_parameters.password,
"client",
connection_parameters.compression,
connection_parameters.security);
loadImpl(connection, connection_parameters.timeouts, suggestion_limit);
}
catch (...)
{
std::cerr << "Cannot load data for command line suggestions: " << getCurrentExceptionMessage(false, true) << "\n";
}
/// Note that keyword suggestions are available even if we cannot load data from server.
std::sort(words.begin(), words.end());
ready = true;
});
}
void finalize()
private:
Suggest();
~Suggest()
{
if (loading_thread.joinable())
loading_thread.join();
}
/// A function for readline.
static char * generator(const char * text, int state)
{
Suggest & suggest = Suggest::instance();
if (!suggest.ready)
return nullptr;
if (state == 0)
suggest.findRange(text, strlen(text));
void loadImpl(Connection & connection, const ConnectionTimeouts & timeouts, size_t suggestion_limit);
void fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query);
void fillWordsFromBlock(const Block & block);
/// Do not append whitespace after word. For unknown reason, rl_completion_append_character = '\0' does not work.
rl_completion_suppress_append = 1;
return suggest.nextMatch();
}
~Suggest()
{
finalize();
}
/// Words are fetched asynchronously.
std::thread loading_thread;
};
}


@@ -10,4 +10,4 @@ set_target_properties(readpassphrase
PROPERTIES LINKER_LANGUAGE C
)
# . to allow #include <readpassphrase.h>
target_include_directories(readpassphrase PUBLIC . ${CMAKE_CURRENT_BINARY_DIR}/include ${CMAKE_CURRENT_BINARY_DIR}/../include)
target_include_directories(readpassphrase PUBLIC . ${CMAKE_CURRENT_BINARY_DIR}/include)


@@ -15,20 +15,24 @@ set(CLICKHOUSE_ODBC_BRIDGE_INCLUDE PUBLIC ${ClickHouse_SOURCE_DIR}/libs/libdaemo
if (USE_POCO_SQLODBC)
set(CLICKHOUSE_ODBC_BRIDGE_LINK ${CLICKHOUSE_ODBC_BRIDGE_LINK} PRIVATE ${Poco_SQLODBC_LIBRARY})
set(CLICKHOUSE_ODBC_BRIDGE_INCLUDE ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_SQLODBC_INCLUDE_DIR})
# Wouldn't work anyway because of the way the list variable is expanded in `target_include_directories`
# set(CLICKHOUSE_ODBC_BRIDGE_INCLUDE ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_SQLODBC_INCLUDE_DIR})
endif ()
if (Poco_SQL_FOUND)
set(CLICKHOUSE_ODBC_BRIDGE_LINK ${CLICKHOUSE_ODBC_BRIDGE_LINK} PRIVATE ${Poco_SQL_LIBRARY})
set(CLICKHOUSE_ODBC_BRIDGE_INCLUDE ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR})
# Wouldn't work anyway because of the way the list variable is expanded in `target_include_directories`
# set(CLICKHOUSE_ODBC_BRIDGE_INCLUDE ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR})
endif ()
if (USE_POCO_DATAODBC)
set(CLICKHOUSE_ODBC_BRIDGE_LINK ${CLICKHOUSE_ODBC_BRIDGE_LINK} PRIVATE ${Poco_DataODBC_LIBRARY})
set(CLICKHOUSE_ODBC_BRIDGE_INCLUDE ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_DataODBC_INCLUDE_DIR})
# Wouldn't work anyway because of the way the list variable is expanded in `target_include_directories`
# set(CLICKHOUSE_ODBC_BRIDGE_INCLUDE ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_DataODBC_INCLUDE_DIR})
endif()
if (Poco_Data_FOUND)
set(CLICKHOUSE_ODBC_BRIDGE_LINK ${CLICKHOUSE_ODBC_BRIDGE_LINK} PRIVATE ${Poco_Data_LIBRARY})
set(CLICKHOUSE_ODBC_BRIDGE_INCLUDE ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} SYSTEM PRIVATE ${Poco_Data_INCLUDE_DIR})
# Wouldn't work anyway because of the way the list variable is expanded in `target_include_directories`
# set(CLICKHOUSE_ODBC_BRIDGE_INCLUDE ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} SYSTEM PRIVATE ${Poco_Data_INCLUDE_DIR})
endif ()
clickhouse_program_add_library(odbc-bridge)


@@ -61,6 +61,10 @@ void InterserverIOHTTPHandler::processQuery(Poco::Net::HTTPServerRequest & reque
ReadBufferFromIStream body(request.stream());
auto endpoint = server.context().getInterserverIOHandler().getEndpoint(endpoint_name);
/// Locked for read while query processing
std::shared_lock lock(endpoint->rwlock);
if (endpoint->blocker.isCancelled())
throw Exception("Transferring part to replica was cancelled", ErrorCodes::ABORTED);
if (compress)
{


@@ -79,21 +79,19 @@ void MySQLHandler::run()
if (!connection_context.mysql.max_packet_size)
connection_context.mysql.max_packet_size = MAX_PACKET_LENGTH;
/* LOG_TRACE(log, "Capabilities: " << handshake_response.capability_flags
<< "\nmax_packet_size: "
LOG_TRACE(log, "Capabilities: " << handshake_response.capability_flags
<< ", max_packet_size: "
<< handshake_response.max_packet_size
<< "\ncharacter_set: "
<< handshake_response.character_set
<< "\nuser: "
<< ", character_set: "
<< static_cast<int>(handshake_response.character_set)
<< ", user: "
<< handshake_response.username
<< "\nauth_response length: "
<< ", auth_response length: "
<< handshake_response.auth_response.length()
<< "\nauth_response: "
<< handshake_response.auth_response
<< "\ndatabase: "
<< ", database: "
<< handshake_response.database
<< "\nauth_plugin_name: "
<< handshake_response.auth_plugin_name);*/
<< ", auth_plugin_name: "
<< handshake_response.auth_plugin_name);
client_capability_flags = handshake_response.capability_flags;
if (!(client_capability_flags & CLIENT_PROTOCOL_41))


@@ -34,7 +34,7 @@ MySQLHandlerFactory::MySQLHandlerFactory(IServer & server_)
}
catch (...)
{
LOG_INFO(log, "Failed to create SSL context. SSL will be disabled. Error: " << getCurrentExceptionMessage(false));
LOG_TRACE(log, "Failed to create SSL context. SSL will be disabled. Error: " << getCurrentExceptionMessage(false));
ssl_enabled = false;
}
#endif
@@ -47,7 +47,7 @@ MySQLHandlerFactory::MySQLHandlerFactory(IServer & server_)
}
catch (...)
{
LOG_WARNING(log, "Failed to read RSA keys. Error: " << getCurrentExceptionMessage(false));
LOG_TRACE(log, "Failed to read RSA key pair from server certificate. Error: " << getCurrentExceptionMessage(false));
generateRSAKeys();
}
#endif
@@ -104,7 +104,7 @@ void MySQLHandlerFactory::readRSAKeys()
void MySQLHandlerFactory::generateRSAKeys()
{
LOG_INFO(log, "Generating new RSA key.");
LOG_TRACE(log, "Generating new RSA key pair.");
public_key.reset(RSA_new());
if (!public_key)
throw Exception("Failed to allocate RSA key. Error: " + getOpenSSLErrors(), ErrorCodes::OPENSSL_ERROR);


@@ -436,8 +436,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
main_config_zk_changed_event,
[&](ConfigurationPtr config)
{
setTextLog(global_context->getTextLog());
buildLoggers(*config, logger());
// FIXME logging-related things need synchronization -- see the 'Logger * log' saved
// in a lot of places. For now, disable updating log configuration without server restart.
//setTextLog(global_context->getTextLog());
//buildLoggers(*config, logger());
global_context->setClustersConfig(config);
global_context->setMacros(std::make_unique<Macros>(*config, "macros"));
@@ -862,6 +864,9 @@ int Server::main(const std::vector<std::string> & /*args*/)
for (auto & server : servers)
server->start();
setTextLog(global_context->getTextLog());
buildLoggers(config(), logger());
main_config_reloader->start();
users_config_reloader->start();
if (dns_cache_updater)


@@ -993,7 +993,7 @@ bool TCPHandler::receiveData(bool scalar)
if (!(storage = query_context->tryGetExternalTable(name)))
{
NamesAndTypesList columns = block.getNamesAndTypesList();
storage = StorageMemory::create("_external", name, ColumnsDescription{columns}, ConstraintsDescription{});
storage = StorageMemory::create(StorageID("_external", name), ColumnsDescription{columns}, ConstraintsDescription{});
storage->startup();
query_context->addExternalTable(name, storage);
}


@@ -487,10 +487,17 @@ void LogisticRegression::compute(
size_t row_num)
{
Float64 derivative = bias;
std::vector<Float64> values(weights.size());
for (size_t i = 0; i < weights.size(); ++i)
{
auto value = (*columns[i]).getFloat64(row_num);
derivative += weights[i] * value;
values[i] = (*columns[i]).getFloat64(row_num);
}
for (size_t i = 0; i < weights.size(); ++i)
{
derivative += weights[i] * values[i];
}
derivative *= target;
derivative = exp(derivative);
@@ -498,8 +505,7 @@ void LogisticRegression::compute(
batch_gradient[weights.size()] += target / (derivative + 1);
for (size_t i = 0; i < weights.size(); ++i)
{
auto value = (*columns[i]).getFloat64(row_num);
batch_gradient[i] += target * value / (derivative + 1) - 2 * l2_reg_coef * weights[i];
batch_gradient[i] += target * values[i] / (derivative + 1) - 2 * l2_reg_coef * weights[i];
}
}
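Note (illustrative, not part of the diff): for labels y in {-1, +1} and margin z = w·x + b, the accumulated terms are the gradient of the L2-regularized log-likelihood of logistic regression:

\frac{\partial}{\partial w_i}\Big[\log\sigma(y z) - \lambda \lVert w \rVert^2\Big] = \frac{y\,x_i}{1 + e^{y z}} - 2\lambda w_i

which matches derivative = exp(target * (w·x + bias)) and batch_gradient[i] += target * values[i] / (derivative + 1) - 2 * l2_reg_coef * weights[i] above; caching values[] first avoids a second round of per-row getFloat64 calls.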
@@ -558,18 +564,25 @@ void LinearRegression::compute(
size_t row_num)
{
Float64 derivative = (target - bias);
std::vector<Float64> values(weights.size());
for (size_t i = 0; i < weights.size(); ++i)
{
auto value = (*columns[i]).getFloat64(row_num);
derivative -= weights[i] * value;
values[i] = (*columns[i]).getFloat64(row_num);
}
for (size_t i = 0; i < weights.size(); ++i)
{
derivative -= weights[i] * values[i];
}
derivative *= 2;
batch_gradient[weights.size()] += derivative;
for (size_t i = 0; i < weights.size(); ++i)
{
auto value = (*columns[i]).getFloat64(row_num);
batch_gradient[i] += derivative * value - 2 * l2_reg_coef * weights[i];
batch_gradient[i] += derivative * values[i] - 2 * l2_reg_coef * weights[i];
}
}
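Note (illustrative, not part of the diff): with the same notation, LinearRegression accumulates the gradient of the negated L2-regularized squared error:

\frac{\partial}{\partial w_i}\Big[-(y - z)^2 - \lambda \lVert w \rVert^2\Big] = 2\,(y - z)\,x_i - 2\lambda w_i

hence derivative = 2 * (target - bias - w·x) and batch_gradient[i] += derivative * values[i] - 2 * l2_reg_coef * weights[i].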


@@ -217,7 +217,7 @@ UInt64 ColumnVector<T>::get64(size_t n) const
}
template <typename T>
Float64 ColumnVector<T>::getFloat64(size_t n) const
inline Float64 ColumnVector<T>::getFloat64(size_t n) const
{
return static_cast<Float64>(data[n]);
}


@@ -387,7 +387,6 @@ namespace ErrorCodes
extern const int PTHREAD_ERROR = 411;
extern const int NETLINK_ERROR = 412;
extern const int CANNOT_SET_SIGNAL_HANDLER = 413;
extern const int CANNOT_READLINE = 414;
extern const int ALL_REPLICAS_LOST = 415;
extern const int REPLICA_STATUS_CHANGED = 416;
extern const int EXPECTED_ALL_OR_ANY = 417;


@@ -160,7 +160,7 @@ void ExternalTablesHandler::handlePart(const Poco::Net::MessageHeader & header,
/// Create table
NamesAndTypesList columns = sample_block.getNamesAndTypesList();
StoragePtr storage = StorageMemory::create("_external", data.second, ColumnsDescription{columns}, ConstraintsDescription{});
StoragePtr storage = StorageMemory::create(StorageID("_external", data.second), ColumnsDescription{columns}, ConstraintsDescription{});
storage->startup();
context.addExternalTable(data.second, storage);
BlockOutputStreamPtr output = storage->write(ASTPtr(), context);


@@ -1030,6 +1030,7 @@ public:
LOG_TRACE(log, "Authentication method match.");
}
bool sent_public_key = false;
if (auth_response == "\1")
{
LOG_TRACE(log, "Client requests public key.");
@@ -1050,6 +1051,7 @@ public:
AuthMoreData data(pem);
packet_sender->sendPacket(data, true);
sent_public_key = true;
AuthSwitchResponse response;
packet_sender->receivePacket(response);
@@ -1069,13 +1071,15 @@ public:
*/
if (!is_secure_connection && !auth_response->empty() && auth_response != String("\0", 1))
{
LOG_TRACE(log, "Received nonempty password");
LOG_TRACE(log, "Received nonempty password.");
auto ciphertext = reinterpret_cast<unsigned char *>(auth_response->data());
unsigned char plaintext[RSA_size(&private_key)];
int plaintext_size = RSA_private_decrypt(auth_response->size(), ciphertext, plaintext, &private_key, RSA_PKCS1_OAEP_PADDING);
if (plaintext_size == -1)
{
if (!sent_public_key)
LOG_WARNING(log, "Client could have encrypted the password with a different public key, since it didn't request one from the server.");
throw Exception("Failed to decrypt auth data. Error: " + getOpenSSLErrors(), ErrorCodes::OPENSSL_ERROR);
}


@@ -64,14 +64,14 @@ struct Settings : public SettingsCollection<Settings>
M(SettingSeconds, send_timeout, DBMS_DEFAULT_SEND_TIMEOUT_SEC, "", 0) \
M(SettingSeconds, tcp_keep_alive_timeout, 0, "The time in seconds the connection needs to remain idle before TCP starts sending keepalive probes", 0) \
M(SettingMilliseconds, queue_max_wait_ms, 0, "The wait time in the request queue, if the number of concurrent requests exceeds the maximum.", 0) \
M(SettingMilliseconds, connection_pool_max_wait_ms, 0, "The wait time when connection pool is full.", 0) \
M(SettingMilliseconds, connection_pool_max_wait_ms, 0, "The wait time when the connection pool is full.", 0) \
M(SettingMilliseconds, replace_running_query_max_wait_ms, 5000, "The wait time for running query with the same query_id to finish when setting 'replace_running_query' is active.", 0) \
M(SettingMilliseconds, kafka_max_wait_ms, 5000, "The wait time for reading from Kafka before retry.", 0) \
M(SettingUInt64, poll_interval, DBMS_DEFAULT_POLL_INTERVAL, "Block at the query wait loop on the server for the specified number of seconds.", 0) \
M(SettingUInt64, idle_connection_timeout, 3600, "Close idle TCP connections after specified number of seconds.", 0) \
M(SettingUInt64, distributed_connections_pool_size, DBMS_DEFAULT_DISTRIBUTED_CONNECTIONS_POOL_SIZE, "Maximum number of connections with one remote server in the pool.", 0) \
M(SettingUInt64, connections_with_failover_max_tries, DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES, "The maximum number of attempts to connect to replicas.", 0) \
M(SettingUInt64, s3_min_upload_part_size, 512*1024*1024, "The mininum size of part to upload during multipart upload to S3.", 0) \
M(SettingUInt64, s3_min_upload_part_size, 512*1024*1024, "The minimum size of part to upload during multipart upload to S3.", 0) \
M(SettingBool, extremes, false, "Calculate minimums and maximums of the result columns. They can be output in JSON-formats.", IMPORTANT) \
M(SettingBool, use_uncompressed_cache, true, "Whether to use the cache of uncompressed blocks.", 0) \
M(SettingBool, replace_running_query, false, "Whether the running request should be canceled with the same id as the new one.", 0) \
@@ -183,8 +183,8 @@ struct Settings : public SettingsCollection<Settings>
M(SettingBool, input_format_tsv_empty_as_default, false, "Treat empty fields in TSV input as default values.", 0) \
M(SettingBool, input_format_null_as_default, false, "For text input formats initialize null fields with default values if data type of this field is not nullable", 0) \
\
M(SettingBool, input_format_values_interpret_expressions, true, "For Values format: if field could not be parsed by streaming parser, run SQL parser and try to interpret it as SQL expression.", 0) \
M(SettingBool, input_format_values_deduce_templates_of_expressions, true, "For Values format: if field could not be parsed by streaming parser, run SQL parser, deduce template of the SQL expression, try to parse all rows using template and then interpret expression for all rows.", 0) \
M(SettingBool, input_format_values_interpret_expressions, true, "For Values format: if the field could not be parsed by streaming parser, run SQL parser and try to interpret it as SQL expression.", 0) \
M(SettingBool, input_format_values_deduce_templates_of_expressions, true, "For Values format: if the field could not be parsed by streaming parser, run SQL parser, deduce template of the SQL expression, try to parse all rows using template and then interpret expression for all rows.", 0) \
M(SettingBool, input_format_values_accurate_types_of_literals, true, "For Values format: when parsing and interpreting expressions using template, check actual type of literal to avoid possible overflow and precision issues.", 0) \
\
M(SettingBool, output_format_json_quote_64bit_integers, true, "Controls quoting of 64-bit integers in JSON output format.", 0) \
@@ -212,7 +212,7 @@ struct Settings : public SettingsCollection<Settings>
M(SettingBool, join_use_nulls, 0, "Use NULLs for non-joined rows of outer JOINs for types that can be inside Nullable. If false, use default value of corresponding columns data type.", IMPORTANT) \
\
M(SettingJoinStrictness, join_default_strictness, JoinStrictness::ALL, "Set default strictness in JOIN query. Possible values: empty string, 'ANY', 'ALL'. If empty, query without strictness will throw exception.", 0) \
M(SettingBool, any_join_distinct_right_table_keys, false, "Enable old ANY JOIN logic with many-to-one left-to-right table keys mapping for all ANY JOINs. It leads to confusing not equal results for 't1 ANY LEFT JOIN t2' and 't2 ANY RIGHT JOIN t1'. ANY RIGHT JOIN needs one-to-many keys maping to be consistent with LEFT one.", IMPORTANT) \
M(SettingBool, any_join_distinct_right_table_keys, false, "Enable old ANY JOIN logic with many-to-one left-to-right table keys mapping for all ANY JOINs. It leads to confusing not equal results for 't1 ANY LEFT JOIN t2' and 't2 ANY RIGHT JOIN t1'. ANY RIGHT JOIN needs one-to-many keys mapping to be consistent with LEFT one.", IMPORTANT) \
\
M(SettingUInt64, preferred_block_size_bytes, 1000000, "", 0) \
\
@@ -249,8 +249,8 @@ struct Settings : public SettingsCollection<Settings>
M(SettingBool, empty_result_for_aggregation_by_empty_set, false, "Return empty result when aggregating without keys on empty set.", 0) \
M(SettingBool, allow_distributed_ddl, true, "If it is set to true, then a user is allowed to executed distributed DDL queries.", 0) \
M(SettingUInt64, odbc_max_field_size, 1024, "Max size of filed can be read from ODBC dictionary. Long strings are truncated.", 0) \
M(SettingUInt64, query_profiler_real_time_period_ns, 1000000000, "Highly experimental. Period for real clock timer of query profiler (in nanoseconds). Set 0 value to turn off real clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
M(SettingUInt64, query_profiler_cpu_time_period_ns, 1000000000, "Highly experimental. Period for CPU clock timer of query profiler (in nanoseconds). Set 0 value to turn off CPU clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
M(SettingUInt64, query_profiler_real_time_period_ns, 1000000000, "Period for real clock timer of query profiler (in nanoseconds). Set 0 value to turn off the real clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
M(SettingUInt64, query_profiler_cpu_time_period_ns, 1000000000, "Period for CPU clock timer of query profiler (in nanoseconds). Set 0 value to turn off the CPU clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
\
\
/** Limits during query execution are part of the settings. \
@@ -310,9 +310,9 @@ struct Settings : public SettingsCollection<Settings>
M(SettingBool, join_any_take_last_row, false, "When disabled (default) ANY JOIN will take the first found row for a key. When enabled, it will take the last row seen if there are multiple rows for the same key.", IMPORTANT) \
M(SettingBool, partial_merge_join, false, "Use partial merge join instead of hash join for LEFT and INNER JOINs.", 0) \
M(SettingBool, partial_merge_join_optimizations, false, "Enable optimizations in partial merge join", 0) \
M(SettingUInt64, default_max_bytes_in_join, 100000000, "Maximum size of right-side table if limit's required but max_bytes_in_join is not set.", 0) \
M(SettingUInt64, default_max_bytes_in_join, 100000000, "Maximum size of right-side table if limit is required but max_bytes_in_join is not set.", 0) \
M(SettingUInt64, partial_merge_join_rows_in_right_blocks, 10000, "Split right-hand joining data in blocks of specified size. It's a portion of data indexed by min-max values and possibly unloaded on disk.", 0) \
M(SettingUInt64, partial_merge_join_rows_in_left_blocks, 10000, "Group left-hand joining data in bigger blocks. Setting it to a bigger value increase JOIN performance and memory usage.", 0) \
M(SettingUInt64, partial_merge_join_rows_in_left_blocks, 10000, "Group left-hand joining data in bigger blocks. Setting it to a bigger value increases JOIN performance and memory usage.", 0) \
\
M(SettingUInt64, max_rows_to_transfer, 0, "Maximum size (in rows) of the transmitted external table obtained when the GLOBAL IN/JOIN section is executed.", 0) \
M(SettingUInt64, max_bytes_to_transfer, 0, "Maximum size (in uncompressed bytes) of the transmitted external table obtained when the GLOBAL IN/JOIN section is executed.", 0) \
@@ -371,7 +371,7 @@ struct Settings : public SettingsCollection<Settings>
M(SettingBool, allow_drop_detached, false, "Allow ALTER TABLE ... DROP DETACHED PART[ITION] ... queries", 0) \
\
M(SettingSeconds, distributed_replica_error_half_life, DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_DECREASE_ERROR_PERIOD, "Time period reduces replica error counter by 2 times.", 0) \
M(SettingUInt64, distributed_replica_error_cap, DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT, "Max number of errors per replica, prevents piling up increadible amount of errors if replica was offline for some time and allows it to be reconsidered in a shorter amount of time.", 0) \
M(SettingUInt64, distributed_replica_error_cap, DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT, "Max number of errors per replica, prevents piling up an incredible amount of errors if replica was offline for some time and allows it to be reconsidered in a shorter amount of time.", 0) \
\
M(SettingBool, allow_experimental_live_view, false, "Enable LIVE VIEW. Not mature enough.", 0) \
M(SettingSeconds, live_view_heartbeat_interval, DEFAULT_LIVE_VIEW_HEARTBEAT_INTERVAL_SEC, "The heartbeat interval in seconds to indicate live query is alive.", 0) \
@@ -394,6 +394,7 @@ struct Settings : public SettingsCollection<Settings>
M(SettingBool, allow_experimental_data_skipping_indices, true, "Obsolete setting, does nothing. Will be removed after 2020-05-31", 0) \
M(SettingBool, merge_tree_uniform_read_distribution, true, "Obsolete setting, does nothing. Will be removed after 2020-05-20", 0) \
M(SettingUInt64, mark_cache_min_lifetime, 0, "Obsolete setting, does nothing. Will be removed after 2020-05-31", 0) \
M(SettingUInt64, max_parser_depth, 1000, "Maximum parser depth.", 0) \
DECLARE_SETTINGS_COLLECTION(LIST_OF_SETTINGS)


@@ -47,7 +47,8 @@ std::ostream & operator<<(std::ostream & stream, const IDataType & what)
std::ostream & operator<<(std::ostream & stream, const IStorage & what)
{
stream << "IStorage(name = " << what.getName() << ", tableName = " << what.getTableName() << ") {"
auto table_id = what.getStorageID();
stream << "IStorage(name = " << what.getName() << ", tableName = " << table_id.table_name << ") {"
<< what.getColumns().getAllPhysical().toString() << "}";
return stream;
}


@@ -18,7 +18,7 @@ namespace DB
{
PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream(
const String & database, const String & table, const StoragePtr & storage_,
const StoragePtr & storage_,
const Context & context_, const ASTPtr & query_ptr_, bool no_destination)
: storage(storage_), context(context_), query_ptr(query_ptr_)
{
@@ -32,47 +32,44 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream(
/// Moreover, deduplication for AggregatingMergeTree children could produce false positives due to low size of inserting blocks
bool disable_deduplication_for_children = !no_destination && storage->supportsDeduplication();
if (!table.empty())
auto table_id = storage->getStorageID();
Dependencies dependencies = context.getDependencies(table_id);
/// We need special context for materialized views insertions
if (!dependencies.empty())
{
Dependencies dependencies = context.getDependencies(database, table);
views_context = std::make_unique<Context>(context);
// Do not deduplicate insertions into MV if the main insertion is Ok
if (disable_deduplication_for_children)
views_context->getSettingsRef().insert_deduplicate = false;
}
/// We need special context for materialized views insertions
if (!dependencies.empty())
for (const auto & database_table : dependencies)
{
auto dependent_table = context.getTable(database_table);
ASTPtr query;
BlockOutputStreamPtr out;
if (auto * materialized_view = dynamic_cast<const StorageMaterializedView *>(dependent_table.get()))
{
views_context = std::make_unique<Context>(context);
// Do not deduplicate insertions into MV if the main insertion is Ok
if (disable_deduplication_for_children)
views_context->getSettingsRef().insert_deduplicate = false;
StoragePtr inner_table = materialized_view->getTargetTable();
auto inner_table_id = inner_table->getStorageID();
query = materialized_view->getInnerQuery();
std::unique_ptr<ASTInsertQuery> insert = std::make_unique<ASTInsertQuery>();
insert->database = inner_table_id.database_name;
insert->table = inner_table_id.table_name;
ASTPtr insert_query_ptr(insert.release());
InterpreterInsertQuery interpreter(insert_query_ptr, *views_context);
BlockIO io = interpreter.execute();
out = io.out;
}
else if (dynamic_cast<const StorageLiveView *>(dependent_table.get()))
out = std::make_shared<PushingToViewsBlockOutputStream>(dependent_table, *views_context, ASTPtr(), true);
else
out = std::make_shared<PushingToViewsBlockOutputStream>(dependent_table, *views_context, ASTPtr());
for (const auto & database_table : dependencies)
{
auto dependent_table = context.getTable(database_table.first, database_table.second);
ASTPtr query;
BlockOutputStreamPtr out;
if (auto * materialized_view = dynamic_cast<const StorageMaterializedView *>(dependent_table.get()))
{
StoragePtr inner_table = materialized_view->getTargetTable();
query = materialized_view->getInnerQuery();
std::unique_ptr<ASTInsertQuery> insert = std::make_unique<ASTInsertQuery>();
insert->database = inner_table->getDatabaseName();
insert->table = inner_table->getTableName();
ASTPtr insert_query_ptr(insert.release());
InterpreterInsertQuery interpreter(insert_query_ptr, *views_context);
BlockIO io = interpreter.execute();
out = io.out;
}
else if (dynamic_cast<const StorageLiveView *>(dependent_table.get()))
out = std::make_shared<PushingToViewsBlockOutputStream>(
database_table.first, database_table.second, dependent_table, *views_context, ASTPtr(), true);
else
out = std::make_shared<PushingToViewsBlockOutputStream>(
database_table.first, database_table.second, dependent_table, *views_context, ASTPtr());
views.emplace_back(ViewInfo{std::move(query), database_table.first, database_table.second, std::move(out)});
}
views.emplace_back(ViewInfo{std::move(query), database_table, std::move(out)});
}
/* Do not push to destination table if the flag is set */
@@ -161,7 +158,7 @@ void PushingToViewsBlockOutputStream::writePrefix()
}
catch (Exception & ex)
{
ex.addMessage("while write prefix to view " + view.database + "." + view.table);
ex.addMessage("while write prefix to view " + view.table_id.getNameForLogs());
throw;
}
}
@@ -180,7 +177,7 @@ void PushingToViewsBlockOutputStream::writeSuffix()
}
catch (Exception & ex)
{
ex.addMessage("while write prefix to view " + view.database + "." + view.table);
ex.addMessage("while write prefix to view " + view.table_id.getNameForLogs());
throw;
}
}
@ -223,7 +220,7 @@ void PushingToViewsBlockOutputStream::process(const Block & block, size_t view_n
/// InterpreterSelectQuery will do processing of alias columns.
Context local_context = *views_context;
local_context.addViewSource(
StorageValues::create(storage->getDatabaseName(), storage->getTableName(), storage->getColumns(),
StorageValues::create(storage->getStorageID(), storage->getColumns(),
block));
select.emplace(view.query, local_context, SelectQueryOptions());
in = std::make_shared<MaterializingBlockInputStream>(select->execute().in);
@ -250,7 +247,7 @@ void PushingToViewsBlockOutputStream::process(const Block & block, size_t view_n
}
catch (Exception & ex)
{
ex.addMessage("while pushing to view " + backQuoteIfNeed(view.database) + "." + backQuoteIfNeed(view.table));
ex.addMessage("while pushing to view " + view.table_id.getNameForLogs());
throw;
}
}
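As a side note on the constructor above: the deduplication handling is just "copy the query context and flip one setting for the child inserts". A minimal, self-contained sketch of that idea — the Context and Settings types here are stand-ins for the real ClickHouse classes, and makeViewsContext is a hypothetical helper:

#include <iostream>

// Stand-ins; the real Context/Settings live in Interpreters/Context.h.
struct Settings { bool insert_deduplicate = true; };
struct Context
{
    Settings settings;
    Settings & getSettingsRef() { return settings; }
};

// Copy the parent context; if the main insertion is already deduplicated,
// turn deduplication off for pushes into dependent views so retried child
// inserts are not silently dropped.
Context makeViewsContext(const Context & parent, bool disable_deduplication_for_children)
{
    Context views_context = parent; // copy, the parent context stays untouched
    if (disable_deduplication_for_children)
        views_context.getSettingsRef().insert_deduplicate = false;
    return views_context;
}

int main()
{
    Context parent;
    Context views = makeViewsContext(parent, /*disable_deduplication_for_children=*/ true);
    std::cout << views.getSettingsRef().insert_deduplicate << '\n'; // prints 0
}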

View File

@ -17,8 +17,7 @@ class ReplicatedMergeTreeBlockOutputStream;
class PushingToViewsBlockOutputStream : public IBlockOutputStream
{
public:
PushingToViewsBlockOutputStream(
const String & database, const String & table, const StoragePtr & storage_,
PushingToViewsBlockOutputStream(const StoragePtr & storage_,
const Context & context_, const ASTPtr & query_ptr_, bool no_destination = false);
Block getHeader() const override;
@ -39,8 +38,7 @@ private:
struct ViewInfo
{
ASTPtr query;
String database;
String table;
StorageID table_id;
BlockOutputStreamPtr out;
};

View File

@ -1,6 +1,7 @@
#pragma once
#include <DataTypes/IDataTypeDummy.h>
#include <Columns/ColumnSet.h>
namespace DB
@ -18,6 +19,9 @@ public:
bool equals(const IDataType & rhs) const override { return typeid(rhs) == typeid(*this); }
bool isParametric() const override { return true; }
// Used for expressions analysis.
MutableColumnPtr createColumn() const override { return ColumnSet::create(0, nullptr); }
// Used only for debugging, making it DUMPABLE
Field getDefault() const override { return Tuple(); }
};

View File

@ -52,7 +52,7 @@ Tables DatabaseDictionary::listTables(const Context & context, const FilterByNam
auto dict_name = dict_ptr->getName();
const DictionaryStructure & dictionary_structure = dict_ptr->getStructure();
auto columns = StorageDictionary::getNamesAndTypes(dictionary_structure);
tables[dict_name] = StorageDictionary::create(getDatabaseName(), dict_name, ColumnsDescription{columns}, context, true, dict_name);
tables[dict_name] = StorageDictionary::create(StorageID(getDatabaseName(), dict_name), ColumnsDescription{columns}, context, true, dict_name);
}
}
return tables;
@ -74,7 +74,7 @@ StoragePtr DatabaseDictionary::tryGetTable(
{
const DictionaryStructure & dictionary_structure = dict_ptr->getStructure();
auto columns = StorageDictionary::getNamesAndTypes(dictionary_structure);
return StorageDictionary::create(getDatabaseName(), table_name, ColumnsDescription{columns}, context, true, table_name);
return StorageDictionary::create(StorageID(getDatabaseName(), table_name), ColumnsDescription{columns}, context, true, table_name);
}
return {};
@ -109,11 +109,12 @@ ASTPtr DatabaseDictionary::getCreateTableQueryImpl(const Context & context,
buffer << ") Engine = Dictionary(" << backQuoteIfNeed(table_name) << ")";
}
auto settings = context.getSettingsRef();
ParserCreateQuery parser;
const char * pos = query.data();
std::string error_message;
auto ast = tryParseQuery(parser, pos, pos + query.size(), error_message,
/* hilite = */ false, "", /* allow_multi_statements = */ false, 0);
/* hilite = */ false, "", /* allow_multi_statements = */ false, 0, settings.max_parser_depth);
if (!ast && throw_on_error)
throw Exception(error_message, ErrorCodes::SYNTAX_ERROR);
@ -121,15 +122,16 @@ ASTPtr DatabaseDictionary::getCreateTableQueryImpl(const Context & context,
return ast;
}
ASTPtr DatabaseDictionary::getCreateDatabaseQuery() const
ASTPtr DatabaseDictionary::getCreateDatabaseQuery(const Context & context) const
{
String query;
{
WriteBufferFromString buffer(query);
buffer << "CREATE DATABASE " << backQuoteIfNeed(database_name) << " ENGINE = Dictionary";
}
auto settings = context.getSettingsRef();
ParserCreateQuery parser;
return parseQuery(parser, query.data(), query.data() + query.size(), "", 0);
return parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth);
}
void DatabaseDictionary::shutdown()

View File

@ -41,7 +41,7 @@ public:
bool empty(const Context & context) const override;
ASTPtr getCreateDatabaseQuery() const override;
ASTPtr getCreateDatabaseQuery(const Context & context) const override;
void shutdown() override;

View File

@ -122,7 +122,7 @@ StoragePtr DatabaseLazy::tryGetTable(
std::lock_guard lock(mutex);
auto it = tables_cache.find(table_name);
if (it == tables_cache.end())
throw Exception("Table " + backQuote(getDatabaseName()) + "." + backQuote(table_name) + " doesn't exist.", ErrorCodes::UNKNOWN_TABLE);
return {};
if (it->second.table)
{
@ -230,7 +230,7 @@ StoragePtr DatabaseLazy::loadTable(const Context & context, const String & table
StoragePtr table;
Context context_copy(context); /// some tables can change context, but not LogTables
auto ast = parseQueryFromMetadata(table_metadata_path, /*throw_on_error*/ true, /*remove_empty*/false);
auto ast = parseQueryFromMetadata(context, table_metadata_path, /*throw_on_error*/ true, /*remove_empty*/false);
if (ast)
{
auto & ast_create = ast->as<const ASTCreateQuery &>();

View File

@ -27,7 +27,7 @@ void DatabaseMemory::removeTable(
detachTable(table_name);
}
ASTPtr DatabaseMemory::getCreateDatabaseQuery() const
ASTPtr DatabaseMemory::getCreateDatabaseQuery(const Context & /*context*/) const
{
auto create_query = std::make_shared<ASTCreateQuery>();
create_query->database = database_name;

View File

@ -31,7 +31,7 @@ public:
const Context & context,
const String & table_name) override;
ASTPtr getCreateDatabaseQuery() const override;
ASTPtr getCreateDatabaseQuery(const Context & /*context*/) const override;
};
}

View File

@ -132,9 +132,9 @@ static ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr
{
/// init create query.
create_table_query->table = storage->getTableName();
create_table_query->database = storage->getDatabaseName();
auto table_id = storage->getStorageID();
create_table_query->table = table_id.table_name;
create_table_query->database = table_id.database_name;
for (const auto & column_type_and_name : storage->getColumns().getOrdinary())
{
@ -144,7 +144,7 @@ static ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr
columns_expression_list->children.emplace_back(column_declaration);
}
auto mysql_table_name = std::make_shared<ASTLiteral>(storage->getTableName());
auto mysql_table_name = std::make_shared<ASTLiteral>(table_id.table_name);
auto storage_engine_arguments = table_storage_define->as<ASTStorage>()->engine->arguments;
storage_engine_arguments->children.insert(storage_engine_arguments->children.begin() + 2, mysql_table_name);
}
@ -181,7 +181,7 @@ time_t DatabaseMySQL::getObjectMetadataModificationTime(const String & table_nam
return time_t(local_tables_cache[table_name].first);
}
ASTPtr DatabaseMySQL::getCreateDatabaseQuery() const
ASTPtr DatabaseMySQL::getCreateDatabaseQuery(const Context & /*context*/) const
{
const auto & create_query = std::make_shared<ASTCreateQuery>();
create_query->database = database_name;
@ -239,7 +239,7 @@ void DatabaseMySQL::fetchLatestTablesStructureIntoCache(const std::map<String, U
}
local_tables_cache[table_name] = std::make_pair(table_modification_time, StorageMySQL::create(
database_name, table_name, std::move(mysql_pool), database_name_in_mysql, table_name,
StorageID(database_name, table_name), std::move(mysql_pool), database_name_in_mysql, table_name,
false, "", ColumnsDescription{columns_name_and_type}, ConstraintsDescription{}, global_context));
}
}

View File

@ -32,7 +32,7 @@ public:
DatabaseTablesIteratorPtr getTablesIterator(const Context & context, const FilterByNameFunction & filter_by_table_name = {}) override;
ASTPtr getCreateDatabaseQuery() const override;
ASTPtr getCreateDatabaseQuery(const Context & /*context*/) const override;
bool isTableExist(const Context & context, const String & name) const override;

View File

@ -68,9 +68,12 @@ std::pair<String, StoragePtr> createTableFromAST(
ast_create_query.table,
StorageFactory::instance().get(
ast_create_query,
table_data_path_relative, ast_create_query.table, database_name, context, context.getGlobalContext(),
columns, constraints,
true, has_force_restore_data_flag)
table_data_path_relative,
context,
context.getGlobalContext(),
columns,
constraints,
has_force_restore_data_flag)
};
}
@ -211,7 +214,7 @@ void DatabaseOnDisk::renameTable(
if (!table)
throw Exception("Table " + backQuote(getDatabaseName()) + "." + backQuote(table_name) + " doesn't exist.", ErrorCodes::UNKNOWN_TABLE);
ASTPtr ast = parseQueryFromMetadata(getObjectMetadataPath(table_name));
ASTPtr ast = parseQueryFromMetadata(context, getObjectMetadataPath(table_name));
if (!ast)
throw Exception("There is no metadata file for table " + backQuote(table_name) + ".", ErrorCodes::FILE_DOESNT_EXIST);
auto & create = ast->as<ASTCreateQuery &>();
@ -244,7 +247,7 @@ ASTPtr DatabaseOnDisk::getCreateTableQueryImpl(const Context & context, const St
ASTPtr ast;
auto table_metadata_path = getObjectMetadataPath(table_name);
ast = getCreateQueryFromMetadata(table_metadata_path, throw_on_error);
ast = getCreateQueryFromMetadata(context, table_metadata_path, throw_on_error);
if (!ast && throw_on_error)
{
/// Handle system.* tables for which there are no table.sql files.
@ -260,20 +263,21 @@ ASTPtr DatabaseOnDisk::getCreateTableQueryImpl(const Context & context, const St
return ast;
}
ASTPtr DatabaseOnDisk::getCreateDatabaseQuery() const
ASTPtr DatabaseOnDisk::getCreateDatabaseQuery(const Context & context) const
{
ASTPtr ast;
auto settings = context.getSettingsRef();
auto metadata_dir_path = getMetadataPath();
auto database_metadata_path = metadata_dir_path.substr(0, metadata_dir_path.size() - 1) + ".sql";
ast = getCreateQueryFromMetadata(database_metadata_path, true);
ast = getCreateQueryFromMetadata(context, database_metadata_path, true);
if (!ast)
{
/// Handle databases (such as default) for which there are no database.sql files.
/// If database.sql doesn't exist, then engine is Ordinary
String query = "CREATE DATABASE " + backQuoteIfNeed(getDatabaseName()) + " ENGINE = Ordinary";
ParserCreateQuery parser;
ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0);
ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth);
}
return ast;
@ -353,7 +357,7 @@ void DatabaseOnDisk::iterateMetadataFiles(const Context & context, const Iterati
}
}
ASTPtr DatabaseOnDisk::parseQueryFromMetadata(const String & metadata_file_path, bool throw_on_error /*= true*/, bool remove_empty /*= false*/) const
ASTPtr DatabaseOnDisk::parseQueryFromMetadata(const Context & context, const String & metadata_file_path, bool throw_on_error /*= true*/, bool remove_empty /*= false*/) const
{
String query;
@ -380,11 +384,12 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata(const String & metadata_file_path,
return nullptr;
}
auto settings = context.getSettingsRef();
ParserCreateQuery parser;
const char * pos = query.data();
std::string error_message;
auto ast = tryParseQuery(parser, pos, pos + query.size(), error_message, /* hilite = */ false,
"in file " + getMetadataPath(), /* allow_multi_statements = */ false, 0);
"in file " + getMetadataPath(), /* allow_multi_statements = */ false, 0, settings.max_parser_depth);
if (!ast && throw_on_error)
throw Exception(error_message, ErrorCodes::SYNTAX_ERROR);
@ -394,9 +399,9 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata(const String & metadata_file_path,
return ast;
}
ASTPtr DatabaseOnDisk::getCreateQueryFromMetadata(const String & database_metadata_path, bool throw_on_error) const
ASTPtr DatabaseOnDisk::getCreateQueryFromMetadata(const Context & context, const String & database_metadata_path, bool throw_on_error) const
{
ASTPtr ast = parseQueryFromMetadata(database_metadata_path, throw_on_error);
ASTPtr ast = parseQueryFromMetadata(context, database_metadata_path, throw_on_error);
if (ast)
{

View File

@ -52,7 +52,7 @@ public:
const String & to_table_name,
TableStructureWriteLockHolder & lock) override;
ASTPtr getCreateDatabaseQuery() const override;
ASTPtr getCreateDatabaseQuery(const Context & context) const override;
void drop(const Context & context) override;
@ -74,8 +74,8 @@ protected:
const String & table_name,
bool throw_on_error) const override;
ASTPtr parseQueryFromMetadata(const String & metadata_file_path, bool throw_on_error = true, bool remove_empty = false) const;
ASTPtr getCreateQueryFromMetadata(const String & metadata_path, bool throw_on_error) const;
ASTPtr parseQueryFromMetadata(const Context & context, const String & metadata_file_path, bool throw_on_error = true, bool remove_empty = false) const;
ASTPtr getCreateQueryFromMetadata(const Context & context, const String & metadata_path, bool throw_on_error) const;
const String metadata_path;

View File

@ -122,12 +122,12 @@ void DatabaseOrdinary::loadStoredObjects(
FileNames file_names;
size_t total_dictionaries = 0;
iterateMetadataFiles(context, [&file_names, &total_dictionaries, this](const String & file_name)
iterateMetadataFiles(context, [&context, &file_names, &total_dictionaries, this](const String & file_name)
{
String full_path = getMetadataPath() + file_name;
try
{
auto ast = parseQueryFromMetadata(full_path, /*throw_on_error*/ true, /*remove_empty*/false);
auto ast = parseQueryFromMetadata(context, full_path, /*throw_on_error*/ true, /*remove_empty*/false);
if (ast)
{
auto * create_query = ast->as<ASTCreateQuery>();

View File

@ -222,7 +222,7 @@ StoragePtr DatabaseWithDictionaries::getDictionaryStorage(const Context & contex
{
const DictionaryStructure & dictionary_structure = dict_ptr->getStructure();
auto columns = StorageDictionary::getNamesAndTypes(dictionary_structure);
return StorageDictionary::create(database_name, table_name, ColumnsDescription{columns}, context, true, dict_name);
return StorageDictionary::create(StorageID(database_name, table_name), ColumnsDescription{columns}, context, true, dict_name);
}
return nullptr;
}
@ -235,7 +235,7 @@ ASTPtr DatabaseWithDictionaries::getCreateDictionaryQueryImpl(
ASTPtr ast;
auto dictionary_metadata_path = getObjectMetadataPath(dictionary_name);
ast = getCreateQueryFromMetadata(dictionary_metadata_path, throw_on_error);
ast = getCreateQueryFromMetadata(context, dictionary_metadata_path, throw_on_error);
if (!ast && throw_on_error)
{
/// Handle system.* tables for which there are no table.sql files.

View File

@ -262,7 +262,7 @@ public:
}
/// Get the CREATE DATABASE query for current database.
virtual ASTPtr getCreateDatabaseQuery() const = 0;
virtual ASTPtr getCreateDatabaseQuery(const Context & /*context*/) const = 0;
/// Get name of database.
String getDatabaseName() const { return database_name; }

View File

@ -10,8 +10,6 @@ namespace DB
{
namespace ErrorCodes
{
extern const int UNKNOWN_ELEMENT_IN_CONFIG;
extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
extern const int FILE_DOESNT_EXIST;
extern const int FILE_ALREADY_EXISTS;
extern const int DIRECTORY_DOESNT_EXIST;

View File

@ -1,19 +1,24 @@
#pragma once
#include <Disks/IDisk.h>
#include <IO/ReadBuffer.h>
#include <IO/WriteBuffer.h>
#include <mutex>
#include <memory>
#include <unordered_map>
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
class ReadBuffer;
class WriteBuffer;
/** Implementation of Disk intended only for testing purposes.
* All filesystem objects are stored in memory and lost on server restart.
*
* NOTE Work in progress. Currently the interface is not viable enough to support MergeTree or even StripeLog tables.
* Please delete this interface if it is not finished by 2020-06-18.
*/
class DiskMemory : public IDisk
{
public:

View File

@ -32,7 +32,6 @@ if (OPENSSL_CRYPTO_LIBRARY)
target_link_libraries(clickhouse_functions PUBLIC ${OPENSSL_CRYPTO_LIBRARY})
endif()
target_include_directories(clickhouse_functions PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/include)
target_include_directories(clickhouse_functions SYSTEM PRIVATE ${DIVIDE_INCLUDE_DIR} ${METROHASH_INCLUDE_DIR} ${SPARSEHASH_INCLUDE_DIR})
if (CONSISTENT_HASHING_INCLUDE_DIR)

View File

@ -282,29 +282,32 @@ template <
typename Op, template <typename, size_t> typename OperationApplierImpl, size_t N = 10>
struct OperationApplier
{
template <typename Columns, typename ResultColumn>
static void apply(Columns & in, ResultColumn & result)
template <typename Columns, typename ResultData>
static void apply(Columns & in, ResultData & result_data, bool use_result_data_as_input = false)
{
while (in.size() > 1)
{
doBatchedApply(in, result->getData());
in.push_back(result.get());
}
if (!use_result_data_as_input)
doBatchedApply<false>(in, result_data);
while (in.size() > 0)
doBatchedApply<true>(in, result_data);
}
template <typename Columns, typename ResultData>
template <bool CarryResult, typename Columns, typename ResultData>
static void NO_INLINE doBatchedApply(Columns & in, ResultData & result_data)
{
if (N > in.size())
{
OperationApplier<Op, OperationApplierImpl, N - 1>::doBatchedApply(in, result_data);
OperationApplier<Op, OperationApplierImpl, N - 1>
::template doBatchedApply<CarryResult>(in, result_data);
return;
}
const OperationApplierImpl<Op, N> operationApplierImpl(in);
size_t i = 0;
for (auto & res : result_data)
res = operationApplierImpl.apply(i++);
if constexpr (CarryResult)
res = Op::apply(res, operationApplierImpl.apply(i++));
else
res = operationApplierImpl.apply(i++);
in.erase(in.end() - N, in.end());
}
@ -312,9 +315,9 @@ struct OperationApplier
template <
typename Op, template <typename, size_t> typename OperationApplierImpl>
struct OperationApplier<Op, OperationApplierImpl, 1>
struct OperationApplier<Op, OperationApplierImpl, 0>
{
template <typename Columns, typename Result>
template <bool, typename Columns, typename Result>
static void NO_INLINE doBatchedApply(Columns &, Result &)
{
throw Exception(
@ -332,7 +335,7 @@ static void executeForTernaryLogicImpl(ColumnRawPtrs arguments, ColumnWithTypeAn
const bool has_consts = extractConstColumnsTernary<Op>(arguments, const_3v_value);
/// If the constant value uniquely determines the result, return it.
if (has_consts && (arguments.empty() || (Op::isSaturable() && Op::isSaturatedValue(const_3v_value))))
if (has_consts && (arguments.empty() || Op::isSaturatedValue(const_3v_value)))
{
result_info.column = ColumnConst::create(
convertFromTernaryData(UInt8Container({const_3v_value}), result_info.type->isNullable()),
@ -341,16 +344,10 @@ static void executeForTernaryLogicImpl(ColumnRawPtrs arguments, ColumnWithTypeAn
return;
}
const auto result_column = ColumnUInt8::create(input_rows_count);
MutableColumnPtr const_column_holder;
if (has_consts)
{
const_column_holder =
convertFromTernaryData(UInt8Container(input_rows_count, const_3v_value), const_3v_value == Ternary::Null);
arguments.push_back(const_column_holder.get());
}
const auto result_column = has_consts ?
ColumnUInt8::create(input_rows_count, const_3v_value) : ColumnUInt8::create(input_rows_count);
OperationApplier<Op, AssociativeGenericApplierImpl>::apply(arguments, result_column);
OperationApplier<Op, AssociativeGenericApplierImpl>::apply(arguments, result_column->getData(), has_consts);
result_info.column = convertFromTernaryData(result_column->getData(), result_info.type->isNullable());
}
@ -425,19 +422,8 @@ static void basicExecuteImpl(ColumnRawPtrs arguments, ColumnWithTypeAndName & re
if (has_consts && Op::apply(const_val, 0) == 0 && Op::apply(const_val, 1) == 1)
has_consts = false;
UInt8ColumnPtrs uint8_args;
auto col_res = ColumnUInt8::create();
UInt8Container & vec_res = col_res->getData();
if (has_consts)
{
vec_res.assign(input_rows_count, const_val);
uint8_args.push_back(col_res.get());
}
else
{
vec_res.resize(input_rows_count);
}
auto col_res = has_consts ?
ColumnUInt8::create(input_rows_count, const_val) : ColumnUInt8::create(input_rows_count);
/// FastPath detection goes in here
if (arguments.size() == (has_consts ? 1 : 2))
@ -452,7 +438,8 @@ static void basicExecuteImpl(ColumnRawPtrs arguments, ColumnWithTypeAndName & re
}
/// Convert all columns to UInt8
Columns converted_columns;
UInt8ColumnPtrs uint8_args;
Columns converted_columns_holder;
for (const IColumn * column : arguments)
{
if (auto uint8_column = checkAndGetColumn<ColumnUInt8>(column))
@ -462,15 +449,11 @@ static void basicExecuteImpl(ColumnRawPtrs arguments, ColumnWithTypeAndName & re
auto converted_column = ColumnUInt8::create(input_rows_count);
convertColumnToUInt8(column, converted_column->getData());
uint8_args.push_back(converted_column.get());
converted_columns.emplace_back(std::move(converted_column));
converted_columns_holder.emplace_back(std::move(converted_column));
}
}
OperationApplier<Op, AssociativeApplierImpl>::apply(uint8_args, col_res);
/// This is possible if there is exactly one non-constant among the arguments, and it is of type UInt8.
if (uint8_args[0] != col_res.get())
vec_res.assign(uint8_args[0]->getData());
OperationApplier<Op, AssociativeApplierImpl>::apply(uint8_args, col_res->getData(), has_consts);
result_info.column = std::move(col_res);
}
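The refactored applier above replaces "append the result column to the inputs" with an explicit carry: once result_data holds a partial value, the compile-time CarryResult flag folds each further batch into it through Op::apply. A minimal sketch of that shape, reduced to batches of one plain std::vector per step (stand-in types, not the real column classes):

#include <cstddef>
#include <cstdint>
#include <vector>

struct OpAnd { static uint8_t apply(uint8_t a, uint8_t b) { return a && b; } };

// Fold the input columns into result_data. When use_result_data_as_input is
// true, the buffer arrives pre-seeded (e.g. with a folded constant) and is
// carried from the very first batch.
template <typename Op>
void fold(std::vector<const std::vector<uint8_t> *> in,
          std::vector<uint8_t> & result_data, bool use_result_data_as_input)
{
    if (!use_result_data_as_input)
    {
        result_data = *in.back(); // seed from one input column, no carry yet
        in.pop_back();
    }
    while (!in.empty())
    {
        const std::vector<uint8_t> & col = *in.back();
        for (size_t i = 0; i < result_data.size(); ++i)
            result_data[i] = Op::apply(result_data[i], col[i]); // carry step
        in.pop_back();
    }
}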

View File

@ -83,12 +83,7 @@ struct XorImpl
static inline constexpr bool isSaturable() { return false; }
static inline constexpr bool isSaturatedValue(bool) { return false; }
/** Considering that CH uses UInt8 for representation of boolean values this function
* returns 255 as "true" but the current implementation of logical functions suggests that
* any nonzero value is "true" as well. Also the current code provides no guarantee
* for "true" to be represented with the value of 1.
*/
static inline constexpr ResultType apply(UInt8 a, UInt8 b) { return (a != b) ? Ternary::True : Ternary::False; }
static inline constexpr ResultType apply(UInt8 a, UInt8 b) { return !!a != !!b; }
static inline constexpr bool specialImplementationForNulls() { return false; }
#if USE_EMBEDDED_COMPILER
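The simplification of XorImpl::apply is also a correctness fix: any nonzero UInt8 counts as true, so with the raw comparison the inputs 1 and 2 — both logically true — would XOR to true. Normalizing each operand with !! first gives the expected result. A standalone check (not ClickHouse code; the old behaviour is simplified to a plain comparison):

#include <cstdint>

static constexpr uint8_t xorOld(uint8_t a, uint8_t b) { return a != b; }     // old form, simplified
static constexpr uint8_t xorNew(uint8_t a, uint8_t b) { return !!a != !!b; } // normalized

// 1 and 2 are both "true", so their XOR must be false.
static_assert(xorOld(1, 2) == 1, "old form wrongly treats true XOR true as true");
static_assert(xorNew(1, 2) == 0, "normalized operands compare as booleans");

int main() {}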

View File

@ -2,7 +2,6 @@ include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake)
add_headers_and_sources(clickhouse_functions_url .)
add_library(clickhouse_functions_url ${clickhouse_functions_url_sources} ${clickhouse_functions_url_headers})
target_link_libraries(clickhouse_functions_url PRIVATE dbms)
target_include_directories(clickhouse_functions_url PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/../include) # ${CMAKE_CURRENT_BINARY_DIR}/include
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL")
# Won't generate debug info for files with heavy template instantiation to achieve faster linking and lower size.

View File

@ -14,6 +14,7 @@ namespace ErrorCodes
{
extern const int SIZES_OF_ARRAYS_DOESNT_MATCH;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int ILLEGAL_COLUMN;
}
/// arrayZip(['a', 'b', 'c'], ['d', 'e', 'f']) = [('a', 'd'), ('b', 'e'), ('c', 'f')]
@ -44,9 +45,8 @@ public:
const DataTypeArray * array_type = checkAndGetDataType<DataTypeArray>(arguments[index].type.get());
if (!array_type)
throw Exception(
"Argument " + toString(index + 1) + " of function must be array. Found " + arguments[0].type->getName() + " instead.",
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception("Argument " + toString(index + 1) + " of function " + getName()
+ " must be array. Found " + arguments[0].type->getName() + " instead.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
arguments_types.emplace_back(array_type->getNestedType());
}
@ -56,26 +56,37 @@ public:
void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t /*input_rows_count*/) override
{
auto first_argument = block.getByPosition(arguments[0]);
const auto & first_array_column = checkAndGetColumn<ColumnArray>(first_argument.column.get());
size_t num_arguments = arguments.size();
Columns res_tuple_columns(arguments.size());
res_tuple_columns[0] = first_array_column->getDataPtr();
ColumnPtr first_array_column;
Columns tuple_columns(num_arguments);
for (size_t index = 1; index < arguments.size(); ++index)
for (size_t i = 0; i < num_arguments; ++i)
{
const auto & argument_type_and_column = block.getByPosition(arguments[index]);
const auto & argument_array_column = checkAndGetColumn<ColumnArray>(argument_type_and_column.column.get());
/// Constant columns cannot be inside a tuple. It's only possible to have a constant tuple as a whole.
ColumnPtr holder = block.getByPosition(arguments[i]).column->convertToFullColumnIfConst();
if (!first_array_column->hasEqualOffsets(*argument_array_column))
throw Exception("The argument 1 and argument " + toString(index + 1) + " of function have different array sizes",
ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH);
const ColumnArray * column_array = checkAndGetColumn<ColumnArray>(holder.get());
res_tuple_columns[index] = argument_array_column->getDataPtr();
if (!column_array)
throw Exception("Argument " + toString(i + 1) + " of function " + getName() + " must be array."
" Found column " + holder->getName() + " instead.", ErrorCodes::ILLEGAL_COLUMN);
if (i == 0)
{
first_array_column = holder;
}
else if (!column_array->hasEqualOffsets(static_cast<const ColumnArray &>(*first_array_column)))
{
throw Exception("The argument 1 and argument " + toString(i + 1) + " of function " + getName() + " have different array sizes",
ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH);
}
tuple_columns[i] = column_array->getDataPtr();
}
block.getByPosition(result).column = ColumnArray::create(
ColumnTuple::create(res_tuple_columns), first_array_column->getOffsetsPtr());
ColumnTuple::create(tuple_columns), static_cast<const ColumnArray &>(*first_array_column).getOffsetsPtr());
}
};
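For reference, the contract the rewritten executeImpl enforces — every argument is an array, all arrays have equal sizes, and the result zips elements into tuples — in a toy std::vector analogue (illustrative only; two arguments instead of N, and plain exceptions instead of ClickHouse error codes):

#include <cstddef>
#include <stdexcept>
#include <tuple>
#include <vector>

template <typename A, typename B>
std::vector<std::tuple<A, B>> arrayZip(const std::vector<A> & a, const std::vector<B> & b)
{
    if (a.size() != b.size())
        throw std::runtime_error("Argument 1 and argument 2 of function arrayZip have different array sizes");
    std::vector<std::tuple<A, B>> res;
    res.reserve(a.size());
    for (size_t i = 0; i < a.size(); ++i)
        res.emplace_back(a[i], b[i]); // [('a','d'), ('b','e'), ('c','f')]
    return res;
}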

View File

@ -0,0 +1,50 @@
#include <ext/bit_cast.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionUnaryArithmetic.h>
namespace DB
{
template <typename A>
struct BitCountImpl
{
using ResultType = UInt8;
static inline ResultType apply(A a)
{
/// We count bits in the value representation in memory. For example, we support floats.
/// We need to avoid sign-extension when converting signed numbers to a larger type. So, uint8_t(-1) has 8 bits set.
if constexpr (std::is_same_v<A, UInt64> || std::is_same_v<A, Int64>)
return __builtin_popcountll(a);
if constexpr (std::is_same_v<A, UInt32> || std::is_same_v<A, Int32> || std::is_unsigned_v<A>)
return __builtin_popcount(a);
else
return __builtin_popcountll(ext::bit_cast<unsigned long long>(a));
}
#if USE_EMBEDDED_COMPILER
static constexpr bool compilable = false;
#endif
};
struct NameBitCount { static constexpr auto name = "bitCount"; };
using FunctionBitCount = FunctionUnaryArithmetic<BitCountImpl, NameBitCount, false /* is injective */>;
/// The function has no ranges of monotonicity.
template <> struct FunctionUnaryArithmeticMonotonicity<NameBitCount>
{
static bool has() { return false; }
static IFunction::Monotonicity get(const Field &, const Field &)
{
return {};
}
};
void registerFunctionBitCount(FunctionFactory & factory)
{
factory.registerFunction<FunctionBitCount>();
}
}
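What BitCountImpl computes, in isolation: the population count of the in-memory representation, so floats are counted by their bit pattern and narrow values are widened without sign-extension. A standalone sketch assuming GCC/Clang builtins, as the diff itself does:

#include <cstdint>
#include <cstring>
#include <iostream>

// Count set bits of the memory representation of a trivially copyable value.
template <typename T>
unsigned bitCount(T value)
{
    unsigned long long bits = 0;
    static_assert(sizeof(T) <= sizeof(bits), "widen via memcpy, never by value conversion");
    std::memcpy(&bits, &value, sizeof(T)); // bit-cast: no sign-extension can happen
    return __builtin_popcountll(bits);
}

int main()
{
    std::cout << bitCount(uint8_t(-1)) << '\n'; // 8, not 64
    std::cout << bitCount(int64_t(-1)) << '\n'; // 64
    std::cout << bitCount(1.0f) << '\n';        // bits of the IEEE-754 pattern
}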

View File

@ -0,0 +1,73 @@
#include <Functions/IFunctionImpl.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/FunctionFactory.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/getLeastSupertype.h>
#include <Core/ColumnNumbers.h>
namespace DB
{
/// ifNotFinite(x, y) is equivalent to isFinite(x) ? x : y.
class FunctionIfNotFinite : public IFunction
{
public:
static constexpr auto name = "ifNotFinite";
FunctionIfNotFinite(const Context & context_) : context(context_) {}
static FunctionPtr create(const Context & context)
{
return std::make_shared<FunctionIfNotFinite>(context);
}
std::string getName() const override
{
return name;
}
size_t getNumberOfArguments() const override { return 2; }
bool useDefaultImplementationForNulls() const override { return false; }
bool useDefaultImplementationForConstants() const override { return true; }
ColumnNumbers getArgumentsThatDontImplyNullableReturnType(size_t /*number_of_arguments*/) const override { return {0}; }
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
auto is_finite_type = FunctionFactory::instance().get("isFinite", context)->build({arguments[0]})->getReturnType();
auto if_type = FunctionFactory::instance().get("if", context)->build({{nullptr, is_finite_type, ""}, arguments[0], arguments[1]})->getReturnType();
return if_type;
}
void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) override
{
Block temp_block = block;
auto is_finite = FunctionFactory::instance().get("isFinite", context)->build(
{temp_block.getByPosition(arguments[0])});
size_t is_finite_pos = temp_block.columns();
temp_block.insert({nullptr, is_finite->getReturnType(), ""});
auto func_if = FunctionFactory::instance().get("if", context)->build(
{temp_block.getByPosition(is_finite_pos), temp_block.getByPosition(arguments[0]), temp_block.getByPosition(arguments[1])});
is_finite->execute(temp_block, {arguments[0]}, is_finite_pos, input_rows_count);
func_if->execute(temp_block, {is_finite_pos, arguments[0], arguments[1]}, result, input_rows_count);
block.getByPosition(result).column = std::move(temp_block.getByPosition(result).column);
}
private:
const Context & context;
};
void registerFunctionIfNotFinite(FunctionFactory & factory)
{
factory.registerFunction<FunctionIfNotFinite>();
}
}
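Semantically ifNotFinite(x, y) is just isFinite(x) ? x : y; composing it from the existing "isFinite" and "if" functions keeps the type and NULL handling consistent with them for free. The scalar equivalent, as a standalone sketch:

#include <cmath>
#include <iostream>
#include <limits>

// ifNotFinite(x, y) == isFinite(x) ? x : y
double ifNotFinite(double x, double y)
{
    return std::isfinite(x) ? x : y;
}

int main()
{
    std::cout << ifNotFinite(2.5, 42.0) << '\n';                                    // 2.5
    std::cout << ifNotFinite(std::numeric_limits<double>::infinity(), 42.0) << '\n'; // 42: inf replaced
    std::cout << ifNotFinite(std::nan(""), 42.0) << '\n';                            // 42: NaN replaced
}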

View File

@ -20,6 +20,7 @@ void registerFunctionBitShiftLeft(FunctionFactory & factory);
void registerFunctionBitShiftRight(FunctionFactory & factory);
void registerFunctionBitRotateLeft(FunctionFactory & factory);
void registerFunctionBitRotateRight(FunctionFactory & factory);
void registerFunctionBitCount(FunctionFactory & factory);
void registerFunctionLeast(FunctionFactory & factory);
void registerFunctionGreatest(FunctionFactory & factory);
void registerFunctionBitTest(FunctionFactory & factory);
@ -58,6 +59,7 @@ void registerFunctionsArithmetic(FunctionFactory & factory)
registerFunctionBitShiftRight(factory);
registerFunctionBitRotateLeft(factory);
registerFunctionBitRotateRight(factory);
registerFunctionBitCount(factory);
registerFunctionLeast(factory);
registerFunctionGreatest(factory);
registerFunctionBitTest(factory);

View File

@ -36,6 +36,7 @@ void registerFunctionHasColumnInTable(FunctionFactory &);
void registerFunctionIsFinite(FunctionFactory &);
void registerFunctionIsInfinite(FunctionFactory &);
void registerFunctionIsNaN(FunctionFactory &);
void registerFunctionIfNotFinite(FunctionFactory &);
void registerFunctionThrowIf(FunctionFactory &);
void registerFunctionVersion(FunctionFactory &);
void registerFunctionUptime(FunctionFactory &);
@ -93,6 +94,7 @@ void registerFunctionsMiscellaneous(FunctionFactory & factory)
registerFunctionIsFinite(factory);
registerFunctionIsInfinite(factory);
registerFunctionIsNaN(factory);
registerFunctionIfNotFinite(factory);
registerFunctionThrowIf(factory);
registerFunctionVersion(factory);
registerFunctionUptime(factory);

View File

@ -1,5 +1,6 @@
#include <string>
#include <iostream>
#include <cstring>
#include <ryu/ryu.h>

View File

@ -739,49 +739,50 @@ void Context::checkDatabaseAccessRightsImpl(const std::string & database_name) c
throw Exception("Access denied to database " + database_name + " for user " + client_info.current_user , ErrorCodes::DATABASE_ACCESS_DENIED);
}
void Context::addDependencyUnsafe(const DatabaseAndTableName & from, const DatabaseAndTableName & where)
void Context::addDependencyUnsafe(const StorageID & from, const StorageID & where)
{
checkDatabaseAccessRightsImpl(from.first);
checkDatabaseAccessRightsImpl(where.first);
checkDatabaseAccessRightsImpl(from.database_name);
checkDatabaseAccessRightsImpl(where.database_name);
shared->view_dependencies[from].insert(where);
// Notify table of dependencies change
auto table = tryGetTable(from.first, from.second);
auto table = tryGetTable(from);
if (table != nullptr)
table->updateDependencies();
}
void Context::addDependency(const DatabaseAndTableName & from, const DatabaseAndTableName & where)
void Context::addDependency(const StorageID & from, const StorageID & where)
{
auto lock = getLock();
addDependencyUnsafe(from, where);
}
void Context::removeDependencyUnsafe(const DatabaseAndTableName & from, const DatabaseAndTableName & where)
void Context::removeDependencyUnsafe(const StorageID & from, const StorageID & where)
{
checkDatabaseAccessRightsImpl(from.first);
checkDatabaseAccessRightsImpl(where.first);
checkDatabaseAccessRightsImpl(from.database_name);
checkDatabaseAccessRightsImpl(where.database_name);
shared->view_dependencies[from].erase(where);
// Notify table of dependencies change
auto table = tryGetTable(from.first, from.second);
auto table = tryGetTable(from);
if (table != nullptr)
table->updateDependencies();
}
void Context::removeDependency(const DatabaseAndTableName & from, const DatabaseAndTableName & where)
void Context::removeDependency(const StorageID & from, const StorageID & where)
{
auto lock = getLock();
removeDependencyUnsafe(from, where);
}
Dependencies Context::getDependencies(const String & database_name, const String & table_name) const
Dependencies Context::getDependencies(const StorageID & from) const
{
auto lock = getLock();
String db = resolveDatabase(database_name, current_database);
String db = resolveDatabase(from.database_name, current_database);
if (database_name.empty() && tryGetExternalTable(table_name))
if (from.database_name.empty() && tryGetExternalTable(from.table_name))
{
/// Table is temporary. Access granted.
}
@ -790,7 +791,7 @@ Dependencies Context::getDependencies(const String & database_name, const String
checkDatabaseAccessRightsImpl(db);
}
ViewDependencies::const_iterator iter = shared->view_dependencies.find(DatabaseAndTableName(db, table_name));
ViewDependencies::const_iterator iter = shared->view_dependencies.find(StorageID(db, from.table_name, from.uuid));
if (iter == shared->view_dependencies.end())
return {};
@ -919,24 +920,32 @@ StoragePtr Context::tryGetExternalTable(const String & table_name) const
return jt->second.first;
}
StoragePtr Context::getTable(const String & database_name, const String & table_name) const
{
return getTable(StorageID(database_name, table_name));
}
StoragePtr Context::getTable(const StorageID & table_id) const
{
std::optional<Exception> exc;
auto res = getTableImpl(database_name, table_name, &exc);
auto res = getTableImpl(table_id, &exc);
if (!res)
throw *exc;
return res;
}
StoragePtr Context::tryGetTable(const String & database_name, const String & table_name) const
{
return getTableImpl(database_name, table_name, {});
return getTableImpl(StorageID(database_name, table_name), {});
}
StoragePtr Context::tryGetTable(const StorageID & table_id) const
{
return getTableImpl(table_id, {});
}
StoragePtr Context::getTableImpl(const String & database_name, const String & table_name, std::optional<Exception> * exception) const
StoragePtr Context::getTableImpl(const StorageID & table_id, std::optional<Exception> * exception) const
{
String db;
DatabasePtr database;
@ -944,14 +953,14 @@ StoragePtr Context::getTableImpl(const String & database_name, const String & ta
{
auto lock = getLock();
if (database_name.empty())
if (table_id.database_name.empty())
{
StoragePtr res = tryGetExternalTable(table_name);
StoragePtr res = tryGetExternalTable(table_id.table_name);
if (res)
return res;
}
db = resolveDatabase(database_name, current_database);
db = resolveDatabase(table_id.database_name, current_database);
checkDatabaseAccessRightsImpl(db);
Databases::const_iterator it = shared->databases.find(db);
@ -965,11 +974,11 @@ StoragePtr Context::getTableImpl(const String & database_name, const String & ta
database = it->second;
}
auto table = database->tryGetTable(*this, table_name);
auto table = database->tryGetTable(*this, table_id.table_name);
if (!table)
{
if (exception)
exception->emplace("Table " + backQuoteIfNeed(db) + "." + backQuoteIfNeed(table_name) + " doesn't exist.", ErrorCodes::UNKNOWN_TABLE);
exception->emplace("Table " + table_id.getNameForLogs() + " doesn't exist.", ErrorCodes::UNKNOWN_TABLE);
return {};
}

View File

@ -80,6 +80,7 @@ class ICompressionCodec;
class AccessControlManager;
class SettingsConstraints;
class RemoteHostFilter;
struct StorageID;
class IDisk;
using DiskPtr = std::shared_ptr<IDisk>;
class DiskSelector;
@ -96,12 +97,9 @@ class CompiledExpressionCache;
#endif
/// (database name, table name)
using DatabaseAndTableName = std::pair<String, String>;
/// Table -> set of table-views that make SELECT from it.
using ViewDependencies = std::map<DatabaseAndTableName, std::set<DatabaseAndTableName>>;
using Dependencies = std::vector<DatabaseAndTableName>;
using ViewDependencies = std::map<StorageID, std::set<StorageID>>;
using Dependencies = std::vector<StorageID>;
using TableAndCreateAST = std::pair<StoragePtr, ASTPtr>;
using TableAndCreateASTs = std::map<String, TableAndCreateAST>;
@ -256,13 +254,13 @@ public:
ClientInfo & getClientInfo() { return client_info; }
const ClientInfo & getClientInfo() const { return client_info; }
void addDependency(const DatabaseAndTableName & from, const DatabaseAndTableName & where);
void removeDependency(const DatabaseAndTableName & from, const DatabaseAndTableName & where);
Dependencies getDependencies(const String & database_name, const String & table_name) const;
void addDependency(const StorageID & from, const StorageID & where);
void removeDependency(const StorageID & from, const StorageID & where);
Dependencies getDependencies(const StorageID & from) const;
/// Functions where we can lock the context manually
void addDependencyUnsafe(const DatabaseAndTableName & from, const DatabaseAndTableName & where);
void removeDependencyUnsafe(const DatabaseAndTableName & from, const DatabaseAndTableName & where);
void addDependencyUnsafe(const StorageID & from, const StorageID & where);
void removeDependencyUnsafe(const StorageID & from, const StorageID & where);
/// Checking the existence of the table/database. Database can be empty - in this case the current database is used.
bool isTableExist(const String & database_name, const String & table_name) const;
@ -288,7 +286,9 @@ public:
Tables getExternalTables() const;
StoragePtr tryGetExternalTable(const String & table_name) const;
StoragePtr getTable(const String & database_name, const String & table_name) const;
StoragePtr getTable(const StorageID & table_id) const;
StoragePtr tryGetTable(const String & database_name, const String & table_name) const;
StoragePtr tryGetTable(const StorageID & table_id) const;
void addExternalTable(const String & table_name, const StoragePtr & storage, const ASTPtr & ast = {});
void addScalar(const String & name, const Block & block);
bool hasScalar(const String & name) const;
@ -594,7 +594,7 @@ private:
EmbeddedDictionaries & getEmbeddedDictionariesImpl(bool throw_on_error) const;
StoragePtr getTableImpl(const String & database_name, const String & table_name, std::optional<Exception> * exception) const;
StoragePtr getTableImpl(const StorageID & table_id, std::optional<Exception> * exception) const;
SessionKey getSessionKey(const String & session_id) const;
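Since ViewDependencies now keys a std::map by StorageID, the type has to be ordered. A reduced stand-in showing the shape this requires — the real definition lives in Storages/StorageID.h, and the uuid field type here is a simplification:

#include <map>
#include <set>
#include <string>
#include <tuple>

// Stand-in for DB::StorageID, cut down to what the map usage above needs.
struct StorageID
{
    std::string database_name;
    std::string table_name;
    std::string uuid; // assumption: keeps identity stable across renames

    bool operator<(const StorageID & rhs) const
    {
        return std::tie(database_name, table_name, uuid)
             < std::tie(rhs.database_name, rhs.table_name, rhs.uuid);
    }
};

// Table -> set of table-views that make SELECT from it.
using ViewDependencies = std::map<StorageID, std::set<StorageID>>;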

View File

@ -622,8 +622,13 @@ void DDLWorker::processTask(DDLTask & task, const ZooKeeperPtr & zookeeper)
if (auto query_with_table = dynamic_cast<ASTQueryWithTableAndOutput *>(rewritten_ast.get()); query_with_table)
{
String database = query_with_table->database.empty() ? context.getCurrentDatabase() : query_with_table->database;
StoragePtr storage = context.tryGetTable(database, query_with_table->table);
StoragePtr storage;
if (!query_with_table->table.empty())
{
/// It's not CREATE DATABASE
String database = query_with_table->database.empty() ? context.getCurrentDatabase() : query_with_table->database;
storage = context.tryGetTable(database, query_with_table->table);
}
/// For some reason we check consistency of cluster definition only
/// in case of ALTER query, but not in case of CREATE/DROP etc.

View File

@ -95,7 +95,7 @@ public:
Block sample = interpreter->getSampleBlock();
NamesAndTypesList columns = sample.getNamesAndTypesList();
StoragePtr external_storage = StorageMemory::create("_external", external_table_name, ColumnsDescription{columns}, ConstraintsDescription{});
StoragePtr external_storage = StorageMemory::create(StorageID("_external", external_table_name), ColumnsDescription{columns}, ConstraintsDescription{});
external_storage->startup();
/** We replace the subquery with the name of the temporary table.

View File

@ -638,13 +638,10 @@ bool InterpreterCreateQuery::doCreateTable(const ASTCreateQuery & create,
{
res = StorageFactory::instance().get(create,
database ? database->getTableDataPath(create) : "",
table_name,
create.database,
context,
context.getGlobalContext(),
properties.columns,
properties.constraints,
create.attach,
false);
}

View File

@ -79,13 +79,14 @@ BlockIO InterpreterDropQuery::executeToTable(
if (database_and_table.first && database_and_table.second)
{
auto table_id = database_and_table.second->getStorageID();
if (kind == ASTDropQuery::Kind::Detach)
{
database_and_table.second->shutdown();
/// If table was already dropped by anyone, an exception will be thrown
auto table_lock = database_and_table.second->lockExclusively(context.getCurrentQueryId());
/// Drop table from memory, don't touch data and metadata
database_and_table.first->detachTable(database_and_table.second->getTableName());
database_and_table.first->detachTable(table_id.table_name);
}
else if (kind == ASTDropQuery::Kind::Truncate)
{
@ -107,7 +108,7 @@ BlockIO InterpreterDropQuery::executeToTable(
const std::string metadata_file_without_extension =
database_and_table.first->getMetadataPath()
+ escapeForFileName(database_and_table.second->getTableName());
+ escapeForFileName(table_id.table_name);
const auto prev_metadata_name = metadata_file_without_extension + ".sql";
const auto drop_metadata_name = metadata_file_without_extension + ".sql.tmp_drop";
@ -131,7 +132,7 @@ BlockIO InterpreterDropQuery::executeToTable(
String table_data_path_relative = database_and_table.first->getTableDataPath(table_name);
/// Delete table metadata and table itself from memory
database_and_table.first->removeTable(context, database_and_table.second->getTableName());
database_and_table.first->removeTable(context, table_id.table_name);
database_and_table.second->is_dropped = true;
/// If it is not virtual database like Dictionary then drop remaining data dir

View File

@ -111,7 +111,7 @@ BlockIO InterpreterInsertQuery::execute()
if (table->noPushingToViews() && !no_destination)
out = table->write(query_ptr, context);
else
out = std::make_shared<PushingToViewsBlockOutputStream>(query.database, query.table, table, context, query_ptr, no_destination);
out = std::make_shared<PushingToViewsBlockOutputStream>(table, context, query_ptr, no_destination);
/// Do not squash blocks if it is a sync INSERT into Distributed, since it leads to double buffering on the client and server side.
/// Client-side buffering might cause excessive timeouts (especially in case of big blocks).

View File

@ -116,14 +116,11 @@ namespace ErrorCodes
extern const int INVALID_WITH_FILL_EXPRESSION;
}
namespace
{
/// Assumes `storage` is set and the table filter (row-level security) is not empty.
String generateFilterActions(ExpressionActionsPtr & actions, const Context & context, const StoragePtr & storage, const ASTPtr & row_policy_filter, const Names & prerequisite_columns = {})
String InterpreterSelectQuery::generateFilterActions(ExpressionActionsPtr & actions, const ASTPtr & row_policy_filter, const Names & prerequisite_columns) const
{
const auto & db_name = storage->getDatabaseName();
const auto & table_name = storage->getTableName();
const auto & db_name = table_id.getDatabaseName();
const auto & table_name = table_id.getTableName();
/// TODO: implement some AST builders for this kind of stuff
ASTPtr query_ast = std::make_shared<ASTSelectQuery>();
@ -153,17 +150,15 @@ String generateFilterActions(ExpressionActionsPtr & actions, const Context & con
table_expr->children.push_back(table_expr->database_and_table_name);
/// Using separate expression analyzer to prevent any possible alias injection
auto syntax_result = SyntaxAnalyzer(context).analyze(query_ast, storage->getColumns().getAllPhysical());
SelectQueryExpressionAnalyzer analyzer(query_ast, syntax_result, context);
ExpressionActionsChain new_chain(context);
auto syntax_result = SyntaxAnalyzer(*context).analyze(query_ast, storage->getColumns().getAllPhysical());
SelectQueryExpressionAnalyzer analyzer(query_ast, syntax_result, *context);
ExpressionActionsChain new_chain(*context);
analyzer.appendSelect(new_chain, false);
actions = new_chain.getLastActions();
return expr_list->children.at(0)->getColumnName();
}
}
InterpreterSelectQuery::InterpreterSelectQuery(
const ASTPtr & query_ptr_,
const Context & context_,
@ -292,7 +287,8 @@ InterpreterSelectQuery::InterpreterSelectQuery(
if (auto view_source = context->getViewSource())
{
auto & storage_values = static_cast<const StorageValues &>(*view_source);
if (storage_values.getDatabaseName() == database_name && storage_values.getTableName() == table_name)
auto tmp_table_id = storage_values.getStorageID();
if (tmp_table_id.database_name == database_name && tmp_table_id.table_name == table_name)
{
/// Read from view source.
storage = context->getViewSource();
@ -309,7 +305,10 @@ InterpreterSelectQuery::InterpreterSelectQuery(
//std::cerr << "Source header:" << source_header.dumpStructure() << std::endl;
if (storage)
{
table_lock = storage->lockStructureForShare(false, context->getInitialQueryId());
table_id = storage->getStorageID();
}
auto analyze = [&] ()
{
@ -371,11 +370,11 @@ InterpreterSelectQuery::InterpreterSelectQuery(
source_header = storage->getSampleBlockForColumns(required_columns);
/// Fix source_header for filter actions.
auto row_policy_filter = context->getRowPolicy()->getCondition(storage->getDatabaseName(), storage->getTableName(), RowPolicy::SELECT_FILTER);
auto row_policy_filter = context->getRowPolicy()->getCondition(table_id.getDatabaseName(), table_id.getTableName(), RowPolicy::SELECT_FILTER);
if (row_policy_filter)
{
filter_info = std::make_shared<FilterInfo>();
filter_info->column_name = generateFilterActions(filter_info->actions, *context, storage, row_policy_filter, required_columns);
filter_info->column_name = generateFilterActions(filter_info->actions, row_policy_filter, required_columns);
source_header = storage->getSampleBlockForColumns(filter_info->actions->getRequiredColumns());
}
}
@ -425,9 +424,9 @@ InterpreterSelectQuery::InterpreterSelectQuery(
sanitizeBlock(result_header);
/// Remove limits for some tables in the `system` database.
if (storage && (storage->getDatabaseName() == "system"))
if (storage && (table_id.getDatabaseName() == "system"))
{
String table_name = storage->getTableName();
String table_name = table_id.getTableName();
if ((table_name == "quotas") || (table_name == "quota_usage") || (table_name == "one"))
{
options.ignore_quota = true;
@ -511,7 +510,7 @@ Block InterpreterSelectQuery::getSampleBlockImpl()
/// PREWHERE optimization.
/// Turn off, if the table filter (row-level security) is applied.
if (!context->getRowPolicy()->getCondition(storage->getDatabaseName(), storage->getTableName(), RowPolicy::SELECT_FILTER))
if (!context->getRowPolicy()->getCondition(table_id.getDatabaseName(), table_id.getTableName(), RowPolicy::SELECT_FILTER))
{
auto optimize_prewhere = [&](auto & merge_tree)
{
@ -1367,12 +1366,12 @@ void InterpreterSelectQuery::executeFetchColumns(
if (storage)
{
/// Append columns from the table filter to required
auto row_policy_filter = context->getRowPolicy()->getCondition(storage->getDatabaseName(), storage->getTableName(), RowPolicy::SELECT_FILTER);
auto row_policy_filter = context->getRowPolicy()->getCondition(table_id.getDatabaseName(), table_id.getTableName(), RowPolicy::SELECT_FILTER);
if (row_policy_filter)
{
auto initial_required_columns = required_columns;
ExpressionActionsPtr actions;
generateFilterActions(actions, *context, storage, row_policy_filter, initial_required_columns);
generateFilterActions(actions, row_policy_filter, initial_required_columns);
auto required_columns_from_filter = actions->getRequiredColumns();
for (const auto & column : required_columns_from_filter)

View File

@ -13,6 +13,7 @@
#include <Storages/SelectQueryInfo.h>
#include <Storages/TableStructureLockHolder.h>
#include <Storages/ReadInOrderOptimizer.h>
#include <Storages/StorageID.h>
#include <Processors/QueryPipeline.h>
#include <Columns/FilterDescription.h>
@ -244,6 +245,8 @@ private:
void executeSubqueriesInSetsAndJoins(QueryPipeline & pipeline, std::unordered_map<String, SubqueryForSet> & subqueries_for_sets);
void executeMergeSorted(QueryPipeline & pipeline, const SortDescription & sort_description, UInt64 limit);
String generateFilterActions(ExpressionActionsPtr & actions, const ASTPtr & row_policy_filter, const Names & prerequisite_columns = {}) const;
/// Add ConvertingBlockInputStream to specified header.
void unifyStreams(Pipeline & pipeline, Block header);
@ -293,6 +296,7 @@ private:
/// Table from where to read data, if not subquery.
StoragePtr storage;
StorageID table_id = StorageID::createEmpty(); /// Will be initialized if storage is not nullptr
TableStructureReadLockHolder table_lock;
/// Used when we read from prepared input, not table or subquery.

View File

@ -55,7 +55,7 @@ BlockInputStreamPtr InterpreterShowCreateQuery::executeImpl()
{
if (show_query->temporary)
throw Exception("Temporary databases are not possible.", ErrorCodes::SYNTAX_ERROR);
create_query = context.getDatabase(show_query->database)->getCreateDatabaseQuery();
create_query = context.getDatabase(show_query->database)->getCreateDatabaseQuery(context);
}
else if ((show_query = query_ptr->as<ASTShowCreateDictionaryQuery>()))
{

View File

@ -299,13 +299,10 @@ StoragePtr InterpreterSystemQuery::tryRestartReplica(const String & database_nam
StoragePtr table = StorageFactory::instance().get(create,
database->getTableDataPath(create),
table_name,
database_name,
system_context,
system_context.getGlobalContext(),
columns,
constraints,
create.attach,
false);
database->createTable(system_context, table_name, table, create_ast);

View File

@ -11,6 +11,7 @@
#include <map>
#include <atomic>
#include <utility>
#include <shared_mutex>
#include <Poco/Net/HTMLForm.h>
namespace Poco { namespace Net { class HTTPServerResponse; } }
@ -24,42 +25,6 @@ namespace ErrorCodes
extern const int NO_SUCH_INTERSERVER_IO_ENDPOINT;
}
/** Location of the service.
*/
struct InterserverIOEndpointLocation
{
public:
InterserverIOEndpointLocation(const std::string & name_, const std::string & host_, UInt16 port_)
: name(name_), host(host_), port(port_)
{
}
/// Creates a location based on its serialized representation.
InterserverIOEndpointLocation(const std::string & serialized_location)
{
ReadBufferFromString buf(serialized_location);
readBinary(name, buf);
readBinary(host, buf);
readBinary(port, buf);
assertEOF(buf);
}
/// Serializes the location.
std::string toString() const
{
WriteBufferFromOwnString buf;
writeBinary(name, buf);
writeBinary(host, buf);
writeBinary(port, buf);
return buf.str();
}
public:
std::string name;
std::string host;
UInt16 port;
};
/** Query processor from other servers.
*/
class InterserverIOEndpoint
@ -71,6 +36,7 @@ public:
/// You need to stop the data transfer if blocker is activated.
ActionBlocker blocker;
std::shared_mutex rwlock;
};
using InterserverIOEndpointPtr = std::shared_ptr<InterserverIOEndpoint>;
@ -90,11 +56,10 @@ public:
throw Exception("Duplicate interserver IO endpoint: " + name, ErrorCodes::DUPLICATE_INTERSERVER_IO_ENDPOINT);
}
void removeEndpoint(const String & name)
bool removeEndpointIfExists(const String & name)
{
std::lock_guard lock(mutex);
if (!endpoint_map.erase(name))
throw Exception("No interserver IO endpoint named " + name, ErrorCodes::NO_SUCH_INTERSERVER_IO_ENDPOINT);
return endpoint_map.erase(name);
}
InterserverIOEndpointPtr getEndpoint(const String & name)
@ -115,41 +80,4 @@ private:
std::mutex mutex;
};
/// In the constructor calls `addEndpoint`, in the destructor - `removeEndpoint`.
class InterserverIOEndpointHolder
{
public:
InterserverIOEndpointHolder(const String & name_, InterserverIOEndpointPtr endpoint_, InterserverIOHandler & handler_)
: name(name_), endpoint(std::move(endpoint_)), handler(handler_)
{
handler.addEndpoint(name, endpoint);
}
InterserverIOEndpointPtr getEndpoint()
{
return endpoint;
}
~InterserverIOEndpointHolder()
try
{
handler.removeEndpoint(name);
/// After destroying the object, `endpoint` can still live, since its ownership is acquired during the processing of the request,
/// see InterserverIOHTTPHandler.cpp
}
catch (...)
{
tryLogCurrentException("~InterserverIOEndpointHolder");
}
ActionBlocker & getBlocker() { return endpoint->blocker; }
private:
String name;
InterserverIOEndpointPtr endpoint;
InterserverIOHandler & handler;
};
using InterserverIOEndpointHolderPtr = std::shared_ptr<InterserverIOEndpointHolder>;
}
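The removeEndpoint → removeEndpointIfExists change replaces a throwing API with one that reports through its return value, which suits shutdown paths that may legitimately run after the endpoint is already gone. The idiom in isolation (sketch, with an int payload standing in for the endpoint pointer):

#include <map>
#include <mutex>
#include <string>

class Registry
{
public:
    // std::map::erase returns the number of removed elements, so
    // "remove if exists" never throws for a missing key.
    bool removeIfExists(const std::string & name)
    {
        std::lock_guard lock(mutex);
        return endpoint_map.erase(name) > 0; // true only if something was removed
    }
private:
    std::mutex mutex;
    std::map<std::string, int> endpoint_map;
};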

View File

@ -110,7 +110,8 @@ bool PartLog::addNewParts(Context & current_context, const PartLog::MutableDataP
try
{
part_log = current_context.getPartLog(parts.front()->storage.getDatabaseName()); // assume parts belong to the same table
auto table_id = parts.front()->storage.getStorageID();
part_log = current_context.getPartLog(table_id.database_name); // assume parts belong to the same table
if (!part_log)
return false;
@ -122,8 +123,8 @@ bool PartLog::addNewParts(Context & current_context, const PartLog::MutableDataP
elem.event_time = time(nullptr);
elem.duration_ms = elapsed_ns / 1000000;
elem.database_name = part->storage.getDatabaseName();
elem.table_name = part->storage.getTableName();
elem.database_name = table_id.database_name;
elem.table_name = table_id.table_name;
elem.partition_id = part->info.partition_id;
elem.part_name = part->name;
elem.path_on_disk = part->getFullPath();

View File

@ -214,7 +214,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
try
{
/// TODO Parser should fail early when max_query_size limit is reached.
ast = parseQuery(parser, begin, end, "", max_query_size);
ast = parseQuery(parser, begin, end, "", max_query_size, settings.max_parser_depth);
auto * insert_query = ast->as<ASTInsertQuery>();

View File

@ -139,7 +139,7 @@ void loadMetadataSystem(Context & context)
Poco::File(global_path + "data/" SYSTEM_DATABASE).createDirectories();
Poco::File(global_path + "metadata/" SYSTEM_DATABASE).createDirectories();
auto system_database = std::make_shared<DatabaseOrdinary>(SYSTEM_DATABASE, global_path + "metadata/" SYSTEM_DATABASE, context);
auto system_database = std::make_shared<DatabaseOrdinary>(SYSTEM_DATABASE, global_path + "metadata/" SYSTEM_DATABASE "/", context);
context.addDatabase(SYSTEM_DATABASE, system_database);
}

View File

@ -38,12 +38,9 @@ public:
std::string getRemoteDatabaseName() const { return remote_database; }
std::string getRemoteTableName() const { return remote_table; }
std::string getTableName() const override { return ""; }
std::string getDatabaseName() const override { return ""; }
protected:
StorageDistributedFake(const std::string & remote_database_, const std::string & remote_table_, size_t shard_count_)
: remote_database(remote_database_), remote_table(remote_table_), shard_count(shard_count_)
: IStorage({"", ""}), remote_database(remote_database_), remote_table(remote_table_), shard_count(shard_count_)
{
}

View File

@ -182,10 +182,24 @@ void ASTAlterCommand::formatImpl(
case PartDestinationType::VOLUME:
settings.ostr << "VOLUME ";
break;
case PartDestinationType::TABLE:
settings.ostr << "TABLE ";
if (!to_database.empty())
{
settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(to_database)
<< (settings.hilite ? hilite_none : "") << ".";
}
settings.ostr << (settings.hilite ? hilite_identifier : "")
<< backQuoteIfNeed(to_table)
<< (settings.hilite ? hilite_none : "");
return;
default:
break;
}
settings.ostr << quoteString(move_destination_name);
if (move_destination_type != PartDestinationType::TABLE)
{
settings.ostr << quoteString(move_destination_name);
}
}
else if (type == ASTAlterCommand::REPLACE_PARTITION)
{

View File

@ -146,6 +146,9 @@ public:
String from_table;
/// To distinguish REPLACE and ATTACH PARTITION partition FROM db.table
bool replace = true;
/// MOVE PARTITION partition TO TABLE db.table
String to_database;
String to_table;
String getID(char delim) const override { return "AlterCommand" + (delim + std::to_string(static_cast<int>(type))); }

View File

@ -2,6 +2,7 @@
#include <Parsers/IAST.h>
#include <Parsers/ASTQueryWithOutput.h>
#include <Core/UUID.h>
namespace DB
@ -15,6 +16,7 @@ class ASTQueryWithTableAndOutput : public ASTQueryWithOutput
public:
String database;
String table;
UUID uuid;
bool temporary{false};
protected:

View File

@ -5,6 +5,8 @@
#include <Core/Defines.h>
#include <Core/Types.h>
#include <Core/Settings.h>
#include <IO/WriteHelpers.h>
#include <Parsers/IAST.h>
#include <Parsers/TokenIterator.h>
@ -57,13 +59,15 @@ public:
using TokenIterator::TokenIterator;
uint32_t depth = 0;
uint32_t max_depth = 1000;
uint32_t max_depth = 0;
Pos(Tokens & tokens_, uint32_t max_depth_) : TokenIterator(tokens_), max_depth(max_depth_) {}
void increaseDepth()
{
++depth;
if (depth > max_depth)
throw Exception("Maximum parse depth exceeded", ErrorCodes::TOO_DEEP_RECURSION);
if (max_depth > 0 && depth > max_depth)
throw Exception("Maximum parse depth (" + toString(max_depth) + ") exceeded. Consider rising max_parser_depth parameter.", ErrorCodes::TOO_DEEP_RECURSION);
}
void decreaseDepth()

View File

@ -69,6 +69,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
ParserKeyword s_to_disk("TO DISK");
ParserKeyword s_to_volume("TO VOLUME");
ParserKeyword s_to_table("TO TABLE");
ParserKeyword s_delete_where("DELETE WHERE");
ParserKeyword s_update("UPDATE");
@ -240,14 +241,23 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
command->move_destination_type = PartDestinationType::DISK;
else if (s_to_volume.ignore(pos))
command->move_destination_type = PartDestinationType::VOLUME;
else if (s_to_table.ignore(pos))
{
if (!parseDatabaseAndTableName(pos, expected, command->to_database, command->to_table))
return false;
command->move_destination_type = PartDestinationType::TABLE;
}
else
return false;
ASTPtr ast_space_name;
if (!parser_string_literal.parse(pos, ast_space_name, expected))
return false;
if (command->move_destination_type != PartDestinationType::TABLE)
{
ASTPtr ast_space_name;
if (!parser_string_literal.parse(pos, ast_space_name, expected))
return false;
command->move_destination_name = ast_space_name->as<ASTLiteral &>().value.get<const String &>();
command->move_destination_name = ast_space_name->as<ASTLiteral &>().value.get<const String &>();
}
}
else if (s_move_partition.ignore(pos, expected))
{
@ -260,14 +270,23 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
command->move_destination_type = PartDestinationType::DISK;
else if (s_to_volume.ignore(pos))
command->move_destination_type = PartDestinationType::VOLUME;
else if (s_to_table.ignore(pos))
{
if (!parseDatabaseAndTableName(pos, expected, command->to_database, command->to_table))
return false;
command->move_destination_type = PartDestinationType::TABLE;
}
else
return false;
ASTPtr ast_space_name;
if (!parser_string_literal.parse(pos, ast_space_name, expected))
return false;
if (command->move_destination_type != PartDestinationType::TABLE)
{
ASTPtr ast_space_name;
if (!parser_string_literal.parse(pos, ast_space_name, expected))
return false;
command->move_destination_name = ast_space_name->as<ASTLiteral &>().value.get<const String &>();
command->move_destination_name = ast_space_name->as<ASTLiteral &>().value.get<const String &>();
}
}
else if (s_add_constraint.ignore(pos, expected))
{
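For illustration, the two destination forms the parser now accepts differ in what follows the keyword (the statement texts below are assumptions for illustration, not taken from this commit's tests):

/// TO DISK and TO VOLUME still take a quoted string literal, stored in move_destination_name:
///     ALTER TABLE t MOVE PARTITION ID '20200101' TO VOLUME 'slow'
/// TO TABLE instead takes an optionally database-qualified identifier, parsed by
/// parseDatabaseAndTableName into command->to_database / command->to_table:
///     ALTER TABLE t MOVE PARTITION ID '20200101' TO TABLE backup_db.t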

View File

@ -215,10 +215,11 @@ ASTPtr tryParseQuery(
bool hilite,
const std::string & query_description,
bool allow_multi_statements,
size_t max_query_size)
size_t max_query_size,
size_t max_parser_depth)
{
Tokens tokens(pos, end, max_query_size);
IParser::Pos token_iterator(tokens);
IParser::Pos token_iterator(tokens, max_parser_depth);
if (token_iterator->isEnd()
|| token_iterator->type == TokenType::Semicolon)
@ -297,10 +298,11 @@ ASTPtr parseQueryAndMovePosition(
const char * end,
const std::string & query_description,
bool allow_multi_statements,
size_t max_query_size)
size_t max_query_size,
size_t max_parser_depth)
{
std::string error_message;
ASTPtr res = tryParseQuery(parser, pos, end, error_message, false, query_description, allow_multi_statements, max_query_size);
ASTPtr res = tryParseQuery(parser, pos, end, error_message, false, query_description, allow_multi_statements, max_query_size, max_parser_depth);
if (res)
return res;
@ -314,10 +316,11 @@ ASTPtr parseQuery(
const char * begin,
const char * end,
const std::string & query_description,
size_t max_query_size)
size_t max_query_size,
size_t max_parser_depth)
{
auto pos = begin;
return parseQueryAndMovePosition(parser, pos, end, query_description, false, max_query_size);
return parseQueryAndMovePosition(parser, pos, end, query_description, false, max_query_size, max_parser_depth);
}

View File

@ -15,8 +15,9 @@ ASTPtr tryParseQuery(
bool hilite,
const std::string & description,
bool allow_multi_statements, /// If false, check for non-space characters after semicolon and set error message if any.
size_t max_query_size); /// If (end - pos) > max_query_size and query is longer than max_query_size then throws "Max query size exceeded".
size_t max_query_size, /// If (end - pos) > max_query_size and query is longer than max_query_size then throws "Max query size exceeded".
/// Disabled if zero. Used to check the query size when the buffer can contain data for an INSERT query.
size_t max_parser_depth = 0);
/// Parse query or throw an exception with error message.
@ -26,15 +27,16 @@ ASTPtr parseQueryAndMovePosition(
const char * end,
const std::string & description,
bool allow_multi_statements,
size_t max_query_size);
size_t max_query_size = 0,
size_t max_parser_depth = 0);
ASTPtr parseQuery(
IParser & parser,
const char * begin,
const char * end,
const std::string & description,
size_t max_query_size);
size_t max_query_size,
size_t max_parser_depth = 0);
ASTPtr parseQuery(
IParser & parser,

View File

@ -328,12 +328,12 @@ ConstantExpressionTemplate::Cache::getFromCacheOrConstruct(const DataTypePtr & r
return res;
}
bool ConstantExpressionTemplate::parseExpression(ReadBuffer & istr, const FormatSettings & settings)
bool ConstantExpressionTemplate::parseExpression(ReadBuffer & istr, const FormatSettings & format_settings, const Settings & settings)
{
size_t cur_column = 0;
try
{
if (tryParseExpression(istr, settings, cur_column))
if (tryParseExpression(istr, format_settings, cur_column, settings))
{
++rows_count;
return true;
@ -355,7 +355,7 @@ bool ConstantExpressionTemplate::parseExpression(ReadBuffer & istr, const Format
return false;
}
bool ConstantExpressionTemplate::tryParseExpression(ReadBuffer & istr, const FormatSettings & settings, size_t & cur_column)
bool ConstantExpressionTemplate::tryParseExpression(ReadBuffer & istr, const FormatSettings & format_settings, size_t & cur_column, const Settings & settings)
{
size_t cur_token = 0;
size_t num_columns = structure->literals.columns();
@ -372,13 +372,13 @@ bool ConstantExpressionTemplate::tryParseExpression(ReadBuffer & istr, const For
skipWhitespaceIfAny(istr);
const DataTypePtr & type = structure->literals.getByPosition(cur_column).type;
if (settings.values.accurate_types_of_literals && !structure->special_parser[cur_column].useDefaultParser())
if (format_settings.values.accurate_types_of_literals && !structure->special_parser[cur_column].useDefaultParser())
{
if (!parseLiteralAndAssertType(istr, type.get(), cur_column))
if (!parseLiteralAndAssertType(istr, type.get(), cur_column, settings))
return false;
}
else
type->deserializeAsTextQuoted(*columns[cur_column], istr, settings);
type->deserializeAsTextQuoted(*columns[cur_column], istr, format_settings);
++cur_column;
}
@ -392,7 +392,7 @@ bool ConstantExpressionTemplate::tryParseExpression(ReadBuffer & istr, const For
return true;
}
bool ConstantExpressionTemplate::parseLiteralAndAssertType(ReadBuffer & istr, const IDataType * complex_type, size_t column_idx)
bool ConstantExpressionTemplate::parseLiteralAndAssertType(ReadBuffer & istr, const IDataType * complex_type, size_t column_idx, const Settings & settings)
{
using Type = Field::Types::Which;
@ -410,7 +410,7 @@ bool ConstantExpressionTemplate::parseLiteralAndAssertType(ReadBuffer & istr, co
/// TODO faster way to check types without using Parsers
ParserArrayOfLiterals parser_array;
Tokens tokens_number(istr.position(), istr.buffer().end());
IParser::Pos iterator(tokens_number);
IParser::Pos iterator(tokens_number, settings.max_parser_depth);
Expected expected;
ASTPtr ast;

View File

@ -66,7 +66,7 @@ public:
/// Read expression from istr, assert it has the same structure and the same types of literals (template matches)
/// and parse literals into temporary columns
bool parseExpression(ReadBuffer & istr, const FormatSettings & settings);
bool parseExpression(ReadBuffer & istr, const FormatSettings & format_settings, const Settings & settings);
/// Evaluate the batch of expressions that were parsed using the template.
/// If template was deduced with null_as_default == true, set bits in nulls for NULL values in column_idx, starting from offset.
@ -75,8 +75,8 @@ public:
size_t rowsCount() const { return rows_count; }
private:
bool tryParseExpression(ReadBuffer & istr, const FormatSettings & settings, size_t & cur_column);
bool parseLiteralAndAssertType(ReadBuffer & istr, const IDataType * type, size_t column_idx);
bool tryParseExpression(ReadBuffer & istr, const FormatSettings & format_settings, size_t & cur_column, const Settings & settings);
bool parseLiteralAndAssertType(ReadBuffer & istr, const IDataType * type, size_t column_idx, const Settings & settings);
private:
TemplateStructurePtr structure;

View File

@ -129,7 +129,8 @@ void ValuesBlockInputFormat::readRow(MutableColumns & columns, size_t row_num)
bool ValuesBlockInputFormat::tryParseExpressionUsingTemplate(MutableColumnPtr & column, size_t column_idx)
{
/// Try to parse expression using template if one was successfully deduced while parsing the first row
if (templates[column_idx]->parseExpression(buf, format_settings))
auto settings = context->getSettingsRef();
if (templates[column_idx]->parseExpression(buf, format_settings, settings))
{
++rows_parsed_using_template[column_idx];
return true;
@ -187,6 +188,7 @@ bool ValuesBlockInputFormat::parseExpression(IColumn & column, size_t column_idx
{
const Block & header = getPort().getHeader();
const IDataType & type = *header.getByPosition(column_idx).type;
auto settings = context->getSettingsRef();
/// We need contiguous memory containing the expression to use the Lexer
skipToNextRow(0, 1);
@ -195,7 +197,7 @@ bool ValuesBlockInputFormat::parseExpression(IColumn & column, size_t column_idx
Expected expected;
Tokens tokens(buf.position(), buf.buffer().end());
IParser::Pos token_iterator(tokens);
IParser::Pos token_iterator(tokens, settings.max_parser_depth);
ASTPtr ast;
bool parsed = parser.parse(token_iterator, ast, expected);
@ -265,7 +267,7 @@ bool ValuesBlockInputFormat::parseExpression(IColumn & column, size_t column_idx
++attempts_to_deduce_template[column_idx];
buf.rollbackToCheckpoint();
if (templates[column_idx]->parseExpression(buf, format_settings))
if (templates[column_idx]->parseExpression(buf, format_settings, settings))
{
++rows_parsed_using_template[column_idx];
parser_type_for_column[column_idx] = ParserType::BatchTemplate;

View File

@ -689,7 +689,7 @@ bool StorageDistributedDirectoryMonitor::maybeMarkAsBroken(const std::string & f
std::string StorageDistributedDirectoryMonitor::getLoggerName() const
{
return storage.table_name + '.' + storage.getName() + ".DirectoryMonitor";
return storage.getStorageID().getFullTableName() + ".DirectoryMonitor";
}
void StorageDistributedDirectoryMonitor::updatePath()

View File

@ -90,7 +90,7 @@ void DistributedBlockOutputStream::write(const Block & block)
if (ordinary_block.has(col.name))
{
ordinary_block.erase(col.name);
LOG_DEBUG(log, storage.getTableName()
LOG_DEBUG(log, storage.getStorageID().getNameForLogs()
<< ": column " + col.name + " will be removed, "
<< "because it is MATERIALIZED");
}
@ -515,7 +515,7 @@ void DistributedBlockOutputStream::writeAsyncImpl(const Block & block, const siz
else
{
if (shard_info.dir_name_for_internal_replication.empty())
throw Exception("Directory name for async inserts is empty, table " + storage.getTableName(), ErrorCodes::LOGICAL_ERROR);
throw Exception("Directory name for async inserts is empty, table " + storage.getStorageID().getNameForLogs(), ErrorCodes::LOGICAL_ERROR);
writeToShard(block, {shard_info.dir_name_for_internal_replication});
}

View File

@ -30,7 +30,7 @@ namespace ErrorCodes
extern const int NOT_IMPLEMENTED;
}
IStorage::IStorage(ColumnsDescription virtuals_) : virtuals(std::move(virtuals_))
IStorage::IStorage(StorageID storage_id_, ColumnsDescription virtuals_) : storage_id(std::move(storage_id_)), virtuals(std::move(virtuals_))
{
}
@ -179,7 +179,7 @@ void IStorage::check(const Names & column_names, bool include_virtuals) const
{
if (columns_map.end() == columns_map.find(name))
throw Exception(
"There is no column with name " + backQuote(name) + " in table " + getTableName() + ". There are columns: " + list_of_columns,
"There is no column with name " + backQuote(name) + " in table " + getStorageID().getNameForLogs() + ". There are columns: " + list_of_columns,
ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);
if (unique_names.end() != unique_names.find(name))
@ -341,7 +341,7 @@ TableStructureWriteLockHolder IStorage::lockAlterIntention(const String & query_
void IStorage::lockNewDataStructureExclusively(TableStructureWriteLockHolder & lock_holder, const String & query_id)
{
if (!lock_holder.alter_intention_lock)
throw Exception("Alter intention lock for table " + getTableName() + " was not taken. This is a bug.", ErrorCodes::LOGICAL_ERROR);
throw Exception("Alter intention lock for table " + getStorageID().getNameForLogs() + " was not taken. This is a bug.", ErrorCodes::LOGICAL_ERROR);
lock_holder.new_data_structure_lock = new_data_structure_lock->getLock(RWLockImpl::Write, query_id);
}
@ -349,7 +349,7 @@ void IStorage::lockNewDataStructureExclusively(TableStructureWriteLockHolder & l
void IStorage::lockStructureExclusively(TableStructureWriteLockHolder & lock_holder, const String & query_id)
{
if (!lock_holder.alter_intention_lock)
throw Exception("Alter intention lock for table " + getTableName() + " was not taken. This is a bug.", ErrorCodes::LOGICAL_ERROR);
throw Exception("Alter intention lock for table " + getStorageID().getNameForLogs() + " was not taken. This is a bug.", ErrorCodes::LOGICAL_ERROR);
if (!lock_holder.new_data_structure_lock)
lock_holder.new_data_structure_lock = new_data_structure_lock->getLock(RWLockImpl::Write, query_id);
@ -385,14 +385,11 @@ void IStorage::alter(
const Context & context,
TableStructureWriteLockHolder & table_lock_holder)
{
const String database_name = getDatabaseName();
const String table_name = getTableName();
lockStructureExclusively(table_lock_holder, context.getCurrentQueryId());
auto table_id = getStorageID();
StorageInMemoryMetadata metadata = getInMemoryMetadata();
params.apply(metadata);
context.getDatabase(database_name)->alterTable(context, table_name, metadata);
context.getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, metadata);
setColumns(std::move(metadata.columns));
}
@ -427,4 +424,17 @@ BlockInputStreams IStorage::read(
return res;
}
StorageID IStorage::getStorageID() const
{
std::lock_guard<std::mutex> lock(id_mutex);
return storage_id;
}
void IStorage::renameInMemory(const String & new_database_name, const String & new_table_name)
{
std::lock_guard<std::mutex> lock(id_mutex);
storage_id.database_name = new_database_name;
storage_id.table_name = new_table_name;
}
}
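getStorageID() returns the id by value under id_mutex, so callers always observe a consistent database/table pair, and renameInMemory() updates both fields under the same lock. A stand-alone sketch of that pattern (illustrative names, not the real IStorage members):

#include <mutex>
#include <string>
#include <utility>

struct IdHolder
{
    mutable std::mutex id_mutex;
    std::string database_name;
    std::string table_name;

    /// Return a snapshot by value: the caller's copy cannot be torn by a concurrent rename.
    std::pair<std::string, std::string> get() const
    {
        std::lock_guard<std::mutex> lock(id_mutex);
        return {database_name, table_name};
    }

    void rename(const std::string & new_db, const std::string & new_table)
    {
        std::lock_guard<std::mutex> lock(id_mutex);
        database_name = new_db;
        table_name = new_table;
    }
};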

View File

@ -6,6 +6,7 @@
#include <Databases/IDatabase.h>
#include <Interpreters/CancellationCode.h>
#include <Storages/IStorage_fwd.h>
#include <Storages/StorageID.h>
#include <Storages/SelectQueryInfo.h>
#include <Storages/TableStructureLockHolder.h>
#include <Storages/CheckResults.h>
@ -75,8 +76,9 @@ struct ColumnSize
class IStorage : public std::enable_shared_from_this<IStorage>, public TypePromotion<IStorage>
{
public:
IStorage() = default;
explicit IStorage(ColumnsDescription virtuals_);
IStorage() = delete;
explicit IStorage(StorageID storage_id_) : storage_id(std::move(storage_id_)) {}
IStorage(StorageID id_, ColumnsDescription virtuals_);
virtual ~IStorage() = default;
IStorage(const IStorage &) = delete;
@ -86,8 +88,7 @@ public:
virtual std::string getName() const = 0;
/// The name of the table.
virtual std::string getTableName() const = 0;
virtual std::string getDatabaseName() const { return {}; }
StorageID getStorageID() const;
/// Returns true if the storage receives data from a remote server or servers.
virtual bool isRemote() const { return false; }
@ -165,6 +166,8 @@ protected: /// still thread-unsafe part.
private:
StorageID storage_id;
mutable std::mutex id_mutex;
ColumnsDescription columns; /// combined real and virtual columns
const ColumnsDescription virtuals = {};
IndicesDescription indices;
@ -303,12 +306,18 @@ public:
* In this function, you need to rename the directory with the data, if any.
* Called when the table structure is locked for write.
*/
virtual void rename(const String & /*new_path_to_table_data*/, const String & /*new_database_name*/, const String & /*new_table_name*/,
virtual void rename(const String & /*new_path_to_table_data*/, const String & new_database_name, const String & new_table_name,
TableStructureWriteLockHolder &)
{
throw Exception("Method rename is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
renameInMemory(new_database_name, new_table_name);
}
/**
* Just updates the names of the database and the table without moving any data on disk
* Can be called directly only from DatabaseAtomic.
*/
virtual void renameInMemory(const String & new_database_name, const String & new_table_name);
/** ALTER tables in the form of column changes that do not affect the change to Storage or its parameters.
* This method must fully execute the ALTER query, taking care of the locks itself.
* To update the table metadata on disk, this method should call InterpreterAlterQuery::updateMetadata.

View File

@ -11,7 +11,6 @@ namespace DB
class IStorage;
using StoragePtr = std::shared_ptr<IStorage>;
using StorageWeakPtr = std::weak_ptr<IStorage>;
using Tables = std::map<String, StoragePtr>;
}

View File

@ -75,8 +75,7 @@ namespace
}
StorageKafka::StorageKafka(
const std::string & table_name_,
const std::string & database_name_,
const StorageID & table_id_,
Context & context_,
const ColumnsDescription & columns_,
const String & brokers_,
@ -89,14 +88,12 @@ StorageKafka::StorageKafka(
UInt64 max_block_size_,
size_t skip_broken_,
bool intermediate_commit_)
: IStorage(
: IStorage(table_id_,
ColumnsDescription({{"_topic", std::make_shared<DataTypeString>()},
{"_key", std::make_shared<DataTypeString>()},
{"_offset", std::make_shared<DataTypeUInt64>()},
{"_partition", std::make_shared<DataTypeUInt64>()},
{"_timestamp", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeDateTime>())}}, true))
, table_name(table_name_)
, database_name(database_name_)
, global_context(context_.getGlobalContext())
, kafka_context(Context(global_context))
, topics(global_context.getMacros()->expand(topics_))
@ -107,7 +104,7 @@ StorageKafka::StorageKafka(
, schema_name(global_context.getMacros()->expand(schema_name_))
, num_consumers(num_consumers_)
, max_block_size(max_block_size_)
, log(&Logger::get("StorageKafka (" + table_name_ + ")"))
, log(&Logger::get("StorageKafka (" + table_id_.table_name + ")"))
, semaphore(0, num_consumers_)
, skip_broken(skip_broken_)
, intermediate_commit(intermediate_commit_)
@ -195,14 +192,6 @@ void StorageKafka::shutdown()
task->deactivate();
}
void StorageKafka::rename(const String & /* new_path_to_db */, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &)
{
table_name = new_table_name;
database_name = new_database_name;
}
void StorageKafka::updateDependencies()
{
task->activateAndSchedule();
@ -303,17 +292,17 @@ void StorageKafka::updateConfiguration(cppkafka::Configuration & conf)
}
}
bool StorageKafka::checkDependencies(const String & current_database_name, const String & current_table_name)
bool StorageKafka::checkDependencies(const StorageID & table_id)
{
// Check if all dependencies are attached
auto dependencies = global_context.getDependencies(current_database_name, current_table_name);
auto dependencies = global_context.getDependencies(table_id);
if (dependencies.size() == 0)
return true;
// Check whether the dependencies are ready
for (const auto & db_tab : dependencies)
{
auto table = global_context.tryGetTable(db_tab.first, db_tab.second);
auto table = global_context.tryGetTable(db_tab);
if (!table)
return false;
@ -323,7 +312,7 @@ bool StorageKafka::checkDependencies(const String & current_database_name, const
return false;
// Check all its dependencies
if (!checkDependencies(db_tab.first, db_tab.second))
if (!checkDependencies(db_tab))
return false;
}
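The recursion above keeps a Kafka table streaming only while every direct and transitive dependency is still attached. A self-contained sketch of the same shape, assuming a toy Node type in place of StorageID/Context:

#include <vector>

struct Node
{
    bool attached = true;
    std::vector<Node> deps;
};

/// Returns false as soon as any direct or transitive dependency is missing.
bool allDependenciesReady(const Node & node)
{
    for (const auto & dep : node.deps)
        if (!dep.attached || !allDependenciesReady(dep))
            return false;
    return true;
}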
@ -334,13 +323,14 @@ void StorageKafka::threadFunc()
{
try
{
auto table_id = getStorageID();
// Check if at least one direct dependency is attached
auto dependencies = global_context.getDependencies(database_name, table_name);
auto dependencies = global_context.getDependencies(table_id);
// Keep streaming as long as there are attached views and streaming is not cancelled
while (!stream_cancelled && num_created_consumers > 0 && dependencies.size() > 0)
{
if (!checkDependencies(database_name, table_name))
if (!checkDependencies(table_id))
break;
LOG_DEBUG(log, "Started streaming to " << dependencies.size() << " attached views");
@ -363,14 +353,15 @@ void StorageKafka::threadFunc()
bool StorageKafka::streamToViews()
{
auto table = global_context.getTable(database_name, table_name);
auto table_id = getStorageID();
auto table = global_context.getTable(table_id);
if (!table)
throw Exception("Engine table " + backQuote(database_name) + "." + backQuote(table_name) + " doesn't exist.", ErrorCodes::LOGICAL_ERROR);
throw Exception("Engine table " + table_id.getNameForLogs() + " doesn't exist.", ErrorCodes::LOGICAL_ERROR);
// Create an INSERT query for streaming data
auto insert = std::make_shared<ASTInsertQuery>();
insert->database = database_name;
insert->table = table_name;
insert->database = table_id.database_name;
insert->table = table_id.table_name;
const Settings & settings = global_context.getSettingsRef();
size_t block_size = max_block_size;
@ -643,7 +634,7 @@ void registerStorageKafka(StorageFactory & factory)
}
return StorageKafka::create(
args.table_name, args.database_name, args.context, args.columns,
args.table_id, args.context, args.columns,
brokers, group, topics, format, row_delimiter, schema, num_consumers, max_block_size, skip_broken, intermediate_commit);
});
}

View File

@ -28,8 +28,6 @@ class StorageKafka : public ext::shared_ptr_helper<StorageKafka>, public IStorag
friend struct ext::shared_ptr_helper<StorageKafka>;
public:
std::string getName() const override { return "Kafka"; }
std::string getTableName() const override { return table_name; }
std::string getDatabaseName() const override { return database_name; }
bool supportsSettings() const override { return true; }
bool noPushingToViews() const override { return true; }
@ -49,8 +47,6 @@ public:
const ASTPtr & query,
const Context & context) override;
void rename(const String & /* new_path_to_db */, const String & new_database_name, const String & new_table_name, TableStructureWriteLockHolder &) override;
void updateDependencies() override;
void pushReadBuffer(ConsumerBufferPtr buf);
@ -66,19 +62,22 @@ public:
protected:
StorageKafka(
const std::string & table_name_,
const std::string & database_name_,
const StorageID & table_id_,
Context & context_,
const ColumnsDescription & columns_,
const String & brokers_, const String & group_, const Names & topics_,
const String & format_name_, char row_delimiter_, const String & schema_name_,
size_t num_consumers_, UInt64 max_block_size_, size_t skip_broken,
const String & brokers_,
const String & group_,
const Names & topics_,
const String & format_name_,
char row_delimiter_,
const String & schema_name_,
size_t num_consumers_,
UInt64 max_block_size_,
size_t skip_broken,
bool intermediate_commit_);
private:
// Configuration and state
String table_name;
String database_name;
Context global_context;
Context kafka_context;
Names topics;
@ -116,7 +115,7 @@ private:
void threadFunc();
bool streamToViews();
bool checkDependencies(const String & database_name, const String & table_name);
bool checkDependencies(const StorageID & table_id);
};
}

View File

@ -12,21 +12,19 @@ class StorageBlocks : public IStorage
* Used by Live Views to complete the stored query based on the mergeable blocks.
*/
public:
StorageBlocks(const std::string & database_name_, const std::string & table_name_,
StorageBlocks(const StorageID & table_id_,
const ColumnsDescription & columns_, BlockInputStreams streams_,
QueryProcessingStage::Enum to_stage_)
: database_name(database_name_), table_name(table_name_), streams(streams_), to_stage(to_stage_)
: IStorage(table_id_), streams(streams_), to_stage(to_stage_)
{
setColumns(columns_);
}
static StoragePtr createStorage(const std::string & database_name, const std::string & table_name,
static StoragePtr createStorage(const StorageID & table_id,
const ColumnsDescription & columns, BlockInputStreams streams, QueryProcessingStage::Enum to_stage)
{
return std::make_shared<StorageBlocks>(database_name, table_name, columns, streams, to_stage);
return std::make_shared<StorageBlocks>(table_id, columns, streams, to_stage);
}
std::string getName() const override { return "Blocks"; }
std::string getTableName() const override { return table_name; }
std::string getDatabaseName() const override { return database_name; }
QueryProcessingStage::Enum getQueryProcessingStage(const Context & /*context*/) const override { return to_stage; }
BlockInputStreams read(
@ -41,8 +39,6 @@ public:
}
private:
std::string database_name;
std::string table_name;
Block res_block;
BlockInputStreams streams;
QueryProcessingStage::Enum to_stage;

View File

@ -13,7 +13,6 @@ limitations under the License. */
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTWatchQuery.h>
#include <Parsers/ASTDropQuery.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTLiteral.h>
#include <Interpreters/Context.h>
#include <Interpreters/InterpreterDropQuery.h>
@ -54,20 +53,15 @@ namespace ErrorCodes
extern const int SUPPORT_IS_DISABLED;
}
static void extractDependentTable(ASTPtr & query, String & select_database_name, String & select_table_name, const String & table_name, ASTPtr & inner_subquery)
static StorageID extractDependentTable(ASTPtr & query, Context & context, const String & table_name, ASTPtr & inner_subquery)
{
ASTSelectQuery & select_query = typeid_cast<ASTSelectQuery &>(*query);
auto db_and_table = getDatabaseAndTable(select_query, 0);
ASTPtr subquery = extractTableExpression(select_query, 0);
if (!db_and_table && !subquery)
if (auto db_and_table = getDatabaseAndTable(select_query, 0))
{
return;
}
if (db_and_table)
{
select_table_name = db_and_table->table;
String select_database_name = context.getCurrentDatabase();
String select_table_name = db_and_table->table;
if (db_and_table->database.empty())
{
@ -79,20 +73,27 @@ static void extractDependentTable(ASTPtr & query, String & select_database_name,
select_database_name = db_and_table->database;
select_query.replaceDatabaseAndTable("", table_name + "_blocks");
return StorageID(select_database_name, select_table_name);
}
else if (auto * ast_select = subquery->as<ASTSelectWithUnionQuery>())
else if (auto subquery = extractTableExpression(select_query, 0))
{
auto * ast_select = subquery->as<ASTSelectWithUnionQuery>();
if (!ast_select)
throw Exception("Logical error while creating StorageLiveView."
" Could not retrieve table name from select query.",
DB::ErrorCodes::LOGICAL_ERROR);
if (ast_select->list_of_selects->children.size() != 1)
throw Exception("UNION is not supported for LIVE VIEW", ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW);
inner_subquery = ast_select->list_of_selects->children.at(0)->clone();
extractDependentTable(ast_select->list_of_selects->children.at(0), select_database_name, select_table_name, table_name, inner_subquery);
return extractDependentTable(ast_select->list_of_selects->children.at(0), context, table_name, inner_subquery);
}
else
throw Exception("Logical error while creating StorageLiveView."
" Could not retrieve table name from select query.",
DB::ErrorCodes::LOGICAL_ERROR);
{
/// If the table is not specified - use the table `system.one`
return StorageID("system", "one");
}
}
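Concretely, the rewritten helper resolves the LIVE VIEW dependency in three ways: a plain table reference yields a StorageID with the current database filled in when none is given; a subquery recurses into its innermost SELECT; and a query with no FROM clause at all (for example, a hypothetical CREATE LIVE VIEW lv AS SELECT 1) falls back to StorageID("system", "one").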
MergeableBlocksPtr StorageLiveView::collectMergeableBlocks(const Context & context)
@ -138,10 +139,11 @@ BlockInputStreamPtr StorageLiveView::completeQuery(BlockInputStreams from)
auto block_context = std::make_unique<Context>(global_context);
block_context->makeQueryContext();
auto blocks_storage = StorageBlocks::createStorage(database_name, table_name, parent_storage->getColumns(),
auto blocks_storage_id = getBlocksStorageID();
auto blocks_storage = StorageBlocks::createStorage(blocks_storage_id, parent_storage->getColumns(),
std::move(from), QueryProcessingStage::WithMergeableState);
block_context->addExternalTable(table_name + "_blocks", blocks_storage);
block_context->addExternalTable(blocks_storage_id.table_name, blocks_storage);
InterpreterSelectQuery select(inner_blocks_query->clone(), *block_context, StoragePtr(), SelectQueryOptions(QueryProcessingStage::Complete));
BlockInputStreamPtr data = std::make_shared<MaterializingBlockInputStream>(select.execute().in);
@ -193,6 +195,8 @@ void StorageLiveView::writeIntoLiveView(
}
}
auto blocks_storage_id = live_view.getBlocksStorageID();
if (!is_block_processed)
{
ASTPtr mergeable_query = live_view.getInnerQuery();
@ -202,7 +206,7 @@ void StorageLiveView::writeIntoLiveView(
BlockInputStreams streams = {std::make_shared<OneBlockInputStream>(block)};
auto blocks_storage = StorageBlocks::createStorage(live_view.database_name, live_view.table_name,
auto blocks_storage = StorageBlocks::createStorage(blocks_storage_id,
live_view.getParentStorage()->getColumns(), std::move(streams), QueryProcessingStage::FetchColumns);
InterpreterSelectQuery select_block(mergeable_query, context, blocks_storage,
@ -232,13 +236,11 @@ void StorageLiveView::writeIntoLiveView(
StorageLiveView::StorageLiveView(
const String & table_name_,
const String & database_name_,
const StorageID & table_id_,
Context & local_context,
const ASTCreateQuery & query,
const ColumnsDescription & columns_)
: table_name(table_name_),
database_name(database_name_), global_context(local_context.getGlobalContext())
: IStorage(table_id_), global_context(local_context.getGlobalContext())
{
live_view_context = std::make_unique<Context>(global_context);
live_view_context->makeQueryContext();
@ -249,7 +251,6 @@ StorageLiveView::StorageLiveView(
throw Exception("SELECT query is not specified for " + getName(), ErrorCodes::INCORRECT_QUERY);
/// Default value, if only the table name exists in the query
select_database_name = local_context.getCurrentDatabase();
if (query.select->list_of_selects->children.size() != 1)
throw Exception("UNION is not supported for LIVE VIEW", ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW);
@ -258,20 +259,11 @@ StorageLiveView::StorageLiveView(
InterpreterSelectQuery(inner_blocks_query, *live_view_context, SelectQueryOptions().modify().analyze());
extractDependentTable(inner_blocks_query, select_database_name, select_table_name, table_name, inner_subquery);
select_table_id = extractDependentTable(inner_blocks_query, global_context, table_id_.table_name, inner_subquery);
/// If the table is not specified - use the table `system.one`
if (select_table_name.empty())
{
select_database_name = "system";
select_table_name = "one";
}
global_context.addDependency(select_table_id, table_id_);
global_context.addDependency(
DatabaseAndTableName(select_database_name, select_table_name),
DatabaseAndTableName(database_name, table_name));
parent_storage = local_context.getTable(select_database_name, select_table_name);
parent_storage = local_context.getTable(select_table_id);
is_temporary = query.temporary;
temporary_live_view_timeout = local_context.getSettingsRef().temporary_live_view_timeout.totalSeconds();
@ -366,11 +358,12 @@ bool StorageLiveView::getNewBlocks()
void StorageLiveView::checkTableCanBeDropped() const
{
Dependencies dependencies = global_context.getDependencies(database_name, table_name);
auto table_id = getStorageID();
Dependencies dependencies = global_context.getDependencies(table_id);
if (!dependencies.empty())
{
DatabaseAndTableName database_and_table_name = dependencies.front();
throw Exception("Table has dependency " + database_and_table_name.first + "." + database_and_table_name.second, ErrorCodes::TABLE_WAS_NOT_DROPPED);
StorageID dependent_table_id = dependencies.front();
throw Exception("Table has dependency " + dependent_table_id.getNameForLogs(), ErrorCodes::TABLE_WAS_NOT_DROPPED);
}
}
@ -381,6 +374,7 @@ void StorageLiveView::noUsersThread(std::shared_ptr<StorageLiveView> storage, co
if (storage->shutdown_called)
return;
auto table_id = storage->getStorageID();
{
while (1)
{
@ -392,7 +386,7 @@ void StorageLiveView::noUsersThread(std::shared_ptr<StorageLiveView> storage, co
return;
if (storage->hasUsers())
return;
if (!storage->global_context.getDependencies(storage->database_name, storage->table_name).empty())
if (!storage->global_context.getDependencies(table_id).empty())
continue;
drop_table = true;
}
@ -402,14 +396,14 @@ void StorageLiveView::noUsersThread(std::shared_ptr<StorageLiveView> storage, co
if (drop_table)
{
if (storage->global_context.tryGetTable(storage->database_name, storage->table_name))
if (storage->global_context.tryGetTable(table_id))
{
try
{
/// We create and execute `drop` query for this table
auto drop_query = std::make_shared<ASTDropQuery>();
drop_query->database = storage->database_name;
drop_query->table = storage->table_name;
drop_query->database = table_id.database_name;
drop_query->table = table_id.table_name;
drop_query->kind = ASTDropQuery::Kind::Drop;
ASTPtr ast_drop_query = drop_query;
InterpreterDropQuery drop_interpreter(ast_drop_query, storage->global_context);
@ -417,6 +411,7 @@ void StorageLiveView::noUsersThread(std::shared_ptr<StorageLiveView> storage, co
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
}
@ -493,9 +488,8 @@ StorageLiveView::~StorageLiveView()
void StorageLiveView::drop(TableStructureWriteLockHolder &)
{
global_context.removeDependency(
DatabaseAndTableName(select_database_name, select_table_name),
DatabaseAndTableName(database_name, table_name));
auto table_id = getStorageID();
global_context.removeDependency(select_table_id, table_id);
std::lock_guard lock(mutex);
is_dropped = true;
@ -623,7 +617,7 @@ void registerStorageLiveView(StorageFactory & factory)
if (!args.attach && !args.local_context.getSettingsRef().allow_experimental_live_view)
throw Exception("Experimental LIVE VIEW feature is not enabled (the setting 'allow_experimental_live_view')", ErrorCodes::SUPPORT_IS_DISABLED);
return StorageLiveView::create(args.table_name, args.database_name, args.local_context, args.query, args.columns);
return StorageLiveView::create(args.table_id, args.local_context, args.query, args.columns);
});
}

View File

@ -48,10 +48,11 @@ friend class LiveViewBlockOutputStream;
public:
~StorageLiveView() override;
String getName() const override { return "LiveView"; }
String getTableName() const override { return table_name; }
String getDatabaseName() const override { return database_name; }
String getSelectDatabaseName() const { return select_database_name; }
String getSelectTableName() const { return select_table_name; }
StorageID getSelectTableID() const { return select_table_id; }
StorageID getBlocksStorageID() const
{
return StorageID("", getStorageID().table_name + "_blocks");
}
StoragePtr getParentStorage() const { return parent_storage; }
NameAndTypePair getColumn(const String & column_name) const override;
@ -170,10 +171,7 @@ public:
const Context & context);
private:
String select_database_name;
String select_table_name;
String table_name;
String database_name;
StorageID select_table_id = StorageID::createEmpty(); /// Will be initialized in constructor
ASTPtr inner_query; /// stored query : SELECT * FROM ( SELECT a FROM A)
ASTPtr inner_subquery; /// stored query's innermost subquery if any
ASTPtr inner_blocks_query; /// query over the mergeable blocks to produce final result
@ -210,8 +208,7 @@ private:
UInt64 temporary_live_view_timeout;
StorageLiveView(
const String & table_name_,
const String & database_name_,
const StorageID & table_id_,
Context & local_context,
const ASTCreateQuery & query,
const ColumnsDescription & columns

View File

@ -1,8 +1,6 @@
#include <Storages/MergeTree/DataPartsExchange.h>
#include <Storages/IStorage.h>
#include <Common/CurrentMetrics.h>
#include <Common/NetException.h>
#include <Common/typeid_cast.h>
#include <IO/HTTPCommon.h>
#include <Poco/File.h>
#include <ext/scope_guard.h>
@ -53,9 +51,6 @@ std::string Service::getId(const std::string & node_id) const
void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*body*/, WriteBuffer & out, Poco::Net::HTTPServerResponse & response)
{
if (blocker.isCancelled())
throw Exception("Transferring part to replica was cancelled", ErrorCodes::ABORTED);
String client_protocol_version = params.get("client_protocol_version", REPLICATION_PROTOCOL_VERSION_WITHOUT_PARTS_SIZE);
@ -88,15 +83,11 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo
++data.current_table_sends;
SCOPE_EXIT({--data.current_table_sends;});
StoragePtr owned_storage = storage.lock();
if (!owned_storage)
throw Exception("The table was already dropped", ErrorCodes::UNKNOWN_TABLE);
LOG_TRACE(log, "Sending part " << part_name);
try
{
auto storage_lock = owned_storage->lockStructureForShare(false, RWLockImpl::NO_QUERY);
auto storage_lock = data.lockStructureForShare(false, RWLockImpl::NO_QUERY);
MergeTreeData::DataPartPtr part = findPart(part_name);

View File

@ -20,8 +20,8 @@ namespace DataPartsExchange
class Service final : public InterserverIOEndpoint
{
public:
Service(MergeTreeData & data_, StoragePtr & storage_) : data(data_),
storage(storage_), log(&Logger::get(data.getLogName() + " (Replicated PartsService)")) {}
Service(MergeTreeData & data_)
: data(data_), log(&Logger::get(data.getLogName() + " (Replicated PartsService)")) {}
Service(const Service &) = delete;
Service & operator=(const Service &) = delete;
@ -33,8 +33,9 @@ private:
MergeTreeData::DataPartPtr findPart(const String & name);
private:
/// StorageReplicatedMergeTree::shutdown() waits for all parts exchange handlers to finish,
/// so Service will never access a dangling reference to the storage
MergeTreeData & data;
StorageWeakPtr storage;
Logger * log;
};
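The replaced StorageWeakPtr relied on locking a weak pointer per request; the new comment states the actual invariant: shutdown() drains all in-flight exchange handlers before the storage dies, so a plain reference cannot dangle. A minimal sketch of that lifetime argument (the type and the polling loop are illustrative; the real code waits on the HTTP handler machinery rather than sleeping):

#include <atomic>
#include <chrono>
#include <thread>

struct PartsService
{
    std::atomic<int> active_handlers{0};

    void handle()
    {
        ++active_handlers;
        /// ... serve the part through a plain reference to the storage ...
        --active_handlers;
    }

    /// Called on the shutdown path: after this returns, no handler touches the storage.
    void waitForHandlers() const
    {
        while (active_handlers.load() > 0)
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
};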

View File

@ -111,8 +111,7 @@ namespace
MergeTreeData::MergeTreeData(
const String & database_,
const String & table_,
const StorageID & table_id_,
const String & relative_data_path_,
const StorageInMemoryMetadata & metadata,
Context & context_,
@ -122,17 +121,16 @@ MergeTreeData::MergeTreeData(
bool require_part_metadata_,
bool attach,
BrokenPartCallback broken_part_callback_)
: global_context(context_)
: IStorage(table_id_)
, global_context(context_)
, merging_params(merging_params_)
, partition_by_ast(metadata.partition_by_ast)
, sample_by_ast(metadata.sample_by_ast)
, settings_ast(metadata.settings_ast)
, require_part_metadata(require_part_metadata_)
, database_name(database_)
, table_name(table_)
, relative_data_path(relative_data_path_)
, broken_part_callback(broken_part_callback_)
, log_name(database_name + "." + table_name)
, log_name(table_id_.getNameForLogs())
, log(&Logger::get(log_name))
, storage_settings(std::move(storage_settings_))
, storage_policy(context_.getStoragePolicy(getSettings()->storage_policy))
@ -635,6 +633,10 @@ void MergeTreeData::setTTLExpressions(const ColumnsDescription::ColumnTTLs & new
if (new_ttl_table_ast)
{
std::vector<TTLEntry> update_move_ttl_entries;
ASTPtr update_ttl_table_ast = nullptr;
TTLEntry update_ttl_table_entry;
bool seen_delete_ttl = false;
for (auto ttl_element_ptr : new_ttl_table_ast->children)
{
@ -649,8 +651,8 @@ void MergeTreeData::setTTLExpressions(const ColumnsDescription::ColumnTTLs & new
auto new_ttl_table_entry = create_ttl_entry(ttl_element.children[0]);
if (!only_check)
{
ttl_table_ast = ttl_element.children[0];
ttl_table_entry = new_ttl_table_entry;
update_ttl_table_ast = ttl_element.children[0];
update_ttl_table_entry = new_ttl_table_entry;
}
seen_delete_ttl = true;
@ -673,11 +675,18 @@ void MergeTreeData::setTTLExpressions(const ColumnsDescription::ColumnTTLs & new
}
if (!only_check)
{
move_ttl_entries.emplace_back(std::move(new_ttl_entry));
}
update_move_ttl_entries.emplace_back(std::move(new_ttl_entry));
}
}
if (!only_check)
{
ttl_table_entry = update_ttl_table_entry;
ttl_table_ast = update_ttl_table_ast;
auto move_ttl_entries_lock = std::lock_guard<std::mutex>(move_ttl_entries_mutex);
move_ttl_entries = update_move_ttl_entries;
}
}
}
@ -1257,7 +1266,9 @@ void MergeTreeData::removePartsFinally(const MergeTreeData::DataPartsVector & pa
/// Data parts are still alive (since DataPartsVector holds shared_ptrs) and contain useful meta-information for logging
/// NOTE: There is no need to log parts deletion anywhere else; all parts being deleted pass through this function and pass away
if (auto part_log = global_context.getPartLog(database_name))
auto table_id = getStorageID();
if (auto part_log = global_context.getPartLog(table_id.database_name))
{
PartLogElement part_log_elem;
@ -1265,8 +1276,8 @@ void MergeTreeData::removePartsFinally(const MergeTreeData::DataPartsVector & pa
part_log_elem.event_time = time(nullptr);
part_log_elem.duration_ms = 0;
part_log_elem.database_name = database_name;
part_log_elem.table_name = table_name;
part_log_elem.database_name = table_id.database_name;
part_log_elem.table_name = table_id.table_name;
for (auto & part : parts)
{
@ -1341,8 +1352,7 @@ void MergeTreeData::rename(
global_context.dropCaches();
relative_data_path = new_table_path;
table_name = new_table_name;
database_name = new_database_name;
renameInMemory(new_database_name, new_table_name);
}
void MergeTreeData::dropAllData()
@ -3295,7 +3305,7 @@ ReservationPtr MergeTreeData::tryReserveSpacePreferringTTLRules(UInt64 expected_
ReservationPtr reservation;
auto ttl_entry = selectTTLEntryForTTLInfos(ttl_infos, time_of_move);
if (ttl_entry != nullptr)
if (ttl_entry)
{
SpacePtr destination_ptr = ttl_entry->getDestination(storage_policy);
if (!destination_ptr)
@ -3354,27 +3364,28 @@ bool MergeTreeData::TTLEntry::isPartInDestination(const StoragePolicyPtr & polic
return false;
}
const MergeTreeData::TTLEntry * MergeTreeData::selectTTLEntryForTTLInfos(
std::optional<MergeTreeData::TTLEntry> MergeTreeData::selectTTLEntryForTTLInfos(
const MergeTreeDataPart::TTLInfos & ttl_infos,
time_t time_of_move) const
{
const MergeTreeData::TTLEntry * result = nullptr;
/// Prefer TTL rule which went into action last.
time_t max_max_ttl = 0;
std::vector<DB::MergeTreeData::TTLEntry>::const_iterator best_entry_it;
for (const auto & ttl_entry : move_ttl_entries)
auto lock = std::lock_guard(move_ttl_entries_mutex);
for (auto ttl_entry_it = move_ttl_entries.begin(); ttl_entry_it != move_ttl_entries.end(); ++ttl_entry_it)
{
auto ttl_info_it = ttl_infos.moves_ttl.find(ttl_entry.result_column);
auto ttl_info_it = ttl_infos.moves_ttl.find(ttl_entry_it->result_column);
/// Prefer TTL rule which went into action last.
if (ttl_info_it != ttl_infos.moves_ttl.end()
&& ttl_info_it->second.max <= time_of_move
&& max_max_ttl <= ttl_info_it->second.max)
{
result = &ttl_entry;
best_entry_it = ttl_entry_it;
max_max_ttl = ttl_info_it->second.max;
}
}
return result;
return max_max_ttl ? *best_entry_it : std::optional<MergeTreeData::TTLEntry>();
}
MergeTreeData::DataParts MergeTreeData::getDataParts(const DataPartStates & affordable_states) const
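The rewritten selector keeps the "prefer the rule that went into action last" logic, but returns a copy wrapped in std::optional instead of a pointer into move_ttl_entries, so the caller holds a value that stays valid after move_ttl_entries_mutex is released. A self-contained sketch of the same selection pattern over plain integers:

#include <optional>
#include <vector>

/// Pick the candidate with the largest value not exceeding `now`; return an empty
/// optional when nothing qualifies (best == 0 here, mirroring max_max_ttl == 0 above).
std::optional<int> selectLatest(const std::vector<int> & candidates, int now)
{
    int best = 0;
    auto best_it = candidates.begin();
    for (auto it = candidates.begin(); it != candidates.end(); ++it)
    {
        if (*it <= now && best <= *it)
        {
            best_it = it;
            best = *it;
        }
    }
    return best ? std::optional<int>(*best_it) : std::nullopt;
}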
@ -3522,11 +3533,12 @@ bool MergeTreeData::mayBenefitFromIndexForIn(const ASTPtr & left_in_operand, con
}
}
MergeTreeData & MergeTreeData::checkStructureAndGetMergeTreeData(const StoragePtr & source_table) const
MergeTreeData & MergeTreeData::checkStructureAndGetMergeTreeData(IStorage * source_table) const
{
MergeTreeData * src_data = dynamic_cast<MergeTreeData *>(source_table.get());
MergeTreeData * src_data = dynamic_cast<MergeTreeData *>(source_table);
if (!src_data)
throw Exception("Table " + table_name + " supports attachPartitionFrom only for MergeTree family of table engines."
throw Exception("Table " + source_table->getStorageID().getNameForLogs() +
" supports attachPartitionFrom only for MergeTree family of table engines."
" Got " + source_table->getName(), ErrorCodes::NOT_IMPLEMENTED);
if (getColumns().getAllPhysical().sizeOfDifference(src_data->getColumns().getAllPhysical()))
@ -3549,6 +3561,11 @@ MergeTreeData & MergeTreeData::checkStructureAndGetMergeTreeData(const StoragePt
return *src_data;
}
MergeTreeData & MergeTreeData::checkStructureAndGetMergeTreeData(const StoragePtr & source_table) const
{
return checkStructureAndGetMergeTreeData(source_table.get());
}
MergeTreeData::MutableDataPartPtr MergeTreeData::cloneAndLoadDataPartOnSameDisk(const MergeTreeData::DataPartPtr & src_part,
const String & tmp_part_prefix,
const MergeTreePartInfo & dst_part_info)
@ -3687,7 +3704,8 @@ void MergeTreeData::writePartLog(
const MergeListEntry * merge_entry)
try
{
auto part_log = global_context.getPartLog(database_name);
auto table_id = getStorageID();
auto part_log = global_context.getPartLog(table_id.database_name);
if (!part_log)
return;
@ -3702,8 +3720,8 @@ try
/// TODO: Stop stopwatch in outer code to exclude ZK timings and so on
part_log_elem.duration_ms = elapsed_ns / 1000000;
part_log_elem.database_name = database_name;
part_log_elem.table_name = table_name;
part_log_elem.database_name = table_id.database_name;
part_log_elem.table_name = table_id.table_name;
part_log_elem.partition_id = MergeTreePartInfo::fromPartName(new_part_name, format_version).partition_id;
part_log_elem.part_name = new_part_name;
@ -3768,7 +3786,15 @@ bool MergeTreeData::selectPartsAndMove()
bool MergeTreeData::areBackgroundMovesNeeded() const
{
return storage_policy->getVolumes().size() > 1;
auto policy = storage_policy;
if (policy->getVolumes().size() > 1)
return true;
if (policy->getVolumes().size() == 1 && policy->getVolumes()[0]->disks.size() > 1 && move_ttl_entries.size() > 0)
return true;
return false;
}
bool MergeTreeData::movePartsToSpace(const DataPartsVector & parts, SpacePtr space)

View File

@ -330,7 +330,7 @@ public:
///
/// require_part_metadata - should checksums.txt and columns.txt exist in the part directory.
/// attach - whether the existing table is attached or the new table is created.
MergeTreeData(const String & database_, const String & table_,
MergeTreeData(const StorageID & table_id_,
const String & relative_data_path_,
const StorageInMemoryMetadata & metadata,
Context & context_,
@ -396,9 +396,6 @@ public:
|| column_name == "_sample_factor";
}
String getDatabaseName() const override { return database_name; }
String getTableName() const override { return table_name; }
/// Load the set of data parts from disk. Call once - immediately after the object is created.
void loadDataParts(bool skip_sanity_checks);
@ -627,6 +624,7 @@ public:
/// and checks that their structure suitable for ALTER TABLE ATTACH PARTITION FROM
/// Tables structure should be locked.
MergeTreeData & checkStructureAndGetMergeTreeData(const StoragePtr & source_table) const;
MergeTreeData & checkStructureAndGetMergeTreeData(IStorage * source_table) const;
MergeTreeData::MutableDataPartPtr cloneAndLoadDataPartOnSameDisk(
const MergeTreeData::DataPartPtr & src_part, const String & tmp_part_prefix, const MergeTreePartInfo & dst_part_info);
@ -739,12 +737,17 @@ public:
bool isPartInDestination(const StoragePolicyPtr & policy, const MergeTreeDataPart & part) const;
};
const TTLEntry * selectTTLEntryForTTLInfos(const MergeTreeDataPart::TTLInfos & ttl_infos, time_t time_of_move) const;
std::optional<TTLEntry> selectTTLEntryForTTLInfos(const MergeTreeDataPart::TTLInfos & ttl_infos, time_t time_of_move) const;
using TTLEntriesByName = std::unordered_map<String, TTLEntry>;
TTLEntriesByName column_ttl_entries_by_name;
TTLEntry ttl_table_entry;
/// This mutex is required for background move operations which do not obtain global locks.
mutable std::mutex move_ttl_entries_mutex;
/// Read/write operations on the vector must be done under "move_ttl_entries_mutex".
std::vector<TTLEntry> move_ttl_entries;
String sampling_expr_column_name;
@ -786,8 +789,6 @@ protected:
bool require_part_metadata;
String database_name;
String table_name;
String relative_data_path;

View File

@ -127,14 +127,14 @@ bool MergeTreePartsMover::selectPartsForMove(
if (!can_move(part, &reason))
continue;
const MergeTreeData::TTLEntry * ttl_entry_ptr = part->storage.selectTTLEntryForTTLInfos(part->ttl_infos, time_of_move);
auto ttl_entry = part->storage.selectTTLEntryForTTLInfos(part->ttl_infos, time_of_move);
auto to_insert = need_to_move.find(part->disk);
ReservationPtr reservation;
if (ttl_entry_ptr)
if (ttl_entry)
{
auto destination = ttl_entry_ptr->getDestination(policy);
if (destination && !ttl_entry_ptr->isPartInDestination(policy, *part))
reservation = part->storage.tryReserveSpace(part->bytes_on_disk, ttl_entry_ptr->getDestination(policy));
auto destination = ttl_entry->getDestination(policy);
if (destination && !ttl_entry->isPartInDestination(policy, *part))
reservation = part->storage.tryReserveSpace(part->bytes_on_disk, ttl_entry->getDestination(policy));
}
if (reservation) /// Found reservation by TTL rule.
@ -221,7 +221,9 @@ void MergeTreePartsMover::swapClonedPart(const MergeTreeData::DataPartPtr & clon
return;
}
cloned_part->renameTo(active_part->name);
/// Don't remove the new directory, but throw an error instead, because it may contain a part which is currently in use.
cloned_part->renameTo(active_part->name, false);
/// TODO: what happens if the server goes down here?
data->swapActivePart(cloned_part);

Some files were not shown because too many files have changed in this diff.