Merge branch 'master' into fix-filter-table

Commit befb3ce15b by Ivan Lezhankin, 2019-07-10 13:27:26 +03:00
139 changed files with 1049 additions and 775 deletions

View File

@ -1,3 +1,20 @@
## ClickHouse release 19.9.4.1, 2019-07-05
### Bug Fix
* Fix segfault in Delta codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [#5786](https://github.com/yandex/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
* Fix rare bug in checking of a part with a LowCardinality column. [#5832](https://github.com/yandex/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
* Fix segfault in TTL merge with non-physical columns in block. [#5819](https://github.com/yandex/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
* Fix potential infinite sleeping of low-priority queries. [#5842](https://github.com/yandex/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Fix bug where ClickHouse determined the default time zone as UCT instead of UTC. [#5828](https://github.com/yandex/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Fix bug with executing distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries on a follower replica before the leader replica. Now they are executed directly on the leader replica. [#5757](https://github.com/yandex/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
* Fix race condition that could cause some queries not to appear in query_log immediately after a SYSTEM FLUSH LOGS query. [#5685](https://github.com/yandex/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
* Added missing support for constant arguments to `evalMLModel` function. [#5820](https://github.com/yandex/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
## ClickHouse release 19.7.6.1, 2019-07-05
### Bug Fix
* Fix performance regression in some queries with JOIN. [#5192](https://github.com/yandex/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
## ClickHouse release 19.9.2.4, 2019-06-24
### New Feature

View File

@ -1,3 +1,20 @@
## ClickHouse release 19.9.4.1, 2019-07-05
### Bug Fixes
* Fixed a segmentation fault in the Delta compression codec for columns with values smaller than 32 bits. The bug could lead to memory corruption. [#5786](https://github.com/yandex/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
* Fixed a bug in the check of parts with LowCardinality columns. [#5832](https://github.com/yandex/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
* Fixed a segmentation fault in TTL merges when the block contains columns that are not part of the table structure. [#5819](https://github.com/yandex/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
* Fixed a possible infinite wait in low-priority queries. [#5842](https://github.com/yandex/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Fixed detection of the default time zone (UCT instead of UTC). [#5828](https://github.com/yandex/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Fixed a bug in distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries. [#5757](https://github.com/yandex/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
* Fixed a bug that, for distributed queries, could cause some queries not to appear in query_log immediately after a SYSTEM FLUSH LOGS query. [#5685](https://github.com/yandex/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
* Added missing support for constant arguments to the `evalMLModel` function. [#5820](https://github.com/yandex/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
## ClickHouse release 19.7.6.1, 2019-07-05
### Bug Fixes
* Fixed a performance regression of JOIN in some types of queries. [#5192](https://github.com/yandex/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
## ClickHouse release 19.8.3.8, 2019-06-11
### New Features

View File

@ -189,6 +189,7 @@ if(WITH_COVERAGE AND COMPILER_CLANG)
endif()
if(WITH_COVERAGE AND COMPILER_GCC)
set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-arcs -ftest-coverage")
set(COVERAGE_OPTION "-lgcov")
endif()
set (CMAKE_BUILD_COLOR_MAKEFILE ON)
@ -257,7 +258,7 @@ if (OS_LINUX AND NOT UNBUNDLED AND (GLIBC_COMPATIBILITY OR USE_INTERNAL_UNWIND_L
if (USE_LIBCXX)
set (DEFAULT_LIBS "${DEFAULT_LIBS} -Wl,-Bstatic -lc++ -lc++abi ${EXCEPTION_HANDLING_LIBRARY} ${BUILTINS_LIB_PATH} -Wl,-Bdynamic")
else ()
set (DEFAULT_LIBS "${DEFAULT_LIBS} -Wl,-Bstatic -lstdc++ ${EXCEPTION_HANDLING_LIBRARY} -lgcc ${BUILTINS_LIB_PATH} -Wl,-Bdynamic")
set (DEFAULT_LIBS "${DEFAULT_LIBS} -Wl,-Bstatic -lstdc++ ${EXCEPTION_HANDLING_LIBRARY} ${COVERAGE_OPTION} -lgcc ${BUILTINS_LIB_PATH} -Wl,-Bdynamic")
endif ()
# Linking with GLIBC prevents portability of binaries to older systems.
@ -473,4 +474,5 @@ if (GLIBC_COMPATIBILITY OR USE_INTERNAL_UNWIND_LIBRARY_FOR_EXCEPTION_HANDLING)
add_default_dependencies(brotli)
add_default_dependencies(libprotobuf)
add_default_dependencies(base64)
add_default_dependencies(readpassphrase)
endif ()

View File

@ -251,7 +251,6 @@ if(USE_INTERNAL_GTEST_LIBRARY)
add_subdirectory(${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest ${CMAKE_CURRENT_BINARY_DIR}/googletest)
# avoid problems with <regexp.h>
target_compile_definitions (gtest INTERFACE GTEST_HAS_POSIX_RE=0)
target_include_directories (gtest SYSTEM INTERFACE ${ClickHouse_SOURCE_DIR}/contrib/googletest/include)
elseif(GTEST_SRC_DIR)
add_subdirectory(${GTEST_SRC_DIR}/googletest ${CMAKE_CURRENT_BINARY_DIR}/googletest)
target_compile_definitions(gtest INTERFACE GTEST_HAS_POSIX_RE=0)

contrib/simdjson vendored

@ -1 +1 @@
Subproject commit 2151ad7f34cf773a23f086e941d661f8a8873144
Subproject commit 3bd3116cf8faf6d482dc31423b16533bfa2696f7

View File

@ -11,8 +11,9 @@ set(SIMDJSON_SRC
${SIMDJSON_SRC_DIR}/stage2_build_tape.cpp
${SIMDJSON_SRC_DIR}/parsedjson.cpp
${SIMDJSON_SRC_DIR}/parsedjsoniterator.cpp
${SIMDJSON_SRC_DIR}/simdjson.cpp
)
add_library(${SIMDJSON_LIBRARY} ${SIMDJSON_SRC})
target_include_directories(${SIMDJSON_LIBRARY} PUBLIC "${SIMDJSON_INCLUDE_DIR}")
target_include_directories(${SIMDJSON_LIBRARY} SYSTEM PUBLIC "${SIMDJSON_INCLUDE_DIR}")
target_compile_options(${SIMDJSON_LIBRARY} PRIVATE -mavx2 -mbmi -mbmi2 -mpclmul)

View File

@ -48,7 +48,7 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wshadow -Wshadow-uncaptured-local -Wextra-semi -Wcomma -Winconsistent-missing-destructor-override -Wunused-exception-parameter -Wcovered-switch-default -Wold-style-cast -Wrange-loop-analysis -Wunused-member-function -Wunreachable-code -Wunreachable-code-return -Wnewline-eof -Wembedded-directive -Wgnu-case-range -Wunused-macros -Wconditional-uninitialized -Wdeprecated -Wundef -Wreserved-id-macro -Wredundant-parens -Wzero-as-null-pointer-constant")
if (WEVERYTHING)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Weverything -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-missing-noreturn -Wno-padded -Wno-switch-enum -Wno-shadow-field-in-constructor -Wno-deprecated-dynamic-exception-spec -Wno-float-equal -Wno-weak-vtables -Wno-shift-sign-overflow -Wno-sign-conversion -Wno-conversion -Wno-exit-time-destructors -Wno-undefined-func-template -Wno-documentation-unknown-command -Wno-missing-variable-declarations -Wno-unused-template -Wno-global-constructors -Wno-c99-extensions -Wno-missing-prototypes -Wno-weak-template-vtables -Wno-zero-length-array -Wno-gnu-anonymous-struct -Wno-nested-anon-types -Wno-double-promotion -Wno-disabled-macro-expansion -Wno-used-but-marked-unused -Wno-vla-extension -Wno-vla -Wno-packed")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Weverything -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-padded -Wno-switch-enum -Wno-shadow-field-in-constructor -Wno-deprecated-dynamic-exception-spec -Wno-float-equal -Wno-weak-vtables -Wno-shift-sign-overflow -Wno-sign-conversion -Wno-conversion -Wno-exit-time-destructors -Wno-undefined-func-template -Wno-documentation-unknown-command -Wno-missing-variable-declarations -Wno-unused-template -Wno-global-constructors -Wno-c99-extensions -Wno-missing-prototypes -Wno-weak-template-vtables -Wno-zero-length-array -Wno-gnu-anonymous-struct -Wno-nested-anon-types -Wno-double-promotion -Wno-disabled-macro-expansion -Wno-vla-extension -Wno-vla -Wno-packed")
# TODO Enable conversion, sign-conversion, double-promotion warnings.
endif ()
@ -387,6 +387,10 @@ if (ENABLE_TESTS AND USE_GTEST)
# attach all dbms gtest sources
grep_gtest_sources(${ClickHouse_SOURCE_DIR}/dbms dbms_gtest_sources)
add_executable(unit_tests_dbms ${dbms_gtest_sources})
# gtest framework has substandard code
target_compile_options(unit_tests_dbms PRIVATE -Wno-zero-as-null-pointer-constant -Wno-undef -Wno-sign-compare -Wno-used-but-marked-unused -Wno-missing-noreturn)
target_link_libraries(unit_tests_dbms PRIVATE ${GTEST_BOTH_LIBRARIES} clickhouse_functions clickhouse_parsers dbms clickhouse_common_zookeeper)
add_check(unit_tests_dbms)
endif ()

View File

@ -61,6 +61,8 @@ public:
randomize(randomize_), max_iterations(max_iterations_), max_time(max_time_),
json_path(json_path_), settings(settings_), global_context(Context::createGlobal()), pool(concurrency)
{
global_context.makeGlobalContext();
std::cerr << std::fixed << std::setprecision(3);
/// This is needed to receive blocks with columns of AggregateFunction data type

View File

@ -218,6 +218,7 @@ private:
configReadClient(config(), home_path);
context.makeGlobalContext();
context.setApplicationType(Context::ApplicationType::CLIENT);
/// settings and limits could be specified in config file, but passed settings has higher priority

View File

@ -2171,10 +2171,10 @@ void ClusterCopierApp::mainImpl()
<< "revision " << ClickHouseRevision::get() << ")");
auto context = std::make_unique<Context>(Context::createGlobal());
context->makeGlobalContext();
SCOPE_EXIT(context->shutdown());
context->setConfig(loaded_config.configuration);
context->setGlobalContext(*context);
context->setApplicationType(Context::ApplicationType::LOCAL);
context->setPath(process_path);

View File

@ -131,7 +131,7 @@ try
context = std::make_unique<Context>(Context::createGlobal());
context->setGlobalContext(*context);
context->makeGlobalContext();
context->setApplicationType(Context::ApplicationType::LOCAL);
tryInitPath();
@ -275,8 +275,8 @@ void LocalServer::processQueries()
if (!parse_res.second)
throw Exception("Cannot parse and execute the following part of query: " + String(parse_res.first), ErrorCodes::SYNTAX_ERROR);
context->setSessionContext(*context);
context->setQueryContext(*context);
context->makeSessionContext();
context->makeQueryContext();
context->setUser("default", "", Poco::Net::SocketAddress{}, "");
context->setCurrentQueryId("");

View File

@ -1024,6 +1024,7 @@ try
}
Context context = Context::createGlobal();
context.makeGlobalContext();
ReadBufferFromFileDescriptor file_in(STDIN_FILENO);
WriteBufferFromFileDescriptor file_out(STDOUT_FILENO);

View File

@ -160,7 +160,7 @@ int ODBCBridge::main(const std::vector<std::string> & /*args*/)
http_params->setKeepAliveTimeout(keep_alive_timeout);
context = std::make_shared<Context>(Context::createGlobal());
context->setGlobalContext(*context);
context->makeGlobalContext();
auto server = Poco::Net::HTTPServer(
new HandlerFactory("ODBCRequestHandlerFactory-factory", keep_alive_timeout, context), server_pool, socket, http_params);

View File

@ -89,6 +89,7 @@ public:
, input_files(input_files_)
, log(&Poco::Logger::get("PerformanceTestSuite"))
{
global_context.makeGlobalContext();
global_context.getSettingsRef().copyChangesFrom(cmd_settings);
if (input_files.size() < 1)
throw Exception("No tests were specified", ErrorCodes::BAD_ARGUMENTS);

View File

@ -211,7 +211,6 @@ void HTTPHandler::processQuery(
Output & used_output)
{
Context context = server.context();
context.setGlobalContext(server.context());
CurrentThread::QueryScope query_scope(context);

View File

@ -48,7 +48,7 @@ MySQLHandler::MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & so
void MySQLHandler::run()
{
connection_context = server.context();
connection_context.setSessionContext(connection_context);
connection_context.makeSessionContext();
connection_context.setDefaultFormat("MySQLWire");
in = std::make_shared<ReadBufferFromPocoSocket>(socket());

View File

@ -187,7 +187,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
* settings, available functions, data types, aggregate functions, databases...
*/
global_context = std::make_unique<Context>(Context::createGlobal());
global_context->setGlobalContext(*global_context);
global_context->makeGlobalContext();
global_context->setApplicationType(Context::ApplicationType::SERVER);
bool has_zookeeper = config().has("zookeeper");

View File

@ -54,7 +54,7 @@ void TCPHandler::runImpl()
ThreadStatus thread_status;
connection_context = server.context();
connection_context.setSessionContext(connection_context);
connection_context.makeSessionContext();
Settings global_settings = connection_context.getSettings();

View File

@ -73,7 +73,7 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
current_resolved_address = DNSResolver::instance().resolveAddress(host, port);
socket->connect(current_resolved_address, timeouts.connection_timeout);
socket->connect(*current_resolved_address, timeouts.connection_timeout);
socket->setReceiveTimeout(timeouts.receive_timeout);
socket->setSendTimeout(timeouts.send_timeout);
socket->setNoDelay(true);
@ -533,12 +533,9 @@ void Connection::sendExternalTablesData(ExternalTablesData & data)
LOG_DEBUG(log_wrapper.get(), msg.rdbuf());
}
Poco::Net::SocketAddress Connection::getResolvedAddress() const
std::optional<Poco::Net::SocketAddress> Connection::getResolvedAddress() const
{
if (connected)
return current_resolved_address;
return DNSResolver::instance().resolveAddress(host, port);
return current_resolved_address;
}
@ -595,7 +592,9 @@ Connection::Packet Connection::receivePacket()
switch (res.type)
{
case Protocol::Server::Data:
case Protocol::Server::Data: [[fallthrough]];
case Protocol::Server::Totals: [[fallthrough]];
case Protocol::Server::Extremes:
res.block = receiveData();
return res;
@ -611,16 +610,6 @@ Connection::Packet Connection::receivePacket()
res.profile_info = receiveProfileInfo();
return res;
case Protocol::Server::Totals:
/// Block with total values is passed in same form as ordinary block. The only difference is packed id.
res.block = receiveData();
return res;
case Protocol::Server::Extremes:
/// Same as above.
res.block = receiveData();
return res;
case Protocol::Server::Log:
res.block = receiveLogData();
return res;
@ -720,11 +709,14 @@ void Connection::initBlockLogsInput()
void Connection::setDescription()
{
auto resolved_address = getResolvedAddress();
description = host + ":" + toString(resolved_address.port());
auto ip_address = resolved_address.host().toString();
description = host + ":" + toString(port);
if (host != ip_address)
description += ", " + ip_address;
if (resolved_address)
{
auto ip_address = resolved_address->host().toString();
if (host != ip_address)
description += ", " + ip_address;
}
}
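The pattern introduced here — caching the address only once a connection is actually made and exposing it as `std::optional` — can be illustrated with a minimal standalone sketch. Class and member names below are illustrative, not the real `Connection`/`Poco::Net::SocketAddress` API:

```cpp
#include <iostream>
#include <optional>
#include <string>

/// Illustrative stand-in for Poco::Net::SocketAddress.
struct ResolvedAddress
{
    std::string ip;
    unsigned short port;
};

/// Minimal sketch of the pattern above: the resolved address is cached only
/// when a connection succeeds, and exposed as std::optional.
class ConnectionSketch
{
public:
    ConnectionSketch(std::string host_, unsigned short port_) : host(std::move(host_)), port(port_) {}

    void connect()
    {
        current_resolved_address = ResolvedAddress{"203.0.113.10", port};  /// pretend DNS result
        setDescription();
    }

    const std::string & getDescription() const { return description; }

private:
    /// Returns the resolved address only if it was resolved.
    std::optional<ResolvedAddress> getResolvedAddress() const { return current_resolved_address; }

    void setDescription()
    {
        description = host + ":" + std::to_string(port);
        if (auto resolved = getResolvedAddress())
        {
            /// Append the IP only when it differs from the host name, as in the diff.
            if (host != resolved->ip)
                description += ", " + resolved->ip;
        }
    }

    std::string host;
    unsigned short port;
    std::optional<ResolvedAddress> current_resolved_address;
    std::string description;
};

int main()
{
    ConnectionSketch connection("example-host", 9000);
    connection.connect();
    std::cout << connection.getDescription() << '\n';  /// example-host:9000, 203.0.113.10
}
```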

View File

@ -63,7 +63,7 @@ public:
Poco::Timespan sync_request_timeout_ = Poco::Timespan(DBMS_DEFAULT_SYNC_REQUEST_TIMEOUT_SEC, 0))
:
host(host_), port(port_), default_database(default_database_),
user(user_), password(password_), current_resolved_address(host, port),
user(user_), password(password_),
client_name(client_name_),
compression(compression_),
secure(secure_),
@ -168,9 +168,6 @@ public:
size_t outBytesCount() const { return out ? out->count() : 0; }
size_t inBytesCount() const { return in ? in->count() : 0; }
/// Returns initially resolved address
Poco::Net::SocketAddress getResolvedAddress() const;
private:
String host;
UInt16 port;
@ -180,12 +177,15 @@ private:
/// Address is resolved during the first connection (or the following reconnects)
/// Use it only for logging purposes
Poco::Net::SocketAddress current_resolved_address;
std::optional<Poco::Net::SocketAddress> current_resolved_address;
/// For messages in log and in exceptions.
String description;
void setDescription();
/// Returns resolved address if it was resolved.
std::optional<Poco::Net::SocketAddress> getResolvedAddress() const;
String client_name;
bool connected = false;

View File

@ -1,4 +0,0 @@
if(USE_GTEST)
add_executable(column_unique column_unique.cpp)
target_link_libraries(column_unique PRIVATE dbms ${GTEST_BOTH_LIBRARIES})
endif()

View File

@ -7,12 +7,6 @@
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeNullable.h>
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>
#include <unordered_map>

View File

@ -14,8 +14,9 @@ namespace DB
namespace Cpu
{
#if defined(__x86_64__) || defined(__i386__)
inline UInt64 _xgetbv(UInt32 xcr) noexcept
#if (defined(__x86_64__) || defined(__i386__))
/// Our version is independent of -mxsave option, because we do dynamic dispatch.
inline UInt64 our_xgetbv(UInt32 xcr) noexcept
{
UInt32 eax;
UInt32 edx;
@ -185,7 +186,7 @@ bool haveAVX() noexcept
// http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// https://bugs.chromium.org/p/chromium/issues/detail?id=375968
return haveOSXSAVE() // implies haveXSAVE()
&& (_xgetbv(0) & 6u) == 6u // XMM state and YMM state are enabled by OS
&& (our_xgetbv(0) & 6u) == 6u // XMM state and YMM state are enabled by OS
&& ((CpuInfo(0x1).ecx >> 28) & 1u); // AVX bit
#else
return false;
@ -217,8 +218,8 @@ bool haveAVX512F() noexcept
#if defined(__x86_64__) || defined(__i386__)
// https://software.intel.com/en-us/articles/how-to-detect-knl-instruction-support
return haveOSXSAVE() // implies haveXSAVE()
&& (_xgetbv(0) & 6u) == 6u // XMM state and YMM state are enabled by OS
&& ((_xgetbv(0) >> 5) & 7u) == 7u // ZMM state is enabled by OS
&& (our_xgetbv(0) & 6u) == 6u // XMM state and YMM state are enabled by OS
&& ((our_xgetbv(0) >> 5) & 7u) == 7u // ZMM state is enabled by OS
&& CpuInfo(0x0).eax >= 0x7 // leaf 7 is present
&& ((CpuInfo(0x7).ebx >> 16) & 1u); // AVX512F bit
#else

View File

@ -42,35 +42,40 @@ void FileChecker::update(const Files::const_iterator & begin, const Files::const
save();
}
bool FileChecker::check() const
CheckResults FileChecker::check() const
{
/** Read the files again every time you call `check` - so as not to violate the constancy.
* `check` method is rarely called.
*/
CheckResults results;
Map local_map;
load(local_map, files_info_path);
if (local_map.empty())
return true;
return {};
for (const auto & name_size : local_map)
{
Poco::File file(Poco::Path(files_info_path).parent().toString() + "/" + name_size.first);
Poco::Path path = Poco::Path(files_info_path).parent().toString() + "/" + name_size.first;
Poco::File file(path);
if (!file.exists())
{
LOG_ERROR(log, "File " << file.path() << " doesn't exist");
return false;
results.emplace_back(path.getFileName(), false, "File " + file.path() + " doesn't exist");
break;
}
size_t real_size = file.getSize();
if (real_size != name_size.second)
{
LOG_ERROR(log, "Size of " << file.path() << " is wrong. Size is " << real_size << " but should be " << name_size.second);
return false;
results.emplace_back(path.getFileName(), false, "Size of " + file.path() + " is wrong. Size is " + toString(real_size) + " but should be " + toString(name_size.second));
break;
}
results.emplace_back(path.getFileName(), true, "");
}
return true;
return results;
}
void FileChecker::initialize()

View File

@ -3,6 +3,7 @@
#include <string>
#include <common/logger_useful.h>
#include <Poco/File.h>
#include <Storages/CheckResults.h>
namespace DB
@ -24,7 +25,7 @@ public:
void update(const Files::const_iterator & begin, const Files::const_iterator & end);
/// Check the files whose parameters are specified in sizes.json
bool check() const;
CheckResults check() const;
private:
void initialize();

View File

@ -5,11 +5,6 @@
#include <iostream>
#include <chrono>
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>
#include <Common/ShellCommand.h>

View File

@ -153,7 +153,7 @@ void formatIPv6(const unsigned char * src, char *& dst, UInt8 zeroed_tail_bytes_
}
/// Was it a trailing run of 0x00's?
if (best.base != -1 && size_t(best.base + best.len) == words.size())
if (best.base != -1 && size_t(best.base) + size_t(best.len) == words.size())
*dst++ = ':';
*dst++ = '\0';

View File

@ -23,9 +23,7 @@ namespace DB
* - the routing rules that affect which network interface we go to the specified address are not checked.
*/
bool isLocalAddress(const Poco::Net::SocketAddress & address, UInt16 clickhouse_port);
bool isLocalAddress(const Poco::Net::SocketAddress & address);
bool isLocalAddress(const Poco::Net::IPAddress & address);
/// Returns number of different bytes in hostnames, used for load balancing

View File

@ -1,8 +1,3 @@
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>
#include <Common/RWLock.h>

View File

@ -9,11 +9,6 @@
#include <chrono>
#include <thread>
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>

View File

@ -1,10 +1,5 @@
#include <Common/ThreadPool.h>
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>
/** Reproduces bug in ThreadPool.

View File

@ -2,11 +2,6 @@
#include <iostream>
#include <Common/ThreadPool.h>
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>
/// Test for thread self-removal when number of free threads in pool is too large.

View File

@ -2,11 +2,6 @@
#include <iostream>
#include <Common/ThreadPool.h>
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>

View File

@ -2,11 +2,6 @@
#include <stdexcept>
#include <Common/ThreadPool.h>
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>

View File

@ -1,10 +1,5 @@
#include <Common/escapeForFileName.h>
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>

View File

@ -34,7 +34,7 @@ struct AggregateIndependent
{
results.reserve(num_threads);
for (size_t i = 0; i < num_threads; ++i)
results.emplace_back(new Map);
results.emplace_back(std::make_unique<Map>());
for (size_t i = 0; i < num_threads; ++i)
{
@ -77,7 +77,7 @@ struct AggregateIndependentWithSequentialKeysOptimization
{
results.reserve(num_threads);
for (size_t i = 0; i < num_threads; ++i)
results.emplace_back(new Map);
results.emplace_back(std::make_unique<Map>());
for (size_t i = 0; i < num_threads; ++i)
{

View File

@ -20,11 +20,6 @@
#include <bitset>
#include <string.h>
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>
using namespace DB;

View File

@ -717,8 +717,8 @@ class WriteBuffer;
/// It is assumed that all elements of the array have the same type.
void readBinary(Array & x, ReadBuffer & buf);
inline void readText(Array &, ReadBuffer &) { throw Exception("Cannot read Array.", ErrorCodes::NOT_IMPLEMENTED); }
inline void readQuoted(Array &, ReadBuffer &) { throw Exception("Cannot read Array.", ErrorCodes::NOT_IMPLEMENTED); }
[[noreturn]] inline void readText(Array &, ReadBuffer &) { throw Exception("Cannot read Array.", ErrorCodes::NOT_IMPLEMENTED); }
[[noreturn]] inline void readQuoted(Array &, ReadBuffer &) { throw Exception("Cannot read Array.", ErrorCodes::NOT_IMPLEMENTED); }
/// It is assumed that all elements of the array have the same type.
/// Also write size and type into buf. UInt64 and Int64 is written in variadic size form
@ -726,16 +726,16 @@ void writeBinary(const Array & x, WriteBuffer & buf);
void writeText(const Array & x, WriteBuffer & buf);
inline void writeQuoted(const Array &, WriteBuffer &) { throw Exception("Cannot write Array quoted.", ErrorCodes::NOT_IMPLEMENTED); }
[[noreturn]] inline void writeQuoted(const Array &, WriteBuffer &) { throw Exception("Cannot write Array quoted.", ErrorCodes::NOT_IMPLEMENTED); }
void readBinary(Tuple & x, ReadBuffer & buf);
inline void readText(Tuple &, ReadBuffer &) { throw Exception("Cannot read Tuple.", ErrorCodes::NOT_IMPLEMENTED); }
inline void readQuoted(Tuple &, ReadBuffer &) { throw Exception("Cannot read Tuple.", ErrorCodes::NOT_IMPLEMENTED); }
[[noreturn]] inline void readText(Tuple &, ReadBuffer &) { throw Exception("Cannot read Tuple.", ErrorCodes::NOT_IMPLEMENTED); }
[[noreturn]] inline void readQuoted(Tuple &, ReadBuffer &) { throw Exception("Cannot read Tuple.", ErrorCodes::NOT_IMPLEMENTED); }
void writeBinary(const Tuple & x, WriteBuffer & buf);
void writeText(const Tuple & x, WriteBuffer & buf);
inline void writeQuoted(const Tuple &, WriteBuffer &) { throw Exception("Cannot write Tuple quoted.", ErrorCodes::NOT_IMPLEMENTED); }
[[noreturn]] inline void writeQuoted(const Tuple &, WriteBuffer &) { throw Exception("Cannot write Tuple quoted.", ErrorCodes::NOT_IMPLEMENTED); }
}
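These annotations line up with the compiler-flag hunk earlier in the diff, where `-Wno-missing-noreturn` is dropped from the Clang warning list: once that warning is active, an inline stub whose only job is to throw should be declared `[[noreturn]]`. A minimal sketch of the idea, with an illustrative exception type rather than ClickHouse's `Exception`:

```cpp
#include <iostream>
#include <stdexcept>

/// A stub whose only job is to throw. Without [[noreturn]], clang's
/// -Wmissing-noreturn (re-enabled by the flag change earlier in this diff)
/// suggests annotating it.
[[noreturn]] inline void readQuotedStub()
{
    throw std::logic_error("Cannot read Array.");
}

int parseOrFail(bool supported)
{
    if (!supported)
        readQuotedStub();  /// the compiler knows control never continues past this call
    return 42;
}

int main()
{
    std::cout << parseOrFail(true) << '\n';  // 42
}
```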

View File

@ -275,7 +275,7 @@ namespace details
{
static void serializeName(const StringRef & name, WriteBuffer & buf);
static String deserializeName(ReadBuffer & buf);
static void throwNameNotFound(const StringRef & name);
[[noreturn]] static void throwNameNotFound(const StringRef & name);
};
}

View File

@ -68,6 +68,7 @@ try
CollapsingFinalBlockInputStream collapsed(inputs, descr, "Sign");
Context context = Context::createGlobal();
context.makeGlobalContext();
WriteBufferFromFileDescriptor out_buf(STDERR_FILENO);
BlockOutputStreamPtr output = context.getOutputFormat("TabSeparated", out_buf, block1);

View File

@ -35,6 +35,7 @@ try
ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0);
Context context = Context::createGlobal();
context.makeGlobalContext();
NamesAndTypesList source_columns = {{"number", std::make_shared<DataTypeUInt64>()}};
auto syntax_result = SyntaxAnalyzer(context, {}).analyze(ast, source_columns);

View File

@ -40,6 +40,7 @@ try
std::cerr << std::endl;
Context context = Context::createGlobal();
context.makeGlobalContext();
NamesAndTypesList source_columns = {{"number", std::make_shared<DataTypeUInt64>()}};
auto syntax_result = SyntaxAnalyzer(context, {}).analyze(ast, source_columns);

View File

@ -1,9 +1,3 @@
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>
#include <Core/Block.h>
#include <Columns/ColumnVector.h>

View File

@ -23,6 +23,7 @@ int main(int, char **)
try
{
Context context = Context::createGlobal();
context.makeGlobalContext();
Settings settings = context.getSettings();
context.setPath("./");

View File

@ -4,11 +4,6 @@
#include <sstream>
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>

View File

@ -27,6 +27,7 @@ public:
/// copy-constructor is provided in order to support cloneability
ClickHouseDictionarySource(const ClickHouseDictionarySource & other);
ClickHouseDictionarySource & operator=(const ClickHouseDictionarySource &) = delete;
BlockInputStreamPtr loadAll() override;

View File

@ -23,6 +23,7 @@ public:
const Context & context);
ExecutableDictionarySource(const ExecutableDictionarySource & other);
ExecutableDictionarySource & operator=(const ExecutableDictionarySource &) = delete;
BlockInputStreamPtr loadAll() override;

View File

@ -26,6 +26,7 @@ public:
const Context & context);
HTTPDictionarySource(const HTTPDictionarySource & other);
HTTPDictionarySource & operator=(const HTTPDictionarySource &) = delete;
BlockInputStreamPtr loadAll() override;

View File

@ -35,6 +35,7 @@ public:
Block & sample_block);
LibraryDictionarySource(const LibraryDictionarySource & other);
LibraryDictionarySource & operator=(const LibraryDictionarySource &) = delete;
~LibraryDictionarySource() override;

View File

@ -37,6 +37,7 @@ public:
/// copy-constructor is provided in order to support cloneability
MySQLDictionarySource(const MySQLDictionarySource & other);
MySQLDictionarySource & operator=(const MySQLDictionarySource &) = delete;
BlockInputStreamPtr loadAll() override;

View File

@ -36,6 +36,7 @@ public:
/// copy-constructor is provided in order to support cloneability
XDBCDictionarySource(const XDBCDictionarySource & other);
XDBCDictionarySource & operator=(const XDBCDictionarySource &) = delete;
BlockInputStreamPtr loadAll() override;

View File

@ -42,7 +42,7 @@ namespace
Int64 decodeZigZag(UInt64 n) { return static_cast<Int64>((n >> 1) ^ (~(n & 1) + 1)); }
void unknownFormat()
[[noreturn]] void unknownFormat()
{
throw Exception("Protobuf messages are corrupted or don't match the provided schema. Please note that Protobuf stream is length-delimited: every message is prefixed by its length in varint.", ErrorCodes::UNKNOWN_PROTOBUF_FORMAT);
}

View File

@ -41,7 +41,7 @@ struct CRC32Impl
}
}
static void array(const ColumnString::Offsets & /*offsets*/, PaddedPODArray<UInt32> & /*res*/)
[[noreturn]] static void array(const ColumnString::Offsets & /*offsets*/, PaddedPODArray<UInt32> & /*res*/)
{
throw Exception("Cannot apply function CRC32 to Array argument", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
}

View File

@ -7,18 +7,8 @@
#include <Common/Exception.h>
#include <Core/Types.h>
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wnewline-eof"
#endif
#include <simdjson/jsonparser.h>
#ifdef __clang__
#pragma clang diagnostic pop
#endif
namespace DB
{
@ -42,7 +32,7 @@ struct SimdJSONParser
bool parse(const StringRef & json) { return !json_parse(json.data, json.size, pj); }
using Iterator = ParsedJson::iterator;
using Iterator = simdjson::ParsedJson::iterator;
Iterator getRoot() { return Iterator{pj}; }
static bool isInt64(const Iterator & it) { return it.is_integer(); }
@ -143,7 +133,7 @@ struct SimdJSONParser
}
private:
ParsedJson pj;
simdjson::ParsedJson pj;
};
}

View File

@ -89,7 +89,7 @@ struct DecodeURLComponentImpl
res_data.resize(res_offset);
}
static void vector_fixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
[[noreturn]] static void vector_fixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
{
throw Exception("Column of type FixedString is not supported by URL functions", ErrorCodes::ILLEGAL_COLUMN);
}

View File

@ -1,5 +1,7 @@
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionBinaryArithmetic.h>
#include <Core/Defines.h>
namespace DB
{
@ -10,7 +12,7 @@ struct BitTestImpl
using ResultType = UInt8;
template <typename Result = ResultType>
static inline Result apply(A a, B b)
NO_SANITIZE_UNDEFINED static inline Result apply(A a, B b)
{
return (typename NumberTraits::ToInteger<A>::Type(a) >> typename NumberTraits::ToInteger<B>::Type(b)) & 1;
}
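The newly included `Core/Defines.h` presumably supplies the `NO_SANITIZE_UNDEFINED` attribute macro used here: shifting by an amount greater than or equal to the operand's bit width is undefined behaviour in C++, so `bitTest(a, b)` with an out-of-range `b` would otherwise trip UBSan. A standalone sketch of the same idea; the macro definition below is an assumption, not copied from ClickHouse:

```cpp
#include <cstdint>
#include <iostream>

/// Assumption: NO_SANITIZE_UNDEFINED is roughly this clang attribute.
#if defined(__clang__)
    #define NO_SANITIZE_UNDEFINED __attribute__((no_sanitize("undefined")))
#else
    #define NO_SANITIZE_UNDEFINED
#endif

/// (a >> b) & 1 is UB when b >= 64, hence the UBSan exemption on the real
/// BitTestImpl::apply instead of an extra branch.
NO_SANITIZE_UNDEFINED inline uint8_t bitTest(uint64_t a, uint64_t b)
{
    return (a >> b) & 1;
}

int main()
{
    std::cout << int(bitTest(0b1010, 1)) << ' ' << int(bitTest(0b1010, 3)) << '\n';  // 1 1
}
```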

View File

@ -309,7 +309,7 @@ SOFTWARE.
res[i] = isValidUTF8(data.data() + i * n, n);
}
static void array(const ColumnString::Offsets &, PaddedPODArray<UInt8> &)
[[noreturn]] static void array(const ColumnString::Offsets &, PaddedPODArray<UInt8> &)
{
throw Exception("Cannot apply function isValidUTF8 to Array argument", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
}

View File

@ -48,7 +48,7 @@ struct LengthUTF8Impl
}
}
static void array(const ColumnString::Offsets &, PaddedPODArray<UInt64> &)
[[noreturn]] static void array(const ColumnString::Offsets &, PaddedPODArray<UInt64> &)
{
throw Exception("Cannot apply function lengthUTF8 to Array argument", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
}

View File

@ -61,7 +61,7 @@ struct ReverseUTF8Impl
}
}
static void vector_fixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
[[noreturn]] static void vector_fixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
{
throw Exception("Cannot apply function reverseUTF8 to fixed string.", ErrorCodes::ILLEGAL_COLUMN);
}

View File

@ -124,7 +124,7 @@ struct ToValidUTF8Impl
write_buffer.finish();
}
static void vector_fixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
[[noreturn]] static void vector_fixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
{
throw Exception("Column of type FixedString is not supported by toValidUTF8 function", ErrorCodes::ILLEGAL_COLUMN);
}

View File

@ -2,6 +2,7 @@
#if USE_HDFS
#include <Common/Exception.h>
namespace DB
{
namespace ErrorCodes
@ -9,6 +10,7 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
extern const int NETWORK_ERROR;
}
HDFSBuilderPtr createHDFSBuilder(const Poco::URI & uri)
{
auto & host = uri.getHost();
@ -25,6 +27,18 @@ HDFSBuilderPtr createHDFSBuilder(const Poco::URI & uri)
hdfsBuilderConfSetStr(builder.get(), "input.write.timeout", "60000"); // 1 min
hdfsBuilderConfSetStr(builder.get(), "input.connect.timeout", "60000"); // 1 min
std::string user_info = uri.getUserInfo();
if (!user_info.empty() && user_info.front() != ':')
{
std::string user;
size_t delim_pos = user_info.find(":");
if (delim_pos != std::string::npos)
user = user_info.substr(0, delim_pos);
else
user = user_info;
hdfsBuilderSetUserName(builder.get(), user.c_str());
}
hdfsBuilderSetNameNode(builder.get(), host.c_str());
hdfsBuilderSetNameNodePort(builder.get(), port);
return builder;
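The added block takes the user-info part of the URI (`user[:password]@host`) and passes only the user name to `hdfsBuilderSetUserName`. A standalone sketch of that split, with a plain `std::string` in place of `Poco::URI` (purely illustrative):

```cpp
#include <iostream>
#include <string>

/// Extract the user name from a URI user-info string such as "joe:secret" or "joe".
/// Mirrors the logic added to createHDFSBuilder: an empty string or a leading ':'
/// means there is no user name to set.
std::string userFromUserInfo(const std::string & user_info)
{
    if (user_info.empty() || user_info.front() == ':')
        return {};
    size_t delim_pos = user_info.find(':');
    return delim_pos == std::string::npos ? user_info : user_info.substr(0, delim_pos);
}

int main()
{
    std::cout << userFromUserInfo("joe:secret") << '\n';  // joe
    std::cout << userFromUserInfo("joe") << '\n';         // joe
    std::cout << userFromUserInfo(":secret") << '\n';     // (empty)
}
```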

View File

@ -676,7 +676,7 @@ inline void readText(String & x, ReadBuffer & buf) { readEscapedString(x, buf);
inline void readText(LocalDate & x, ReadBuffer & buf) { readDateText(x, buf); }
inline void readText(LocalDateTime & x, ReadBuffer & buf) { readDateTimeText(x, buf); }
inline void readText(UUID & x, ReadBuffer & buf) { readUUIDText(x, buf); }
inline void readText(UInt128 &, ReadBuffer &)
[[noreturn]] inline void readText(UInt128 &, ReadBuffer &)
{
/** Because UInt128 isn't a natural type, without arithmetic operator and only use as an intermediary type -for UUID-
* it should never arrive here. But because we used the DataTypeNumber class we should have at least a definition of it.
@ -755,7 +755,7 @@ inline void readCSV(String & x, ReadBuffer & buf, const FormatSettings::CSV & se
inline void readCSV(LocalDate & x, ReadBuffer & buf) { readCSVSimple(x, buf); }
inline void readCSV(LocalDateTime & x, ReadBuffer & buf) { readCSVSimple(x, buf); }
inline void readCSV(UUID & x, ReadBuffer & buf) { readCSVSimple(x, buf); }
inline void readCSV(UInt128 &, ReadBuffer &)
[[noreturn]] inline void readCSV(UInt128 &, ReadBuffer &)
{
/** Because UInt128 isn't a natural type, without arithmetic operator and only use as an intermediary type -for UUID-
* it should never arrive here. But because we used the DataTypeNumber class we should have at least a definition of it.

View File

@ -830,7 +830,7 @@ inline void writeCSV(const String & x, WriteBuffer & buf) { writeCSVString<>(x,
inline void writeCSV(const LocalDate & x, WriteBuffer & buf) { writeDoubleQuoted(x, buf); }
inline void writeCSV(const LocalDateTime & x, WriteBuffer & buf) { writeDoubleQuoted(x, buf); }
inline void writeCSV(const UUID & x, WriteBuffer & buf) { writeDoubleQuoted(x, buf); }
inline void writeCSV(const UInt128, WriteBuffer &)
[[noreturn]] inline void writeCSV(const UInt128, WriteBuffer &)
{
/** Because UInt128 isn't a natural type, without arithmetic operator and only use as an intermediary type -for UUID-
* it should never arrive here. But because we used the DataTypeNumber class we should have at least a definition of it.

View File

@ -1,10 +1,5 @@
#if defined(__linux__) || defined(__FreeBSD__)
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>
#include <Core/Defines.h>

View File

@ -16,12 +16,6 @@
#include <typeinfo>
#include <iostream>
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>
using namespace DB;

View File

@ -1,8 +1,3 @@
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma clang diagnostic ignored "-Wundef"
#endif
#include <gtest/gtest.h>
#include <stdexcept>

View File

@ -18,7 +18,7 @@ void prepare2(std::string & filename, std::string & buf);
void prepare3(std::string & filename, std::string & buf);
void prepare4(std::string & filename, std::string & buf);
std::string createTmpFile();
void die(const std::string & msg);
[[noreturn]] void die(const std::string & msg);
void runTest(unsigned int num, const std::function<bool()> & func);
bool test1(const std::string & filename);

View File

@ -14,7 +14,7 @@ namespace
namespace fs = boost::filesystem;
void run();
void die(const std::string & msg);
[[noreturn]] void die(const std::string & msg);
void runTest(unsigned int num, const std::function<bool()> & func);
std::string createTmpFile();
std::string generateString(size_t n);

View File

@ -29,9 +29,9 @@ namespace
/// Default shard weight.
static constexpr UInt32 default_weight = 1;
inline bool isLocal(const Cluster::Address & address, const Poco::Net::SocketAddress & resolved_address, UInt16 clickhouse_port)
inline bool isLocalImpl(const Cluster::Address & address, const Poco::Net::SocketAddress & resolved_address, UInt16 clickhouse_port)
{
/// If there is replica, for which:
/// If there is replica, for which:
/// - its port is the same that the server is listening;
/// - its host is resolved to set of addresses, one of which is the same as one of addresses of network interfaces of the server machine*;
/// then we must go to this shard without any inter-process communication.
@ -48,10 +48,31 @@ inline bool isLocal(const Cluster::Address & address, const Poco::Net::SocketAdd
/// Implementation of Cluster::Address class
std::optional<Poco::Net::SocketAddress> Cluster::Address::getResolvedAddress() const
{
try
{
return DNSResolver::instance().resolveAddress(host_name, port);
}
catch (...)
{
/// Failure in DNS resolution in cluster initialization is Ok.
tryLogCurrentException("Cluster");
return {};
}
}
bool Cluster::Address::isLocal(UInt16 clickhouse_port) const
{
if (auto resolved = getResolvedAddress())
return isLocalImpl(*this, *resolved, clickhouse_port);
return false;
}
Cluster::Address::Address(const Poco::Util::AbstractConfiguration & config, const String & config_prefix)
{
UInt16 clickhouse_port = static_cast<UInt16>(config.getInt("tcp_port", 0));
host_name = config.getString(config_prefix + ".host");
port = static_cast<UInt16>(config.getInt(config_prefix + ".port"));
if (config.has(config_prefix + ".user"))
@ -60,10 +81,9 @@ Cluster::Address::Address(const Poco::Util::AbstractConfiguration & config, cons
user = config.getString(config_prefix + ".user", "default");
password = config.getString(config_prefix + ".password", "");
default_database = config.getString(config_prefix + ".default_database", "");
initially_resolved_address = DNSResolver::instance().resolveAddress(host_name, port);
is_local = isLocal(*this, initially_resolved_address, clickhouse_port);
secure = config.getBool(config_prefix + ".secure", false) ? Protocol::Secure::Enable : Protocol::Secure::Disable;
compression = config.getBool(config_prefix + ".compression", true) ? Protocol::Compression::Enable : Protocol::Compression::Disable;
is_local = isLocal(config.getInt("tcp_port", 0));
}
@ -74,9 +94,7 @@ Cluster::Address::Address(const String & host_port_, const String & user_, const
host_name = parsed_host_port.first;
port = parsed_host_port.second;
secure = secure_ ? Protocol::Secure::Enable : Protocol::Secure::Disable;
initially_resolved_address = DNSResolver::instance().resolveAddress(parsed_host_port.first, parsed_host_port.second);
is_local = isLocal(*this, initially_resolved_address, clickhouse_port);
is_local = isLocal(clickhouse_port);
}

View File

@ -60,7 +60,7 @@ public:
/// This database is selected when no database is specified for Distributed table
String default_database;
/// The locality is determined at the initialization, and is not changed even if DNS is changed
bool is_local;
bool is_local = false;
bool user_specified = false;
Protocol::Compression compression = Protocol::Compression::Enable;
@ -84,17 +84,14 @@ public:
String toFullString() const;
static Address fromFullString(const String & address_full_string);
/// Returns initially resolved address
Poco::Net::SocketAddress getResolvedAddress() const
{
return initially_resolved_address;
}
/// Returns resolved address if it does resolve.
std::optional<Poco::Net::SocketAddress> getResolvedAddress() const;
auto tuple() const { return std::tie(host_name, port, secure, user, password, default_database); }
bool operator==(const Address & other) const { return tuple() == other.tuple(); }
private:
Poco::Net::SocketAddress initially_resolved_address;
bool isLocal(UInt16 clickhouse_port) const;
};
using Addresses = std::vector<Address>;

View File

@ -133,7 +133,7 @@ private:
Tables table_function_results; /// Temporary tables obtained by execution of table functions. Keyed by AST tree id.
Context * query_context = nullptr;
Context * session_context = nullptr; /// Session context or nullptr. Could be equal to this.
Context * global_context = nullptr; /// Global context or nullptr. Could be equal to this.
Context * global_context = nullptr; /// Global context. Could be equal to this.
UInt64 session_close_cycle = 0;
bool session_is_used = false;
@ -344,7 +344,10 @@ public:
void setQueryContext(Context & context_) { query_context = &context_; }
void setSessionContext(Context & context_) { session_context = &context_; }
void setGlobalContext(Context & context_) { global_context = &context_; }
void makeQueryContext() { query_context = this; }
void makeSessionContext() { session_context = this; }
void makeGlobalContext() { global_context = this; }
const Settings & getSettingsRef() const { return settings; }
Settings & getSettingsRef() { return settings; }

View File

@ -506,8 +506,9 @@ void DDLWorker::parseQueryAndResolveHost(DDLTask & task)
{
const Cluster::Address & address = shards[shard_num][replica_num];
if (isLocalAddress(address.getResolvedAddress(), context.getTCPPort())
|| (context.getTCPPortSecure() && isLocalAddress(address.getResolvedAddress(), *context.getTCPPortSecure())))
if (auto resolved = address.getResolvedAddress();
resolved && (isLocalAddress(*resolved, context.getTCPPort())
|| (context.getTCPPortSecure() && isLocalAddress(*resolved, *context.getTCPPortSecure()))))
{
if (found_via_resolving)
{

View File

@ -144,7 +144,11 @@ void ExpressionAnalyzer::analyzeAggregation()
{
getRootActions(array_join_expression_list, true, temp_actions);
addMultipleArrayJoinAction(temp_actions, is_array_join_left);
array_join_columns = temp_actions->getSampleBlock().getNamesAndTypesList();
array_join_columns.clear();
for (auto & column : temp_actions->getSampleBlock().getNamesAndTypesList())
if (syntax->array_join_result_to_source.count(column.name))
array_join_columns.emplace_back(column);
}
const ASTTablesInSelectQueryElement * join = select_query->join();

View File

@ -4,6 +4,7 @@
#include <Parsers/ASTCheckQuery.h>
#include <DataStreams/OneBlockInputStream.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeString.h>
#include <Columns/ColumnsNumber.h>
#include <Common/typeid_cast.h>
@ -11,6 +12,21 @@
namespace DB
{
namespace
{
NamesAndTypes getBlockStructure()
{
return {
{"part_path", std::make_shared<DataTypeString>()},
{"is_passed", std::make_shared<DataTypeUInt8>()},
{"message", std::make_shared<DataTypeString>()},
};
}
}
InterpreterCheckQuery::InterpreterCheckQuery(const ASTPtr & query_ptr_, const Context & context_)
: query_ptr(query_ptr_), context(context_)
{
@ -19,18 +35,32 @@ InterpreterCheckQuery::InterpreterCheckQuery(const ASTPtr & query_ptr_, const Co
BlockIO InterpreterCheckQuery::execute()
{
const auto & alter = query_ptr->as<ASTCheckQuery &>();
const String & table_name = alter.table;
String database_name = alter.database.empty() ? context.getCurrentDatabase() : alter.database;
const auto & check = query_ptr->as<ASTCheckQuery &>();
const String & table_name = check.table;
String database_name = check.database.empty() ? context.getCurrentDatabase() : check.database;
StoragePtr table = context.getTable(database_name, table_name);
auto check_results = table->checkData(query_ptr, context);
auto column = ColumnUInt8::create();
column->insertValue(UInt64(table->checkData()));
result = Block{{ std::move(column), std::make_shared<DataTypeUInt8>(), "result" }};
auto block_structure = getBlockStructure();
auto path_column = block_structure[0].type->createColumn();
auto is_passed_column = block_structure[1].type->createColumn();
auto message_column = block_structure[2].type->createColumn();
for (const auto & check_result : check_results)
{
path_column->insert(check_result.fs_path);
is_passed_column->insert(static_cast<UInt8>(check_result.success));
message_column->insert(check_result.failure_message);
}
Block block({
{std::move(path_column), block_structure[0].type, block_structure[0].name},
{std::move(is_passed_column), block_structure[1].type, block_structure[1].name},
{std::move(message_column), block_structure[2].type, block_structure[2].name}});
BlockIO res;
res.in = std::make_shared<OneBlockInputStream>(result);
res.in = std::make_shared<OneBlockInputStream>(block);
return res;
}

View File

@ -625,7 +625,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
insert->select = create.select->clone();
if (create.temporary && !context.getSessionContext().hasQueryContext())
context.getSessionContext().setQueryContext(context.getSessionContext());
context.getSessionContext().makeQueryContext();
return InterpreterInsertQuery(insert,
create.temporary ? context.getSessionContext() : context,

View File

@ -151,7 +151,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
{
time_t current_time = time(nullptr);
context.setQueryContext(context);
context.makeQueryContext();
CurrentThread::attachQueryContext(context);
const Settings & settings = context.getSettingsRef();
@ -208,11 +208,10 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
{
ReplaceQueryParameterVisitor visitor(context.getQueryParameters());
visitor.visit(ast);
}
/// Get new query after substitutions.
if (context.hasQueryParameters())
/// Get new query after substitutions.
query = serializeAST(*ast);
}
logQuery(query.substr(0, settings.log_queries_cut_to_length), context, internal);

View File

@ -79,6 +79,7 @@ try
ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0);
Context context = Context::createGlobal();
context.makeGlobalContext();
context.setPath("./");
auto database = std::make_shared<DatabaseOrdinary>("test", "./metadata/test/", context);

View File

@ -47,6 +47,7 @@ int main(int argc, char ** argv)
std::cerr << std::endl;
Context context = Context::createGlobal();
context.makeGlobalContext();
NamesAndTypesList columns
{
{"x", std::make_shared<DataTypeInt16>()},

View File

@ -97,6 +97,7 @@ int main()
};
Context context = Context::createGlobal();
context.makeGlobalContext();
auto system_database = std::make_shared<DatabaseMemory>("system");
context.addDatabase("system", system_database);

View File

@ -277,7 +277,7 @@ struct Grower : public HashTableGrower<>
}
/// Set the buffer size by the number of elements in the hash table. Used when deserializing a hash table.
void set(size_t /*num_elems*/)
[[noreturn]] void set(size_t /*num_elems*/)
{
throw Poco::Exception(__PRETTY_FUNCTION__);
}

View File

@ -1159,6 +1159,7 @@ bool run()
TestResult check(const TestEntry & entry)
{
static DB::Context context = DB::Context::createGlobal();
context.makeGlobalContext();
try
{

View File

@ -31,6 +31,7 @@ try
DateLUT::instance();
Context context = Context::createGlobal();
context.makeGlobalContext();
context.setPath("./");

View File

@ -1,12 +1,17 @@
#pragma once
#include <Parsers/ASTQueryWithTableAndOutput.h>
#include <Parsers/ASTPartition.h>
namespace DB
{
struct ASTCheckQuery : public ASTQueryWithTableAndOutput
{
ASTPtr partition;
/** Get the text that identifies this element. */
String getID(char delim) const override { return "CheckQuery" + (delim + database) + delim + table; }
@ -19,7 +24,7 @@ struct ASTCheckQuery : public ASTQueryWithTableAndOutput
}
protected:
void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked frame) const override
void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override
{
std::string nl_or_nothing = settings.one_line ? "" : "\n";
@ -37,6 +42,12 @@ protected:
}
settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << backQuoteIfNeed(table) << (settings.hilite ? hilite_none : "");
}
if (partition)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << " PARTITION " << (settings.hilite ? hilite_none : "");
partition->formatImpl(settings, state, frame);
}
}
};

View File

@ -3,6 +3,7 @@
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ExpressionElementParsers.h>
#include <Parsers/ASTCheckQuery.h>
#include <Parsers/ParserPartition.h>
namespace DB
@ -11,9 +12,11 @@ namespace DB
bool ParserCheckQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
ParserKeyword s_check_table("CHECK TABLE");
ParserKeyword s_partition("PARTITION");
ParserToken s_dot(TokenType::Dot);
ParserIdentifier table_parser;
ParserPartition partition_parser;
ASTPtr table;
ASTPtr database;
@ -23,24 +26,28 @@ bool ParserCheckQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
if (!table_parser.parse(pos, database, expected))
return false;
auto query = std::make_shared<ASTCheckQuery>();
if (s_dot.ignore(pos))
{
if (!table_parser.parse(pos, table, expected))
return false;
auto query = std::make_shared<ASTCheckQuery>();
getIdentifierName(database, query->database);
getIdentifierName(table, query->table);
node = query;
}
else
{
table = database;
auto query = std::make_shared<ASTCheckQuery>();
getIdentifierName(table, query->table);
node = query;
}
if (s_partition.ignore(pos, expected))
{
if (!partition_parser.parse(pos, query->partition, expected))
return false;
}
node = query;
return true;
}

View File

@ -0,0 +1,27 @@
#pragma once
#include <Core/Types.h>
#include <vector>
namespace DB
{
/// Result of CHECK TABLE query for single part of table
struct CheckResult
{
/// Part name for MergeTree tables, or file name for simpler tables
String fs_path;
/// Whether the check passed
bool success = false;
/// Failure message if any
String failure_message;
CheckResult() = default;
CheckResult(const String & fs_path_, bool success_, String failure_message_)
: fs_path(fs_path_), success(success_), failure_message(failure_message_)
{}
};
using CheckResults = std::vector<CheckResult>;
}
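As the InterpreterCheckQuery hunk above shows, each storage's `checkData()` now returns one `CheckResult` per checked part or file, and the interpreter turns the vector into a three-column result block (`part_path`, `is_passed`, `message`). A minimal sketch of producing and consuming such results; only the trimmed `CheckResult`/`CheckResults` shapes come from this header, the rest is illustrative:

```cpp
#include <iostream>
#include <string>
#include <vector>

/// CheckResult trimmed to its data members; CheckResults as in the header above.
struct CheckResult
{
    std::string fs_path;
    bool success = false;
    std::string failure_message;
};
using CheckResults = std::vector<CheckResult>;

/// Illustrative consumer: print results the way CHECK TABLE reports them
/// (part_path, is_passed, message).
void printReport(const CheckResults & results)
{
    for (const auto & r : results)
        std::cout << r.fs_path << '\t' << (r.success ? 1 : 0) << '\t' << r.failure_message << '\n';
}

int main()
{
    CheckResults results;
    results.push_back({"201907_1_1_0", true, ""});
    results.push_back({"201907_2_2_0", false, "Size of data.bin is wrong"});
    printReport(results);
}
```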

View File

@ -8,6 +8,7 @@
#include <Storages/IStorage_fwd.h>
#include <Storages/SelectQueryInfo.h>
#include <Storages/TableStructureLockHolder.h>
#include <Storages/CheckResults.h>
#include <Common/ActionLock.h>
#include <Common/Exception.h>
#include <Common/RWLock.h>
@ -285,7 +286,7 @@ public:
virtual bool mayBenefitFromIndexForIn(const ASTPtr & /* left_in_operand */, const Context & /* query_context */) const { return false; }
/// Checks validity of the data
virtual bool checkData() const { throw Exception("Check query is not supported for " + getName() + " storage", ErrorCodes::NOT_IMPLEMENTED); }
virtual CheckResults checkData(const ASTPtr & /* query */, const Context & /* context */) { throw Exception("Check query is not supported for " + getName() + " storage", ErrorCodes::NOT_IMPLEMENTED); }
/// Checks that table could be dropped right now
/// Otherwise - throws an exception with detailed information.

View File

@ -392,7 +392,7 @@ void MergedBlockOutputStream::writeImpl(const Block & block, const IColumn::Perm
auto & stream = *skip_indices_streams[i];
size_t prev_pos = 0;
size_t current_mark = 0;
size_t skip_index_current_mark = 0;
while (prev_pos < rows)
{
UInt64 limit = 0;
@ -402,7 +402,7 @@ void MergedBlockOutputStream::writeImpl(const Block & block, const IColumn::Perm
}
else
{
limit = index_granularity.getMarkRows(current_mark);
limit = index_granularity.getMarkRows(skip_index_current_mark);
if (skip_indices_aggregators[i]->empty())
{
skip_indices_aggregators[i] = index->createIndexAggregator();
@ -435,7 +435,7 @@ void MergedBlockOutputStream::writeImpl(const Block & block, const IColumn::Perm
}
}
prev_pos = pos;
current_mark++;
++skip_index_current_mark;
}
}
}

View File

@ -181,7 +181,7 @@ void ReplicatedMergeTreePartCheckThread::searchForMissingPart(const String & par
}
void ReplicatedMergeTreePartCheckThread::checkPart(const String & part_name)
CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_name)
{
LOG_WARNING(log, "Checking part " << part_name);
ProfileEvents::increment(ProfileEvents::ReplicatedPartChecks);
@ -197,6 +197,7 @@ void ReplicatedMergeTreePartCheckThread::checkPart(const String & part_name)
if (!part)
{
searchForMissingPart(part_name);
return {part_name, false, "Part is missing, will search for it"};
}
/// We have this part, and it's active. We will check whether we need this part and whether it has the right data.
else if (part->name == part_name)
@ -242,7 +243,7 @@ void ReplicatedMergeTreePartCheckThread::checkPart(const String & part_name)
if (need_stop)
{
LOG_INFO(log, "Checking part was cancelled.");
return;
return {part_name, false, "Checking part was cancelled"};
}
LOG_INFO(log, "Part " << part_name << " looks good.");
@ -253,13 +254,15 @@ void ReplicatedMergeTreePartCheckThread::checkPart(const String & part_name)
tryLogCurrentException(log, __PRETTY_FUNCTION__);
LOG_ERROR(log, "Part " << part_name << " looks broken. Removing it and queueing a fetch.");
String message = "Part " + part_name + " looks broken. Removing it and queueing a fetch.";
LOG_ERROR(log, message);
ProfileEvents::increment(ProfileEvents::ReplicatedPartChecksFailed);
storage.removePartAndEnqueueFetch(part_name);
/// Delete part locally.
storage.forgetPartAndMoveToDetached(part, "broken");
return {part_name, false, message};
}
}
else if (part->modification_time + MAX_AGE_OF_LOCAL_PART_THAT_WASNT_ADDED_TO_ZOOKEEPER < time(nullptr))
@ -269,8 +272,10 @@ void ReplicatedMergeTreePartCheckThread::checkPart(const String & part_name)
/// Therefore, delete only if the part is old (not very reliable).
ProfileEvents::increment(ProfileEvents::ReplicatedPartChecksFailed);
LOG_ERROR(log, "Unexpected part " << part_name << " in filesystem. Removing.");
String message = "Unexpected part " + part_name + " in filesystem. Removing.";
LOG_ERROR(log, message);
storage.forgetPartAndMoveToDetached(part, "unexpected");
return {part_name, false, message};
}
else
{
@ -290,6 +295,8 @@ void ReplicatedMergeTreePartCheckThread::checkPart(const String & part_name)
/// In the worst case, errors will still appear `old_parts_lifetime` seconds in error log until the part is removed as the old one.
LOG_WARNING(log, "We have part " << part->name << " covering part " << part_name);
}
return {part_name, true, ""};
}

View File

@ -11,6 +11,7 @@
#include <Core/Types.h>
#include <common/logger_useful.h>
#include <Core/BackgroundSchedulePool.h>
#include <Storages/CheckResults.h>
namespace DB
{
@ -66,12 +67,12 @@ public:
/// Get the number of parts in the queue for check.
size_t size() const;
/// Check part by name
CheckResult checkPart(const String & part_name);
private:
void run();
void checkPart(const String & part_name);
void searchForMissingPart(const String & part_name);
StorageReplicatedMergeTree & storage;


@ -53,6 +53,20 @@ public:
private:
ReadBufferFromFile mrk_file_buf;
std::pair<MarkInCompressedFile, size_t> readMarkFromFile()
{
size_t mrk_rows;
MarkInCompressedFile mrk_mark;
readIntBinary(mrk_mark.offset_in_compressed_file, mrk_hashing_buf);
readIntBinary(mrk_mark.offset_in_decompressed_block, mrk_hashing_buf);
if (mrk_file_extension == ".mrk2")
readIntBinary(mrk_rows, mrk_hashing_buf);
else
mrk_rows = index_granularity.getMarkRows(mark_position);
return {mrk_mark, mrk_rows};
}
public:
HashingReadBuffer mrk_hashing_buf;
@ -78,15 +92,8 @@ public:
void assertMark(bool only_read=false)
{
MarkInCompressedFile mrk_mark;
readIntBinary(mrk_mark.offset_in_compressed_file, mrk_hashing_buf);
readIntBinary(mrk_mark.offset_in_decompressed_block, mrk_hashing_buf);
size_t mrk_rows;
if (mrk_file_extension == ".mrk2")
readIntBinary(mrk_rows, mrk_hashing_buf);
else
mrk_rows = index_granularity.getMarkRows(mark_position);
auto [mrk_mark, mrk_rows] = readMarkFromFile();
bool has_alternative_mark = false;
MarkInCompressedFile alternative_data_mark = {};
MarkInCompressedFile data_mark = {};
@ -136,6 +143,12 @@ public:
+ toString(compressed_hashing_buf.count()) + " (compressed), "
+ toString(uncompressed_hashing_buf.count()) + " (uncompressed)", ErrorCodes::CORRUPTED_DATA);
if (index_granularity.hasFinalMark())
{
auto [final_mark, final_mark_rows] = readMarkFromFile();
if (final_mark_rows != 0)
throw Exception("Incorrect final mark at the end of " + mrk_file_path + " expected 0 rows, got " + toString(final_mark_rows), ErrorCodes::CORRUPTED_DATA);
}
if (!mrk_hashing_buf.eof())
throw Exception("EOF expected in " + mrk_file_path + " file"
+ " at position "
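Note: the duplicated mark-parsing code is factored into `readMarkFromFile()`, which returns a `{mark, rows}` pair; `assertMark()` now unpacks it with a structured binding, and the new block at the end reuses it to verify that, when the granularity has a final mark, that mark covers exactly 0 rows. A small self-contained sketch of the same "return a pair, reuse it at several call sites" refactor (types and stream-based parsing are placeholders):

    #include <cstdint>
    #include <istream>
    #include <sstream>
    #include <stdexcept>
    #include <string>
    #include <utility>

    struct MarkSketch
    {
        uint64_t offset_in_compressed_file = 0;
        uint64_t offset_in_decompressed_block = 0;
    };

    // Stand-in for the buffer-reading helper: here a mark and its row count are parsed from text.
    std::pair<MarkSketch, size_t> readMarkFromStream(std::istream & in)
    {
        MarkSketch mark;
        size_t rows = 0;
        in >> mark.offset_in_compressed_file >> mark.offset_in_decompressed_block >> rows;
        return {mark, rows};
    }

    void checkFinalMark(std::istream & in)
    {
        auto [final_mark, final_mark_rows] = readMarkFromStream(in);
        (void) final_mark;
        if (final_mark_rows != 0)
            throw std::runtime_error("Incorrect final mark: expected 0 rows, got " + std::to_string(final_mark_rows));
    }

    int main()
    {
        std::istringstream marks("100 0 8192  200 0 0");
        auto [regular_mark, regular_rows] = readMarkFromStream(marks); // ordinary mark: 8192 rows
        (void) regular_mark; (void) regular_rows;
        checkFinalMark(marks);                                         // final mark must cover 0 rows
        return 0;
    }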


@ -627,7 +627,7 @@ BlockOutputStreamPtr StorageLog::write(
return std::make_shared<LogBlockOutputStream>(*this);
}
bool StorageLog::checkData() const
CheckResults StorageLog::checkData(const ASTPtr & /* query */, const Context & /* context */)
{
std::shared_lock<std::shared_mutex> lock(rwlock);
return file_checker.check();
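Note: the Log engine (and, further down, TinyLog and StripeLog) keeps delegating to its `FileChecker`, but `checkData` now takes the query AST plus context and returns `CheckResults` — a list of per-file verdicts — instead of a single bool; `FileChecker::check()` itself evidently yields the richer type now, though that change is not shown in this excerpt. A hedged sketch of turning per-file boolean verdicts into such a list (names assumed, not the real FileChecker API):

    #include <string>
    #include <utility>
    #include <vector>

    struct CheckResult
    {
        std::string fs_path;
        bool success = false;
        std::string failure_message;
    };

    using CheckResults = std::vector<CheckResult>;

    // Hypothetical per-file verdicts standing in for FileChecker's internal size map.
    CheckResults checkFilesSketch(const std::vector<std::pair<std::string, bool>> & file_verdicts)
    {
        CheckResults results;
        for (const auto & [path, ok] : file_verdicts)
            results.push_back({path, ok, ok ? std::string() : std::string("Size mismatch")});
        return results;
    }

    int main()
    {
        auto results = checkFilesSketch({{"data.bin", true}, {"sizes.json", false}});
        return results.size() == 2 ? 0 : 1;
    }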


@ -39,7 +39,7 @@ public:
void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override;
bool checkData() const override;
CheckResults checkData(const ASTPtr & /* query */, const Context & /* context */) override;
void truncate(const ASTPtr &, const Context &) override;


@ -7,8 +7,10 @@
#include <Common/ThreadPool.h>
#include <Interpreters/InterpreterAlterQuery.h>
#include <Interpreters/PartLog.h>
#include <Parsers/ASTCheckQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/ASTPartition.h>
#include <Parsers/queryToString.h>
#include <Storages/MergeTree/MergeTreeData.h>
#include <Storages/MergeTree/ActiveDataPartSet.h>
@ -17,6 +19,7 @@
#include <Storages/MergeTree/MergeTreeBlockOutputStream.h>
#include <Storages/MergeTree/DiskSpaceMonitor.h>
#include <Storages/MergeTree/MergeList.h>
#include <Storages/MergeTree/checkDataPart.h>
#include <Poco/DirectoryIterator.h>
#include <Poco/File.h>
#include <optional>
@ -1122,4 +1125,59 @@ ActionLock StorageMergeTree::getActionLock(StorageActionBlockType action_type)
return {};
}
CheckResults StorageMergeTree::checkData(const ASTPtr & query, const Context & context)
{
CheckResults results;
DataPartsVector data_parts;
if (const auto & check_query = query->as<ASTCheckQuery &>(); check_query.partition)
{
String partition_id = getPartitionIDFromQuery(check_query.partition, context);
data_parts = getDataPartsVectorInPartition(MergeTreeDataPartState::Committed, partition_id);
}
else
data_parts = getDataPartsVector();
for (auto & part : data_parts)
{
String full_part_path = part->getFullPath();
/// If the checksums file is not present, calculate the checksums and write them to disk.
String checksums_path = full_part_path + "checksums.txt";
String tmp_checksums_path = full_part_path + "checksums.txt.tmp";
if (!Poco::File(checksums_path).exists())
{
try
{
auto calculated_checksums = checkDataPart(part, false, primary_key_data_types, skip_indices);
calculated_checksums.checkEqual(part->checksums, true);
WriteBufferFromFile out(tmp_checksums_path, 4096);
part->checksums.write(out);
Poco::File(tmp_checksums_path).renameTo(checksums_path);
results.emplace_back(part->name, true, "Checksums recounted and written to disk.");
}
catch (const Exception & ex)
{
Poco::File tmp_file(tmp_checksums_path);
if (tmp_file.exists())
tmp_file.remove();
results.emplace_back(part->name, false,
"Check of part finished with error: '" + ex.message() + "'");
}
}
else
{
try
{
checkDataPart(part, true, primary_key_data_types, skip_indices);
results.emplace_back(part->name, true, "");
}
catch (const Exception & ex)
{
results.emplace_back(part->name, false, ex.message());
}
}
}
return results;
}
}
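Note: the new `StorageMergeTree::checkData` walks the committed parts (optionally restricted to the partition named in `CHECK TABLE ... PARTITION`). A part that already has checksums.txt is verified with `checkDataPart`; a part that lacks it gets its checksums recomputed, compared against the in-memory ones, written to a `.tmp` file and renamed into place, with the temporary removed if anything throws. A minimal sketch of that write-then-rename step using std::filesystem instead of Poco (paths and payload are placeholders):

    #include <filesystem>
    #include <fstream>
    #include <string>
    #include <system_error>

    namespace fs = std::filesystem;

    void writeChecksumsAtomically(const fs::path & part_dir, const std::string & checksums_payload)
    {
        const fs::path final_path = part_dir / "checksums.txt";
        const fs::path tmp_path = part_dir / "checksums.txt.tmp";
        try
        {
            {
                std::ofstream out(tmp_path, std::ios::binary);
                out << checksums_payload;
            }                                  // flush and close before renaming
            fs::rename(tmp_path, final_path);  // the file becomes visible only when complete
        }
        catch (...)
        {
            std::error_code ec;
            fs::remove(tmp_path, ec);          // best-effort cleanup, as in the catch block above
            throw;
        }
    }

    int main()
    {
        writeChecksumsAtomically(".", "files and hashes would go here\n"); // demo only
        return 0;
    }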


@ -70,6 +70,8 @@ public:
String getDataPath() const override { return full_path; }
CheckResults checkData(const ASTPtr & query, const Context & context) override;
private:
String path;


@ -30,6 +30,7 @@
#include <Parsers/ASTOptimizeQuery.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/queryToString.h>
#include <Parsers/ASTCheckQuery.h>
#include <IO/ReadBufferFromString.h>
#include <IO/Operators.h>
@ -2380,7 +2381,7 @@ void StorageReplicatedMergeTree::removePartAndEnqueueFetch(const String & part_n
auto results = zookeeper->multi(ops);
String path_created = dynamic_cast<const Coordination::CreateResponse &>(*results[0]).path_created;
String path_created = dynamic_cast<const Coordination::CreateResponse &>(*results.back()).path_created;
log_entry->znode_name = path_created.substr(path_created.find_last_of('/') + 1);
queue.insert(zookeeper, log_entry);
}
@ -5107,4 +5108,31 @@ bool StorageReplicatedMergeTree::dropPartsInPartition(
return true;
}
CheckResults StorageReplicatedMergeTree::checkData(const ASTPtr & query, const Context & context)
{
CheckResults results;
DataPartsVector data_parts;
if (const auto & check_query = query->as<ASTCheckQuery &>(); check_query.partition)
{
String partition_id = getPartitionIDFromQuery(check_query.partition, context);
data_parts = getDataPartsVectorInPartition(MergeTreeDataPartState::Committed, partition_id);
}
else
data_parts = getDataPartsVector();
for (auto & part : data_parts)
{
try
{
results.push_back(part_check_thread.checkPart(part->name));
}
catch (const Exception & ex)
{
results.emplace_back(part->name, false, "Check of part finished with error: '" + ex.message() + "'");
}
}
return results;
}
}
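Note: two independent changes here. First, `path_created` is now taken from `results.back()` rather than `results[0]`, presumably because the create of the log entry is the last request in the multi-op batch, so its response is the last element. Second, the new `checkData` hands each selected part to the part-check thread and turns an exception into a failed entry instead of aborting the whole `CHECK TABLE`. A small sketch of the "collect per-item results, capture per-item errors" loop (names assumed):

    #include <exception>
    #include <functional>
    #include <string>
    #include <vector>

    struct CheckResult
    {
        std::string fs_path;
        bool success = false;
        std::string failure_message;
    };

    using CheckResults = std::vector<CheckResult>;

    // Runs check_one for every name; an exception fails only that single item.
    CheckResults checkAllSketch(const std::vector<std::string> & part_names,
                                const std::function<CheckResult(const std::string &)> & check_one)
    {
        CheckResults results;
        for (const auto & name : part_names)
        {
            try
            {
                results.push_back(check_one(name));
            }
            catch (const std::exception & ex)
            {
                results.push_back({name, false,
                    std::string("Check of part finished with error: '") + ex.what() + "'"});
            }
        }
        return results;
    }

    int main()
    {
        auto results = checkAllSketch({"all_1_1_0", "all_2_2_0"},
            [](const std::string & name) { return CheckResult{name, true, ""}; });
        return results.size() == 2 ? 0 : 1;
    }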


@ -170,6 +170,8 @@ public:
String getDataPath() const override { return full_path; }
CheckResults checkData(const ASTPtr & query, const Context & context) override;
private:
/// Delete old parts from disk and from ZooKeeper.
void clearOldPartsAndRemoveFromZK();


@ -284,7 +284,7 @@ BlockOutputStreamPtr StorageStripeLog::write(
}
bool StorageStripeLog::checkData() const
CheckResults StorageStripeLog::checkData(const ASTPtr & /* query */, const Context & /* context */)
{
std::shared_lock<std::shared_mutex> lock(rwlock);
return file_checker.check();


@ -41,7 +41,7 @@ public:
void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override;
bool checkData() const override;
CheckResults checkData(const ASTPtr & /* query */, const Context & /* context */) override;
/// Data of the file.
struct ColumnData


@ -32,6 +32,7 @@
#include <Storages/StorageTinyLog.h>
#include <Storages/StorageFactory.h>
#include <Storages/CheckResults.h>
#include <Poco/DirectoryIterator.h>
@ -406,7 +407,7 @@ BlockOutputStreamPtr StorageTinyLog::write(
}
bool StorageTinyLog::checkData() const
CheckResults StorageTinyLog::checkData(const ASTPtr & /* query */, const Context & /* context */)
{
return file_checker.check();
}


@ -40,7 +40,7 @@ public:
void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override;
bool checkData() const override;
CheckResults checkData(const ASTPtr & /* query */, const Context & /* context */) override;
/// Column data
struct ColumnData

Some files were not shown because too many files have changed in this diff.