Merge branch 'master' into disable_features

Silviu Caragea, 2019-03-13 09:51:20 +02:00, committed via GitHub
commit b3c674f893
13 changed files with 43 additions and 23 deletions


@@ -364,6 +364,7 @@ if (DEFAULT_LIBS)
     add_default_libs(zstd)
     add_default_libs(snappy)
     add_default_libs(arrow)
+    add_default_libs(protoc)
     add_default_libs(thrift_static)
     add_default_libs(boost_regex_internal)
 endif ()


@@ -13,5 +13,4 @@ ClickHouse is an open-source column-oriented database management system that all
 ## Upcoming Events
-* [ClickHouse Community Meetup](https://www.eventbrite.com/e/meetup-clickhouse-in-the-wild-deployment-success-stories-registration-55305051899) in San Francisco on February 19.
 * [ClickHouse Community Meetup](https://www.eventbrite.com/e/clickhouse-meetup-in-madrid-registration-55376746339) in Madrid on April 2.


@@ -4,12 +4,6 @@ if (ENABLE_PROTOBUF)
 option(USE_INTERNAL_PROTOBUF_LIBRARY "Set to FALSE to use system protobuf instead of bundled" ${NOT_UNBUNDLED})
-if(OS_FREEBSD AND SANITIZE STREQUAL "address")
-    # ../contrib/protobuf/src/google/protobuf/arena_impl.h:45:10: fatal error: 'sanitizer/asan_interface.h' file not found
-    set(MISSING_INTERNAL_PROTOBUF_LIBRARY 1)
-    set(USE_INTERNAL_PROTOBUF_LIBRARY 0)
-endif()
 if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/protobuf/cmake/CMakeLists.txt")
     if(USE_INTERNAL_PROTOBUF_LIBRARY)
         message(WARNING "submodule contrib/protobuf is missing. to fix try run: \n git submodule update --init --recursive")
@@ -98,6 +92,16 @@ elseif(NOT MISSING_INTERNAL_PROTOBUF_LIBRARY)
     endfunction()
 endif()
+if(OS_FREEBSD AND SANITIZE STREQUAL "address")
+    # ../contrib/protobuf/src/google/protobuf/arena_impl.h:45:10: fatal error: 'sanitizer/asan_interface.h' file not found
+    # #include <sanitizer/asan_interface.h>
+    if(LLVM_INCLUDE_DIRS)
+        set(Protobuf_INCLUDE_DIR ${Protobuf_INCLUDE_DIR} ${LLVM_INCLUDE_DIRS})
+    else()
+        set(USE_PROTOBUF 0)
+    endif()
+endif()
 endif()

 message(STATUS "Using protobuf=${USE_PROTOBUF}: ${Protobuf_INCLUDE_DIR} : ${Protobuf_LIBRARY}")
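For context on the hunk above: protobuf's arena code pulls in <sanitizer/asan_interface.h> when built under AddressSanitizer, and on FreeBSD that header ships only with LLVM, so the new branch appends LLVM_INCLUDE_DIRS when available and disables protobuf otherwise. A minimal sketch of the poisoning API that the missing header provides (assumes compiling with clang++ -fsanitize=address; the buffer and sizes are illustrative):

```cpp
#include <sanitizer/asan_interface.h>  /// the header the FreeBSD+ASan build could not find
#include <cstdlib>

int main()
{
    char * buf = static_cast<char *>(std::malloc(64));
    ASAN_POISON_MEMORY_REGION(buf + 32, 32);    /// mark the second half off-limits, as arenas do for unused space
    buf[0] = 'a';                               /// fine: this byte is not poisoned
    /// buf[40] = 'a';                          /// would trigger a use-after-poison report
    ASAN_UNPOISON_MEMORY_REGION(buf + 32, 32);  /// unpoison before handing memory back
    std::free(buf);
}
```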

contrib/cppkafka vendored

@@ -1 +1 @@
-Subproject commit 860c90e92eee6690aa74a2ca7b7c5c6930dffecd
+Subproject commit 9b184d881c15cc50784b28688c7c99d3d764db24

contrib/librdkafka vendored

@@ -1 +1 @@
-Subproject commit 363dcad5a23dc29381cc626620e68ae418b3af19
+Subproject commit 51ae5f5fd8b742e56f47a8bb0136344868818285


@@ -112,7 +112,7 @@ if (CLICKHOUSE_SPLIT_BINARY)
     add_custom_target (clickhouse-bundle ALL DEPENDS ${CLICKHOUSE_ALL_TARGETS})
     add_custom_target (clickhouse ALL DEPENDS clickhouse-bundle)
-    install (PROGRAMS clickhouse DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
+    install(PROGRAMS clickhouse-split-helper DESTINATION ${CMAKE_INSTALL_BINDIR} RENAME clickhouse COMPONENT clickhouse)
 else ()
     if (USE_EMBEDDED_COMPILER)
         # before add_executable !


@@ -1,3 +1,4 @@
+#include <new>
 #include <iostream>
 #include <vector>
 #include <string>
@@ -139,6 +140,10 @@ bool isClickhouseApp(const std::string & app_suffix, std::vector<char *> & argv)
 int main(int argc_, char ** argv_)
 {
+    /// Reset new handler to default (that throws std::bad_alloc)
+    /// It is needed because LLVM library clobbers it.
+    std::set_new_handler(nullptr);
+
 #if USE_EMBEDDED_COMPILER
     if (argc_ >= 2 && 0 == strcmp(argv_[1], "-cc1"))
         return mainEntryClickHouseClang(argc_, argv_);
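The std::set_new_handler(nullptr) call added above restores the default behavior in which a failed `new` throws std::bad_alloc; per the commit's own comment, a linked LLVM library installs its own handler. A self-contained sketch of that semantic (the oversized allocation exists only to force a failure):

```cpp
#include <new>
#include <cstdint>
#include <iostream>

int main()
{
    std::set_new_handler(nullptr);  /// no handler installed: a failed `new` throws
    try
    {
        char * p = new char[SIZE_MAX / 2];  /// implausibly large request, bound to fail
        delete[] p;
    }
    catch (const std::bad_alloc & e)
    {
        std::cout << "caught: " << e.what() << '\n';  /// reached because the default behavior throws
    }
}
```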


@@ -260,6 +260,15 @@ int Server::main(const std::vector<std::string> & /*args*/)
     StatusFile status{path + "status"};

     SCOPE_EXIT({
+        /** Ask to cancel background jobs all table engines,
+          * and also query_log.
+          * It is important to do early, not in destructor of Context, because
+          * table engines could use Context on destroy.
+          */
+        LOG_INFO(log, "Shutting down storages.");
+        global_context->shutdown();
+        LOG_DEBUG(log, "Shutted down storages.");
+
         /** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
           * At this moment, no one could own shared part of Context.
           */
@@ -498,17 +507,6 @@ int Server::main(const std::vector<std::string> & /*args*/)

     global_context->setCurrentDatabase(default_database);

-    SCOPE_EXIT({
-        /** Ask to cancel background jobs all table engines,
-          * and also query_log.
-          * It is important to do early, not in destructor of Context, because
-          * table engines could use Context on destroy.
-          */
-        LOG_INFO(log, "Shutting down storages.");
-        global_context->shutdown();
-        LOG_DEBUG(log, "Shutted down storages.");
-    });
-
     if (has_zookeeper && config().has("distributed_ddl"))
     {
         /// DDL worker should be started after all tables were loaded
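The two hunks above move the storage shutdown out of its own SCOPE_EXIT and into the guard that destroys the Context, so the shutdown is guaranteed to run first, while table engines can still use the Context. A hedged sketch of the ordering, with a hand-rolled guard standing in for ClickHouse's SCOPE_EXIT macro:

```cpp
#include <functional>
#include <iostream>

/// Minimal stand-in for SCOPE_EXIT: runs a callback when the enclosing scope unwinds.
struct ScopeGuard
{
    std::function<void()> on_exit;
    ~ScopeGuard() { on_exit(); }
};

int main()
{
    ScopeGuard guard{[]
    {
        /// Statements inside one guard run in order: storages shut down
        /// while the (simulated) Context is still alive, then the Context goes.
        std::cout << "Shutting down storages.\n";
        std::cout << "Destroying Context.\n";
    }};
    std::cout << "Server running...\n";
}   /// the guard fires here, after main's body finishes
```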


@@ -224,7 +224,7 @@ DDLWorker::DDLWorker(const std::string & zk_root_dir, Context & context_, const
     {
         task_max_lifetime = config->getUInt64(prefix + ".task_max_lifetime", static_cast<UInt64>(task_max_lifetime));
         cleanup_delay_period = config->getUInt64(prefix + ".cleanup_delay_period", static_cast<UInt64>(cleanup_delay_period));
-        max_tasks_in_queue = std::max(static_cast<UInt64>(1), config->getUInt64(prefix + ".max_tasks_in_queue", max_tasks_in_queue));
+        max_tasks_in_queue = std::max<UInt64>(1, config->getUInt64(prefix + ".max_tasks_in_queue", max_tasks_in_queue));

         if (config->has(prefix + ".profile"))
             context.setSetting("profile", config->getString(prefix + ".profile"));


@@ -7,7 +7,7 @@ namespace DB
 {

 KafkaBlockInputStream::KafkaBlockInputStream(
-    StorageKafka & storage_, const Context & context_, const String & schema, UInt64 max_block_size_)
+    StorageKafka & storage_, const Context & context_, const String & schema, size_t max_block_size_)
     : storage(storage_), context(context_), max_block_size(max_block_size_)
 {
     context.setSetting("input_format_skip_unknown_fields", 1u); // Always skip unknown fields regardless of the context (JSON or TSKV)


@@ -0,0 +1,5 @@
+1
+1
+1
+1
+1	1


@@ -0,0 +1,8 @@
+SELECT * FROM (SELECT 1 AS x) ALL LEFT JOIN (SELECT 1 AS x) USING x;
+SELECT * FROM (SELECT 1 AS x) ALL LEFT JOIN (SELECT 2 AS x) USING x;
+SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 1 AS x) AS t2 USING x;
+SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 2 AS x) AS t2 USING x;
+SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 1 AS x) AS t2 ON t1.x = t2.x;
+-- (bug) SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x;