Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-19 14:11:58 +00:00)

Commit 49314bef9f: Merge remote-tracking branch 'upstream/master' into fix25
@@ -220,7 +220,6 @@ endif ()
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package (Threads)

include (cmake/find_cxxabi.cmake)
include (cmake/find_cxx.cmake)

include (cmake/test_compiler.cmake)
@@ -407,6 +406,11 @@ if (UNBUNDLED OR NOT (OS_LINUX OR APPLE) OR ARCH_32)
option (NO_WERROR "Disable -Werror compiler option" ON)
endif ()

if (USE_LIBCXX)
set (HAVE_LIBCXX 1)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
endif()

if (USE_LIBCXX AND USE_INTERNAL_LIBCXX_LIBRARY)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nostdinc++ -isystem ${LIBCXX_INCLUDE_DIR} -isystem ${LIBCXXABI_INCLUDE_DIR}")
endif ()
@@ -1,5 +1,5 @@
if (NOT APPLE)
option (USE_INTERNAL_LIBCXX_LIBRARY "Set to FALSE to use system libcxx library instead of bundled" ${NOT_UNBUNDLED})
option (USE_INTERNAL_LIBCXX_LIBRARY "Set to FALSE to use system libcxx and libcxxabi libraries instead of bundled" ${NOT_UNBUNDLED})
endif ()

if (USE_INTERNAL_LIBCXX_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libcxx/include/vector")
@@ -7,17 +7,20 @@ if (USE_INTERNAL_LIBCXX_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib
set (USE_INTERNAL_LIBCXX_LIBRARY 0)
endif ()

if (USE_INTERNAL_LIBCXX_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libcxxabi/src")
message (WARNING "submodule contrib/libcxxabi is missing. to fix try run: \n git submodule update --init --recursive")
set (USE_INTERNAL_LIBCXXABI_LIBRARY 0)
endif ()

if (NOT USE_INTERNAL_LIBCXX_LIBRARY)
find_library (LIBCXX_LIBRARY c++)
find_path (LIBCXX_INCLUDE_DIR NAMES vector PATHS ${LIBCXX_INCLUDE_PATHS})
endif ()

if (LIBCXX_LIBRARY AND LIBCXX_INCLUDE_DIR)
find_library (LIBCXXABI_LIBRARY c++abi)
else ()
set (LIBCXX_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxx/include)
set (USE_INTERNAL_LIBCXX_LIBRARY 1)
set (LIBCXXABI_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxxabi/include)
set (LIBCXX_LIBRARY cxx_static)
set (HAVE_LIBCXX 1)
set (LIBCXXABI_LIBRARY cxxabi_static)
endif ()

message (STATUS "Using libcxx: ${LIBCXX_INCLUDE_DIR} : ${LIBCXX_LIBRARY}")
message (STATUS "Using libcxx: ${LIBCXX_LIBRARY}")
message (STATUS "Using libcxxabi: ${LIBCXXABI_LIBRARY}")

@@ -1,22 +0,0 @@
if (NOT APPLE)
option (USE_INTERNAL_LIBCXXABI_LIBRARY "Set to FALSE to use system libcxxabi library instead of bundled" ${NOT_UNBUNDLED})
endif ()

if (USE_INTERNAL_LIBCXXABI_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libcxxabi/src")
message (WARNING "submodule contrib/libcxxabi is missing. to fix try run: \n git submodule update --init --recursive")
set (USE_INTERNAL_LIBCXXABI_LIBRARY 0)
endif ()

if (NOT USE_INTERNAL_LIBCXXABI_LIBRARY)
find_library (LIBCXXABI_LIBRARY cxxabi)
find_path (LIBCXXABI_INCLUDE_DIR NAMES vector PATHS ${LIBCXXABI_INCLUDE_PATHS})
endif ()

if (LIBCXXABI_LIBRARY AND LIBCXXABI_INCLUDE_DIR)
else ()
set (LIBCXXABI_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxxabi/include)
set (USE_INTERNAL_LIBCXXABI_LIBRARY 1)
set (LIBCXXABI_LIBRARY cxxabi_static)
endif ()

message (STATUS "Using libcxxabi: ${LIBCXXABI_INCLUDE_DIR} : ${LIBCXXABI_LIBRARY}")
contrib/CMakeLists.txt (vendored, 5 changed lines)
@@ -15,12 +15,9 @@ if (USE_INTERNAL_UNWIND_LIBRARY)
add_subdirectory (libunwind-cmake)
endif ()

if (USE_LIBCXX AND USE_INTERNAL_LIBCXXABI_LIBRARY)
add_subdirectory(libcxxabi-cmake)
endif()

if (USE_LIBCXX AND USE_INTERNAL_LIBCXX_LIBRARY)
add_subdirectory(libcxx-cmake)
add_subdirectory(libcxxabi-cmake)
endif()
@@ -28,6 +28,7 @@
#include <Storages/ColumnDefault.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <Compression/CompressionFactory.h>
#include <common/logger_useful.h>

#include <Processors/Formats/LazyOutputFormat.h>

@@ -172,12 +173,13 @@ void TCPHandler::runImpl()
send_exception_with_stack_trace = query_context->getSettingsRef().calculate_text_stack_trace;

/// Should we send internal logs to client?
const auto client_logs_level = query_context->getSettingsRef().send_logs_level;
if (client_revision >= DBMS_MIN_REVISION_WITH_SERVER_LOGS
    && query_context->getSettingsRef().send_logs_level.value != LogsLevel::none)
    && client_logs_level.value != LogsLevel::none)
{
    state.logs_queue = std::make_shared<InternalTextLogsQueue>();
    state.logs_queue->max_priority = Poco::Logger::parseLevel(query_context->getSettingsRef().send_logs_level.toString());
    CurrentThread::attachInternalTextLogsQueue(state.logs_queue);
    state.logs_queue->max_priority = Poco::Logger::parseLevel(client_logs_level.toString());
    CurrentThread::attachInternalTextLogsQueue(state.logs_queue, client_logs_level.value);
}

query_context->setExternalTablesInitializer([&global_settings, this] (Context & context)
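The hunk above reads the client's `send_logs_level` setting once into `client_logs_level` and passes it along when the internal logs queue is attached. As a rough, self-contained illustration of the idea (the `LogsQueue` type and `push` helper below are invented for this sketch and are not the real ClickHouse classes), a per-client level decides which server-side records get forwarded at all:

```cpp
#include <iostream>
#include <queue>
#include <string>

// Hypothetical stand-ins for InternalTextLogsQueue / LogsLevel, for illustration only.
enum class LogsLevel { none, error, warning, information, debug, trace };

struct LogsQueue
{
    LogsLevel client_level = LogsLevel::none;   // verbosity requested by the client
    std::queue<std::string> messages;

    // Forward a server-side record only if the client asked for at least that verbosity.
    void push(LogsLevel record_level, const std::string & text)
    {
        if (client_level != LogsLevel::none && record_level <= client_level)
            messages.push(text);
    }
};

int main()
{
    LogsQueue queue;
    queue.client_level = LogsLevel::information;     // e.g. SET send_logs_level = 'information'
    queue.push(LogsLevel::debug, "not forwarded");   // too verbose for this client
    queue.push(LogsLevel::error, "forwarded");       // coarse enough, gets queued
    std::cout << queue.messages.size() << '\n';      // prints 1
}
```

With `send_logs_level = 'none'` nothing is queued at all, which matches the early bail-out in the condition above.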
@ -60,11 +60,12 @@ void CurrentThread::updateProgressOut(const Progress & value)
|
||||
current_thread->progress_out.incrementPiecewiseAtomically(value);
|
||||
}
|
||||
|
||||
void CurrentThread::attachInternalTextLogsQueue(const std::shared_ptr<InternalTextLogsQueue> & logs_queue)
|
||||
void CurrentThread::attachInternalTextLogsQueue(const std::shared_ptr<InternalTextLogsQueue> & logs_queue,
|
||||
LogsLevel client_logs_level)
|
||||
{
|
||||
if (unlikely(!current_thread))
|
||||
return;
|
||||
current_thread->attachInternalTextLogsQueue(logs_queue);
|
||||
current_thread->attachInternalTextLogsQueue(logs_queue, client_logs_level);
|
||||
}
|
||||
|
||||
std::shared_ptr<InternalTextLogsQueue> CurrentThread::getInternalTextLogsQueue()
|
||||
|
@ -39,7 +39,8 @@ public:
|
||||
static ThreadGroupStatusPtr getGroup();
|
||||
|
||||
/// A logs queue used by TCPHandler to pass logs to a client
|
||||
static void attachInternalTextLogsQueue(const std::shared_ptr<InternalTextLogsQueue> & logs_queue);
|
||||
static void attachInternalTextLogsQueue(const std::shared_ptr<InternalTextLogsQueue> & logs_queue,
|
||||
LogsLevel client_logs_level);
|
||||
static std::shared_ptr<InternalTextLogsQueue> getInternalTextLogsQueue();
|
||||
|
||||
/// Makes system calls to update ProfileEvents that contain info from rusage and taskstats
|
||||
|
@ -117,7 +117,8 @@ void ThreadStatus::assertState(const std::initializer_list<int> & permitted_stat
|
||||
throw Exception(ss.str(), ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
|
||||
void ThreadStatus::attachInternalTextLogsQueue(const InternalTextLogsQueuePtr & logs_queue)
|
||||
void ThreadStatus::attachInternalTextLogsQueue(const InternalTextLogsQueuePtr & logs_queue,
|
||||
LogsLevel client_logs_level)
|
||||
{
|
||||
logs_queue_ptr = logs_queue;
|
||||
|
||||
@ -126,6 +127,7 @@ void ThreadStatus::attachInternalTextLogsQueue(const InternalTextLogsQueuePtr &
|
||||
|
||||
std::lock_guard lock(thread_group->mutex);
|
||||
thread_group->logs_queue_ptr = logs_queue;
|
||||
thread_group->client_logs_level = client_logs_level;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -4,6 +4,8 @@
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/MemoryTracker.h>
|
||||
|
||||
#include <Core/SettingsCommon.h>
|
||||
|
||||
#include <IO/Progress.h>
|
||||
|
||||
#include <memory>
|
||||
@ -62,6 +64,8 @@ public:
|
||||
UInt32 master_thread_number = 0;
|
||||
Int32 master_thread_os_id = -1;
|
||||
|
||||
LogsLevel client_logs_level = LogsLevel::none;
|
||||
|
||||
String query;
|
||||
};
|
||||
|
||||
@ -130,7 +134,8 @@ public:
|
||||
return thread_state == Died ? nullptr : logs_queue_ptr.lock();
|
||||
}
|
||||
|
||||
void attachInternalTextLogsQueue(const InternalTextLogsQueuePtr & logs_queue);
|
||||
void attachInternalTextLogsQueue(const InternalTextLogsQueuePtr & logs_queue,
|
||||
LogsLevel client_logs_level);
|
||||
|
||||
/// Sets query context for current thread and its thread group
|
||||
/// NOTE: query_context have to be alive until detachQuery() is called
|
||||
|
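The widened `attachInternalTextLogsQueue` signatures above carry the client's log level down to the thread group, so every thread working on the query can consult the same verbosity. A minimal sketch of that plumbing, with simplified stand-ins for `ThreadGroup` and `InternalTextLogsQueue` (the real types hold far more state):

```cpp
#include <memory>
#include <mutex>

enum class LogsLevel { none, error, warning, information, debug, trace };
struct InternalTextLogsQueue {};                     // placeholder for the real queue type

struct ThreadGroup
{
    std::mutex mutex;
    std::weak_ptr<InternalTextLogsQueue> logs_queue_ptr;
    LogsLevel client_logs_level = LogsLevel::none;
};

// Mirrors the extended attach signature: queue and requested level travel together,
// so all threads of the query group later see the same client verbosity.
void attachInternalTextLogsQueue(ThreadGroup & group,
                                 const std::shared_ptr<InternalTextLogsQueue> & logs_queue,
                                 LogsLevel client_logs_level)
{
    std::lock_guard lock(group.mutex);
    group.logs_queue_ptr = logs_queue;
    group.client_logs_level = client_logs_level;
}

int main()
{
    ThreadGroup group;
    auto queue = std::make_shared<InternalTextLogsQueue>();
    attachInternalTextLogsQueue(group, queue, LogsLevel::trace);
}
```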
@@ -332,7 +332,11 @@ struct Settings : public SettingsCollection<Settings>
M(SettingBool, allow_simdjson, true, "Allow using simdjson library in 'JSON*' functions if AVX2 instructions are available. If disabled rapidjson will be used.") \
\
M(SettingUInt64, max_partitions_per_insert_block, 100, "Limit maximum number of partitions in single INSERTed block. Zero means unlimited. Throw exception if the block contains too many partitions. This setting is a safety threshold, because using large number of partitions is a common misconception.") \
M(SettingBool, check_query_single_value_result, true, "Return check query result as single 1/0 value")
M(SettingBool, check_query_single_value_result, true, "Return check query result as single 1/0 value") \
\
/** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \
\
M(SettingBool, allow_experimental_low_cardinality_type, true, "Obsolete setting, does nothing. Will be removed after 2019-08-13")

DECLARE_SETTINGS_COLLECTION(LIST_OF_SETTINGS)
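The functional part of this hunk is the trailing backslash added after the `check_query_single_value_result` entry: the settings are declared through one multi-line macro list, so a missing line-continuation character silently cuts the list short. A toy version of the same X-macro pattern (the setting names and defaults here are illustrative only):

```cpp
#include <iostream>

// Toy settings list. Every entry except the last needs a trailing backslash,
// otherwise the list (and everything appended after it) is truncated.
#define LIST_OF_EXAMPLE_SETTINGS(M) \
    M(max_partitions_per_insert_block, 100) \
    M(check_query_single_value_result, 1) \
    M(allow_experimental_low_cardinality_type, 1)

#define PRINT_SETTING(NAME, DEFAULT) \
    std::cout << #NAME << " = " << DEFAULT << '\n';

int main()
{
    LIST_OF_EXAMPLE_SETTINGS(PRINT_SETTING)   // expands to three print statements
}
```

Once the continuation is in place, the obsolete `allow_experimental_low_cardinality_type` entry can be appended to the same list without further changes.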
@ -30,8 +30,6 @@ public:
|
||||
const DictionaryLifetime dict_lifetime,
|
||||
const size_t size);
|
||||
|
||||
std::exception_ptr getCreationException() const override { return {}; }
|
||||
|
||||
std::string getName() const override { return name; }
|
||||
|
||||
std::string getTypeName() const override { return "Cache"; }
|
||||
@ -62,8 +60,6 @@ public:
|
||||
|
||||
const DictionaryStructure & getStructure() const override { return dict_struct; }
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> getCreationTime() const override { return creation_time; }
|
||||
|
||||
bool isInjective(const std::string & attribute_name) const override
|
||||
{
|
||||
return dict_struct.attributes[&getAttribute(attribute_name) - attributes.data()].injective;
|
||||
@ -284,8 +280,6 @@ private:
|
||||
mutable std::atomic<size_t> element_count{0};
|
||||
mutable std::atomic<size_t> hit_count{0};
|
||||
mutable std::atomic<size_t> query_count{0};
|
||||
|
||||
const std::chrono::time_point<std::chrono::system_clock> creation_time = std::chrono::system_clock::now();
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -50,8 +50,6 @@ public:
|
||||
|
||||
std::string getKeyDescription() const { return key_description; }
|
||||
|
||||
std::exception_ptr getCreationException() const override { return {}; }
|
||||
|
||||
std::string getName() const override { return name; }
|
||||
|
||||
std::string getTypeName() const override { return "ComplexKeyCache"; }
|
||||
@ -86,8 +84,6 @@ public:
|
||||
|
||||
const DictionaryStructure & getStructure() const override { return dict_struct; }
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> getCreationTime() const override { return creation_time; }
|
||||
|
||||
bool isInjective(const std::string & attribute_name) const override
|
||||
{
|
||||
return dict_struct.attributes[&getAttribute(attribute_name) - attributes.data()].injective;
|
||||
|
@ -29,19 +29,9 @@ ComplexKeyHashedDictionary::ComplexKeyHashedDictionary(
|
||||
, saved_block{std::move(saved_block)}
|
||||
{
|
||||
createAttributes();
|
||||
|
||||
try
|
||||
{
|
||||
loadData();
|
||||
calculateBytesAllocated();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
creation_exception = std::current_exception();
|
||||
}
|
||||
|
||||
creation_time = std::chrono::system_clock::now();
|
||||
}
|
||||
|
||||
#define DECLARE(TYPE) \
|
||||
void ComplexKeyHashedDictionary::get##TYPE( \
|
||||
|
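The hunk above drops the `try`/`catch` that used to stash a load failure into `creation_exception`; after the change an error from `loadData()` simply propagates out of the constructor, so a dictionary object only exists if it loaded successfully. A small standalone sketch of the two styles (the failing `loadData()` is simulated here with a plain `std::runtime_error`):

```cpp
#include <iostream>
#include <stdexcept>

// Before: the constructor swallowed errors and stashed them for later inspection.
struct DictionaryStoringError
{
    std::exception_ptr creation_exception;
    DictionaryStoringError()
    {
        try { throw std::runtime_error("source is unreachable"); }  // stand-in for loadData()
        catch (...) { creation_exception = std::current_exception(); }
    }
};

// After: loading errors propagate, so a half-constructed dictionary never exists.
struct DictionaryThrowing
{
    DictionaryThrowing()
    {
        throw std::runtime_error("source is unreachable");          // stand-in for loadData()
    }
};

int main()
{
    DictionaryStoringError d1;   // constructed, but unusable until the stored error is checked
    try { DictionaryThrowing d2; }
    catch (const std::exception & e) { std::cout << e.what() << '\n'; }
}
```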
@ -32,8 +32,6 @@ public:
|
||||
|
||||
std::string getKeyDescription() const { return key_description; }
|
||||
|
||||
std::exception_ptr getCreationException() const override { return creation_exception; }
|
||||
|
||||
std::string getName() const override { return name; }
|
||||
|
||||
std::string getTypeName() const override { return "ComplexKeyHashed"; }
|
||||
@ -61,8 +59,6 @@ public:
|
||||
|
||||
const DictionaryStructure & getStructure() const override { return dict_struct; }
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> getCreationTime() const override { return creation_time; }
|
||||
|
||||
bool isInjective(const std::string & attribute_name) const override
|
||||
{
|
||||
return dict_struct.attributes[&getAttribute(attribute_name) - attributes.data()].injective;
|
||||
@ -255,10 +251,6 @@ private:
|
||||
size_t bucket_count = 0;
|
||||
mutable std::atomic<size_t> query_count{0};
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> creation_time;
|
||||
|
||||
std::exception_ptr creation_exception;
|
||||
|
||||
BlockPtr saved_block;
|
||||
};
|
||||
|
||||
|
@ -36,19 +36,9 @@ FlatDictionary::FlatDictionary(
|
||||
, saved_block{std::move(saved_block)}
|
||||
{
|
||||
createAttributes();
|
||||
|
||||
try
|
||||
{
|
||||
loadData();
|
||||
calculateBytesAllocated();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
creation_exception = std::current_exception();
|
||||
}
|
||||
|
||||
creation_time = std::chrono::system_clock::now();
|
||||
}
|
||||
|
||||
|
||||
void FlatDictionary::toParent(const PaddedPODArray<Key> & ids, PaddedPODArray<Key> & out) const
|
||||
|
@ -29,8 +29,6 @@ public:
|
||||
bool require_nonempty,
|
||||
BlockPtr saved_block = nullptr);
|
||||
|
||||
std::exception_ptr getCreationException() const override { return creation_exception; }
|
||||
|
||||
std::string getName() const override { return name; }
|
||||
|
||||
std::string getTypeName() const override { return "Flat"; }
|
||||
@ -58,8 +56,6 @@ public:
|
||||
|
||||
const DictionaryStructure & getStructure() const override { return dict_struct; }
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> getCreationTime() const override { return creation_time; }
|
||||
|
||||
bool isInjective(const std::string & attribute_name) const override
|
||||
{
|
||||
return dict_struct.attributes[&getAttribute(attribute_name) - attributes.data()].injective;
|
||||
@ -244,10 +240,6 @@ private:
|
||||
size_t bucket_count = 0;
|
||||
mutable std::atomic<size_t> query_count{0};
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> creation_time;
|
||||
|
||||
std::exception_ptr creation_exception;
|
||||
|
||||
BlockPtr saved_block;
|
||||
};
|
||||
|
||||
|
@ -30,19 +30,9 @@ HashedDictionary::HashedDictionary(
|
||||
, saved_block{std::move(saved_block)}
|
||||
{
|
||||
createAttributes();
|
||||
|
||||
try
|
||||
{
|
||||
loadData();
|
||||
calculateBytesAllocated();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
creation_exception = std::current_exception();
|
||||
}
|
||||
|
||||
creation_time = std::chrono::system_clock::now();
|
||||
}
|
||||
|
||||
|
||||
void HashedDictionary::toParent(const PaddedPODArray<Key> & ids, PaddedPODArray<Key> & out) const
|
||||
|
@ -28,8 +28,6 @@ public:
|
||||
bool require_nonempty,
|
||||
BlockPtr saved_block = nullptr);
|
||||
|
||||
std::exception_ptr getCreationException() const override { return creation_exception; }
|
||||
|
||||
std::string getName() const override { return name; }
|
||||
|
||||
std::string getTypeName() const override { return "Hashed"; }
|
||||
@ -57,8 +55,6 @@ public:
|
||||
|
||||
const DictionaryStructure & getStructure() const override { return dict_struct; }
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> getCreationTime() const override { return creation_time; }
|
||||
|
||||
bool isInjective(const std::string & attribute_name) const override
|
||||
{
|
||||
return dict_struct.attributes[&getAttribute(attribute_name) - attributes.data()].injective;
|
||||
@ -248,10 +244,6 @@ private:
|
||||
size_t bucket_count = 0;
|
||||
mutable std::atomic<size_t> query_count{0};
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> creation_time;
|
||||
|
||||
std::exception_ptr creation_exception;
|
||||
|
||||
BlockPtr saved_block;
|
||||
};
|
||||
|
||||
|
@ -80,19 +80,9 @@ RangeHashedDictionary::RangeHashedDictionary(
|
||||
, require_nonempty(require_nonempty)
|
||||
{
|
||||
createAttributes();
|
||||
|
||||
try
|
||||
{
|
||||
loadData();
|
||||
calculateBytesAllocated();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
creation_exception = std::current_exception();
|
||||
}
|
||||
|
||||
creation_time = std::chrono::system_clock::now();
|
||||
}
|
||||
|
||||
|
||||
#define DECLARE_MULTIPLE_GETTER(TYPE) \
|
||||
|
@ -24,8 +24,6 @@ public:
|
||||
const DictionaryLifetime dict_lifetime,
|
||||
bool require_nonempty);
|
||||
|
||||
std::exception_ptr getCreationException() const override { return creation_exception; }
|
||||
|
||||
std::string getName() const override { return dictionary_name; }
|
||||
|
||||
std::string getTypeName() const override { return "RangeHashed"; }
|
||||
@ -53,8 +51,6 @@ public:
|
||||
|
||||
const DictionaryStructure & getStructure() const override { return dict_struct; }
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> getCreationTime() const override { return creation_time; }
|
||||
|
||||
bool isInjective(const std::string & attribute_name) const override
|
||||
{
|
||||
return dict_struct.attributes[&getAttribute(attribute_name) - attributes.data()].injective;
|
||||
@ -227,10 +223,6 @@ private:
|
||||
size_t element_count = 0;
|
||||
size_t bucket_count = 0;
|
||||
mutable std::atomic<size_t> query_count{0};
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> creation_time;
|
||||
|
||||
std::exception_ptr creation_exception;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -33,8 +33,6 @@ public:
|
||||
|
||||
std::string getKeyDescription() const { return key_description; }
|
||||
|
||||
std::exception_ptr getCreationException() const override { return creation_exception; }
|
||||
|
||||
std::string getName() const override { return name; }
|
||||
|
||||
std::string getTypeName() const override { return "Trie"; }
|
||||
@ -62,8 +60,6 @@ public:
|
||||
|
||||
const DictionaryStructure & getStructure() const override { return dict_struct; }
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> getCreationTime() const override { return creation_time; }
|
||||
|
||||
bool isInjective(const std::string & attribute_name) const override
|
||||
{
|
||||
return dict_struct.attributes[&getAttribute(attribute_name) - attributes.data()].injective;
|
||||
|
@@ -159,10 +159,14 @@ void AsynchronousMetrics::update()

size_t max_part_count_for_partition = 0;

size_t number_of_databases = databases.size();
size_t total_number_of_tables = 0;

for (const auto & db : databases)
{
    for (auto iterator = db.second->getIterator(context); iterator->isValid(); iterator->next())
    {
        ++total_number_of_tables;
        auto & table = iterator->table();
        StorageMergeTree * table_merge_tree = dynamic_cast<StorageMergeTree *>(table.get());
        StorageReplicatedMergeTree * table_replicated_merge_tree = dynamic_cast<StorageReplicatedMergeTree *>(table.get());
@@ -213,6 +217,9 @@ void AsynchronousMetrics::update()
set("ReplicasMaxRelativeDelay", max_relative_delay);

set("MaxPartCountForPartition", max_part_count_for_partition);

set("NumberOfDatabases", number_of_databases);
set("NumberOfTables", total_number_of_tables);
}

#if USE_TCMALLOC
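The new `NumberOfDatabases` and `NumberOfTables` metrics are computed by walking every database and counting the tables returned by its iterator. A simplified model of that loop, with an ordinary map standing in for the database and iterator interfaces:

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

int main()
{
    // Databases mapped to their tables; a stand-in for the real catalog.
    std::map<std::string, std::vector<std::string>> databases = {
        {"default", {"hits", "visits"}},
        {"system",  {"parts", "tables", "metrics"}},
    };

    size_t number_of_databases = databases.size();
    size_t total_number_of_tables = 0;
    for (const auto & db : databases)
        total_number_of_tables += db.second.size();   // the real code walks a table iterator

    std::cout << "NumberOfDatabases " << number_of_databases << '\n';
    std::cout << "NumberOfTables "    << total_number_of_tables << '\n';
}
```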
@ -504,20 +504,6 @@ std::shared_ptr<CatBoostLibHolder> getCatBoostWrapperHolder(const std::string &
|
||||
CatBoostModel::CatBoostModel(std::string name_, std::string model_path_, std::string lib_path_,
|
||||
const ExternalLoadableLifetime & lifetime)
|
||||
: name(std::move(name_)), model_path(std::move(model_path_)), lib_path(std::move(lib_path_)), lifetime(lifetime)
|
||||
{
|
||||
try
|
||||
{
|
||||
init();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
creation_exception = std::current_exception();
|
||||
}
|
||||
|
||||
creation_time = std::chrono::system_clock::now();
|
||||
}
|
||||
|
||||
void CatBoostModel::init()
|
||||
{
|
||||
api_provider = getCatBoostWrapperHolder(lib_path);
|
||||
api = &api_provider->getAPI();
|
||||
|
@ -68,9 +68,6 @@ public:
|
||||
|
||||
std::shared_ptr<const IExternalLoadable> clone() const override;
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> getCreationTime() const override { return creation_time; }
|
||||
std::exception_ptr getCreationException() const override { return creation_exception; }
|
||||
|
||||
private:
|
||||
std::string name;
|
||||
std::string model_path;
|
||||
@ -85,9 +82,6 @@ private:
|
||||
size_t cat_features_count;
|
||||
size_t tree_count;
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> creation_time;
|
||||
std::exception_ptr creation_exception;
|
||||
|
||||
void init();
|
||||
};
|
||||
|
||||
|
@ -219,7 +219,7 @@ class ExternalLoader::LoadingDispatcher : private boost::noncopyable
|
||||
{
|
||||
public:
|
||||
/// Called to load or reload an object.
|
||||
using CreateObjectFunction = std::function<ObjectWithException(
|
||||
using CreateObjectFunction = std::function<LoadablePtr(
|
||||
const String & /* name */, const ObjectConfig & /* config */, bool config_changed, const LoadablePtr & /* previous_version */)>;
|
||||
|
||||
/// Called after loading/reloading an object to calculate the time of the next update.
|
||||
@ -527,18 +527,50 @@ public:
|
||||
/// Starts reloading all the object which update time is earlier than now.
|
||||
/// The function doesn't touch the objects which were never tried to load.
|
||||
void reloadOutdated()
|
||||
{
|
||||
std::unordered_map<LoadablePtr, bool> is_modified_map;
|
||||
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
TimePoint now = std::chrono::system_clock::now();
|
||||
for (const auto & name_and_info : infos)
|
||||
{
|
||||
const auto & info = name_and_info.second;
|
||||
if ((now >= info.next_update_time) && !info.loading() && info.was_loading())
|
||||
is_modified_map.emplace(info.object, true);
|
||||
}
|
||||
}
|
||||
|
||||
/// The `mutex` should be unlocked while we're calling the function is_object_modified().
|
||||
for (auto & [object, is_modified_flag] : is_modified_map)
|
||||
{
|
||||
try
|
||||
{
|
||||
is_modified_flag = is_object_modified(object);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(log, "Could not check if " + type_name + " '" + object->getName() + "' was modified");
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
TimePoint now = std::chrono::system_clock::now();
|
||||
for (auto & [name, info] : infos)
|
||||
if ((now >= info.next_update_time) && !info.loading() && info.was_loading())
|
||||
{
|
||||
if (info.loaded() && !is_object_modified(info.object))
|
||||
auto it = is_modified_map.find(info.object);
|
||||
if (it == is_modified_map.end())
|
||||
continue; /// Object has been just added, it can be simply omitted from this update of outdated.
|
||||
bool is_modified_flag = it->second;
|
||||
if (info.loaded() && !is_modified_flag)
|
||||
info.next_update_time = calculate_next_update_time(info.object, info.error_count);
|
||||
else
|
||||
startLoading(name, info);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
struct Info
|
||||
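The rewritten `reloadOutdated` above splits the work into passes so that `is_object_modified()`, which may perform slow I/O against the dictionary source, is never called while `mutex` is held. A compact sketch of the same collect/check/apply pattern (the `infos` map and the modification check below are placeholders):

```cpp
#include <iostream>
#include <map>
#include <mutex>
#include <string>

std::mutex mutex;
std::map<std::string, bool> infos = {{"dict_a", true}, {"dict_b", false}};  // name -> was_loading

bool is_object_modified(const std::string &) { return true; }  // may be slow, must run unlocked

int main()
{
    // Pass 1: under the lock, only collect the candidates.
    std::map<std::string, bool> is_modified_map;
    {
        std::lock_guard lock(mutex);
        for (const auto & [name, was_loading] : infos)
            if (was_loading)
                is_modified_map.emplace(name, true);
    }

    // Pass 2: call the potentially slow check with the lock released.
    for (auto & [name, flag] : is_modified_map)
        flag = is_object_modified(name);

    // Pass 3: re-acquire the lock and act on the collected results.
    std::lock_guard lock(mutex);
    for (const auto & [name, modified] : is_modified_map)
        std::cout << name << (modified ? ": reload" : ": keep") << '\n';
}
```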
@ -751,7 +783,7 @@ private:
|
||||
std::exception_ptr new_exception;
|
||||
try
|
||||
{
|
||||
std::tie(new_object, new_exception) = create_object(name, config, config_changed, previous_version);
|
||||
new_object = create_object(name, config, config_changed, previous_version);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
@ -760,8 +792,6 @@ private:
|
||||
|
||||
if (!new_object && !new_exception)
|
||||
throw Exception("No object created and no exception raised for " + type_name, ErrorCodes::LOGICAL_ERROR);
|
||||
if (new_object && new_exception)
|
||||
new_object = nullptr;
|
||||
|
||||
/// Calculate a new update time.
|
||||
TimePoint next_update_time;
|
||||
@ -1120,17 +1150,13 @@ void ExternalLoader::reload(bool load_never_loading)
|
||||
loading_dispatcher->reload(load_never_loading);
|
||||
}
|
||||
|
||||
ExternalLoader::ObjectWithException ExternalLoader::createObject(
|
||||
ExternalLoader::LoadablePtr ExternalLoader::createObject(
|
||||
const String & name, const ObjectConfig & config, bool config_changed, const LoadablePtr & previous_version) const
|
||||
{
|
||||
if (previous_version && !config_changed)
|
||||
{
|
||||
auto new_object = previous_version->clone();
|
||||
return {new_object, new_object->getCreationException()};
|
||||
}
|
||||
return previous_version->clone();
|
||||
|
||||
auto new_object = create(name, *config.config, config.key_in_config);
|
||||
return {new_object, new_object->getCreationException()};
|
||||
return create(name, *config.config, config.key_in_config);
|
||||
}
|
||||
|
||||
ExternalLoader::TimePoint ExternalLoader::calculateNextUpdateTime(const LoadablePtr & loaded_object, size_t error_count) const
|
||||
|
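`createObject` now returns a plain `LoadablePtr` and reports failures by throwing; the caller's existing `try`/`catch` (shown above) turns the exception into `new_exception`. A hedged sketch of that calling convention with heavily simplified types:

```cpp
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

struct Loadable { std::string name; };
using LoadablePtr = std::shared_ptr<Loadable>;

// Hypothetical factory in the new style: it either returns an object or throws.
LoadablePtr createObject(const std::string & name, bool config_ok)
{
    if (!config_ok)
        throw std::runtime_error("bad config for " + name);
    return std::make_shared<Loadable>(Loadable{name});
}

int main()
{
    LoadablePtr new_object;
    std::exception_ptr new_exception;
    try { new_object = createObject("dict", true); }        // the caller's try/catch supplies new_exception
    catch (...) { new_exception = std::current_exception(); }
    std::cout << (new_object ? new_object->name : "failed") << '\n';
}
```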
@ -186,10 +186,8 @@ protected:
|
||||
|
||||
private:
|
||||
struct ObjectConfig;
|
||||
using ObjectWithException = std::pair<LoadablePtr, std::exception_ptr>;
|
||||
|
||||
ObjectWithException
|
||||
createObject(const String & name, const ObjectConfig & config, bool config_changed, const LoadablePtr & previous_version) const;
|
||||
LoadablePtr createObject(const String & name, const ObjectConfig & config, bool config_changed, const LoadablePtr & previous_version) const;
|
||||
TimePoint calculateNextUpdateTime(const LoadablePtr & loaded_object, size_t error_count) const;
|
||||
|
||||
class ConfigFilesReader;
|
||||
|
@ -1,6 +1,5 @@
|
||||
#pragma once
|
||||
|
||||
#include <chrono>
|
||||
#include <string>
|
||||
#include <memory>
|
||||
#include <boost/noncopyable.hpp>
|
||||
@ -41,10 +40,6 @@ public:
|
||||
virtual bool isModified() const = 0;
|
||||
/// Returns new object with the same configuration. Is used to update modified object when lifetime exceeded.
|
||||
virtual std::shared_ptr<const IExternalLoadable> clone() const = 0;
|
||||
|
||||
virtual std::chrono::time_point<std::chrono::system_clock> getCreationTime() const = 0;
|
||||
|
||||
virtual std::exception_ptr getCreationException() const = 0;
|
||||
};
|
||||
|
||||
}
|
||||
|
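After this hunk, `IExternalLoadable` no longer exposes `getCreationTime()` or `getCreationException()`. Roughly, the contract shrinks to something like the following simplified sketch (the real interface also declares lifetime and update-support queries):

```cpp
#include <memory>
#include <string>

// Rough shape of the interface after the change: creation time and creation exception
// are gone from the contract; the loader tracks load status and errors itself.
class IExternalLoadable
{
public:
    virtual ~IExternalLoadable() = default;
    virtual std::string getName() const = 0;
    virtual bool isModified() const = 0;
    virtual std::shared_ptr<const IExternalLoadable> clone() const = 0;
};

// Minimal implementation just to show the reduced surface.
class ExampleLoadable : public IExternalLoadable
{
public:
    std::string getName() const override { return "example"; }
    bool isModified() const override { return false; }
    std::shared_ptr<const IExternalLoadable> clone() const override { return std::make_shared<ExampleLoadable>(); }
};

int main()
{
    ExampleLoadable object;
    return object.isModified() ? 1 : 0;
}
```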
@@ -101,6 +101,9 @@ BlockInputStreamPtr InterpreterDescribeQuery::executeImpl()

for (const auto & column : columns)
{
    if (column.is_virtual)
        continue;

    res_columns[0]->insert(column.name);
    res_columns[1]->insert(column.type->getName());
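The added `is_virtual` check makes `DESCRIBE` skip virtual columns such as `_table` of a `Merge` table, which is exactly what the new `00965_*` test further below verifies. A self-contained sketch of the filtering loop (`ColumnDescription` here is a stripped-down stand-in for the real structure):

```cpp
#include <iostream>
#include <string>
#include <vector>

struct ColumnDescription { std::string name; std::string type; bool is_virtual = false; };

int main()
{
    std::vector<ColumnDescription> columns = {
        {"x", "UInt64", false},
        {"_table", "String", true},    // virtual column of a Merge table
    };

    for (const auto & column : columns)
    {
        if (column.is_virtual)
            continue;                  // same early continue as in the hunk above
        std::cout << column.name << '\t' << column.type << '\n';   // prints only "x UInt64"
    }
}
```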
@ -44,6 +44,9 @@ class Client:
|
||||
return self.get_query_request(sql, stdin=stdin, timeout=timeout, settings=settings, user=user).get_error()
|
||||
|
||||
|
||||
def query_and_get_answer_with_error(self, sql, stdin=None, timeout=None, settings=None, user=None):
|
||||
return self.get_query_request(sql, stdin=stdin, timeout=timeout, settings=settings, user=user).get_answer_and_error()
|
||||
|
||||
class QueryTimeoutExceedException(Exception):
|
||||
pass
|
||||
|
||||
@ -110,3 +113,17 @@ class CommandRequest:
|
||||
raise QueryRuntimeException('Client expected to be failed but succeeded! stdout: {}'.format(stdout))
|
||||
|
||||
return stderr
|
||||
|
||||
|
||||
def get_answer_and_error(self):
|
||||
self.process.wait()
|
||||
self.stdout_file.seek(0)
|
||||
self.stderr_file.seek(0)
|
||||
|
||||
stdout = self.stdout_file.read()
|
||||
stderr = self.stderr_file.read()
|
||||
|
||||
if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
|
||||
raise QueryTimeoutExceedException('Client timed out!')
|
||||
|
||||
return (stdout, stderr)
|
||||
|
@ -527,6 +527,10 @@ class ClickHouseInstance:
|
||||
def query_and_get_error(self, sql, stdin=None, timeout=None, settings=None, user=None):
|
||||
return self.client.query_and_get_error(sql, stdin, timeout, settings, user)
|
||||
|
||||
# The same as query_and_get_error but ignores successful query.
|
||||
def query_and_get_answer_with_error(self, sql, stdin=None, timeout=None, settings=None, user=None):
|
||||
return self.client.query_and_get_answer_with_error(sql, stdin, timeout, settings, user)
|
||||
|
||||
# Connects to the instance via HTTP interface, sends a query and returns the answer
|
||||
def http_query(self, sql, data=None):
|
||||
return urllib.urlopen("http://"+self.ip_address+":8123/?query="+urllib.quote(sql,safe=''), data).read()
|
||||
|
@ -12,7 +12,7 @@
|
||||
<table>dep_z</table>
|
||||
</clickhouse>
|
||||
</source>
|
||||
<lifetime>60</lifetime>
|
||||
<lifetime>5</lifetime>
|
||||
<layout>
|
||||
<flat/>
|
||||
</layout>
|
||||
@ -21,7 +21,7 @@
|
||||
<name>id</name>
|
||||
</id>
|
||||
<attribute>
|
||||
<name>String_</name>
|
||||
<name>a</name>
|
||||
<type>String</type>
|
||||
<null_value>XX</null_value>
|
||||
</attribute>
|
||||
|
@ -9,10 +9,10 @@
|
||||
<user>default</user>
|
||||
<password></password>
|
||||
<db>test</db>
|
||||
<table>dictionary_source</table>
|
||||
<table>small_dict_source</table>
|
||||
</clickhouse>
|
||||
</source>
|
||||
<lifetime>60</lifetime>
|
||||
<lifetime>5</lifetime>
|
||||
<layout>
|
||||
<flat/>
|
||||
</layout>
|
||||
@ -21,17 +21,17 @@
|
||||
<name>id</name>
|
||||
</id>
|
||||
<attribute>
|
||||
<name>Int64_</name>
|
||||
<type>Int64</type>
|
||||
<null_value>121</null_value>
|
||||
<name>b</name>
|
||||
<type>Int32</type>
|
||||
<null_value>-1</null_value>
|
||||
</attribute>
|
||||
<attribute>
|
||||
<name>Float32_</name>
|
||||
<type>Float32</type>
|
||||
<null_value>121</null_value>
|
||||
<name>c</name>
|
||||
<type>Float64</type>
|
||||
<null_value>-2</null_value>
|
||||
</attribute>
|
||||
<attribute>
|
||||
<name>String_</name>
|
||||
<name>a</name>
|
||||
<type>String</type>
|
||||
<null_value>YY</null_value>
|
||||
</attribute>
|
||||
|
@ -10,9 +10,10 @@
|
||||
<password></password>
|
||||
<db>dict</db>
|
||||
<table>dep_y</table>
|
||||
<invalidate_query>SELECT intDiv(count(), 5) from dict.dep_y</invalidate_query>
|
||||
</clickhouse>
|
||||
</source>
|
||||
<lifetime>60</lifetime>
|
||||
<lifetime>5</lifetime>
|
||||
<layout>
|
||||
<flat/>
|
||||
</layout>
|
||||
@ -21,12 +22,12 @@
|
||||
<name>id</name>
|
||||
</id>
|
||||
<attribute>
|
||||
<name>Int64_</name>
|
||||
<type>Int64</type>
|
||||
<null_value>122</null_value>
|
||||
<name>b</name>
|
||||
<type>Int32</type>
|
||||
<null_value>-3</null_value>
|
||||
</attribute>
|
||||
<attribute>
|
||||
<name>String_</name>
|
||||
<name>a</name>
|
||||
<type>String</type>
|
||||
<null_value>ZZ</null_value>
|
||||
</attribute>
|
||||
|
@ -3,7 +3,7 @@ import os
|
||||
import time
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.test_tools import TSV
|
||||
from helpers.test_tools import TSV, assert_eq_with_retry
|
||||
from generate_dictionaries import generate_structure, generate_dictionaries, DictionaryTestTable
|
||||
|
||||
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
|
||||
@ -54,6 +54,13 @@ def started_cluster():
|
||||
for line in TSV(instance.query('select name from system.dictionaries')).lines:
|
||||
print line,
|
||||
|
||||
# Create table `test.small_dict_source`
|
||||
instance.query('''
|
||||
drop table if exists test.small_dict_source;
|
||||
create table test.small_dict_source (id UInt64, a String, b Int32, c Float64) engine=Log;
|
||||
insert into test.small_dict_source values (0, 'water', 10, 1), (1, 'air', 40, 0.01), (2, 'earth', 100, 1.7);
|
||||
''')
|
||||
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
@ -166,17 +173,37 @@ def test_dictionary_dependency(started_cluster):
|
||||
|
||||
# Dictionary 'dep_x' depends on 'dep_z', which depends on 'dep_y'.
|
||||
# So they all should be loaded at once.
|
||||
assert query("SELECT dictGetString('dep_x', 'String_', toUInt64(1))") == "10577349846663553072\n"
|
||||
assert query("SELECT dictGetString('dep_x', 'a', toUInt64(1))") == "air\n"
|
||||
assert get_status('dep_x') == 'LOADED'
|
||||
assert get_status('dep_y') == 'LOADED'
|
||||
assert get_status('dep_z') == 'LOADED'
|
||||
|
||||
# Other dictionaries should work too.
|
||||
assert query("SELECT dictGetString('dep_y', 'String_', toUInt64(1))") == "10577349846663553072\n"
|
||||
assert query("SELECT dictGetString('dep_z', 'String_', toUInt64(1))") == "10577349846663553072\n"
|
||||
assert query("SELECT dictGetString('dep_x', 'String_', toUInt64(12121212))") == "XX\n"
|
||||
assert query("SELECT dictGetString('dep_y', 'String_', toUInt64(12121212))") == "YY\n"
|
||||
assert query("SELECT dictGetString('dep_z', 'String_', toUInt64(12121212))") == "ZZ\n"
|
||||
assert query("SELECT dictGetString('dep_y', 'a', toUInt64(1))") == "air\n"
|
||||
assert query("SELECT dictGetString('dep_z', 'a', toUInt64(1))") == "air\n"
|
||||
|
||||
assert query("SELECT dictGetString('dep_x', 'a', toUInt64(3))") == "XX\n"
|
||||
assert query("SELECT dictGetString('dep_y', 'a', toUInt64(3))") == "YY\n"
|
||||
assert query("SELECT dictGetString('dep_z', 'a', toUInt64(3))") == "ZZ\n"
|
||||
|
||||
# Update the source table.
|
||||
query("insert into test.small_dict_source values (3, 'fire', 30, 8)")
|
||||
|
||||
# Wait for dictionaries to be reloaded.
|
||||
assert_eq_with_retry(instance, "SELECT dictHas('dep_y', toUInt64(3))", "1", sleep_time = 2, retry_count = 10)
|
||||
assert query("SELECT dictGetString('dep_x', 'a', toUInt64(3))") == "XX\n"
|
||||
assert query("SELECT dictGetString('dep_y', 'a', toUInt64(3))") == "fire\n"
|
||||
assert query("SELECT dictGetString('dep_z', 'a', toUInt64(3))") == "ZZ\n"
|
||||
|
||||
# dep_x and dep_z are updated only when there `intDiv(count(), 4)` is changed.
|
||||
query("insert into test.small_dict_source values (4, 'ether', 404, 0.001)")
|
||||
assert_eq_with_retry(instance, "SELECT dictHas('dep_x', toUInt64(4))", "1", sleep_time = 2, retry_count = 10)
|
||||
assert query("SELECT dictGetString('dep_x', 'a', toUInt64(3))") == "fire\n"
|
||||
assert query("SELECT dictGetString('dep_y', 'a', toUInt64(3))") == "fire\n"
|
||||
assert query("SELECT dictGetString('dep_z', 'a', toUInt64(3))") == "fire\n"
|
||||
assert query("SELECT dictGetString('dep_x', 'a', toUInt64(4))") == "ether\n"
|
||||
assert query("SELECT dictGetString('dep_y', 'a', toUInt64(4))") == "ether\n"
|
||||
assert query("SELECT dictGetString('dep_z', 'a', toUInt64(4))") == "ether\n"
|
||||
|
||||
|
||||
def test_reload_while_loading(started_cluster):
|
||||
|
dbms/tests/integration/test_logs_level/__init__.py (new empty file)
@ -0,0 +1,26 @@
|
||||
<?xml version="1.0"?>
|
||||
<yandex>
|
||||
<logger>
|
||||
<level>information</level>
|
||||
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
|
||||
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
|
||||
<size>1000M</size>
|
||||
<count>10</count>
|
||||
</logger>
|
||||
|
||||
<openSSL>
|
||||
<client>
|
||||
<cacheSessions>true</cacheSessions>
|
||||
<verificationMode>none</verificationMode>
|
||||
<invalidCertificateHandler>
|
||||
<name>AcceptCertificateHandler</name>
|
||||
</invalidCertificateHandler>
|
||||
</client>
|
||||
</openSSL>
|
||||
|
||||
<max_concurrent_queries>500</max_concurrent_queries>
|
||||
<mark_cache_size>5368709120</mark_cache_size>
|
||||
<users_config>users.xml</users_config>
|
||||
|
||||
<dictionaries_config>/etc/clickhouse-server/config.d/*.xml</dictionaries_config>
|
||||
</yandex>
|
dbms/tests/integration/test_logs_level/test.py (new file, 18 lines)
@@ -0,0 +1,18 @@
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', main_configs=['configs/config_information.xml'])

@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()

def test_check_client_logs_level(start_cluster):
    logs = node.query_and_get_answer_with_error("SELECT 1", settings={"send_logs_level": 'trace'})[1]
    assert logs.count('Trace') != 0
@ -0,0 +1,24 @@
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
<Debug>
|
||||
<Trace>
|
||||
<Debug>
|
||||
|
||||
|
||||
|
||||
|
||||
<Information>
|
||||
<Debug>
|
||||
<Debug>
|
||||
<Debug>
|
||||
|
||||
|
||||
|
||||
|
||||
<Information>
|
||||
<Debug>
|
||||
<Information>
|
dbms/tests/queries/0_stateless/00965_pocopatch_logs_level_bugfix.sh (new executable file, 17 lines)
@ -0,0 +1,17 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
. $CURDIR/../shell_config.sh
|
||||
|
||||
> 00965_logs_level_bugfix.tmp
|
||||
|
||||
clickhouse-client --send_logs_level="trace" --query="SELECT 1;" 2>> 00965_logs_level_bugfix.tmp
|
||||
clickhouse-client --send_logs_level="debug" --query="SELECT 1;" 2>> 00965_logs_level_bugfix.tmp
|
||||
clickhouse-client --send_logs_level="information" --query="SELECT 1;" 2>> 00965_logs_level_bugfix.tmp
|
||||
clickhouse-client --send_logs_level="warning" --query="SELECT 1;" 2>> 00965_logs_level_bugfix.tmp
|
||||
clickhouse-client --send_logs_level="error" --query="SELECT 1;" 2>> 00965_logs_level_bugfix.tmp
|
||||
clickhouse-client --send_logs_level="none" --query="SELECT 1;" 2>> 00965_logs_level_bugfix.tmp
|
||||
|
||||
awk '{ print $8 }' 00965_logs_level_bugfix.tmp
|
||||
|
||||
|
@ -0,0 +1,12 @@
|
||||
<Debug>
|
||||
<Trace>
|
||||
<Debug>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<Information>
|
||||
<Debug>
|
||||
*****
|
||||
<Information>
|
@ -0,0 +1,17 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
. $CURDIR/../shell_config.sh
|
||||
|
||||
> 00965_send_logs_level_concurrent_queries_first.tmp
|
||||
> 00965_send_logs_level_concurrent_queries_second.tmp
|
||||
|
||||
clickhouse-client --send_logs_level="trace" --query="SELECT * from numbers(100000);" >> /dev/null 2>> 00965_send_logs_level_concurrent_queries_first.tmp &
|
||||
clickhouse-client --send_logs_level="information" --query="SELECT * from numbers(100000);" >> /dev/null 2>> 00965_send_logs_level_concurrent_queries_second.tmp
|
||||
|
||||
sleep 2
|
||||
|
||||
awk '{ print $8 }' 00965_send_logs_level_concurrent_queries_first.tmp
|
||||
echo "*****"
|
||||
awk '{ print $8 }' 00965_send_logs_level_concurrent_queries_second.tmp
|
||||
|
@@ -0,0 +1 @@
x UInt64
@@ -0,0 +1,9 @@
-- No virtual columns should be output in DESC TABLE query.

DROP TABLE IF EXISTS upyachka;
CREATE TABLE upyachka (x UInt64) ENGINE = Memory;

-- Merge table has virtual column `_table`
DESC TABLE merge(currentDatabase(), 'upyachka');

DROP TABLE upyachka;
debian/clickhouse-server.templates (vendored, 2 changed lines)
@@ -1,3 +1,3 @@
Template: clickhouse-server/default-password
Type: password
Description: Password for default user.
Description: Password for default user
@@ -514,7 +514,7 @@ Use the following parameters to configure logging:
```

## path
## path {#server_settings-path}

The path to the directory containing data.
@@ -71,7 +71,7 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64

## Details of Implementation

- Multiple SELECT queries can be performed concurrently, but INSERT queries will wait each other.
- Multiple `SELECT` queries can be performed concurrently, but `INSERT` queries will wait for each other.
- Not supported:
    - `ALTER`
    - `SELECT ... SAMPLE`
@@ -22,13 +22,21 @@ Example 2: `uniqArray(arr)` – Count the number of unique elements in all 'arr'

## -State

If you apply this combinator, the aggregate function doesn't return the resulting value (such as the number of unique values for the `uniq` function), but an intermediate state of the aggregation (for `uniq`, this is the hash table for calculating the number of unique values). This is an AggregateFunction(...) that can be used for further processing or stored in a table to finish aggregating later. To work with these states, use the [AggregatingMergeTree](../../operations/table_engines/aggregatingmergetree.md) table engine, the functions [`finalizeAggregation`](../functions/other_functions.md#finalizeaggregation) and [`runningAccumulate`](../functions/other_functions.md#function-runningaccumulate), and the combinators -Merge and -MergeState described below.
If you apply this combinator, the aggregate function doesn't return the resulting value (such as the number of unique values for the [uniq](reference.md#agg_function-uniq) function), but an intermediate state of the aggregation (for `uniq`, this is the hash table for calculating the number of unique values). This is an `AggregateFunction(...)` that can be used for further processing or stored in a table to finish aggregating later.

## -Merge
To work with these states, use:

- [AggregatingMergeTree](../../operations/table_engines/aggregatingmergetree.md) table engine.
- [finalizeAggregation](../functions/other_functions.md#function-finalizeaggregation) function.
- [runningAccumulate](../functions/other_functions.md#function-runningaccumulate) function.
- [-Merge](#aggregate_functions_combinators_merge) combinator.
- [-MergeState](#aggregate_functions_combinators_mergestate) combinator.

## -Merge {#aggregate_functions_combinators_merge}

If you apply this combinator, the aggregate function takes the intermediate aggregation state as an argument, combines the states to finish aggregation, and returns the resulting value.

## -MergeState.
## -MergeState {#aggregate_functions_combinators_mergestate}

Merges the intermediate aggregation states in the same way as the -Merge combinator. However, it doesn't return the resulting value, but an intermediate aggregation state, similar to the -State combinator.
@@ -107,22 +107,26 @@ Besides default data compression, defined in [server settings](../operations/ser

Supported compression algorithms:

- `NONE` - no compression for data applied
- `LZ4`
- `LZ4HC(level)` - (level) - LZ4\_HC compression algorithm with defined level.
    Possible `level` range: \[3, 12\]. Default value: 9. Greater values stands for better compression and higher CPU usage. Recommended value range: [4,9].
- `ZSTD(level)` - ZSTD compression algorithm with defined `level`. Possible `level` value range: \[1, 22\]. Default value: 1.
    Greater values stands for better compression and higher CPU usage.
- `Delta(delta_bytes)` - compression approach when raw values are replace with difference of two neighbour values. Up to `delta_bytes` are used for storing delta value.
    Possible `delta_bytes` values: 1, 2, 4, 8. Default value for delta bytes is `sizeof(type)`, if it is equals to 1, 2, 4, 8 and equals to 1 otherwise.
- `DoubleDelta` - stores delta of deltas in compact binary form, compressing values down to 1 bit (in the best case). Best compression rates are achieved on monotonic sequences with constant stride, e.g. time samples. Can be used against any fixed-width type. Implementation is based on [Gorilla paper](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf), and extended to support 64bit types. The drawback is 1 extra bit for 32-byte wide deltas: 5-bit prefix instead of 4-bit prefix.
- `Gorilla` - stores (parts of) xored values in compact binary form, compressing values down to 1 bit (in the best case). Best compression rate is achieved when neighbouring values are binary equal. Basic use case - floating point data that do not change rapidly. Implementation is based on [Gorilla paper](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf), and extended to support 64bit types.
- `NONE` — No compression.
- `LZ4` — Lossless [data compression algorithm](https://github.com/lz4/lz4) used by default. Applies LZ4 fast compression.
- `LZ4HC[(level)]` — LZ4 HC (high compression) algorithm with configurable level. Default level: 9. If you set `level <= 0`, the default level is applied. Possible levels: [1, 12]. Recommended levels are in range: [4, 9].
- `ZSTD[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable `level`. Possible levels: [1, 22]. Default value: 1.
- `Delta(delta_bytes)` — compression approach, when raw values are replaced with the difference of two neighbour values. Up to `delta_bytes` are used for storing delta value, so `delta_bytes` is a maximum size of raw values.
    Possible `delta_bytes` values: 1, 2, 4, 8. Default value for `delta_bytes` is `sizeof(type)`, if it is equals to 1, 2, 4, 8. Otherwise it equals 1.
- `DoubleDelta` — Compresses values down to 1 bit (in the best case), using deltas calculation. Best compression rates are achieved on monotonic sequences with constant stride, for example, time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64 bit types. Uses 1 extra bit for 32 byte deltas: 5 bit prefix instead of 4 bit prefix. For additional information, see the "Compressing time stamps" section of the [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf) document.
- `Gorilla` — Compresses values down to 1 bit (in the best case). The codec is efficient when storing series of floating point values that change slowly, because the best compression rate is achieved when neighbouring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64 bit types. For additional information, see the "Compressing values" section of the [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf) document.

High compression levels are useful for asymmetric scenarios, like compress once, decompress a lot of times. Greater levels stand for better compression and higher CPU usage.

!!!warning
    You cannot decompress ClickHouse database files with external utilities, for example, `lz4`. Use the special utility [clickhouse-compressor](https://github.com/yandex/ClickHouse/tree/master/dbms/programs/compressor).

Syntax example:

```
CREATE TABLE codec_example
(
dt Date CODEC(ZSTD), /* используется уровень сжатия по-умолчанию */
dt Date CODEC(ZSTD),
ts DateTime CODEC(LZ4HC),
float_value Float32 CODEC(NONE),
double_value Float64 CODEC(LZ4HC(9))
@@ -134,6 +138,7 @@ ORDER BY dt

Codecs can be combined in a pipeline. Default table codec is not included into pipeline (if it should be applied to a column, you have to specify it explicitly in pipeline). Example below shows an optimization approach for storing timeseries metrics.
Usually, values for particular metric, stored in `path` does not differ significantly from point to point. Using delta-encoding allows to reduce disk space usage significantly.

```
CREATE TABLE timeseries_example
(
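To make the `Delta`/`DoubleDelta` description above more concrete, here is a tiny, purely illustrative delta encoder (not ClickHouse code): it stores the first value as-is and then only the differences, which is why slowly changing or fixed-stride sequences such as timestamps compress so well afterwards:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Keep the first value, then store only the difference to the previous value.
std::vector<int64_t> delta_encode(const std::vector<int64_t> & values)
{
    std::vector<int64_t> deltas;
    int64_t previous = 0;
    for (int64_t value : values)
    {
        deltas.push_back(value - previous);
        previous = value;
    }
    return deltas;
}

int main()
{
    // Timestamps with a constant stride collapse to tiny, highly compressible deltas.
    std::vector<int64_t> timestamps = {1560000000, 1560000060, 1560000120, 1560000180};
    for (int64_t d : delta_encode(timestamps))
        std::cout << d << ' ';        // prints: 1560000000 60 60 60
    std::cout << '\n';
}
```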
@@ -151,4 +151,36 @@ SELECT geohashDecode('ezs42') AS res
└─────────────────────────────────┘
```

## geoToH3

Calculates [H3](https://uber.github.io/h3/#/documentation/overview/introduction) point index `(lon, lat)` with specified resolution.

```
geoToH3(lon, lat, resolution)
```

**Input values**

- `lon` — Longitude. Type: [Float64](../../data_types/float.md).
- `lat` — Latitude. Type: [Float64](../../data_types/float.md).
- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../data_types/int_uint.md).

**Returned values**

- Hexagon index number.
- 0 in case of error.

Type: [UInt64](../../data_types/int_uint.md).

**Example**

``` sql
SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index
```
```
┌────────────h3Index─┐
│ 644325524701193974 │
└────────────────────┘
```

[Original article](https://clickhouse.yandex/docs/en/query_language/functions/geo/) <!--hide-->
@@ -627,15 +627,36 @@ SELECT replicate(1, ['a', 'b', 'c'])
└───────────────────────────────┘
```

## filesystemAvailable
## filesystemAvailable {#function-filesystemavailable}

Returns the remaining space information of the disk, in bytes. This information is evaluated using the configured by path.
Returns the amount of remaining space in the filesystem where the files of the databases are located. See the [path](../../operations/server_settings/settings.md#server_settings-path) server setting description.

```
filesystemAvailable()
```

**Returned values**

- Amount of remaining space in bytes.

Type: [UInt64](../../data_types/int_uint.md).

**Example**

```sql
SELECT filesystemAvailable() AS "Free space", toTypeName(filesystemAvailable()) AS "Type"
```
```text
┌──Free space─┬─Type───┐
│ 18152624128 │ UInt64 │
└─────────────┴────────┘
```

## filesystemCapacity

Returns the capacity information of the disk, in bytes. This information is evaluated using the configured by path.

## finalizeAggregation
## finalizeAggregation {#function-finalizeaggregation}

Takes state of aggregate function. Returns result of aggregation (finalized state).
@@ -101,11 +101,11 @@ CREATE TABLE lineorder_flat
ENGINE = MergeTree
PARTITION BY toYear(LO_ORDERDATE)
ORDER BY (LO_ORDERDATE, LO_ORDERKEY) AS
SELECT *
FROM lineorder
ANY INNER JOIN customer ON LO_CUSTKEY = C_CUSTKEY
ANY INNER JOIN supplier ON LO_SUPPKEY = S_SUPPKEY
ANY INNER JOIN part ON LO_PARTKEY = P_PARTKEY;
SELECT l.*, c.*, s.*, p.*
FROM lineorder l
ANY INNER JOIN customer c ON (c.C_CUSTKEY = l.LO_CUSTKEY)
ANY INNER JOIN supplier s ON (s.S_SUPPKEY = l.LO_SUPPKEY)
ANY INNER JOIN part p ON (p.P_PARTKEY = l.LO_PARTKEY);

ALTER TABLE lineorder_flat DROP COLUMN C_CUSTKEY, DROP COLUMN S_SUPPKEY, DROP COLUMN P_PARTKEY;
```
@ -165,6 +165,8 @@ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FORMA
|
||||
|
||||
При парсинге, все значения могут парситься как в кавычках, так и без кавычек. Поддерживаются как двойные, так и одинарные кавычки. Строки также могут быть без кавычек. В этом случае они парсятся до символа-разделителя или перевода строки (CR или LF). В нарушение RFC, в случае парсинга строк не в кавычках, начальные и конечные пробелы и табы игнорируются. В качестве перевода строки, поддерживаются как Unix (LF), так и Windows (CR LF) и Mac OS Classic (LF CR) варианты.
|
||||
|
||||
Если установлена настройка [input_format_defaults_for_omitted_fields = 1](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields), то пустые значения без кавычек заменяются значениями по умолчанию для типа данных столбца.
|
||||
|
||||
`NULL` форматируется в виде `\N`.
|
||||
|
||||
Формат CSV поддерживает вывод totals и extremes аналогично `TabSeparated`.
|
||||
|
@ -514,7 +514,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat
|
||||
```
|
||||
|
||||
|
||||
## path
|
||||
## path {#server_settings-path}
|
||||
|
||||
Путь к каталогу с данными.
|
||||
|
||||
|
@ -181,20 +181,15 @@ Ok.
|
||||
|
||||
## input_format_defaults_for_omitted_fields {#session_settings-input_format_defaults_for_omitted_fields}
|
||||
|
||||
Включает/выключает расширенный обмен данными между клиентом ClickHouse и сервером ClickHouse. Параметр применяется для запросов `INSERT`.
|
||||
При вставке данных запросом `INSERT`, заменяет пропущенные поля значениям по умолчанию для типа данных столбца.
|
||||
|
||||
При выполнении запроса`INSERT`, клиент ClickHouse подготавливает данные и отправляет их на сервер для записи. При подготовке данных клиент получает структуру таблицы от сервера. В некоторых случаях клиенту требуется больше информации, чем сервер отправляет по умолчанию. Включите расширенный обмен данными с помощью настройки `input_format_defaults_for_omitted_fields = 1`.
|
||||
Поддерживаемые форматы вставки:
|
||||
|
||||
Если расширенный обмен данными включен, сервер отправляет дополнительные метаданные вместе со структурой таблицы. Состав метаданных зависит от операции.
|
||||
|
||||
Операции, для которых может потребоваться включить расширенный обмен данными:
|
||||
|
||||
- Вставка данных в формате [JSONEachRow](../../interfaces/formats.md#jsoneachrow).
|
||||
|
||||
Для всех остальных операций ClickHouse не применяет этот параметр.
|
||||
- [JSONEachRow](../../interfaces/formats.md#jsoneachrow)
|
||||
- [CSV](../../interfaces/formats.md#csv)
|
||||
|
||||
!!! note "Примечание"
|
||||
Функциональность расширенного обмена данными потребляет дополнительные вычислительные ресурсы на сервере и может снизить производительность.
|
||||
Когда опция включена, сервер отправляет клиенту расширенные метаданные. Это требует дополнительных вычислительных ресурсов на сервере и может снизить производительность.
|
||||
|
||||
Возможные значения:
|
||||
|
||||
|
@ -68,7 +68,7 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64
|
||||
|
||||
## Детали реализации
|
||||
|
||||
- Поддерживается многопоточное чтение и однопоточная запись.
|
||||
- Поддерживается одновременное выполнение множества запросов `SELECT`, запросы `INSERT` могут выполняться только последовательно.
|
||||
- Не поддерживается:
|
||||
- использование операций `ALTER` и `SELECT...SAMPLE`;
|
||||
- индексы;
|
||||
|
@ -25,7 +25,7 @@ SETTINGS
|
||||
[kafka_row_delimiter = 'delimiter_symbol',]
|
||||
[kafka_schema = '',]
|
||||
[kafka_num_consumers = N,]
|
||||
[kafka_skip_broken_messages = <0|1>]
|
||||
[kafka_skip_broken_messages = N]
|
||||
```
|
||||
|
||||
Обязательные параметры:
|
||||
@ -40,7 +40,7 @@ SETTINGS
|
||||
- `kafka_row_delimiter` – символ-разделитель записей (строк), которым завершается сообщение.
|
||||
- `kafka_schema` – опциональный параметр, необходимый, если используется формат, требующий определения схемы. Например, [Cap'n Proto](https://capnproto.org/) требует путь к файлу со схемой и название корневого объекта `schema.capnp:Message`.
|
||||
- `kafka_num_consumers` – количество потребителей (consumer) на таблицу. По умолчанию: `1`. Укажите больше потребителей, если пропускная способность одного потребителя недостаточна. Общее число потребителей не должно превышать количество партиций в топике, так как на одну партицию может быть назначено не более одного потребителя.
|
||||
- `kafka_skip_broken_messages` – режим обработки сообщений Kafka. Если `kafka_skip_broken_messages = 1`, то движок отбрасывает сообщения Кафки, которые не получилось обработать. Одно сообщение в точности соответствует одной записи (строке).
|
||||
- `kafka_skip_broken_messages` – максимальное количество некорректных сообщений в блоке. Если `kafka_skip_broken_messages = N`, то движок отбрасывает `N` сообщений Кафки, которые не получилось обработать. Одно сообщение в точности соответствует одной записи (строке). Значение по умолчанию – 0.
|
||||
|
||||
Примеры

@@ -23,13 +23,22 @@

## -State

If this combinator is applied, the aggregate function returns not the final value (for example, for the `uniq` function, the number of unique values) but an intermediate aggregation state (for `uniq`, the hash table used to compute the number of unique values). It has the type AggregateFunction(...) and can be used for further processing or stored in a table for later re-aggregation; see the "AggregatingMergeTree" section and "Functions for working with intermediate aggregation states".

If this combinator is applied, the aggregate function returns not the final value (for example, for the [uniq](reference.md#agg_function-uniq) function, the number of unique values) but an intermediate aggregation state (for `uniq`, the hash table used to compute the number of unique values). It has the type `AggregateFunction(...)` and can be used for further processing or stored in a table for later re-aggregation.
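
For illustration, a small sketch of the type a `-State` function returns; the query is an assumption and is not taken from this page:

```sql
-- uniqState returns an intermediate state of type AggregateFunction(uniq, UInt64),
-- not the number of unique values itself.
SELECT toTypeName(uniqState(number)) FROM numbers(10);
```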

## -Merge

To work with intermediate states, use:

- The [AggregatingMergeTree](../../operations/table_engines/aggregatingmergetree.md) table engine.
- The [finalizeAggregation](../functions/other_functions.md#function-finalizeaggregation) function.
- The [runningAccumulate](../functions/other_functions.md#function-runningaccumulate) function.
- The [-Merge](#aggregate_functions_combinators_merge) combinator.
- The [-MergeState](#aggregate_functions_combinators_mergestate) combinator.

## -Merge {#aggregate_functions_combinators_merge}

If this combinator is applied, the aggregate function takes intermediate aggregation states as an argument, combines (merges) these states, and returns the final value.
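
A brief sketch of merging per-group states into one final value; the query is illustrative and not taken from this page:

```sql
-- Each inner row holds a uniq state for one group; uniqMerge combines them
-- and returns the final count of unique values across all groups (100 here).
SELECT uniqMerge(state) AS result
FROM (SELECT uniqState(number) AS state FROM numbers(100) GROUP BY number % 10);
```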

## -MergeState.

## -MergeState {#aggregate_functions_combinators_mergestate}

Merges intermediate aggregation states in the same way as the -Merge combinator, but returns an intermediate aggregation state rather than the final value, similar to the -State combinator.
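
A sketch of chaining combinators to merge states while keeping the result as a state; it assumes the combinators compose as `uniqMergeState`, which is not shown on this page:

```sql
-- The merged result is still an AggregateFunction(uniq, UInt64) state,
-- so it can be stored or merged again later.
SELECT toTypeName(uniqMergeState(state))
FROM (SELECT uniqState(number) AS state FROM numbers(10) GROUP BY number % 2);
```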

@@ -132,13 +132,17 @@ SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res

Decodes any geohash-encoded string into longitude and latitude.

```
geohashDecode(geohash_string)
```

**Input values**

- encoded string — a string containing the geohash.
- `geohash_string` — a string containing the geohash.

**Returned values**

- (longitude, latitude) — longitude and latitude. A tuple of two `Float64` values.
- `(longitude, latitude)` — longitude and latitude. A tuple of two `Float64` values.

**Example**

@@ -154,7 +158,7 @@ SELECT geohashDecode('ezs42') AS res

## geoToH3

Returns the H3 index of the point (lon, lat) with the specified resolution
Returns the H3 index of the point `(lon, lat)` with the specified resolution

```
geoToH3(lon, lat, resolution)
```

@@ -162,15 +166,16 @@ geoToH3(lon, lat, resolution)

**Input values**

- `lon` - geographical longitude. Data type: [Float64](../../data_types/float.md).
- `lat` - geographical latitude. Data type: [Float64](../../data_types/float.md).
- `resolution` - the required index resolution. Data type: [UInt8](../../data_types/int_uint.md). Range of possible values: `[0, 15]`.
- `lon` — geographical longitude. Data type: [Float64](../../data_types/float.md).
- `lat` — geographical latitude. Data type: [Float64](../../data_types/float.md).
- `resolution` — the required index resolution. Data type: [UInt8](../../data_types/int_uint.md). Range of possible values: `[0, 15]`.

**Returned values**

Returns a value of type [UInt64] (../../data_types/int_uint.md).
`0` in case of error.
Otherwise, the hexagon index number is returned.
- Hexagon index number.
- 0 in case of error.

Type: [UInt64](../../data_types/int_uint.md).

**Example**
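
A short usage sketch; the coordinates and resolution below are illustrative and not taken from this page:

```sql
SELECT geoToH3(37.79506683, 55.71290588, 15) AS h3Index;
```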

@@ -600,6 +600,39 @@ SELECT replicate(1, ['a', 'b', 'c'])
└───────────────────────────────┘
```

## filesystemAvailable {#function-filesystemavailable}

Returns the amount of free space remaining in the file system where the database files are located. See the description of the [path](../../operations/server_settings/settings.md#server_settings-path) server configuration parameter.

```
filesystemAvailable()
```

**Returned value**

- The amount of free space.

Type: [UInt64](../../data_types/int_uint.md).

**Example**

```sql
SELECT filesystemAvailable() AS "Free space", toTypeName(filesystemAvailable()) AS "Type"
```
```text
┌──Free space─┬─Type───┐
│ 18152624128 │ UInt64 │
└─────────────┴────────┘
```

## filesystemCapacity

Returns the capacity of the disk.
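
By analogy with `filesystemAvailable`, a minimal sketch; the returned number is machine-dependent, and the query is not taken from this page:

```sql
SELECT filesystemCapacity() AS "Capacity", toTypeName(filesystemCapacity()) AS "Type"
```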

## finalizeAggregation {#function-finalizeaggregation}

Takes the state of an aggregate function. Returns the result of aggregation (the finalized state).
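
A small sketch of finalizing a state; the query is illustrative and not taken from this page:

```sql
-- The inner query produces an AggregateFunction(uniq, UInt64) state;
-- finalizeAggregation converts it into the final value (5 here).
SELECT finalizeAggregation(state) FROM (SELECT uniqState(number) AS state FROM numbers(5));
```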

## runningAccumulate {#function-runningaccumulate}

Takes states of an aggregate function as input and returns a column whose values are the result of merging these states for the rows of the block, from the first row up to the current one. For example, it takes the state of an aggregate function (e.g. `runningAccumulate(uniqState(UserID))`) and, for each row of the block, returns the value of the aggregate function after merging the states for all previous rows and the current row. Thus, the result depends on how the data is split into blocks and on the order of the data within a block.
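
A sketch of the running-merge behaviour described above; the query is illustrative and not taken from this page:

```sql
-- Each output row merges the sum states of all previous rows and the current one,
-- producing a running total: 0, 1, 3, 6, 10, ...
SELECT k, runningAccumulate(sum_state) AS running_total
FROM (SELECT number AS k, sumState(number) AS sum_state FROM numbers(10) GROUP BY k ORDER BY k);
```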

@@ -637,7 +637,7 @@ SELECT replicate(1, ['a', 'b', 'c'])

Returns the capacity of the disk in bytes. This information is evaluated using the `path` setting from the configuration file.

## finalizeAggregation
## finalizeAggregation {#function-finalizeaggregation}

Takes the state of an aggregate function. Returns the result of aggregation (the finalized state).

@@ -4,41 +4,39 @@

#include <sstream>
#include <Poco/Logger.h>
#include <Poco/Message.h>
#include <Poco/Version.h>
#include <Core/SettingsCommon.h>
#include <Common/CurrentThread.h>

#ifndef QUERY_PREVIEW_LENGTH
#define QUERY_PREVIEW_LENGTH 160
#endif

using Poco::Logger;
using Poco::Message;
using DB::LogsLevel;
using DB::CurrentThread;

/// Logs a message to a specified logger with that level.

#define LOG_TRACE(logger, message) do { \
    if ((logger)->trace()) {\
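/// LOG_SIMPLE (below) emits the message when either the logger itself accepts the given
/// priority, or the current thread's query group has requested client-side logs at that
/// level or higher; in both cases the message is formatted with a std::stringstream and
/// written to the logger's channel.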
#define LOG_SIMPLE(logger, message, priority, PRIORITY) do \
{ \
    const bool is_clients_log = (CurrentThread::getGroup() != nullptr) && \
        (CurrentThread::getGroup()->client_logs_level >= (priority)); \
    if ((logger)->is((PRIORITY)) || is_clients_log) { \
        std::stringstream oss_internal_rare; \
        oss_internal_rare << message; \
        (logger)->trace(oss_internal_rare.str());}} while(false)
        if (auto channel = (logger)->getChannel()) { \
            channel->log(Message((logger)->name(), oss_internal_rare.str(), (PRIORITY))); \
        } \
    } \
} while (false)

#define LOG_DEBUG(logger, message) do { \
    if ((logger)->debug()) {\
    std::stringstream oss_internal_rare; \
    oss_internal_rare << message; \
    (logger)->debug(oss_internal_rare.str());}} while(false)

#define LOG_INFO(logger, message) do { \
    if ((logger)->information()) {\
    std::stringstream oss_internal_rare; \
    oss_internal_rare << message; \
    (logger)->information(oss_internal_rare.str());}} while(false)

#define LOG_TRACE(logger, message) LOG_SIMPLE(logger, message, LogsLevel::trace, Message::PRIO_TRACE)
#define LOG_DEBUG(logger, message) LOG_SIMPLE(logger, message, LogsLevel::debug, Message::PRIO_DEBUG)
#define LOG_INFO(logger, message) LOG_SIMPLE(logger, message, LogsLevel::information, Message::PRIO_INFORMATION)
#define LOG_WARNING(logger, message) LOG_SIMPLE(logger, message, LogsLevel::warning, Message::PRIO_WARNING)
#define LOG_ERROR(logger, message) LOG_SIMPLE(logger, message, LogsLevel::error, Message::PRIO_ERROR)

#define LOG_WARNING(logger, message) do { \
    if ((logger)->warning()) {\
    std::stringstream oss_internal_rare; \
    oss_internal_rare << message; \
    (logger)->warning(oss_internal_rare.str());}} while(false)

#define LOG_ERROR(logger, message) do { \
    if ((logger)->error()) {\
    std::stringstream oss_internal_rare; \
    oss_internal_rare << message; \
    (logger)->error(oss_internal_rare.str());}} while(false)