diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt index 167e5f60e65..1ea46ca8157 100644 --- a/dbms/CMakeLists.txt +++ b/dbms/CMakeLists.txt @@ -38,7 +38,8 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") endif () if (USE_DEBUG_HELPERS) - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -include ${ClickHouse_SOURCE_DIR}/libs/libcommon/include/common/iostream_debug_helpers.h") + set (INCLUDE_DEBUG_HELPERS "-include ${ClickHouse_SOURCE_DIR}/libs/libcommon/include/common/iostream_debug_helpers.h") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}") endif () if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") diff --git a/dbms/cmake/version.cmake b/dbms/cmake/version.cmake index 217e5bd7d0b..da0883529c5 100644 --- a/dbms/cmake/version.cmake +++ b/dbms/cmake/version.cmake @@ -2,10 +2,10 @@ set(VERSION_REVISION 54407 CACHE STRING "") set(VERSION_MAJOR 18 CACHE STRING "") set(VERSION_MINOR 12 CACHE STRING "") -set(VERSION_PATCH 2 CACHE STRING "") -set(VERSION_GITHASH d12c1b02bc50119d67db2690c6bc7aeeae9d55ef CACHE STRING "") -set(VERSION_DESCRIBE v18.12.2-testing CACHE STRING "") -set(VERSION_STRING 18.12.2 CACHE STRING "") +set(VERSION_PATCH 5 CACHE STRING "") +set(VERSION_GITHASH d8c528ea3973dbcfb68227fc0eff0feffa399d3d CACHE STRING "") +set(VERSION_DESCRIBE v18.12.5-testing CACHE STRING "") +set(VERSION_STRING 18.12.5 CACHE STRING "") # end of autochange set(VERSION_EXTRA "" CACHE STRING "") diff --git a/dbms/programs/clang/CMakeLists.txt b/dbms/programs/clang/CMakeLists.txt index 4844cb37c93..802ec00168c 100644 --- a/dbms/programs/clang/CMakeLists.txt +++ b/dbms/programs/clang/CMakeLists.txt @@ -13,18 +13,26 @@ if (CLICKHOUSE_SPLIT_BINARY) endif () endif () -set(TMP_HEADERS_DIR "${CMAKE_CURRENT_BINARY_DIR}/${INTERNAL_COMPILER_HEADERS_RELATIVE}") +set (TMP_HEADERS_DIR "${CMAKE_CURRENT_BINARY_DIR}/${INTERNAL_COMPILER_HEADERS_RELATIVE}") # Make and install empty dir for debian package if compiler disabled -add_custom_target(make-headers-directory ALL COMMAND ${CMAKE_COMMAND} -E make_directory ${TMP_HEADERS_DIR}) -install(DIRECTORY ${TMP_HEADERS_DIR} DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/clickhouse/${INTERNAL_COMPILER_HEADERS_DIR} COMPONENT clickhouse) +add_custom_target (make-headers-directory ALL COMMAND ${CMAKE_COMMAND} -E make_directory ${TMP_HEADERS_DIR}) +install (DIRECTORY ${TMP_HEADERS_DIR} DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/clickhouse/${INTERNAL_COMPILER_HEADERS_DIR} COMPONENT clickhouse) # TODO: fix on macos copy_headers.sh: sed --posix -if (USE_EMBEDDED_COMPILER AND NOT APPLE) - add_custom_target(copy-headers ALL env CLANG=${CMAKE_CURRENT_BINARY_DIR}/../clickhouse-clang BUILD_PATH=${ClickHouse_BINARY_DIR} DESTDIR=${ClickHouse_SOURCE_DIR} ${ClickHouse_SOURCE_DIR}/copy_headers.sh ${ClickHouse_SOURCE_DIR} ${TMP_HEADERS_DIR} DEPENDS clickhouse-clang WORKING_DIRECTORY ${ClickHouse_SOURCE_DIR} SOURCES ${ClickHouse_SOURCE_DIR}/copy_headers.sh) + +if (USE_EMBEDDED_COMPILER) + set (COPY_HEADERS_COMPILER "${CMAKE_CURRENT_BINARY_DIR}/../${INTERNAL_COMPILER_EXECUTABLE}") + set (COPY_HEADERS_DEPENDS clickhouse-clang) +elseif (EXISTS ${INTERNAL_COMPILER_BIN_ROOT}${INTERNAL_COMPILER_EXECUTABLE}) + set (COPY_HEADERS_COMPILER "${INTERNAL_COMPILER_BIN_ROOT}${INTERNAL_COMPILER_EXECUTABLE}") +endif () + +if (COPY_HEADERS_COMPILER AND NOT APPLE) + add_custom_target (copy-headers ALL env CLANG=${COPY_HEADERS_COMPILER} BUILD_PATH=${ClickHouse_BINARY_DIR} DESTDIR=${ClickHouse_SOURCE_DIR} ${ClickHouse_SOURCE_DIR}/copy_headers.sh ${ClickHouse_SOURCE_DIR} ${TMP_HEADERS_DIR} DEPENDS 
${COPY_HEADERS_DEPENDS} WORKING_DIRECTORY ${ClickHouse_SOURCE_DIR} SOURCES ${ClickHouse_SOURCE_DIR}/copy_headers.sh) if (USE_INTERNAL_LLVM_LIBRARY) - set(CLANG_HEADERS_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm/clang/lib/Headers") - set(CLANG_HEADERS_DEST "${TMP_HEADERS_DIR}/usr/local/lib/clang/${LLVM_VERSION}/include") # original: ${LLVM_LIBRARY_OUTPUT_INTDIR}/clang/${CLANG_VERSION}/include - add_custom_target(copy-headers-clang ALL ${CMAKE_COMMAND} -E make_directory ${CLANG_HEADERS_DEST} && ${CMAKE_COMMAND} -E copy_if_different ${CLANG_HEADERS_DIR}/* ${CLANG_HEADERS_DEST} ) - add_dependencies(copy-headers copy-headers-clang) + set (CLANG_HEADERS_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm/clang/lib/Headers") + set (CLANG_HEADERS_DEST "${TMP_HEADERS_DIR}/usr/local/lib/clang/${LLVM_VERSION}/include") # original: ${LLVM_LIBRARY_OUTPUT_INTDIR}/clang/${CLANG_VERSION}/include + add_custom_target (copy-headers-clang ALL ${CMAKE_COMMAND} -E make_directory ${CLANG_HEADERS_DEST} && ${CMAKE_COMMAND} -E copy_if_different ${CLANG_HEADERS_DIR}/* ${CLANG_HEADERS_DEST} ) + add_dependencies (copy-headers copy-headers-clang) endif () endif () diff --git a/dbms/src/Common/Arena.h b/dbms/src/Common/Arena.h index c3c4fdc95e7..a6c98324ea8 100644 --- a/dbms/src/Common/Arena.h +++ b/dbms/src/Common/Arena.h @@ -220,7 +220,7 @@ public: { char * res = alloc(new_size); if (old_data) - memcpySmallAllowReadWriteOverflow15(res, old_data, old_size); + memcpy(res, old_data, old_size); return res; } @@ -228,7 +228,7 @@ public: { char * res = alignedAlloc(new_size, alignment); if (old_data) - memcpySmallAllowReadWriteOverflow15(res, old_data, old_size); + memcpy(res, old_data, old_size); return res; } @@ -236,14 +236,14 @@ public: const char * insert(const char * data, size_t size) { char * res = alloc(size); - memcpySmallAllowReadWriteOverflow15(res, data, size); + memcpy(res, data, size); return res; } const char * alignedInsert(const char * data, size_t size, size_t alignment) { char * res = alignedAlloc(size, alignment); - memcpySmallAllowReadWriteOverflow15(res, data, size); + memcpy(res, data, size); return res; } diff --git a/dbms/src/Common/CurrentThread.cpp b/dbms/src/Common/CurrentThread.cpp index 3b484167b6e..3b635d8af3e 100644 --- a/dbms/src/Common/CurrentThread.cpp +++ b/dbms/src/Common/CurrentThread.cpp @@ -21,6 +21,7 @@ namespace ErrorCodes thread_local ThreadStatusPtr current_thread = ThreadStatus::create(); thread_local CurrentThread::ThreadScopePtr current_thread_scope = std::make_shared(); + void CurrentThread::updatePerformanceCounters() { get()->updatePerformanceCounters(); diff --git a/dbms/src/Common/CurrentThread.h b/dbms/src/Common/CurrentThread.h index e6547e535f6..21b7f27a142 100644 --- a/dbms/src/Common/CurrentThread.h +++ b/dbms/src/Common/CurrentThread.h @@ -5,6 +5,7 @@ #include + namespace ProfileEvents { class Counters; @@ -21,10 +22,15 @@ class QueryStatus; struct Progress; class InternalTextLogsQueue; + +/** Collection of static methods to work with thread-local objects. + * Allows to attach and detach query/process (thread group) to a thread + * (to calculate query-related metrics and to allow to obtain query-related data from a thread). + * Thread will propagate it's metrics to attached query. 
+ */ class CurrentThread { public: - /// Handler to current thread static ThreadStatusPtr get(); diff --git a/dbms/src/Common/ThreadProfileEvents.h b/dbms/src/Common/ThreadProfileEvents.h index 951125add4e..233d4eec047 100644 --- a/dbms/src/Common/ThreadProfileEvents.h +++ b/dbms/src/Common/ThreadProfileEvents.h @@ -13,6 +13,9 @@ struct taskstats {}; #endif +/** Implement ProfileEvents with statistics about resource consumption of the current thread. + */ + namespace ProfileEvents { extern const Event RealTimeMicroseconds; diff --git a/dbms/src/Common/ThreadStatus.h b/dbms/src/Common/ThreadStatus.h index 7e5ecaea409..822e1931447 100644 --- a/dbms/src/Common/ThreadStatus.h +++ b/dbms/src/Common/ThreadStatus.h @@ -34,6 +34,14 @@ using InternalTextLogsQueuePtr = std::shared_ptr; using InternalTextLogsQueueWeakPtr = std::weak_ptr; +/** Thread group is a collection of threads dedicated to single task + * (query or other process like background merge). + * + * ProfileEvents (counters) from a thread are propagated to thread group. + * + * Create via CurrentThread::initializeQuery (for queries) or directly (for various background tasks). + * Use via CurrentThread::getGroup. + */ class ThreadGroupStatus { public: @@ -60,10 +68,14 @@ public: using ThreadGroupStatusPtr = std::shared_ptr; +/** Encapsulates all per-thread info (ProfileEvents, MemoryTracker, query_id, query context, etc.). + * Used inside thread-local variable. See variables in CurrentThread.cpp + * + * This object should be used only via "CurrentThread", see CurrentThread.h + */ class ThreadStatus : public std::enable_shared_from_this { public: - /// Poco's thread number (the same number is used in logs) UInt32 thread_number = 0; /// Linux's PID (or TGID) (the same id is shown by ps util) @@ -78,7 +90,6 @@ public: Progress progress_out; public: - static ThreadStatusPtr create(); ThreadGroupStatusPtr getThreadGroup() const diff --git a/dbms/src/Core/Field.h b/dbms/src/Core/Field.h index f658e606c5a..ca8bd2fb234 100644 --- a/dbms/src/Core/Field.h +++ b/dbms/src/Core/Field.h @@ -421,8 +421,7 @@ private: void createConcrete(T && x) { using JustT = std::decay_t; - JustT * MAY_ALIAS ptr = reinterpret_cast(&storage); - new (ptr) JustT(std::forward(x)); + new (&storage) JustT(std::forward(x)); which = TypeToEnum::value; } @@ -492,8 +491,7 @@ private: void create(const char * data, size_t size) { - String * MAY_ALIAS ptr = reinterpret_cast(&storage); - new (ptr) String(data, size); + new (&storage) String(data, size); which = Types::String; } diff --git a/dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp b/dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp index 72df102a57f..f630494ae2f 100644 --- a/dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp @@ -48,37 +48,34 @@ void CollapsingSortedBlockInputStream::insertRows(MutableColumns & merged_column return; } - if (count_positive == count_negative && !last_is_positive) + if (last_is_positive || count_positive != count_negative) { - /// Input rows exactly cancel out. 
- return; - } + if (count_positive <= count_negative) + { + ++merged_rows; + for (size_t i = 0; i < num_columns; ++i) + merged_columns[i]->insertFrom(*(*first_negative.columns)[i], first_negative.row_num); - if (count_positive <= count_negative) - { - ++merged_rows; - for (size_t i = 0; i < num_columns; ++i) - merged_columns[i]->insertFrom(*(*first_negative.columns)[i], first_negative.row_num); + if (out_row_sources_buf) + current_row_sources[first_negative_pos].setSkipFlag(false); + } - if (out_row_sources_buf) - current_row_sources[first_negative_pos].setSkipFlag(false); - } + if (count_positive >= count_negative) + { + ++merged_rows; + for (size_t i = 0; i < num_columns; ++i) + merged_columns[i]->insertFrom(*(*last_positive.columns)[i], last_positive.row_num); - if (count_positive >= count_negative) - { - ++merged_rows; - for (size_t i = 0; i < num_columns; ++i) - merged_columns[i]->insertFrom(*(*last_positive.columns)[i], last_positive.row_num); + if (out_row_sources_buf) + current_row_sources[last_positive_pos].setSkipFlag(false); + } - if (out_row_sources_buf) - current_row_sources[last_positive_pos].setSkipFlag(false); - } - - if (!(count_positive == count_negative || count_positive + 1 == count_negative || count_positive == count_negative + 1)) - { - if (count_incorrect_data < MAX_ERROR_MESSAGES) - reportIncorrectData(); - ++count_incorrect_data; + if (!(count_positive == count_negative || count_positive + 1 == count_negative || count_positive == count_negative + 1)) + { + if (count_incorrect_data < MAX_ERROR_MESSAGES) + reportIncorrectData(); + ++count_incorrect_data; + } } if (out_row_sources_buf) diff --git a/dbms/src/DataStreams/RollupBlockInputStream.cpp b/dbms/src/DataStreams/RollupBlockInputStream.cpp new file mode 100644 index 00000000000..389dbf5bec5 --- /dev/null +++ b/dbms/src/DataStreams/RollupBlockInputStream.cpp @@ -0,0 +1,73 @@ +#include +#include +#include +#include +#include + +namespace DB +{ + +static void finalize(Block & block) +{ + for (size_t i = 0; i < block.columns(); ++i) + { + ColumnWithTypeAndName & current = block.getByPosition(i); + const DataTypeAggregateFunction * unfinalized_type = typeid_cast(current.type.get()); + + if (unfinalized_type) + { + current.type = unfinalized_type->getReturnType(); + if (current.column) + current.column = typeid_cast(*current.column).convertToValues(); + } + } +} + +RollupBlockInputStream::RollupBlockInputStream( + const BlockInputStreamPtr & input_, const Aggregator::Params & params_) : aggregator(params_), + keys(params_.keys) +{ + children.push_back(input_); + Aggregator::CancellationHook hook = [this]() { return this->isCancelled(); }; + aggregator.setCancellationHook(hook); +} + + +Block RollupBlockInputStream::getHeader() const +{ + Block res = children.at(0)->getHeader(); + finalize(res); + return res; +} + + +Block RollupBlockInputStream::readImpl() +{ + /** After reading a block from input stream, + * we will subsequently roll it up on next iterations of 'readImpl' + * by zeroing out every column one-by-one and re-merging a block. 
+ */ + + if (current_key >= 0) + { + auto & current = rollup_block.getByPosition(keys[current_key]); + current.column = current.column->cloneEmpty()->cloneResized(rollup_block.rows()); + --current_key; + + BlocksList rollup_blocks = { rollup_block }; + rollup_block = aggregator.mergeBlocks(rollup_blocks, false); + + Block finalized = rollup_block; + finalize(finalized); + return finalized; + } + + Block block = children[0]->read(); + current_key = keys.size() - 1; + + rollup_block = block; + finalize(block); + + return block; +} +} diff --git a/dbms/src/DataStreams/RollupBlockInputStream.h b/dbms/src/DataStreams/RollupBlockInputStream.h new file mode 100644 index 00000000000..c3483db7f82 --- /dev/null +++ b/dbms/src/DataStreams/RollupBlockInputStream.h @@ -0,0 +1,41 @@ +#pragma once + +#include +#include +#include +#include + + +namespace DB +{ + +class ExpressionActions; + + +/** Takes blocks after grouping, with non-finalized aggregate functions. + * Calculates subtotals and grand totals values for a set of columns. + */ +class RollupBlockInputStream : public IProfilingBlockInputStream +{ +private: + using ExpressionActionsPtr = std::shared_ptr; + using AggregateColumns = std::vector; +public: + RollupBlockInputStream( + const BlockInputStreamPtr & input_, const Aggregator::Params & params_); + + String getName() const override { return "Rollup"; } + + Block getHeader() const override; + +protected: + Block readImpl() override; + +private: + Aggregator aggregator; + ColumnNumbers keys; + ssize_t current_key = -1; + Block rollup_block; +}; + +} diff --git a/dbms/src/DataStreams/TotalsHavingBlockInputStream.cpp b/dbms/src/DataStreams/TotalsHavingBlockInputStream.cpp index c76c9f4e9d1..e964065822c 100644 --- a/dbms/src/DataStreams/TotalsHavingBlockInputStream.cpp +++ b/dbms/src/DataStreams/TotalsHavingBlockInputStream.cpp @@ -14,10 +14,10 @@ namespace DB TotalsHavingBlockInputStream::TotalsHavingBlockInputStream( const BlockInputStreamPtr & input_, bool overflow_row_, const ExpressionActionsPtr & expression_, - const std::string & filter_column_, TotalsMode totals_mode_, double auto_include_threshold_) + const std::string & filter_column_, TotalsMode totals_mode_, double auto_include_threshold_, bool final_) : overflow_row(overflow_row_), expression(expression_), filter_column_name(filter_column_), totals_mode(totals_mode_), - auto_include_threshold(auto_include_threshold_) + auto_include_threshold(auto_include_threshold_), final(final_) { children.push_back(input_); @@ -100,7 +100,8 @@ Block TotalsHavingBlockInputStream::getTotals() Block TotalsHavingBlockInputStream::getHeader() const { Block res = children.at(0)->getHeader(); - finalize(res); + if (final) + finalize(res); if (expression) expression->execute(res); return res; @@ -127,7 +128,8 @@ Block TotalsHavingBlockInputStream::readImpl() return finalized; finalized = block; - finalize(finalized); + if (final) + finalize(finalized); total_keys += finalized.rows(); diff --git a/dbms/src/DataStreams/TotalsHavingBlockInputStream.h b/dbms/src/DataStreams/TotalsHavingBlockInputStream.h index 6c62b7a2544..31e72c6f4d5 100644 --- a/dbms/src/DataStreams/TotalsHavingBlockInputStream.h +++ b/dbms/src/DataStreams/TotalsHavingBlockInputStream.h @@ -26,7 +26,7 @@ public: TotalsHavingBlockInputStream( const BlockInputStreamPtr & input_, bool overflow_row_, const ExpressionActionsPtr & expression_, - const std::string & filter_column_, TotalsMode totals_mode_, double auto_include_threshold_); + const std::string & filter_column_, TotalsMode 
totals_mode_, double auto_include_threshold_, bool final_); String getName() const override { return "TotalsHaving"; } @@ -43,6 +43,7 @@ private: String filter_column_name; TotalsMode totals_mode; double auto_include_threshold; + bool final; size_t passed_keys = 0; size_t total_keys = 0; diff --git a/dbms/src/Dictionaries/CacheDictionary.cpp b/dbms/src/Dictionaries/CacheDictionary.cpp index 6b378faf928..6e5080ec2d5 100644 --- a/dbms/src/Dictionaries/CacheDictionary.cpp +++ b/dbms/src/Dictionaries/CacheDictionary.cpp @@ -432,7 +432,7 @@ void CacheDictionary::createAttributes() if (attribute.hierarchical) { - hierarchical_attribute = &attributes.back(); + hierarchical_attribute = & attributes.back(); if (hierarchical_attribute->type != AttributeUnderlyingType::UInt64) throw Exception{name + ": hierarchical attribute must be UInt64.", ErrorCodes::TYPE_MISMATCH}; diff --git a/dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp b/dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp index 5c07c5260ff..91fa37564ba 100644 --- a/dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp +++ b/dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp @@ -244,8 +244,8 @@ void ComplexKeyHashedDictionary::blockToAttributes(const Block & block) for (const auto attribute_idx : ext::range(0, attributes_size)) { - const auto &attribute_column = *attribute_column_ptrs[attribute_idx]; - auto &attribute = attributes[attribute_idx]; + const auto & attribute_column = *attribute_column_ptrs[attribute_idx]; + auto & attribute = attributes[attribute_idx]; const auto inserted = setAttributeValue(attribute, key, attribute_column[row_idx]); if (!inserted) should_rollback = true; diff --git a/dbms/src/Dictionaries/ExternalResultDescription.cpp b/dbms/src/Dictionaries/ExternalResultDescription.cpp index 18788606be7..4ac883d1c39 100644 --- a/dbms/src/Dictionaries/ExternalResultDescription.cpp +++ b/dbms/src/Dictionaries/ExternalResultDescription.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include @@ -55,6 +56,8 @@ void ExternalResultDescription::init(const Block & sample_block_) types.push_back(ValueType::Date); else if (typeid_cast(type)) types.push_back(ValueType::DateTime); + else if (typeid_cast(type)) + types.push_back(ValueType::UUID); else throw Exception{"Unsupported type " + type->getName(), ErrorCodes::UNKNOWN_TYPE}; diff --git a/dbms/src/Dictionaries/ExternalResultDescription.h b/dbms/src/Dictionaries/ExternalResultDescription.h index f3bfdf14fcc..ff9426ade57 100644 --- a/dbms/src/Dictionaries/ExternalResultDescription.h +++ b/dbms/src/Dictionaries/ExternalResultDescription.h @@ -24,7 +24,8 @@ struct ExternalResultDescription Float64, String, Date, - DateTime + DateTime, + UUID }; Block sample_block; diff --git a/dbms/src/Dictionaries/FlatDictionary.cpp b/dbms/src/Dictionaries/FlatDictionary.cpp index bca17a21f13..8884d83bbef 100644 --- a/dbms/src/Dictionaries/FlatDictionary.cpp +++ b/dbms/src/Dictionaries/FlatDictionary.cpp @@ -266,7 +266,7 @@ void FlatDictionary::createAttributes() if (attribute.hierarchical) { - hierarchical_attribute = &attributes.back(); + hierarchical_attribute = & attributes.back(); if (hierarchical_attribute->type != AttributeUnderlyingType::UInt64) throw Exception{name + ": hierarchical attribute must be UInt64.", ErrorCodes::TYPE_MISMATCH}; @@ -274,15 +274,15 @@ void FlatDictionary::createAttributes() } } -void FlatDictionary::blockToAttributes(const Block &block) +void FlatDictionary::blockToAttributes(const Block & block) { - const auto & id_column = 
*block.safeGetByPosition(0).column; + const IColumn & id_column = *block.safeGetByPosition(0).column; element_count += id_column.size(); - for (const auto attribute_idx : ext::range(0, attributes.size())) + for (const size_t attribute_idx : ext::range(0, attributes.size())) { - const auto &attribute_column = *block.safeGetByPosition(attribute_idx + 1).column; - auto &attribute = attributes[attribute_idx]; + const IColumn & attribute_column = *block.safeGetByPosition(attribute_idx + 1).column; + Attribute & attribute = attributes[attribute_idx]; for (const auto row_idx : ext::range(0, id_column.size())) setAttributeValue(attribute, id_column[row_idx].get(), attribute_column[row_idx]); diff --git a/dbms/src/Dictionaries/HashedDictionary.cpp b/dbms/src/Dictionaries/HashedDictionary.cpp index 561745e995d..2008f2a33a7 100644 --- a/dbms/src/Dictionaries/HashedDictionary.cpp +++ b/dbms/src/Dictionaries/HashedDictionary.cpp @@ -258,7 +258,7 @@ void HashedDictionary::createAttributes() if (attribute.hierarchical) { - hierarchical_attribute = &attributes.back(); + hierarchical_attribute = & attributes.back(); if (hierarchical_attribute->type != AttributeUnderlyingType::UInt64) throw Exception{name + ": hierarchical attribute must be UInt64.", ErrorCodes::TYPE_MISMATCH}; @@ -266,15 +266,15 @@ void HashedDictionary::createAttributes() } } -void HashedDictionary::blockToAttributes(const Block &block) +void HashedDictionary::blockToAttributes(const Block & block) { const auto & id_column = *block.safeGetByPosition(0).column; element_count += id_column.size(); - for (const auto attribute_idx : ext::range(0, attributes.size())) + for (const size_t attribute_idx : ext::range(0, attributes.size())) { - const auto &attribute_column = *block.safeGetByPosition(attribute_idx + 1).column; - auto &attribute = attributes[attribute_idx]; + const IColumn & attribute_column = *block.safeGetByPosition(attribute_idx + 1).column; + auto & attribute = attributes[attribute_idx]; for (const auto row_idx : ext::range(0, id_column.size())) setAttributeValue(attribute, id_column[row_idx].get(), attribute_column[row_idx]); diff --git a/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp b/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp index 89ae0d0357c..8bd4a28bb6f 100644 --- a/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp +++ b/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp @@ -135,6 +135,18 @@ namespace static_cast &>(value).value().epochTime()); break; } + case ValueType::UUID: + { + if (value.type() == Poco::MongoDB::ElementTraits::TypeId) + { + String string = static_cast &>(value).value(); + static_cast(column).getData().push_back(parse(string)); + } + else + throw Exception{"Type mismatch, expected String (UUID), got type id = " + toString(value.type()) + + " for column " + name, ErrorCodes::TYPE_MISMATCH}; + break; + } } } diff --git a/dbms/src/Dictionaries/MySQLBlockInputStream.cpp b/dbms/src/Dictionaries/MySQLBlockInputStream.cpp index c141e176ef6..250329eae2d 100644 --- a/dbms/src/Dictionaries/MySQLBlockInputStream.cpp +++ b/dbms/src/Dictionaries/MySQLBlockInputStream.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -53,6 +54,7 @@ namespace case ValueType::String: static_cast(column).insertData(value.data(), value.size()); break; case ValueType::Date: static_cast(column).insert(UInt16{value.getDate().getDayNum()}); break; case ValueType::DateTime: static_cast(column).insert(time_t{value.getDateTime()}); break; + case ValueType::UUID: 
static_cast(column).insert(parse(value.data(), value.size())); break; } } diff --git a/dbms/src/Dictionaries/ODBCBlockInputStream.cpp b/dbms/src/Dictionaries/ODBCBlockInputStream.cpp index 6ecc60c9925..d22fd1b0e83 100644 --- a/dbms/src/Dictionaries/ODBCBlockInputStream.cpp +++ b/dbms/src/Dictionaries/ODBCBlockInputStream.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include @@ -59,6 +60,8 @@ namespace case ValueType::String: static_cast(column).insert(value.convert()); break; case ValueType::Date: static_cast(column).insert(UInt16{LocalDate{value.convert()}.getDayNum()}); break; case ValueType::DateTime: static_cast(column).insert(time_t{LocalDateTime{value.convert()}}); break; + case ValueType::UUID: static_cast(column).insert(parse(value.convert())); break; + } } diff --git a/dbms/src/Functions/FunctionsArithmetic.h b/dbms/src/Functions/FunctionsArithmetic.h index 601fd1ef725..7132d342c0e 100644 --- a/dbms/src/Functions/FunctionsArithmetic.h +++ b/dbms/src/Functions/FunctionsArithmetic.h @@ -5,10 +5,12 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include @@ -16,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -1145,6 +1148,14 @@ public: DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override { + /// Special case when multiply aggregate function state + if (isAggregateMultiply(arguments[0], arguments[1])) + { + if (checkDataType(arguments[0].get())) + return arguments[0]; + return arguments[1]; + } + /// Special case when the function is plus or minus, one of arguments is Date/DateTime and another is Interval. if (auto function_builder = getFunctionForIntervalArithmetic(arguments[0], arguments[1])) { @@ -1195,8 +1206,72 @@ public: return type_res; } + bool isAggregateMultiply(const DataTypePtr & type0, const DataTypePtr & type1) const + { + if constexpr (!std::is_same_v, MultiplyImpl>) + return false; + auto is_uint_type = [](const DataTypePtr & type) + { + return checkDataType(type.get()) || checkDataType(type.get()) + || checkDataType(type.get()) || checkDataType(type.get()); + }; + return ((checkDataType(type0.get()) && is_uint_type(type1)) + || (is_uint_type(type0) && checkDataType(type1.get()))); + } + void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) override { + /// Special case when multiply aggregate function state + if (isAggregateMultiply(block.getByPosition(arguments[0]).type, block.getByPosition(arguments[1]).type)) + { + ColumnNumbers new_arguments = arguments; + if (checkDataType(block.getByPosition(new_arguments[1]).type.get())) + std::swap(new_arguments[0], new_arguments[1]); + + const ColumnAggregateFunction * column = typeid_cast(block.getByPosition(new_arguments[0]).column.get()); + IAggregateFunction * function = column->getAggregateFunction().get(); + + auto arena = std::make_shared(); + + auto column_to = ColumnAggregateFunction::create(column->getAggregateFunction(), Arenas(1, arena)); + column_to->reserve(input_rows_count); + + auto column_from = ColumnAggregateFunction::create(column->getAggregateFunction(), Arenas(1, arena)); + column_from->reserve(input_rows_count); + + for (size_t i = 0; i < input_rows_count; ++i) + { + column_to->insertDefault(); + column_from->insertFrom(column->getData()[i]); + } + + auto & vec_to = column_to->getData(); + auto & vec_from = column_from->getData(); + + UInt64 m = block.getByPosition(new_arguments[1]).column->getUInt(0); + + /// We use exponentiation by 
squaring algorithm to perform multiplying aggregate states by N in O(log(N)) operations + /// https://en.wikipedia.org/wiki/Exponentiation_by_squaring + while (m) + { + if (m % 2) + { + for (size_t i = 0; i < input_rows_count; ++i) + function->merge(vec_to[i], vec_from[i], arena.get()); + --m; + } + else + { + for (size_t i = 0; i < input_rows_count; ++i) + function->merge(vec_from[i], vec_from[i], arena.get()); + m /= 2; + } + } + + block.getByPosition(result).column = std::move(column_to); + return; + } + /// Special case when the function is plus or minus, one of arguments is Date/DateTime and another is Interval. if (auto function_builder = getFunctionForIntervalArithmetic(block.getByPosition(arguments[0]).type, block.getByPosition(arguments[1]).type)) { diff --git a/dbms/src/Interpreters/CMakeLists.txt b/dbms/src/Interpreters/CMakeLists.txt index 8d9ea789104..a01a627fc64 100644 --- a/dbms/src/Interpreters/CMakeLists.txt +++ b/dbms/src/Interpreters/CMakeLists.txt @@ -27,6 +27,7 @@ set (INTERNAL_COMPILER_CUSTOM_ROOT ON CACHE INTERNAL "") set (INTERNAL_COMPILER_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UC}} ${CXX_FLAGS_INTERNAL_COMPILER} -x c++ -march=native -fPIC -fvisibility=hidden -fno-implement-inlines -nostdinc -nostdinc++ -Wno-unused-command-line-argument -Bprefix=${PATH_SHARE}/clickhouse -isysroot=${INTERNAL_COMPILER_HEADERS_ROOT}" CACHE STRING "") # TODO: use libs from package: -nodefaultlibs -lm -lc -lgcc_s -lgcc -lc++ -lc++abi +string(REPLACE "${INCLUDE_DEBUG_HELPERS}" "" INTERNAL_COMPILER_FLAGS ${INTERNAL_COMPILER_FLAGS}) string(REPLACE "-no-pie" "" INTERNAL_COMPILER_FLAGS ${INTERNAL_COMPILER_FLAGS}) if (INTERNAL_COMPILER_NO_WARNING) string (REPLACE "-Wall" "" INTERNAL_COMPILER_FLAGS ${INTERNAL_COMPILER_FLAGS}) @@ -46,7 +47,7 @@ string (REPLACE ${ClickHouse_SOURCE_DIR} "" INTERNAL_Boost_INCLUDE_DIRS ${Boost_ string (REPLACE ${ClickHouse_SOURCE_DIR} "" INTERNAL_Poco_Foundation_INCLUDE_DIR ${Poco_Foundation_INCLUDE_DIR}) string (REPLACE ${ClickHouse_SOURCE_DIR} "" INTERNAL_Poco_Util_INCLUDE_DIR ${Poco_Util_INCLUDE_DIR}) -message (STATUS "Using internal=${USE_INTERNAL_LLVM_LIBRARY} compiler=${USE_EMBEDDED_COMPILER}: headers=${INTERNAL_COMPILER_HEADERS} : ${INTERNAL_COMPILER_ENV} ${INTERNAL_COMPILER_EXECUTABLE} ${INTERNAL_COMPILER_FLAGS}; ${INTERNAL_LINKER_EXECUTABLE}") +message (STATUS "Using internal=${USE_INTERNAL_LLVM_LIBRARY} compiler=${USE_EMBEDDED_COMPILER}: headers=${INTERNAL_COMPILER_HEADERS} : ${INTERNAL_COMPILER_ENV} ${INTERNAL_COMPILER_BIN_ROOT}${INTERNAL_COMPILER_EXECUTABLE} ${INTERNAL_COMPILER_FLAGS}; ${INTERNAL_LINKER_EXECUTABLE}") set (CONFIG_COMPILE ${ClickHouse_BINARY_DIR}/dbms/src/Interpreters/config_compile.h) configure_file (${ClickHouse_SOURCE_DIR}/dbms/src/Interpreters/config_compile.h.in ${CONFIG_COMPILE}) diff --git a/dbms/src/Interpreters/Compiler.cpp b/dbms/src/Interpreters/Compiler.cpp index 2aea6a40f69..1bf03f00ffd 100644 --- a/dbms/src/Interpreters/Compiler.cpp +++ b/dbms/src/Interpreters/Compiler.cpp @@ -244,23 +244,29 @@ void Compiler::compile( /// echo | g++ -x c++ -E -Wp,-v - " -isystem " << compiler_headers_root << "/usr/include/c++/*" + #if defined(CMAKE_LIBRARY_ARCHITECTURE) " -isystem " << compiler_headers_root << "/usr/include/" CMAKE_LIBRARY_ARCHITECTURE "/c++/*" + #endif " -isystem " << compiler_headers_root << "/usr/include/c++/*/backward" " -isystem " << compiler_headers_root << "/usr/include/clang/*/include" /// if compiler is clang (from package) " -isystem " << compiler_headers_root << 
"/usr/local/lib/clang/*/include" /// if clang installed manually " -isystem " << compiler_headers_root << "/usr/lib/clang/*/include" /// if clang build from submodules + #if defined(CMAKE_LIBRARY_ARCHITECTURE) " -isystem " << compiler_headers_root << "/usr/lib/gcc/" CMAKE_LIBRARY_ARCHITECTURE "/*/include-fixed" " -isystem " << compiler_headers_root << "/usr/lib/gcc/" CMAKE_LIBRARY_ARCHITECTURE "/*/include" + #endif " -isystem " << compiler_headers_root << "/usr/local/include" /// if something installed manually + #if defined(CMAKE_LIBRARY_ARCHITECTURE) " -isystem " << compiler_headers_root << "/usr/include/" CMAKE_LIBRARY_ARCHITECTURE + #endif " -isystem " << compiler_headers_root << "/usr/include" #endif " -I " << compiler_headers << "/dbms/src/" - " -I " << compiler_headers << "/contrib/cityhash102/include/" - " -I " << compiler_headers << "/contrib/libpcg-random/include/" - " -I " << compiler_headers << INTERNAL_DOUBLE_CONVERSION_INCLUDE_DIR - " -I " << compiler_headers << INTERNAL_Poco_Foundation_INCLUDE_DIR - " -I " << compiler_headers << INTERNAL_Boost_INCLUDE_DIRS + " -isystem " << compiler_headers << "/contrib/cityhash102/include/" + " -isystem " << compiler_headers << "/contrib/libpcg-random/include/" + " -isystem " << compiler_headers << INTERNAL_DOUBLE_CONVERSION_INCLUDE_DIR + " -isystem " << compiler_headers << INTERNAL_Poco_Foundation_INCLUDE_DIR + " -isystem " << compiler_headers << INTERNAL_Boost_INCLUDE_DIRS " -I " << compiler_headers << "/libs/libcommon/include/" " " << additional_compiler_flags << " -shared -o " << so_tmp_file_path << " " << cpp_file_path diff --git a/dbms/src/Interpreters/ExpressionActions.cpp b/dbms/src/Interpreters/ExpressionActions.cpp index 2158784a81b..48d703a69da 100644 --- a/dbms/src/Interpreters/ExpressionActions.cpp +++ b/dbms/src/Interpreters/ExpressionActions.cpp @@ -133,6 +133,14 @@ ExpressionAction ExpressionAction::project(const Names & projected_columns_) return a; } +ExpressionAction ExpressionAction::addAliases(const NamesWithAliases & aliased_columns_) +{ + ExpressionAction a; + a.type = ADD_ALIASES; + a.projection = aliased_columns_; + return a; +} + ExpressionAction ExpressionAction::arrayJoin(const NameSet & array_joined_columns, bool array_join_is_left, const Context & context) { if (array_joined_columns.empty()) @@ -256,6 +264,8 @@ void ExpressionAction::prepare(Block & sample_block) const std::string & name = projection[i].first; const std::string & alias = projection[i].second; ColumnWithTypeAndName column = sample_block.getByName(name); + if (column.column) + column.column = (*std::move(column.column)).mutate(); if (alias != "") column.name = alias; new_block.insert(std::move(column)); @@ -265,6 +275,19 @@ void ExpressionAction::prepare(Block & sample_block) break; } + case ADD_ALIASES: + { + for (size_t i = 0; i < projection.size(); ++i) + { + const std::string & name = projection[i].first; + const std::string & alias = projection[i].second; + const ColumnWithTypeAndName & column = sample_block.getByName(name); + if (alias != "" && !sample_block.has(alias)) + sample_block.insert({column.column, column.type, alias}); + } + break; + } + case REMOVE_COLUMN: { sample_block.erase(source_name); @@ -438,6 +461,8 @@ void ExpressionAction::execute(Block & block, std::unordered_map join_, const Names & join_key_names_left, const NamesAndTypesList & columns_added_by_join_); diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.cpp b/dbms/src/Interpreters/ExpressionAnalyzer.cpp index 0bd7efaded9..ee15c1ee7d9 100644 --- 
a/dbms/src/Interpreters/ExpressionAnalyzer.cpp +++ b/dbms/src/Interpreters/ExpressionAnalyzer.cpp @@ -2894,7 +2894,7 @@ void ExpressionAnalyzer::getActionsBeforeAggregation(const ASTPtr & ast, Express } -ExpressionActionsPtr ExpressionAnalyzer::getActions(bool project_result) +ExpressionActionsPtr ExpressionAnalyzer::getActions(bool add_aliases, bool project_result) { ExpressionActionsPtr actions = std::make_shared(source_columns, settings); NamesWithAliases result_columns; @@ -2911,7 +2911,7 @@ ExpressionActionsPtr ExpressionAnalyzer::getActions(bool project_result) { std::string name = asts[i]->getColumnName(); std::string alias; - if (project_result) + if (add_aliases) alias = asts[i]->getAliasOrColumnName(); else alias = name; @@ -2920,11 +2920,15 @@ ExpressionActionsPtr ExpressionAnalyzer::getActions(bool project_result) getRootActions(asts[i], false, false, actions); } - if (project_result) + if (add_aliases) { - actions->add(ExpressionAction::project(result_columns)); + if (project_result) + actions->add(ExpressionAction::project(result_columns)); + else + actions->add(ExpressionAction::addAliases(result_columns)); } - else + + if (!(add_aliases && project_result)) { /// We will not delete the original columns. for (const auto & column_name_type : source_columns) diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.h b/dbms/src/Interpreters/ExpressionAnalyzer.h index cf056698379..4b68d647040 100644 --- a/dbms/src/Interpreters/ExpressionAnalyzer.h +++ b/dbms/src/Interpreters/ExpressionAnalyzer.h @@ -155,9 +155,10 @@ public: void appendProjectResult(ExpressionActionsChain & chain) const; /// If `ast` is not a SELECT query, just gets all the actions to evaluate the expression. - /// If project_result, only the calculated values in the desired order, renamed to aliases, remain in the output block. + /// If add_aliases, only the calculated values in the desired order and add aliases. + /// If also project_result, than only aliases remain in the output block. /// Otherwise, only temporary columns will be deleted from the block. - ExpressionActionsPtr getActions(bool project_result); + ExpressionActionsPtr getActions(bool add_aliases, bool project_result = true); /// Actions that can be performed on an empty block: adding constants and applying functions that depend only on constants. /// Does not execute subqueries. 
diff --git a/dbms/src/Interpreters/ExpressionJIT.cpp b/dbms/src/Interpreters/ExpressionJIT.cpp index a6dab436f47..edeaca34a9a 100644 --- a/dbms/src/Interpreters/ExpressionJIT.cpp +++ b/dbms/src/Interpreters/ExpressionJIT.cpp @@ -594,6 +594,11 @@ void compileFunctions(ExpressionActions::Actions & actions, const Names & output current_dependents[proj.first].emplace(); break; + case ExpressionAction::ADD_ALIASES: + for (const auto & proj : actions[i].projection) + current_dependents[proj.first].emplace(); + break; + case ExpressionAction::ADD_COLUMN: case ExpressionAction::COPY_COLUMN: case ExpressionAction::ARRAY_JOIN: diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.cpp b/dbms/src/Interpreters/InterpreterSelectQuery.cpp index 9333d6d8cf4..686d30dfb7a 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.cpp +++ b/dbms/src/Interpreters/InterpreterSelectQuery.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -479,7 +480,7 @@ void InterpreterSelectQuery::executeImpl(Pipeline & pipeline, const BlockInputSt bool aggregate_final = expressions.need_aggregate && to_stage > QueryProcessingStage::WithMergeableState && - !query.group_by_with_totals; + !query.group_by_with_totals && !query.group_by_with_rollup; if (expressions.first_stage) { @@ -535,7 +536,13 @@ void InterpreterSelectQuery::executeImpl(Pipeline & pipeline, const BlockInputSt executeMergeAggregated(pipeline, aggregate_overflow_row, aggregate_final); if (!aggregate_final) - executeTotalsAndHaving(pipeline, expressions.has_having, expressions.before_having, aggregate_overflow_row); + { + if (query.group_by_with_totals) + executeTotalsAndHaving(pipeline, expressions.has_having, expressions.before_having, aggregate_overflow_row, !query.group_by_with_rollup); + + if (query.group_by_with_rollup) + executeRollup(pipeline); + } else if (expressions.has_having) executeHaving(pipeline, expressions.before_having); @@ -549,7 +556,10 @@ void InterpreterSelectQuery::executeImpl(Pipeline & pipeline, const BlockInputSt need_second_distinct_pass = query.distinct && pipeline.hasMoreThanOneStream(); if (query.group_by_with_totals && !aggregate_final) - executeTotalsAndHaving(pipeline, false, nullptr, aggregate_overflow_row); + executeTotalsAndHaving(pipeline, false, nullptr, aggregate_overflow_row, !query.group_by_with_rollup); + + if (query.group_by_with_rollup && !aggregate_final) + executeRollup(pipeline); } if (expressions.has_order_by) @@ -627,6 +637,9 @@ static void getLimitLengthAndOffset(ASTSelectQuery & query, size_t & length, siz void InterpreterSelectQuery::executeFetchColumns( QueryProcessingStage::Enum processing_stage, Pipeline & pipeline, const PrewhereInfoPtr & prewhere_info) { + + const Settings & settings = context.getSettingsRef(); + /// Actions to calculate ALIAS if required. ExpressionActionsPtr alias_actions; /// Are ALIAS columns required for query execution? @@ -647,26 +660,108 @@ void InterpreterSelectQuery::executeFetchColumns( if (alias_columns_required) { + /// Columns required for prewhere actions. + NameSet required_prewhere_columns; + /// Columns required for prewhere actions which are aliases in storage. 
+ NameSet required_prewhere_aliases; + Block prewhere_actions_result; + if (prewhere_info) + { + auto required_columns = prewhere_info->prewhere_actions->getRequiredColumns(); + required_prewhere_columns.insert(required_columns.begin(), required_columns.end()); + prewhere_actions_result = prewhere_info->prewhere_actions->getSampleBlock(); + } + /// We will create an expression to return all the requested columns, with the calculation of the required ALIAS columns. auto required_columns_expr_list = std::make_shared(); + /// Separate expression for columns used in prewhere. + auto required_prewhere_columns_expr_list = std::make_shared(); + + /// Columns which we will get after prewhere execution. + auto source_columns = storage->getColumns().getAllPhysical(); for (const auto & column : required_columns) { + ASTPtr column_expr; const auto default_it = column_defaults.find(column); - if (default_it != std::end(column_defaults) && default_it->second.kind == ColumnDefaultKind::Alias) - required_columns_expr_list->children.emplace_back(setAlias(default_it->second.expression->clone(), column)); + bool is_alias = default_it != std::end(column_defaults) && default_it->second.kind == ColumnDefaultKind::Alias; + if (is_alias) + column_expr = setAlias(default_it->second.expression->clone(), column); else - required_columns_expr_list->children.emplace_back(std::make_shared(column)); + column_expr = std::make_shared(column); + + if (required_prewhere_columns.count(column)) + { + required_prewhere_columns_expr_list->children.emplace_back(std::move(column_expr)); + + if (is_alias) + required_prewhere_aliases.insert(column); + } + else + required_columns_expr_list->children.emplace_back(std::move(column_expr)); } - alias_actions = ExpressionAnalyzer(required_columns_expr_list, context, storage).getActions(true); + /// Add columns which will be added by prewhere (otherwise we will remove them in project action). + for (const auto & column : prewhere_actions_result) + { + if (prewhere_info->remove_prewhere_column && column.name == prewhere_info->prewhere_column_name) + continue; + + required_columns_expr_list->children.emplace_back(std::make_shared(column.name)); + source_columns.emplace_back(column.name, column.type); + } + + alias_actions = ExpressionAnalyzer(required_columns_expr_list, context, nullptr, source_columns).getActions(true); /// The set of required columns could be added as a result of adding an action to calculate ALIAS. required_columns = alias_actions->getRequiredColumns(); + + /// Do not remove prewhere filter if it is a column which is used as alias. + if (prewhere_info && prewhere_info->remove_prewhere_column) + if (required_columns.end() + != std::find(required_columns.begin(), required_columns.end(), prewhere_info->prewhere_column_name)) + prewhere_info->remove_prewhere_column = false; + + /// Remove columns which will be added by prewhere. + for (size_t i = 0; i < required_columns.size(); ++i) + { + if (!storage->getColumns().hasPhysical(required_columns[i])) + { + std::swap(required_columns[i], required_columns.back()); + required_columns.pop_back(); + } + } + + if (prewhere_info) + { + /// Don't remove columns which are needed to be aliased. 
+ auto new_actions = std::make_shared(prewhere_info->prewhere_actions->getRequiredColumnsWithTypes(), settings); + for (const auto & action : prewhere_info->prewhere_actions->getActions()) + { + if (action.type != ExpressionAction::REMOVE_COLUMN + || required_columns.end() == std::find(required_columns.begin(), required_columns.end(), action.source_name)) + new_actions->add(action); + } + prewhere_info->prewhere_actions = std::move(new_actions); + + prewhere_info->alias_actions = ExpressionAnalyzer(required_prewhere_columns_expr_list, context, storage).getActions(true, false); + + /// Add columns required by alias actions. + auto required_aliased_columns = prewhere_info->alias_actions->getRequiredColumns(); + for (auto & column : required_aliased_columns) + if (!prewhere_actions_result.has(column)) + if (required_columns.end() == std::find(required_columns.begin(), required_columns.end(), column)) + required_columns.push_back(column); + + /// Add columns required by prewhere actions. + for (const auto & column : required_prewhere_columns) + if (required_prewhere_aliases.count(column) == 0) + if (required_columns.end() == std::find(required_columns.begin(), required_columns.end(), column)) + required_columns.push_back(column); + } } } - const Settings & settings = context.getSettingsRef(); /// Limitation on the number of columns to read. /// It's not applied in 'only_analyze' mode, because the query could be analyzed without removal of unnecessary columns. @@ -959,7 +1054,7 @@ void InterpreterSelectQuery::executeHaving(Pipeline & pipeline, const Expression } -void InterpreterSelectQuery::executeTotalsAndHaving(Pipeline & pipeline, bool has_having, const ExpressionActionsPtr & expression, bool overflow_row) +void InterpreterSelectQuery::executeTotalsAndHaving(Pipeline & pipeline, bool has_having, const ExpressionActionsPtr & expression, bool overflow_row, bool final) { executeUnion(pipeline); @@ -967,7 +1062,34 @@ void InterpreterSelectQuery::executeTotalsAndHaving(Pipeline & pipeline, bool ha pipeline.firstStream() = std::make_shared( pipeline.firstStream(), overflow_row, expression, - has_having ? query.having_expression->getColumnName() : "", settings.totals_mode, settings.totals_auto_threshold); + has_having ? query.having_expression->getColumnName() : "", settings.totals_mode, settings.totals_auto_threshold, final); +} + +void InterpreterSelectQuery::executeRollup(Pipeline & pipeline) +{ + executeUnion(pipeline); + + Names key_names; + AggregateDescriptions aggregates; + query_analyzer->getAggregateInfo(key_names, aggregates); + + Block header = pipeline.firstStream()->getHeader(); + + ColumnNumbers keys; + + for (const auto & name : key_names) + keys.push_back(header.getPositionByName(name)); + + const Settings & settings = context.getSettingsRef(); + + Aggregator::Params params(header, keys, aggregates, + false, settings.max_rows_to_group_by, settings.group_by_overflow_mode, + settings.compile ? 
&context.getCompiler() : nullptr, settings.min_count_to_compile, + SettingUInt64(0), SettingUInt64(0), + settings.max_bytes_before_external_group_by, settings.empty_result_for_aggregation_by_empty_set, + context.getTemporaryPath()); + + pipeline.firstStream() = std::make_shared(pipeline.firstStream(), params); } diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.h b/dbms/src/Interpreters/InterpreterSelectQuery.h index fa8db71c469..9c534c2846a 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.h +++ b/dbms/src/Interpreters/InterpreterSelectQuery.h @@ -177,7 +177,7 @@ private: void executeWhere(Pipeline & pipeline, const ExpressionActionsPtr & expression, bool remove_filter); void executeAggregation(Pipeline & pipeline, const ExpressionActionsPtr & expression, bool overflow_row, bool final); void executeMergeAggregated(Pipeline & pipeline, bool overflow_row, bool final); - void executeTotalsAndHaving(Pipeline & pipeline, bool has_having, const ExpressionActionsPtr & expression, bool overflow_row); + void executeTotalsAndHaving(Pipeline & pipeline, bool has_having, const ExpressionActionsPtr & expression, bool overflow_row, bool final); void executeHaving(Pipeline & pipeline, const ExpressionActionsPtr & expression); void executeExpression(Pipeline & pipeline, const ExpressionActionsPtr & expression); void executeOrder(Pipeline & pipeline); @@ -190,6 +190,7 @@ private: void executeDistinct(Pipeline & pipeline, bool before_order, Names columns); void executeExtremes(Pipeline & pipeline); void executeSubqueriesInSetsAndJoins(Pipeline & pipeline, std::unordered_map & subqueries_for_sets); + void executeRollup(Pipeline & pipeline); /** If there is a SETTINGS section in the SELECT query, then apply settings from it. * diff --git a/dbms/src/Interpreters/ProcessList.h b/dbms/src/Interpreters/ProcessList.h index b2a800c77bd..56d46f2aaa3 100644 --- a/dbms/src/Interpreters/ProcessList.h +++ b/dbms/src/Interpreters/ProcessList.h @@ -19,7 +19,6 @@ #include #include #include -#include #include diff --git a/dbms/src/Interpreters/ThreadStatusExt.cpp b/dbms/src/Interpreters/ThreadStatusExt.cpp index 4656385fa97..fe9002d92f0 100644 --- a/dbms/src/Interpreters/ThreadStatusExt.cpp +++ b/dbms/src/Interpreters/ThreadStatusExt.cpp @@ -203,6 +203,7 @@ void CurrentThread::attachTo(const ThreadGroupStatusPtr & thread_group) void CurrentThread::attachToIfDetached(const ThreadGroupStatusPtr & thread_group) { get()->attachQuery(thread_group, false); + getScope()->deleter = CurrentThread::defaultThreadDeleter; } std::string CurrentThread::getCurrentQueryID() diff --git a/dbms/src/Interpreters/config_compile.h.in b/dbms/src/Interpreters/config_compile.h.in index 52d4ab95f0b..be84d5219a1 100644 --- a/dbms/src/Interpreters/config_compile.h.in +++ b/dbms/src/Interpreters/config_compile.h.in @@ -1,9 +1,6 @@ #pragma once #cmakedefine CMAKE_LIBRARY_ARCHITECTURE "@CMAKE_LIBRARY_ARCHITECTURE@" -#if !defined(CMAKE_LIBRARY_ARCHITECTURE) -#define CMAKE_LIBRARY_ARCHITECTURE "" -#endif #cmakedefine PATH_SHARE "@PATH_SHARE@" #cmakedefine INTERNAL_COMPILER_FLAGS "@INTERNAL_COMPILER_FLAGS@" #cmakedefine INTERNAL_COMPILER_BIN_ROOT "@INTERNAL_COMPILER_BIN_ROOT@" diff --git a/dbms/src/Parsers/ASTSelectQuery.cpp b/dbms/src/Parsers/ASTSelectQuery.cpp index dc7a5341fe5..0e48ac585b7 100644 --- a/dbms/src/Parsers/ASTSelectQuery.cpp +++ b/dbms/src/Parsers/ASTSelectQuery.cpp @@ -103,6 +103,9 @@ void ASTSelectQuery::formatImpl(const FormatSettings & s, FormatState & state, F : typeid_cast(*group_expression_list).formatImplMultiline(s, 
state, frame); } + if (group_by_with_rollup) + s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << (s.one_line ? "" : " ") << "WITH ROLLUP" << (s.hilite ? hilite_none : ""); + if (group_by_with_totals) s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << (s.one_line ? "" : " ") << "WITH TOTALS" << (s.hilite ? hilite_none : ""); diff --git a/dbms/src/Parsers/ASTSelectQuery.h b/dbms/src/Parsers/ASTSelectQuery.h index 91d8d52172c..7422c70212c 100644 --- a/dbms/src/Parsers/ASTSelectQuery.h +++ b/dbms/src/Parsers/ASTSelectQuery.h @@ -28,6 +28,7 @@ public: ASTPtr where_expression; ASTPtr group_expression_list; bool group_by_with_totals = false; + bool group_by_with_rollup = false; ASTPtr having_expression; ASTPtr order_expression_list; ASTPtr limit_by_value; diff --git a/dbms/src/Parsers/ParserSelectQuery.cpp b/dbms/src/Parsers/ParserSelectQuery.cpp index b767fe7b586..480cb32b8bd 100644 --- a/dbms/src/Parsers/ParserSelectQuery.cpp +++ b/dbms/src/Parsers/ParserSelectQuery.cpp @@ -39,6 +39,7 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ParserKeyword s_limit("LIMIT"); ParserKeyword s_settings("SETTINGS"); ParserKeyword s_by("BY"); + ParserKeyword s_rollup("ROLLUP"); ParserKeyword s_top("TOP"); ParserKeyword s_offset("OFFSET"); @@ -48,6 +49,9 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ParserExpressionWithOptionalAlias exp_elem(false); ParserOrderByExpressionList order_list; + ParserToken open_bracket(TokenType::OpeningRoundBracket); + ParserToken close_bracket(TokenType::ClosingRoundBracket); + /// WITH expr list { if (s_with.ignore(pos, expected)) @@ -67,8 +71,6 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (s_top.ignore(pos, expected)) { - ParserToken open_bracket(TokenType::OpeningRoundBracket); - ParserToken close_bracket(TokenType::ClosingRoundBracket); ParserNumber num; if (open_bracket.ignore(pos, expected)) @@ -113,14 +115,35 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) /// GROUP BY expr list if (s_group_by.ignore(pos, expected)) { + if (s_rollup.ignore(pos, expected)) + { + select_query->group_by_with_rollup = true; + if (!open_bracket.ignore(pos, expected)) + return false; + } + if (!exp_list.parse(pos, select_query->group_expression_list, expected)) return false; + + if (select_query->group_by_with_rollup && !close_bracket.ignore(pos, expected)) + return false; + } + + /// WITH ROLLUP + if (s_with.ignore(pos, expected)) + { + if (s_rollup.ignore(pos, expected)) + select_query->group_by_with_rollup = true; + else if (s_totals.ignore(pos, expected)) + select_query->group_by_with_totals = true; + else + return false; } /// WITH TOTALS if (s_with.ignore(pos, expected)) { - if (!s_totals.ignore(pos, expected)) + if (select_query->group_by_with_totals || !s_totals.ignore(pos, expected)) return false; select_query->group_by_with_totals = true; diff --git a/dbms/src/Storages/Kafka/StorageKafka.cpp b/dbms/src/Storages/Kafka/StorageKafka.cpp index d7794ffe295..f8154fbefc5 100644 --- a/dbms/src/Storages/Kafka/StorageKafka.cpp +++ b/dbms/src/Storages/Kafka/StorageKafka.cpp @@ -425,6 +425,7 @@ void StorageKafka::pushConsumer(StorageKafka::ConsumerPtr c) void StorageKafka::streamThread() { setThreadName("KafkaStreamThr"); + CurrentThread::initializeQuery(); while (!stream_cancelled) { diff --git a/dbms/src/Storages/MergeTree/MergeTreeBaseBlockInputStream.cpp 
b/dbms/src/Storages/MergeTree/MergeTreeBaseBlockInputStream.cpp index ba137aa108f..c90c37fb9b4 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeBaseBlockInputStream.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeBaseBlockInputStream.cpp @@ -121,26 +121,28 @@ Block MergeTreeBaseBlockInputStream::readFromPart() if (reader->getColumns().empty()) { task->range_reader = MergeTreeRangeReader( - pre_reader.get(), index_granularity, nullptr, prewhere_info->prewhere_actions, + pre_reader.get(), index_granularity, nullptr, + prewhere_info->alias_actions, prewhere_info->prewhere_actions, &prewhere_info->prewhere_column_name, &task->ordered_names, task->should_reorder, task->remove_prewhere_column, true); } else { task->pre_range_reader = MergeTreeRangeReader( - pre_reader.get(), index_granularity, nullptr, prewhere_info->prewhere_actions, + pre_reader.get(), index_granularity, nullptr, + prewhere_info->alias_actions, prewhere_info->prewhere_actions, &prewhere_info->prewhere_column_name, &task->ordered_names, task->should_reorder, task->remove_prewhere_column, false); task->range_reader = MergeTreeRangeReader( - reader.get(), index_granularity, &task->pre_range_reader, nullptr, + reader.get(), index_granularity, &task->pre_range_reader, nullptr, nullptr, nullptr, &task->ordered_names, true, false, true); } } else { task->range_reader = MergeTreeRangeReader( - reader.get(), index_granularity, nullptr, nullptr, + reader.get(), index_granularity, nullptr, nullptr, nullptr, nullptr, &task->ordered_names, task->should_reorder, false, true); } } @@ -218,6 +220,9 @@ void MergeTreeBaseBlockInputStream::executePrewhereActions(Block & block, const { if (prewhere_info) { + if (prewhere_info->alias_actions) + prewhere_info->alias_actions->execute(block); + prewhere_info->prewhere_actions->execute(block); if (prewhere_info->remove_prewhere_column) block.erase(prewhere_info->prewhere_column_name); diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockInputStream.cpp b/dbms/src/Storages/MergeTree/MergeTreeBlockInputStream.cpp index 9e545360e02..8a3fccc5e43 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeBlockInputStream.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeBlockInputStream.cpp @@ -109,7 +109,10 @@ try if (prewhere_info) { - pre_column_names = prewhere_info->prewhere_actions->getRequiredColumns(); + if (prewhere_info->alias_actions) + pre_column_names = prewhere_info->alias_actions->getRequiredColumns(); + else + pre_column_names = prewhere_info->prewhere_actions->getRequiredColumns(); if (pre_column_names.empty()) pre_column_names.push_back(column_names[0]); diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index dc4b97ff6e3..7218e8b20d5 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -735,6 +735,16 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor rows_sources_write_buf->next(); rows_sources_uncompressed_write_buf->next(); + + size_t rows_sources_count = rows_sources_write_buf->count(); + /// In special case, when there is only one source part, and no rows were skipped, we may have + /// skipped writing rows_sources file. Otherwise rows_sources_count must be equal to the total + /// number of input rows. 
+ if ((rows_sources_count > 0 || parts.size() > 1) && sum_input_rows_exact != rows_sources_count) + throw Exception("Number of rows in source parts (" + toString(sum_input_rows_exact) + + ") differs from number of bytes written to rows_sources file (" + toString(rows_sources_count) + + "). It is a bug.", ErrorCodes::LOGICAL_ERROR); + CompressedReadBufferFromFile rows_sources_read_buf(rows_sources_file_path, 0, 0); for (size_t column_num = 0, gathering_column_names_size = gathering_column_names.size(); diff --git a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp index db0d9f9076a..d9aaf9acd84 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp @@ -366,13 +366,13 @@ void MergeTreeRangeReader::ReadResult::setFilter(const ColumnPtr & new_filter) MergeTreeRangeReader::MergeTreeRangeReader( - MergeTreeReader * merge_tree_reader, size_t index_granularity, - MergeTreeRangeReader * prev_reader, ExpressionActionsPtr prewhere_actions, + MergeTreeReader * merge_tree_reader, size_t index_granularity, MergeTreeRangeReader * prev_reader, + ExpressionActionsPtr alias_actions, ExpressionActionsPtr prewhere_actions, const String * prewhere_column_name, const Names * ordered_names, bool always_reorder, bool remove_prewhere_column, bool last_reader_in_chain) : index_granularity(index_granularity), merge_tree_reader(merge_tree_reader) , prev_reader(prev_reader), prewhere_column_name(prewhere_column_name) - , ordered_names(ordered_names), prewhere_actions(std::move(prewhere_actions)) + , ordered_names(ordered_names), alias_actions(alias_actions), prewhere_actions(std::move(prewhere_actions)) , always_reorder(always_reorder), remove_prewhere_column(remove_prewhere_column) , last_reader_in_chain(last_reader_in_chain), is_initialized(true) { @@ -571,6 +571,9 @@ void MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(ReadResult & r if (!prewhere_actions) return; + if (alias_actions) + alias_actions->execute(result.block); + prewhere_actions->execute(result.block); auto & prewhere_column = result.block.getByName(*prewhere_column_name); size_t prev_rows = result.block.rows(); diff --git a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.h b/dbms/src/Storages/MergeTree/MergeTreeRangeReader.h index 21267b517e9..103ec2fb162 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.h +++ b/dbms/src/Storages/MergeTree/MergeTreeRangeReader.h @@ -19,8 +19,8 @@ class MergeTreeReader; class MergeTreeRangeReader { public: - MergeTreeRangeReader(MergeTreeReader * merge_tree_reader, size_t index_granularity, - MergeTreeRangeReader * prev_reader, ExpressionActionsPtr prewhere_actions, + MergeTreeRangeReader(MergeTreeReader * merge_tree_reader, size_t index_granularity, MergeTreeRangeReader * prev_reader, + ExpressionActionsPtr alias_actions, ExpressionActionsPtr prewhere_actions, const String * prewhere_column_name, const Names * ordered_names, bool always_reorder, bool remove_prewhere_column, bool last_reader_in_chain); @@ -175,6 +175,7 @@ private: const String * prewhere_column_name = nullptr; const Names * ordered_names = nullptr; + ExpressionActionsPtr alias_actions = nullptr; /// If not nullptr, calculate aliases. ExpressionActionsPtr prewhere_actions = nullptr; /// If not nullptr, calculate filter. 
Stream stream; diff --git a/dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp b/dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp index 55ba286dc08..16630e0d5c3 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -196,7 +196,10 @@ std::vector MergeTreeReadPool::fillPerPartInfo( if (prewhere_info) { /// collect columns required for PREWHERE evaluation - required_pre_column_names = prewhere_info->prewhere_actions->getRequiredColumns(); + if (prewhere_info->alias_actions) + required_pre_column_names = prewhere_info->alias_actions->getRequiredColumns(); + else + required_pre_column_names = prewhere_info->prewhere_actions->getRequiredColumns(); /// there must be at least one column required for PREWHERE if (required_pre_column_names.empty()) diff --git a/dbms/src/Storages/SelectQueryInfo.h b/dbms/src/Storages/SelectQueryInfo.h index a448f2dc0bb..a6e40e4c27d 100644 --- a/dbms/src/Storages/SelectQueryInfo.h +++ b/dbms/src/Storages/SelectQueryInfo.h @@ -21,6 +21,8 @@ using PreparedSets = std::unordered_map; diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp index 7c9b67ad2d3..836b2685949 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp +++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp @@ -3431,8 +3431,6 @@ void StorageReplicatedMergeTree::drop() if (is_readonly || !zookeeper) throw Exception("Can't drop readonly replicated table (need to drop data in ZooKeeper as well)", ErrorCodes::TABLE_IS_READ_ONLY); - // checkTableCanBeDropped(); // uncomment to feel yourself safe - shutdown(); if (zookeeper->expired()) diff --git a/dbms/tests/CMakeLists.txt b/dbms/tests/CMakeLists.txt index efa1e35861f..aece33b0c82 100644 --- a/dbms/tests/CMakeLists.txt +++ b/dbms/tests/CMakeLists.txt @@ -27,5 +27,5 @@ if (ENABLE_TESTS) # maybe add --no-long ? 
# if you want disable some tests: env TEST_OPT0='--skip compile' - add_test(NAME with_server COMMAND bash -c "env BUILD_DIR=${ClickHouse_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/clickhouse-test-server") + add_test(NAME with_server COMMAND bash -c "env BUILD_DIR=${ClickHouse_BINARY_DIR} INTERNAL_COMPILER_BIN_ROOT=${INTERNAL_COMPILER_BIN_ROOT} ${CMAKE_CURRENT_SOURCE_DIR}/clickhouse-test-server") endif () diff --git a/dbms/tests/clickhouse-test-server b/dbms/tests/clickhouse-test-server index bf054767c8d..69e933e06f5 100755 --- a/dbms/tests/clickhouse-test-server +++ b/dbms/tests/clickhouse-test-server @@ -75,7 +75,7 @@ fi VERSION=`${BIN_DIR}clickhouse-client --version-clean` # If run from compile dir - use in-place compile binary and headers -[ -n "$BIN_DIR" ] && INTERNAL_COMPILER_PARAMS="--compiler_executable_root=$BUILD_DIR/dbms/programs/ --compiler_headers=$BUILD_DIR/dbms/programs/clang/headers/$VERSION/ --compiler_headers_root=$BUILD_DIR/dbms/programs/clang/headers/$VERSION/" +[ -n "$BIN_DIR" ] && INTERNAL_COMPILER_PARAMS="--compiler_executable_root=${INTERNAL_COMPILER_BIN_ROOT:=$BUILD_DIR/dbms/programs/} --compiler_headers=$BUILD_DIR/dbms/programs/clang/headers/$VERSION/ --compiler_headers_root=$BUILD_DIR/dbms/programs/clang/headers/$VERSION/" $GDB ${BIN_DIR}clickhouse-server --config-file=$CLICKHOUSE_CONFIG --log=$CLICKHOUSE_LOG $TEST_SERVER_PARAMS -- \ --http_port=$CLICKHOUSE_PORT_HTTP \ diff --git a/dbms/tests/external_dictionaries/generate_and_test.py b/dbms/tests/external_dictionaries/generate_and_test.py index 36e2182a772..9c775610b2d 100755 --- a/dbms/tests/external_dictionaries/generate_and_test.py +++ b/dbms/tests/external_dictionaries/generate_and_test.py @@ -145,7 +145,7 @@ types = [ 'Int8', 'Int16', 'Int32', 'Int64', 'Float32', 'Float64', 'String', - 'Date', 'DateTime' + 'Date', 'DateTime', 'UUID' ] @@ -154,7 +154,7 @@ explicit_defaults = [ '-42', '-42', '-42', '-42', '1.5', '1.6', "'explicit-default'", - "'2015-01-01'", "'2015-01-01 00:00:00'" + "'2015-01-01'", "'2015-01-01 00:00:00'", "'550e8400-e29b-41d4-a716-446655440000'" ] @@ -163,7 +163,7 @@ implicit_defaults = [ '-1', '-1', '-1', '-1', '2.71828', '2.71828', 'implicit-default', - '2015-11-25', '2015-11-25 00:00:00' + '2015-11-25', '2015-11-25 00:00:00', "550e8400-e29b-41d4-a716-446655440000" ] @@ -210,7 +210,7 @@ def generate_data(args): 'Int8_ Int8, Int16_ Int16, Int32_ Int32, Int64_ Int64,' 'Float32_ Float32, Float64_ Float64,' 'String_ String,' - 'Date_ Date, DateTime_ DateTime, Parent UInt64' + 'Date_ Date, DateTime_ DateTime, Parent UInt64, UUID_ UUID' ') engine=Log; insert into test.dictionary_source format TabSeparated' '"'.format(source = args.source, ch = args.client, port = args.port)) @@ -232,7 +232,7 @@ def generate_data(args): 'UInt8_ tinyint unsigned, UInt16_ smallint unsigned, UInt32_ int unsigned, UInt64_ bigint unsigned, ' 'Int8_ tinyint, Int16_ smallint, Int32_ int, Int64_ bigint, ' 'Float32_ float, Float64_ double, ' - 'String_ text, Date_ date, DateTime_ datetime, Parent bigint unsigned' + 'String_ text, Date_ date, DateTime_ datetime, Parent bigint unsigned, UUID_ varchar(36)' ');' 'load data local infile \'{0}/source.tsv\' into table test.dictionary_source;" | mysql $MYSQL_OPTIONS --local-infile=1' .format(prefix), shell=True) diff --git a/dbms/tests/external_dictionaries/reference/UUID.reference b/dbms/tests/external_dictionaries/reference/UUID.reference new file mode 100644 index 00000000000..04b2c00c7eb --- /dev/null +++ b/dbms/tests/external_dictionaries/reference/UUID.reference @@ -0,0 +1,8 
@@ +550e8400-e29b-41d4-a716-446655440000 +550e8400-e29b-41d4-a716-446655440001 +550e8400-e29b-41d4-a716-446655440002 +550e8400-e29b-41d4-a716-446655440003 +550e8400-e29b-41d4-a716-446655440000 +550e8400-e29b-41d4-a716-446655440000 +550e8400-e29b-41d4-a716-446655440000 +550e8400-e29b-41d4-a716-446655440000 diff --git a/dbms/tests/external_dictionaries/reference/UUIDOrDefault.reference b/dbms/tests/external_dictionaries/reference/UUIDOrDefault.reference new file mode 100644 index 00000000000..04b2c00c7eb --- /dev/null +++ b/dbms/tests/external_dictionaries/reference/UUIDOrDefault.reference @@ -0,0 +1,8 @@ +550e8400-e29b-41d4-a716-446655440000 +550e8400-e29b-41d4-a716-446655440001 +550e8400-e29b-41d4-a716-446655440002 +550e8400-e29b-41d4-a716-446655440003 +550e8400-e29b-41d4-a716-446655440000 +550e8400-e29b-41d4-a716-446655440000 +550e8400-e29b-41d4-a716-446655440000 +550e8400-e29b-41d4-a716-446655440000 diff --git a/dbms/tests/external_dictionaries/source.tsv b/dbms/tests/external_dictionaries/source.tsv index d3fc6e1f3cc..f10571bc1c0 100644 --- a/dbms/tests/external_dictionaries/source.tsv +++ b/dbms/tests/external_dictionaries/source.tsv @@ -1,4 +1,4 @@ -0 0 0 0 50 13874 980694578 980694579 50 13874 980694578 980694579 0 0 4761183170873013810 2007-12-27 1970-01-02 06:51:14 0 -1 1 1 1 48 57392 4083802160 4083802161 48 -8144 -211165136 -211165135 1.5 1.5 10577349846663553072 2037-06-02 1970-01-02 09:50:24 0 -2 2 2 2 69 35909 1447922757 1447922758 69 -29627 1447922757 1447922758 3 3 18198135717204167749 1978-08-08 1970-01-02 03:52:21 1 -3 3 3 3 250 1274 1029309690 1029309691 -6 1274 1029309690 1029309691 4.5 4.5 9624464864560415994 1973-06-28 1970-01-02 03:21:14 2 +0 0 0 0 50 13874 980694578 980694579 50 13874 980694578 980694579 0 0 4761183170873013810 2007-12-27 1970-01-02 06:51:14 0 550e8400-e29b-41d4-a716-446655440000 +1 1 1 1 48 57392 4083802160 4083802161 48 -8144 -211165136 -211165135 1.5 1.5 10577349846663553072 2037-06-02 1970-01-02 09:50:24 0 550e8400-e29b-41d4-a716-446655440001 +2 2 2 2 69 35909 1447922757 1447922758 69 -29627 1447922757 1447922758 3 3 18198135717204167749 1978-08-08 1970-01-02 03:52:21 1 550e8400-e29b-41d4-a716-446655440002 +3 3 3 3 250 1274 1029309690 1029309691 -6 1274 1029309690 1029309691 4.5 4.5 9624464864560415994 1973-06-28 1970-01-02 03:21:14 2 550e8400-e29b-41d4-a716-446655440003 diff --git a/dbms/tests/queries/0_stateless/00634_logging_shard.reference b/dbms/tests/queries/0_stateless/00634_logging_shard.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00634_logging_shard.sh b/dbms/tests/queries/0_stateless/00634_logging_shard.sh new file mode 100755 index 00000000000..0736354312b --- /dev/null +++ b/dbms/tests/queries/0_stateless/00634_logging_shard.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -e + +# Get all server logs +export CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL="trace" + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
$CURDIR/../shell_config.sh + +cur_name=${BASH_SOURCE[0]} +server_logs_file=$cur_name"_server.logs" +server_logs="--server_logs_file=$server_logs_file" +rm -f "$server_logs_file" + +settings="$server_logs --log_queries=1 --log_query_threads=1 --log_profile_events=1 --log_query_settings=1" + + +# Check that logs from remote servers are passed from client + +# SELECT +> "$server_logs_file" +$CLICKHOUSE_CLIENT $settings -q "SELECT 1 FROM system.one FORMAT Null" +lines_one_server=`cat "$server_logs_file" | wc -l` + +> "$server_logs_file" +$CLICKHOUSE_CLIENT $settings -q "SELECT 1 FROM remote('127.0.0.2,127.0.0.3', system, one) FORMAT Null" +lines_two_servers=`cat "$server_logs_file" | wc -l` + +(( $lines_two_servers >= 2 * $lines_one_server )) || echo "Fail: $lines_two_servers $lines_one_server" + +# INSERT +$CLICKHOUSE_CLIENT $settings -q "DROP TABLE IF EXISTS test.null" +$CLICKHOUSE_CLIENT $settings -q "CREATE TABLE test.null (i Int8) ENGINE = Null" + +> "$server_logs_file" +$CLICKHOUSE_CLIENT $settings -q "INSERT INTO test.null VALUES (0)" +lines_one_server=`cat "$server_logs_file" | wc -l` + +> "$server_logs_file" +$CLICKHOUSE_CLIENT $settings -q "INSERT INTO TABLE FUNCTION remote('127.0.0.2', 'test', 'null') VALUES (0)" +lines_two_servers=`cat "$server_logs_file" | wc -l` + +$CLICKHOUSE_CLIENT $settings -q "DROP TABLE IF EXISTS test.null" +(( $lines_two_servers > $lines_one_server )) || echo "Fail: $lines_two_servers $lines_one_server" + + +# Clean +rm "$server_logs_file" diff --git a/dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging_shard.reference b/dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging_shard.reference rename to dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.reference diff --git a/dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging_shard.sh b/dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh similarity index 79% rename from dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging_shard.sh rename to dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh index 7ac29a7e10f..18efafac49d 100755 --- a/dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging_shard.sh +++ b/dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh @@ -101,35 +101,5 @@ WHERE NOT (PVq <= PVt AND PVt <= 1.1 * PVq) " - -# Check that logs from remote servers are passed from client - -# SELECT -> "$server_logs_file" -$CLICKHOUSE_CLIENT $settings -q "SELECT 1 FROM system.one FORMAT Null" -lines_one_server=`cat "$server_logs_file" | wc -l` - -> "$server_logs_file" -$CLICKHOUSE_CLIENT $settings -q "SELECT 1 FROM remote('127.0.0.2,127.0.0.3', system, one) FORMAT Null" -lines_two_servers=`cat "$server_logs_file" | wc -l` - -(( $lines_two_servers >= 2 * $lines_one_server )) || echo "Fail: $lines_two_servers $lines_one_server" - -# INSERT -$CLICKHOUSE_CLIENT $settings -q "DROP TABLE IF EXISTS test.null" -$CLICKHOUSE_CLIENT $settings -q "CREATE TABLE test.null (i Int8) ENGINE = Null" - -> "$server_logs_file" -$CLICKHOUSE_CLIENT $settings -q "INSERT INTO test.null VALUES (0)" -lines_one_server=`cat "$server_logs_file" | wc -l` - -> "$server_logs_file" -$CLICKHOUSE_CLIENT $settings -q "INSERT INTO TABLE FUNCTION remote('127.0.0.2', 'test', 'null') VALUES (0)" -lines_two_servers=`cat 
"$server_logs_file" | wc -l` - -$CLICKHOUSE_CLIENT $settings -q "DROP TABLE IF EXISTS test.null" -(( $lines_two_servers > $lines_one_server )) || echo "Fail: $lines_two_servers $lines_one_server" - - # Clean rm "$server_logs_file" diff --git a/dbms/tests/queries/0_stateless/00647_multiply_aggregation_state.reference b/dbms/tests/queries/0_stateless/00647_multiply_aggregation_state.reference new file mode 100644 index 00000000000..ff08aade246 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00647_multiply_aggregation_state.reference @@ -0,0 +1,13 @@ +2 +0 +33 +2 +0 +18 +20 4 +2 +3 +2 +[1,1,1,1,1] +[1,1] +[1] diff --git a/dbms/tests/queries/0_stateless/00647_multiply_aggregation_state.sql b/dbms/tests/queries/0_stateless/00647_multiply_aggregation_state.sql new file mode 100644 index 00000000000..d7ebd7b0313 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00647_multiply_aggregation_state.sql @@ -0,0 +1,22 @@ +SELECT countMerge(x) AS y FROM ( SELECT countState() * 2 AS x FROM ( SELECT 1 )); +SELECT countMerge(x) AS y FROM ( SELECT countState() * 0 AS x FROM ( SELECT 1 UNION ALL SELECT 2)); +SELECT sumMerge(y) AS z FROM ( SELECT sumState(x) * 11 AS y FROM ( SELECT 1 AS x UNION ALL SELECT 2 AS x)); +SELECT countMerge(x) AS y FROM ( SELECT 2 * countState() AS x FROM ( SELECT 1 )); +SELECT countMerge(x) AS y FROM ( SELECT 0 * countState() AS x FROM ( SELECT 1 UNION ALL SELECT 2)); +SELECT sumMerge(y) AS z FROM ( SELECT 3 * sumState(x) * 2 AS y FROM ( SELECT 1 AS x UNION ALL SELECT 2 AS x)); + +DROP TABLE IF EXISTS test.mult_aggregation; +CREATE TABLE test.mult_aggregation(a UInt32, b UInt32) ENGINE = Memory; +INSERT INTO test.mult_aggregation VALUES(1, 1); +INSERT INTO test.mult_aggregation VALUES(1, 3); + +SELECT sumMerge(x * 5), sumMerge(x) FROM (SELECT sumState(b) AS x FROM test.mult_aggregation); +SELECT uniqMerge(x * 10) FROM (SELECT uniqState(b) AS x FROM test.mult_aggregation); +SELECT maxMerge(x * 10) FROM (SELECT maxState(b) AS x FROM test.mult_aggregation); +SELECT avgMerge(x * 10) FROM (SELECT avgState(b) AS x FROM test.mult_aggregation); + +SELECT groupArrayMerge(y * 5) FROM (SELECT groupArrayState(x) AS y FROM (SELECT 1 AS x)); +SELECT groupArrayMerge(2)(y * 5) FROM (SELECT groupArrayState(2)(x) AS y FROM (SELECT 1 AS x)); +SELECT groupUniqArrayMerge(y * 5) FROM (SELECT groupUniqArrayState(x) AS y FROM (SELECT 1 AS x)); + +DROP TABLE IF EXISTS test.mult_aggregation; diff --git a/dbms/tests/queries/0_stateless/00701_rollup.reference b/dbms/tests/queries/0_stateless/00701_rollup.reference new file mode 100644 index 00000000000..ec07ad52cae --- /dev/null +++ b/dbms/tests/queries/0_stateless/00701_rollup.reference @@ -0,0 +1,27 @@ + 0 120 8 +a 0 70 4 +a 1 25 2 +a 2 45 2 +b 0 50 4 +b 1 15 2 +b 2 35 2 + 0 120 8 +a 0 70 4 +a 1 25 2 +a 2 45 2 +b 0 50 4 +b 1 15 2 +b 2 35 2 + + 0 120 8 + 120 8 +a 70 4 +b 50 4 + 120 8 +a 70 4 +b 50 4 + 120 8 +a 70 4 +b 50 4 + + 120 8 diff --git a/dbms/tests/queries/0_stateless/00701_rollup.sql b/dbms/tests/queries/0_stateless/00701_rollup.sql new file mode 100644 index 00000000000..1a52ccd09dc --- /dev/null +++ b/dbms/tests/queries/0_stateless/00701_rollup.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS test.rollup; +CREATE TABLE test.rollup(a String, b Int32, s Int32) ENGINE = Memory; + +INSERT INTO test.rollup VALUES('a', 1, 10); +INSERT INTO test.rollup VALUES('a', 1, 15); +INSERT INTO test.rollup VALUES('a', 2, 20); +INSERT INTO test.rollup VALUES('a', 2, 25); +INSERT INTO test.rollup VALUES('b', 1, 10); +INSERT INTO test.rollup VALUES('b', 1, 5); +INSERT 
INTO test.rollup VALUES('b', 2, 20); +INSERT INTO test.rollup VALUES('b', 2, 15); + +SELECT a, b, sum(s), count() from test.rollup GROUP BY ROLLUP(a, b) ORDER BY a, b; + +SELECT a, b, sum(s), count() from test.rollup GROUP BY ROLLUP(a, b) WITH TOTALS ORDER BY a, b; + +SELECT a, sum(s), count() from test.rollup GROUP BY ROLLUP(a) ORDER BY a; + +SELECT a, sum(s), count() from test.rollup GROUP BY a WITH ROLLUP ORDER BY a; + +SELECT a, sum(s), count() from test.rollup GROUP BY a WITH ROLLUP WITH TOTALS ORDER BY a; diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias.reference b/dbms/tests/queries/0_stateless/00712_prewhere_with_alias.reference new file mode 100644 index 00000000000..6ee9c223dfd --- /dev/null +++ b/dbms/tests/queries/0_stateless/00712_prewhere_with_alias.reference @@ -0,0 +1,30 @@ +- +1 +1 +1 +2 +- +1 +1 +1 +2 +- +1 +1 +1 +2 +- +1 +1 +1 +2 +- +1 +1 +1 +2 +- +1 +1 +1 +2 diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias.sql b/dbms/tests/queries/0_stateless/00712_prewhere_with_alias.sql new file mode 100644 index 00000000000..5cfc2318f3d --- /dev/null +++ b/dbms/tests/queries/0_stateless/00712_prewhere_with_alias.sql @@ -0,0 +1,35 @@ +drop table if exists test.prewhere_alias; +CREATE TABLE test.prewhere_alias (a UInt8, b Int32, c UInt8 ALIAS a, d Int64 ALIAS b + 1, e Int32 alias a + b) ENGINE = MergeTree ORDER BY tuple(); +insert into test.prewhere_alias values (1, 1); +select '-'; +select a from test.prewhere_alias prewhere a = 1; +select b from test.prewhere_alias prewhere a = 1; +select c from test.prewhere_alias prewhere a = 1; +select d from test.prewhere_alias prewhere a = 1; +select '-'; +select a from test.prewhere_alias prewhere b = 1; +select b from test.prewhere_alias prewhere b = 1; +select c from test.prewhere_alias prewhere b = 1; +select d from test.prewhere_alias prewhere b = 1; +select '-'; +select a from test.prewhere_alias prewhere c = 1; +select b from test.prewhere_alias prewhere c = 1; +select c from test.prewhere_alias prewhere c = 1; +select d from test.prewhere_alias prewhere c = 1; +select '-'; +select a from test.prewhere_alias prewhere d = 2; +select b from test.prewhere_alias prewhere d = 2; +select c from test.prewhere_alias prewhere d = 2; +select d from test.prewhere_alias prewhere d = 2; +select '-'; +select a from test.prewhere_alias prewhere a; +select b from test.prewhere_alias prewhere a; +select c from test.prewhere_alias prewhere a; +select d from test.prewhere_alias prewhere a; +select '-'; +select a from test.prewhere_alias prewhere c; +select b from test.prewhere_alias prewhere c; +select c from test.prewhere_alias prewhere c; +select d from test.prewhere_alias prewhere c; +drop table if exists test.prewhere_alias; + diff --git a/dbms/tests/queries/0_stateless/00713_collapsing_merge_tree.reference b/dbms/tests/queries/0_stateless/00713_collapsing_merge_tree.reference new file mode 100644 index 00000000000..b6b4bc9e89b --- /dev/null +++ b/dbms/tests/queries/0_stateless/00713_collapsing_merge_tree.reference @@ -0,0 +1,3 @@ +k1 k1v2 1 +k3 k3v1 1 +k4 k4v1 -1 diff --git a/dbms/tests/queries/0_stateless/00713_collapsing_merge_tree.sql b/dbms/tests/queries/0_stateless/00713_collapsing_merge_tree.sql new file mode 100644 index 00000000000..ea9d8f6b45a --- /dev/null +++ b/dbms/tests/queries/0_stateless/00713_collapsing_merge_tree.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS test.collapsing; + +CREATE TABLE test.collapsing(key String, value String, sign Int8) ENGINE CollapsingMergeTree(sign) + ORDER BY key + 
SETTINGS enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0; + +INSERT INTO test.collapsing VALUES ('k1', 'k1v1', 1); +INSERT INTO test.collapsing VALUES ('k1', 'k1v1', -1), ('k1', 'k1v2', 1); +INSERT INTO test.collapsing VALUES ('k2', 'k2v1', 1), ('k2', 'k2v1', -1), ('k3', 'k3v1', 1); +INSERT INTO test.collapsing VALUES ('k4', 'k4v1', -1), ('k4', 'k4v2', 1), ('k4', 'k4v2', -1); + +OPTIMIZE TABLE test.collapsing PARTITION tuple() FINAL; + +SELECT * FROM test.collapsing ORDER BY key; + +DROP TABLE test.collapsing; diff --git a/debian/changelog b/debian/changelog index 17cfdd1c287..8f6ba10cc42 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,5 @@ -clickhouse (18.12.2) unstable; urgency=low +clickhouse (18.12.5) unstable; urgency=low * Modified source code - -- Wed, 05 Sep 2018 00:28:49 +0300 + -- Thu, 06 Sep 2018 07:25:55 +0300 diff --git a/docker/client/Dockerfile b/docker/client/Dockerfile index 8655329065c..3372539be81 100644 --- a/docker/client/Dockerfile +++ b/docker/client/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=18.12.2 +ARG version=18.12.5 RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile index 953eab83e33..7e3b4c63d7e 100644 --- a/docker/server/Dockerfile +++ b/docker/server/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=18.12.2 +ARG version=18.12.5 RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/docker/test/Dockerfile b/docker/test/Dockerfile index f1b5dda8e2a..41f4d9dd303 100644 --- a/docker/test/Dockerfile +++ b/docker/test/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=18.12.2 +ARG version=18.12.5 RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/docs/tools/build.py b/docs/tools/build.py index f5e94e2e076..861825f9c4f 100755 --- a/docs/tools/build.py +++ b/docs/tools/build.py @@ -70,6 +70,7 @@ def build_for_lang(lang, args): cfg = config.load_config( config_file=config_path, site_name='ClickHouse Documentation' if lang == 'en' or 'fa' else 'Документация ClickHouse', + site_url='https://clickhouse.yandex/docs/%s/' % lang, docs_dir=os.path.join(args.docs_dir, lang), site_dir=os.path.join(args.output_dir, lang), strict=True, @@ -88,7 +89,7 @@ def build_for_lang(lang, args): ], plugins=[{ 'search': { - 'lang': ['en'] if lang == 'en' or 'fa' else ['en', lang] + 'lang': ['en'] if lang == 'en' else ['en', lang] } }], extra={ diff --git a/release b/release index be7a5e1411a..b732e941612 100755 --- a/release +++ b/release @@ -11,6 +11,8 @@ # env DIST=unstable EXTRAPACKAGES="clang-7 libstdc++-8-dev lld-7 liblld-7-dev libclang-7-dev liblld-7" DEB_CC=clang-7 DEB_CXX=clang++-7 CMAKE_FLAGS=" -DNO_WERROR=1 " ./release # Clang6 without internal compiler (for low memory arm64): # env DIST=bionic DISABLE_PARALLEL=1 EXTRAPACKAGES="clang-6.0 libstdc++-8-dev" DEB_CC=clang-6.0 DEB_CXX=clang++-6.0 CMAKE_FLAGS=" -DNO_WERROR=1 " ./release +# Do not compile internal compiler but use from system: +# env CMAKE_FLAGS="-DUSE_INTERNAL_LLVM_LIBRARY=0 -DENABLE_EMBEDDED_COMPILER=0 -DINTERNAL_COMPILER_EXECUTABLE=clang-6.0 -DINTERNAL_LINKER_EXECUTABLE=ld.lld-6.0 
-DINTERNAL_COMPILER_BIN_ROOT=/usr/bin/" EXTRAPACKAGES="clang-6.0 lld-6.0 libstdc++-8-dev" DEB_CXX=clang++-6.0 DEB_CC=clang-6.0 TEST_RUN=1 TEST_OPT="compile" ./release # Build with ASan: # env SANITIZER=address ./release diff --git a/website/sitemap.xml b/website/sitemap.xml index 18bfe5875fd..01dc5c6297e 100644 --- a/website/sitemap.xml +++ b/website/sitemap.xml @@ -3,13 +3,10 @@ https://clickhouse.yandex/docs/ru/sitemap.xml - - https://clickhouse.yandex/docs/ru/single_page/sitemap.xml - https://clickhouse.yandex/docs/en/sitemap.xml - https://clickhouse.yandex/docs/en/single_pabe/sitemap.xml + https://clickhouse.yandex/docs/sitemap_static.xml diff --git a/website/sitemap_static.xml b/website/sitemap_static.xml new file mode 100644 index 00000000000..c1bc94bf00a --- /dev/null +++ b/website/sitemap_static.xml @@ -0,0 +1,23 @@ + + + + https://clickhouse.yandex/ + daily + + + https://clickhouse.yandex/benchmark.html + daily + + + https://clickhouse.yandex/tutorial.html + daily + + + https://clickhouse.yandex/blog/en + daily + + + https://clickhouse.yandex/blog/ru + daily + + \ No newline at end of file
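
The alias_actions plumbing above (MergeTreeBaseBlockInputStream, MergeTreeBlockInputStream, MergeTreeRangeReader, MergeTreeReadPool) is what lets ALIAS columns appear in a PREWHERE clause: the reader first asks alias_actions for its required source columns, materializes the aliases, and only then lets prewhere_actions compute the filter. A minimal sketch of the user-visible behavior, modeled on the 00712 test (table and column names here are illustrative):

    CREATE TABLE prewhere_alias_demo
    (
        a UInt8,
        b Int32,
        c UInt8 ALIAS a,         -- plain alias of a physical column
        e Int32 ALIAS a + b      -- alias over an expression
    ) ENGINE = MergeTree ORDER BY tuple();

    INSERT INTO prewhere_alias_demo VALUES (1, 1);

    -- alias_actions materializes c and e from a and b before
    -- prewhere_actions evaluates the filter over them:
    SELECT b FROM prewhere_alias_demo PREWHERE c = 1;
    SELECT a FROM prewhere_alias_demo PREWHERE e = 2;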
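The new consistency check in MergeTreeDataMergerMutator guards the vertical merge algorithm, which merges the key columns first while recording in the rows_sources file which source part each input row came from (skipped rows are flagged rather than omitted), then gathers the remaining columns by replaying that file. The check compares the write buffer's byte count against sum_input_rows_exact, so, as the exception text implies, each row source occupies a single byte; the single-part, no-skipped-rows case is exempt because the file may legitimately never be written. As a rough worked example against the 00713 test data above (this framing is my reading of the check, not spelled out in the patch): the four inserted blocks carry 1 + 2 + 3 + 3 = 9 rows, so the vertical merge forced by

    OPTIMIZE TABLE collapsing PARTITION tuple() FINAL;

must record exactly 9 row sources, and any mismatch now fails fast with LOGICAL_ERROR instead of silently gathering columns from a misaligned stream.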
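generate_and_test.py now exercises UUID attributes for every external dictionary source (mapped to varchar(36) on the MySQL side), with per-key values added to source.tsv plus explicit and implicit defaults; UUID.reference and UUIDOrDefault.reference hold the expected output. Assuming a generated dictionary named 'flat_uuid_dict' over that data (the real names come from the test generator, so these are illustrative), the reference files correspond to lookups like:

    SELECT dictGetUUID('flat_uuid_dict', 'UUID_', toUInt64(1));
    -- 550e8400-e29b-41d4-a716-446655440001   (per-key value from source.tsv)

    SELECT dictGetUUIDOrDefault('flat_uuid_dict', 'UUID_', toUInt64(42),
                                toUUID('550e8400-e29b-41d4-a716-446655440000'));
    -- absent key 42 falls back to the supplied default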
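The 00647 test pins down what multiplying an aggregate-function -State by an unsigned constant k means: the state is merged with itself k times, with k = 0 producing an empty state. Judging by the reference output, that scales count- and sum-like states, repeats groupArray elements, and leaves the merged result of idempotent-by-construction states (uniq, max, avg) unchanged. A condensed worked sketch drawn from the test:

    SELECT countMerge(x) FROM (SELECT countState() * 2 AS x FROM (SELECT 1));    -- 2
    SELECT countMerge(x) FROM (SELECT countState() * 0 AS x
                               FROM (SELECT 1 UNION ALL SELECT 2));              -- 0 (empty state)
    SELECT sumMerge(y)   FROM (SELECT sumState(x) * 11 AS y
                               FROM (SELECT 1 AS x UNION ALL SELECT 2 AS x));    -- 33 = (1 + 2) * 11
    SELECT maxMerge(x)   FROM (SELECT maxState(3) * 10 AS x);                    -- 3 (max is idempotent)
    SELECT groupArrayMerge(y * 5)
    FROM (SELECT groupArrayState(x) AS y FROM (SELECT 1 AS x));                  -- [1,1,1,1,1]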
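The 00701 test introduces GROUP BY ROLLUP(a, b) and the equivalent postfix GROUP BY ... WITH ROLLUP: besides the ordinary groups, the result gains a subtotal row for every proper prefix of the key list, with the collapsed keys set to their type defaults ('' for String, 0 for numbers), plus a grand-total row. Reading the first reference block as a worked example over the inserted data:

    SELECT a, b, sum(s), count() FROM rollup GROUP BY ROLLUP(a, b) ORDER BY a, b;
    --  ''   0  120  8    <- grand total (both keys collapsed)
    --  'a'  0   70  4    <- subtotal for a = 'a' (b collapsed to its default)
    --  'a'  1   25  2    <- ordinary (a, b) groups: 10 + 15, ...
    --  'a'  2   45  2
    --  'b'  0   50  4    <- subtotal for a = 'b'
    --  'b'  1   15  2
    --  'b'  2   35  2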